Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 672be01..92f021a 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -303,15 +303,6 @@
 
 ---------------------------
 
-What: CONFIG_NF_CT_ACCT
-When: 2.6.29
-Why:  Accounting can now be enabled/disabled without kernel recompilation.
-      Currently used only to set a default value for a feature that is also
-      controlled by a kernel/module/sysfs/sysctl parameter.
-Who:  Krzysztof Piotr Oledzki <ole@ans.pl>
-
----------------------------
-
 What:	sysfs ui for changing p4-clockmod parameters
 When:	September 2009
 Why:	See commits 129f8ae9b1b5be94517da76009ea956e89104ce8 and
diff --git a/Documentation/filesystems/nfs/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt
index 3ba0b94..f2430a7 100644
--- a/Documentation/filesystems/nfs/nfsroot.txt
+++ b/Documentation/filesystems/nfs/nfsroot.txt
@@ -124,6 +124,8 @@
 
   <hostname>	Name of the client. May be supplied by autoconfiguration,
   		but its absence will not trigger autoconfiguration.
+		If specified and DHCP is used, the user-provided hostname is
+		carried in the DHCP request in the hope of updating the DNS record.
 
   		Default: Client IP address is used in ASCII notation.
 
diff --git a/Documentation/isdn/INTERFACE.CAPI b/Documentation/isdn/INTERFACE.CAPI
index f172091..309eb5e 100644
--- a/Documentation/isdn/INTERFACE.CAPI
+++ b/Documentation/isdn/INTERFACE.CAPI
@@ -113,12 +113,16 @@
 int (*load_firmware)(struct capi_ctr *ctrlr, capiloaddata *ldata)
 	(optional) pointer to a callback function for sending firmware and
 	configuration data to the device
+	The function may return before the operation has completed.
+	Completion must be signalled by a call to capi_ctr_ready().
 	Return value: 0 on success, error code on error
 	Called in process context.
 
 void (*reset_ctr)(struct capi_ctr *ctrlr)
-	(optional) pointer to a callback function for performing a reset on
-	the device, releasing all registered applications
+	(optional) pointer to a callback function for stopping the device,
+	releasing all registered applications
+	The function may return before the operation has completed.
+	Completion must be signalled by a call to capi_ctr_down().
 	Called in process context.
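+
+	A minimal sketch of such an asynchronous reset (the mydrv structure,
+	its reset_work member, and the use of driverdata are assumptions of
+	this example, not part of the kernelcapi API; an analogous pattern,
+	signalling completion with capi_ctr_ready(), applies to
+	load_firmware):
+
+	struct mydrv {
+		struct capi_ctr ctrlr;
+		struct work_struct reset_work;	/* INIT_WORK()ed at probe time */
+	};
+
+	static void mydrv_reset_work(struct work_struct *work)
+	{
+		struct mydrv *drv = container_of(work, struct mydrv, reset_work);
+
+		/* stop the hardware and release all registered applications */
+
+		capi_ctr_down(&drv->ctrlr);	/* signal completion */
+	}
+
+	static void mydrv_reset_ctr(struct capi_ctr *ctrlr)
+	{
+		struct mydrv *drv = ctrlr->driverdata;
+
+		/* kick off the actual reset; may return before it completes */
+		schedule_work(&drv->reset_work);
+	}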
 
 void (*register_appl)(struct capi_ctr *ctrlr, u16 applid,
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1808f11..cee6251 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1597,8 +1597,7 @@
 			[NETFILTER] Enable connection tracking flow accounting
 			0 to disable accounting
 			1 to enable accounting
-			Default value depends on CONFIG_NF_CT_ACCT that is
-			going to be removed in 2.6.29.
+			Default value is 0.
 
 	nfsaddrs=	[NFS]
 			See Documentation/filesystems/nfs/nfsroot.txt.
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/README.ipw2200
index 80c7285..e4d3267 100644
--- a/Documentation/networking/README.ipw2200
+++ b/Documentation/networking/README.ipw2200
@@ -171,7 +171,7 @@
   
   led
 	Can be used to turn on experimental LED code.
-	0 = Off, 1 = On.  Default is 0.
+	0 = Off, 1 = On.  Default is 1.
 
   mode
 	Can be used to set the default mode of the adapter.  
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 61f516b..d091478 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -49,6 +49,7 @@
 3.3	Configuring Bonding Manually with Ifenslave
 3.3.1		Configuring Multiple Bonds Manually
 3.4	Configuring Bonding Manually via Sysfs
+3.5	Overriding Configuration for Special Cases
 
 4. Querying Bonding Configuration
 4.1	Bonding Configuration
@@ -1318,8 +1319,87 @@
 echo +eth2 > /sys/class/net/bond1/bonding/slaves
 echo +eth3 > /sys/class/net/bond1/bonding/slaves
 
+3.5 Overriding Configuration for Special Cases
+----------------------------------------------
+When using the bonding driver, the physical port which transmits a frame is
+typically selected by the bonding driver, and is not relevant to the user or
+system administrator.  The output port is simply selected using the policies of
+the selected bonding mode.  On occasion, however, it is helpful to direct certain
+classes of traffic to certain physical interfaces on output to implement
+slightly more complex policies.  For example, to reach a web server over a
+bonded interface in which eth0 connects to a private network, while eth1
+connects via a public network, it may be desirable to bias the bond to send said
+traffic over eth0 first, using eth1 only as a fallback, while all other traffic
+can safely be sent over either interface.  Such configurations may be achieved
+using the traffic control utilities inherent in Linux.
 
-4. Querying Bonding Configuration 
+By default the bonding driver is multiqueue aware and 16 queues are created
+when the driver initializes (see Documentation/networking/multiqueue.txt
+for details).  If more or fewer queues are desired, the module parameter
+tx_queues can be used to change this value.  There is no sysfs parameter
+available as the allocation is done at module init time.
+
+The output of the file /proc/net/bonding/bondX has changed so that the Queue
+ID is now printed for each slave:
+
+Bonding Mode: fault-tolerance (active-backup)
+Primary Slave: None
+Currently Active Slave: eth0
+MII Status: up
+MII Polling Interval (ms): 0
+Up Delay (ms): 0
+Down Delay (ms): 0
+
+Slave Interface: eth0
+MII Status: up
+Link Failure Count: 0
+Permanent HW addr: 00:1a:a0:12:8f:cb
+Slave queue ID: 0
+
+Slave Interface: eth1
+MII Status: up
+Link Failure Count: 0
+Permanent HW addr: 00:1a:a0:12:8f:cc
+Slave queue ID: 2
+
+The queue_id for a slave can be set using the command:
+
+# echo "eth1:2" > /sys/class/net/bond0/bonding/queue_id
+
+Any interface that needs a queue_id set should set it with a call like the
+one above, repeated until proper priorities are set for all interfaces.  On
+distributions that allow configuration via initscripts, multiple 'queue_id'
+arguments can be added to BONDING_OPTS to set all needed slave queues.
+
+These queue IDs can be used in conjunction with the tc utility to configure
+a multiqueue qdisc and filters to bias certain traffic to transmit on certain
+slave devices.  For instance, say we wanted, in the above configuration, to
+force all traffic bound to 192.168.1.100 to use eth1 in the bond as its output
+device. The following commands would accomplish this:
+
+# tc qdisc add dev bond0 handle 1 root multiq
+
+# tc filter add dev bond0 protocol ip parent 1: prio 1 u32 match ip dst \
+	192.168.1.100 action skbedit queue_mapping 2
+
+These commands tell the kernel to attach a multiqueue queue discipline to the
+bond0 interface and filter traffic enqueued to it, such that packets with a dst
+ip of 192.168.1.100 have their output queue mapping value overwritten to 2.
+This value is then passed into the driver, causing the normal output path
+selection policy to be overridden, selecting instead qid 2, which maps to eth1.
+
+Note that qid values begin at 1.  Qid 0 is reserved to indicate to the driver
+that normal output policy selection should take place.  One benefit of simply
+leaving the qid for a slave at 0 is the multiqueue awareness now present in
+the bonding driver.  This awareness allows tc filters to be placed on slave
+devices as well as bond devices, and the bonding driver will simply act as a
+pass-through for selecting output queues on the slave device rather than
+performing output port selection.
+
+This feature first appeared in bonding driver version 3.7.0 and support for
+output slave selection was limited to round-robin and active-backup modes.
+
+4. Querying Bonding Configuration
 =================================
 
 4.1 Bonding Configuration
diff --git a/Documentation/networking/caif/spi_porting.txt b/Documentation/networking/caif/spi_porting.txt
new file mode 100644
index 0000000..61d7c92
--- /dev/null
+++ b/Documentation/networking/caif/spi_porting.txt
@@ -0,0 +1,208 @@
+- CAIF SPI porting -
+
+- CAIF SPI basics:
+
+Running CAIF over SPI needs some extra setup, owing to the nature of SPI.
+Two extra GPIOs have been added in order to negotiate the transfers
+between the master and the slave. The minimum requirement for running
+CAIF over SPI is a SPI slave chip and two GPIOs (more details below).
+Please note that running as a slave implies that you need to keep up
+with the master clock. An overrun or underrun event is fatal.
+
+- CAIF SPI framework:
+
+To make porting as easy as possible, the CAIF SPI has been divided into
+two parts. The first part (called the interface part) deals with all
+generic functionality such as length framing, SPI frame negotiation
+and SPI frame delivery and transmission. The other part is the CAIF
+SPI slave device part, which is the module that you have to write if
+you want to run SPI CAIF on new hardware. This part takes care of
+the physical hardware, both with regard to SPI and to GPIOs.
+
+- Implementing a CAIF SPI device:
+
+	- Functionality provided by the CAIF SPI slave device:
+
+	In order to implement a SPI device you will, as a minimum,
+	need to implement the following
+	functions:
+
+	int (*init_xfer) (struct cfspi_xfer * xfer, struct cfspi_dev *dev):
+
+	This function is called by the CAIF SPI interface to give
+	you a chance to set up your hardware to be ready to receive
+	a stream of data from the master. The xfer structure contains
+	both physical and logical addresses, as well as the total length
+	of the transfer in both directions. The dev parameter can be used
+	to map to different CAIF SPI slave devices.
+
+	void (*sig_xfer) (bool xfer, struct cfspi_dev *dev):
+
+	This function is called by the CAIF SPI interface when the output
+	(SPI_INT) GPIO needs to change state. The boolean value of the xfer
+	variable indicates whether the GPIO should be asserted (HIGH) or
+	deasserted (LOW). The dev parameter can be used to map to different CAIF
+	SPI slave devices.
+
+	- Functionality provided by the CAIF SPI interface:
+
+	void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
+
+	This function is called by the CAIF SPI slave device in order to
+	signal a change of state of the input GPIO (SS) to the interface.
+	Only active edges need to be reported.
+	This function can be called from IRQ context (recommended in order
+	not to introduce latency). The ifc parameter should be the pointer
+	returned from the platform probe function in the SPI device structure.
+
+	void (*xfer_done_cb) (struct cfspi_ifc *ifc);
+
+	This function is called by the CAIF SPI slave device in order to
+	report that a transfer is completed. This function should only be
+	called once both the transmission and the reception are completed.
+	This function can be called from IRQ context (recommended in order
+	not to introduce latency). The ifc parameter should be the pointer
+	returned from the platform probe function in the SPI device structure.
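+
+	As an illustration, a slave device typically invokes these two
+	callbacks from its GPIO interrupt handler and from its DMA (or SPI)
+	completion callback.  A minimal sketch, assuming the CAIF SPI
+	framework has stored the interface pointer in the ifc field of the
+	cfspi_dev structure at probe time (see also the template below):
+
+	static irqreturn_t my_ss_irq(int irq, void *arg)
+	{
+		struct cfspi_dev *sdev = arg;
+
+		/* an active edge was detected on the SS GPIO */
+		sdev->ifc->ss_cb(true, sdev->ifc);
+
+		return IRQ_HANDLED;
+	}
+
+	static void my_xfer_complete(void *context)
+	{
+		struct cfspi_dev *sdev = context;
+
+		/* both transmission and reception are done */
+		sdev->ifc->xfer_done_cb(sdev->ifc);
+	}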
+
+	- Connecting the bits and pieces:
+
+		- Filling in the SPI slave device structure:
+
+		Connect the necessary callback functions.
+		Indicate clock speed (used to calculate toggle delays).
+		Choose a suitable name (helps debugging if you use several CAIF
+		SPI slave devices).
+		Assign your private data (can be used to map to your structure).
+
+		- Filling in the SPI slave platform device structure:
+		Add name of driver to connect to ("cfspi_sspi").
+		Assign the SPI slave device structure as platform data.
+
+- Padding:
+
+In order to optimize throughput, a number of SPI padding options are provided.
+Padding can be enabled independently for uplink and downlink transfers.
+Padding can be enabled for the head, the tail and for the total frame size.
+The padding needs to be correctly configured on both sides of the link.
+The padding can be changed via module parameters in cfspi_sspi.c or via
+the sysfs directory of the cfspi_sspi driver (before device registration).
+
+- CAIF SPI device template:
+
+/*
+ *	Copyright (C) ST-Ericsson AB 2010
+ *	Author: Daniel Martensson / Daniel.Martensson@stericsson.com
+ *	License terms: GNU General Public License (GPL), version 2.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <net/caif/caif_spi.h>
+
+MODULE_LICENSE("GPL");
+
+struct sspi_struct {
+	struct cfspi_dev sdev;
+	struct cfspi_xfer *xfer;
+};
+
+static struct sspi_struct slave;
+static struct platform_device slave_device;
+
+static irqreturn_t sspi_irq(int irq, void *arg)
+{
+	/* You only need to trigger on an edge to the active state of the
+	 * SS signal. Once an edge is detected, the ss_cb() function should be
+	 * called with the parameter assert set to true. It is OK
+	 * (and even advised) to call the ss_cb() function in IRQ context in
+	 * order not to add any delay. */
+
+	return IRQ_HANDLED;
+}
+
+static void sspi_complete(void *context)
+{
+	/* Normally the DMA or the SPI framework will call you back
+	 * in something similar to this. The only thing you need to
+	 * do is to call the xfer_done_cb() function, providing the pointer
+	 * to the CAIF SPI interface. It is OK to call this function
+	 * from IRQ context. */
+}
+
+static int sspi_init_xfer(struct cfspi_xfer *xfer, struct cfspi_dev *dev)
+{
+	/* Store transfer info. For a normal implementation you should
+	 * set up your DMA here and make sure that you are ready to
+	 * receive the data from the master SPI. */
+
+	struct sspi_struct *sspi = (struct sspi_struct *)dev->priv;
+
+	sspi->xfer = xfer;
+
+	return 0;
+}
+
+void sspi_sig_xfer(bool xfer, struct cfspi_dev *dev)
+{
+	/* If xfer is true then you should assert the SPI_INT to indicate to
+	 * the master that you are ready to receive the data from the master
+	 * SPI. If xfer is false then you should de-assert SPI_INT to indicate
+	 * that the transfer is done.
+	 */
+
+	struct sspi_struct *sspi = (struct sspi_struct *)dev->priv;
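+
+	/* A real implementation would typically toggle the SPI_INT GPIO here,
+	 * e.g. with gpio_set_value(<your SPI_INT gpio>, xfer); the GPIO
+	 * number comes from your own platform data and is not part of this
+	 * template. */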
+}
+
+static void sspi_release(struct device *dev)
+{
+	/*
+	 * Here you should release your SPI device resources.
+	 */
+}
+
+static int __init sspi_init(void)
+{
+	/* Here you should initialize your SPI device by providing the
+	 * necessary functions, clock speed, name and private data. Once
+	 * done, you can register your device with the
+	 * platform_device_register() function. This function will return
+	 * with the CAIF SPI interface initialized. This is probably also
+	 * the place where you should set up your GPIOs, interrupts and SPI
+	 * resources. */
+
+	int res = 0;
+
+	/* Initialize slave device. */
+	slave.sdev.init_xfer = sspi_init_xfer;
+	slave.sdev.sig_xfer = sspi_sig_xfer;
+	slave.sdev.clk_mhz = 13;
+	slave.sdev.priv = &slave;
+	slave.sdev.name = "spi_sspi";
+	slave_device.dev.release = sspi_release;
+
+	/* Initialize platform device. */
+	slave_device.name = "cfspi_sspi";
+	slave_device.dev.platform_data = &slave.sdev;
+
+	/* Register platform device. */
+	res = platform_device_register(&slave_device);
+	if (res) {
+		printk(KERN_WARNING "sspi_init: failed to register dev.\n");
+		return -ENODEV;
+	}
+
+	return res;
+}
+
+static void __exit sspi_exit(void)
+{
+	platform_device_del(&slave_device);
+}
+
+module_init(sspi_init);
+module_exit(sspi_exit);
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d0536b5..f350c69 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -903,7 +903,7 @@
 arp_notify - BOOLEAN
 	Define mode for notification of address and device changes.
 	0 - (default): do nothing
-	1 - Generate gratuitous arp replies when device is brought up
+	1 - Generate gratuitous arp requests when device is brought up
 	    or hardware address changes.
 
 arp_accept - BOOLEAN
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 98f71a5..2546aa4 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -493,6 +493,32 @@
     pfd.events = POLLOUT;
     retval = poll(&pfd, 1, timeout);
 
+-------------------------------------------------------------------------------
++ PACKET_TIMESTAMP
+-------------------------------------------------------------------------------
+
+The PACKET_TIMESTAMP setting determines the source of the timestamp in
+the packet meta information.  If your NIC is capable of timestamping
+packets in hardware, you can request those hardware timestamps to be used.
+Note: you may need to enable the generation of hardware timestamps with
+SIOCSHWTSTAMP (a sketch is given below).
+
+PACKET_TIMESTAMP accepts the same integer bit field as
+SO_TIMESTAMPING.  However, only the SOF_TIMESTAMPING_SYS_HARDWARE
+and SOF_TIMESTAMPING_RAW_HARDWARE values are recognized by
+PACKET_TIMESTAMP.  SOF_TIMESTAMPING_SYS_HARDWARE takes precedence over
+SOF_TIMESTAMPING_RAW_HARDWARE if both bits are set.
+
+    int req = 0;
+    req |= SOF_TIMESTAMPING_SYS_HARDWARE;
+    setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, (void *) &req, sizeof(req));
+
+If PACKET_TIMESTAMP is not set, a software timestamp generated inside
+the networking stack is used (the behavior before this setting was added).
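+
+Below is a minimal sketch of enabling hardware timestamp generation with
+SIOCSHWTSTAMP before setting PACKET_TIMESTAMP.  The interface name and the
+choice of rx_filter are examples only, fd is the packet socket used above,
+and the NIC driver must support the request:
+
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <net/if.h>
+    #include <linux/sockios.h>
+    #include <linux/net_tstamp.h>
+
+    struct ifreq ifr;
+    struct hwtstamp_config hwcfg;
+
+    memset(&hwcfg, 0, sizeof(hwcfg));
+    hwcfg.tx_type = HWTSTAMP_TX_OFF;        /* no transmit timestamps needed */
+    hwcfg.rx_filter = HWTSTAMP_FILTER_ALL;  /* timestamp all received packets */
+
+    memset(&ifr, 0, sizeof(ifr));
+    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+    ifr.ifr_data = (void *) &hwcfg;
+
+    if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
+        perror("SIOCSHWTSTAMP");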
+
+See include/linux/net_tstamp.h and Documentation/networking/timestamping
+for more information on hardware timestamps.
+
 --------------------------------------------------------------------------------
 + THANKS
 --------------------------------------------------------------------------------
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index 61bb645..75e4fd7 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -151,6 +151,8 @@
 
  pgset stop    	          aborts injection. Also, ^C aborts generator.
 
+ pgset "rate 300M"        set rate to 300 Mb/s
+ pgset "ratep 1000000"    set rate to 1Mpps
 
 Example scripts
 ===============
@@ -241,6 +243,9 @@
 flows
 flowlen
 
+rate
+ratep
+
 References:
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/examples/
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index 48c4f03..81e1f7d 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -101,10 +101,7 @@
  * MicroBlaze doesn't handle unaligned accesses in hardware.
  *
  * Based on this we force the IP header alignment in network drivers.
- * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
- * cacheline alignment of buffers.
  */
 #define NET_IP_ALIGN	2
-#define NET_SKB_PAD	L1_CACHE_BYTES
 
 #endif /* _ASM_MICROBLAZE_SYSTEM_H */
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index a6297c6..6c294ac 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -515,11 +515,8 @@
  * powers of 2 writes until it reaches sufficient alignment).
  *
  * Based on this we disable the IP header alignment in network drivers.
- * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
- * cacheline alignment of buffers.
  */
 #define NET_IP_ALIGN	0
-#define NET_SKB_PAD	L1_CACHE_BYTES
 
 #define cmpxchg64(ptr, o, n)						\
   ({									\
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 9a9586f..f02e89c 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -85,7 +85,8 @@
 
 	rcu_read_lock();
 	for_each_netdev_rcu(&init_net, dev) {
-		const struct net_device_stats *stats = dev_get_stats(dev);
+		struct rtnl_link_stats64 temp;
+		const struct net_device_stats *stats = dev_get_stats(dev, &temp);
 
 		rx_packets += stats->rx_packets;
 		tx_packets += stats->tx_packets;
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index b8fe48e..1db9bd2 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -457,4 +457,11 @@
 	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
 }
 
+/*
+ * We handle most unaligned accesses in hardware.  On the other hand
+ * unaligned DMA can be quite expensive on some Nehalem processors.
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN	0
 #endif /* _ASM_X86_SYSTEM_H */
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index f1a0a00..be7461c 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -177,7 +177,7 @@
 
 config ATM_NICSTAR
 	tristate "IDT 77201 (NICStAR) (ForeRunnerLE)"
-	depends on PCI && !64BIT && VIRT_TO_BUS
+	depends on PCI
 	help
 	  The NICStAR chipset family is used in a large number of ATM NICs for
 	  25 and for 155 Mbps, including IDT cards and the Fore ForeRunnerLE
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 56c2e99..ea9cbe5 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -67,6 +67,7 @@
 #include <linux/timer.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
+#include <linux/bitmap.h>
 #include <linux/slab.h>
 #include <asm/io.h>
 #include <asm/byteorder.h>
@@ -778,61 +779,39 @@
 static int __devinit
 he_init_group(struct he_dev *he_dev, int group)
 {
+	struct he_buff *heb, *next;
+	dma_addr_t mapping;
 	int i;
 
-	/* small buffer pool */
-	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
-			CONFIG_RBPS_BUFSIZE, 8, 0);
-	if (he_dev->rbps_pool == NULL) {
-		hprintk("unable to create rbps pages\n");
+	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
+	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
+	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
+	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
+		  G0_RBPS_BS + (group * 32));
+
+	/* bitmap table */
+	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
+				     * sizeof(unsigned long), GFP_KERNEL);
+	if (!he_dev->rbpl_table) {
+		hprintk("unable to allocate rbpl bitmap table\n");
 		return -ENOMEM;
 	}
+	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
 
-	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
-		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
-	if (he_dev->rbps_base == NULL) {
-		hprintk("failed to alloc rbps_base\n");
-		goto out_destroy_rbps_pool;
+	/* rbpl_virt 64-bit pointers */
+	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
+				    * sizeof(struct he_buff *), GFP_KERNEL);
+	if (!he_dev->rbpl_virt) {
+		hprintk("unable to allocate rbpl virt table\n");
+		goto out_free_rbpl_table;
 	}
-	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
-	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
-	if (he_dev->rbps_virt == NULL) {
-		hprintk("failed to alloc rbps_virt\n");
-		goto out_free_rbps_base;
-	}
-
-	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
-		dma_addr_t dma_handle;
-		void *cpuaddr;
-
-		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
-		if (cpuaddr == NULL)
-			goto out_free_rbps_virt;
-
-		he_dev->rbps_virt[i].virt = cpuaddr;
-		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
-		he_dev->rbps_base[i].phys = dma_handle;
-
-	}
-	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
-
-	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
-	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
-						G0_RBPS_T + (group * 32));
-	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
-						G0_RBPS_BS + (group * 32));
-	he_writel(he_dev,
-			RBP_THRESH(CONFIG_RBPS_THRESH) |
-			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
-			RBP_INT_ENB,
-						G0_RBPS_QI + (group * 32));
 
 	/* large buffer pool */
 	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
-			CONFIG_RBPL_BUFSIZE, 8, 0);
+					    CONFIG_RBPL_BUFSIZE, 64, 0);
 	if (he_dev->rbpl_pool == NULL) {
 		hprintk("unable to create rbpl pool\n");
-		goto out_free_rbps_virt;
+		goto out_free_rbpl_virt;
 	}
 
 	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
@@ -842,30 +821,29 @@
 		goto out_destroy_rbpl_pool;
 	}
 	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
-	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
-	if (he_dev->rbpl_virt == NULL) {
-		hprintk("failed to alloc rbpl_virt\n");
-		goto out_free_rbpl_base;
-	}
+
+	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
 
 	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
-		dma_addr_t dma_handle;
-		void *cpuaddr;
 
-		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
-		if (cpuaddr == NULL)
-			goto out_free_rbpl_virt;
+		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
+		if (!heb)
+			goto out_free_rbpl;
+		heb->mapping = mapping;
+		list_add(&heb->entry, &he_dev->rbpl_outstanding);
 
-		he_dev->rbpl_virt[i].virt = cpuaddr;
-		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
-		he_dev->rbpl_base[i].phys = dma_handle;
+		set_bit(i, he_dev->rbpl_table);
+		he_dev->rbpl_virt[i] = heb;
+		he_dev->rbpl_hint = i + 1;
+		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
+		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
 	}
 	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
 
 	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
 	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
 						G0_RBPL_T + (group * 32));
-	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
+	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
 						G0_RBPL_BS + (group * 32));
 	he_writel(he_dev,
 			RBP_THRESH(CONFIG_RBPL_THRESH) |
@@ -879,7 +857,7 @@
 		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
 	if (he_dev->rbrq_base == NULL) {
 		hprintk("failed to allocate rbrq\n");
-		goto out_free_rbpl_virt;
+		goto out_free_rbpl;
 	}
 	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
 
@@ -920,33 +898,20 @@
 	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
 			sizeof(struct he_rbrq), he_dev->rbrq_base,
 			he_dev->rbrq_phys);
-	i = CONFIG_RBPL_SIZE;
-out_free_rbpl_virt:
-	while (i--)
-		pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt,
-				he_dev->rbpl_base[i].phys);
-	kfree(he_dev->rbpl_virt);
+out_free_rbpl:
+	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
+		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 
-out_free_rbpl_base:
 	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
 			sizeof(struct he_rbp), he_dev->rbpl_base,
 			he_dev->rbpl_phys);
 out_destroy_rbpl_pool:
 	pci_pool_destroy(he_dev->rbpl_pool);
+out_free_rbpl_virt:
+	kfree(he_dev->rbpl_virt);
+out_free_rbpl_table:
+	kfree(he_dev->rbpl_table);
 
-	i = CONFIG_RBPS_SIZE;
-out_free_rbps_virt:
-	while (i--)
-		pci_pool_free(he_dev->rbps_pool, he_dev->rbps_virt[i].virt,
-				he_dev->rbps_base[i].phys);
-	kfree(he_dev->rbps_virt);
-
-out_free_rbps_base:
-	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE *
-			sizeof(struct he_rbp), he_dev->rbps_base,
-			he_dev->rbps_phys);
-out_destroy_rbps_pool:
-	pci_pool_destroy(he_dev->rbps_pool);
 	return -ENOMEM;
 }
 
@@ -1576,9 +1541,10 @@
 static void
 he_stop(struct he_dev *he_dev)
 {
-	u16 command;
-	u32 gen_cntl_0, reg;
+	struct he_buff *heb, *next;
 	struct pci_dev *pci_dev;
+	u32 gen_cntl_0, reg;
+	u16 command;
 
 	pci_dev = he_dev->pci_dev;
 
@@ -1619,37 +1585,19 @@
 						he_dev->hsp, he_dev->hsp_phys);
 
 	if (he_dev->rbpl_base) {
-		int i;
+		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
+			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 
-		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
-			void *cpuaddr = he_dev->rbpl_virt[i].virt;
-			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
-
-			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
-		}
 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
 			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
 	}
 
+	kfree(he_dev->rbpl_virt);
+	kfree(he_dev->rbpl_table);
+
 	if (he_dev->rbpl_pool)
 		pci_pool_destroy(he_dev->rbpl_pool);
 
-	if (he_dev->rbps_base) {
-		int i;
-
-		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
-			void *cpuaddr = he_dev->rbps_virt[i].virt;
-			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
-
-			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
-		}
-		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
-			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
-	}
-
-	if (he_dev->rbps_pool)
-		pci_pool_destroy(he_dev->rbps_pool);
-
 	if (he_dev->rbrq_base)
 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
 							he_dev->rbrq_base, he_dev->rbrq_phys);
@@ -1679,13 +1627,13 @@
 __alloc_tpd(struct he_dev *he_dev)
 {
 	struct he_tpd *tpd;
-	dma_addr_t dma_handle; 
+	dma_addr_t mapping;
 
-	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
+	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
 	if (tpd == NULL)
 		return NULL;
 			
-	tpd->status = TPD_ADDR(dma_handle);
+	tpd->status = TPD_ADDR(mapping);
 	tpd->reserved = 0; 
 	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
 	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
@@ -1714,13 +1662,12 @@
 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
 				((unsigned long)he_dev->rbrq_base |
 					he_dev->hsp->group[group].rbrq_tail);
-	struct he_rbp *rbp = NULL;
 	unsigned cid, lastcid = -1;
-	unsigned buf_len = 0;
 	struct sk_buff *skb;
 	struct atm_vcc *vcc = NULL;
 	struct he_vcc *he_vcc;
-	struct he_iovec *iov;
+	struct he_buff *heb, *next;
+	int i;
 	int pdus_assembled = 0;
 	int updated = 0;
 
@@ -1740,44 +1687,35 @@
 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
 
-		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
-			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
-		else
-			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
-		
-		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
-		cid = RBRQ_CID(he_dev->rbrq_head);
+		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
+		heb = he_dev->rbpl_virt[i];
 
+		cid = RBRQ_CID(he_dev->rbrq_head);
 		if (cid != lastcid)
 			vcc = __find_vcc(he_dev, cid);
 		lastcid = cid;
 
-		if (vcc == NULL) {
-			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
-			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
-					rbp->status &= ~RBP_LOANED;
+		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
+			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
+			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
+				clear_bit(i, he_dev->rbpl_table);
+				list_del(&heb->entry);
+				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+			}
 					
 			goto next_rbrq_entry;
 		}
 
-		he_vcc = HE_VCC(vcc);
-		if (he_vcc == NULL) {
-			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
-			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
-					rbp->status &= ~RBP_LOANED;
-			goto next_rbrq_entry;
-		}
-
 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
 				atomic_inc(&vcc->stats->rx_drop);
 			goto return_host_buffers;
 		}
 
-		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
-		he_vcc->iov_tail->iov_len = buf_len;
-		he_vcc->pdu_len += buf_len;
-		++he_vcc->iov_tail;
+		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
+		clear_bit(i, he_dev->rbpl_table);
+		list_move_tail(&heb->entry, &he_vcc->buffers);
+		he_vcc->pdu_len += heb->len;
 
 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
 			lastcid = -1;
@@ -1786,12 +1724,6 @@
 			goto return_host_buffers;
 		}
 
-#ifdef notdef
-		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
-			hprintk("iovec full!  cid 0x%x\n", cid);
-			goto return_host_buffers;
-		}
-#endif
 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
 			goto next_rbrq_entry;
 
@@ -1819,15 +1751,8 @@
 
 		__net_timestamp(skb);
 
-		for (iov = he_vcc->iov_head;
-				iov < he_vcc->iov_tail; ++iov) {
-			if (iov->iov_base & RBP_SMALLBUF)
-				memcpy(skb_put(skb, iov->iov_len),
-					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
-			else
-				memcpy(skb_put(skb, iov->iov_len),
-					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
-		}
+		list_for_each_entry(heb, &he_vcc->buffers, entry)
+			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
 
 		switch (vcc->qos.aal) {
 			case ATM_AAL0:
@@ -1867,17 +1792,9 @@
 return_host_buffers:
 		++pdus_assembled;
 
-		for (iov = he_vcc->iov_head;
-				iov < he_vcc->iov_tail; ++iov) {
-			if (iov->iov_base & RBP_SMALLBUF)
-				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
-			else
-				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
-
-			rbp->status &= ~RBP_LOANED;
-		}
-
-		he_vcc->iov_tail = he_vcc->iov_head;
+		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
+			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+		INIT_LIST_HEAD(&he_vcc->buffers);
 		he_vcc->pdu_len = 0;
 
 next_rbrq_entry:
@@ -1978,27 +1895,46 @@
 	}
 }
 
-
 static void
 he_service_rbpl(struct he_dev *he_dev, int group)
 {
-	struct he_rbp *newtail;
+	struct he_rbp *new_tail;
 	struct he_rbp *rbpl_head;
+	struct he_buff *heb;
+	dma_addr_t mapping;
+	int i;
 	int moved = 0;
 
 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
 
 	for (;;) {
-		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
+		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
 						RBPL_MASK(he_dev->rbpl_tail+1));
 
 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
-		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
+		if (new_tail == rbpl_head)
 			break;
 
-		newtail->status |= RBP_LOANED;
-		he_dev->rbpl_tail = newtail;
+		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
+		if (i > (RBPL_TABLE_SIZE - 1)) {
+			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
+			if (i > (RBPL_TABLE_SIZE - 1))
+				break;
+		}
+		he_dev->rbpl_hint = i + 1;
+
+		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
+		if (!heb)
+			break;
+		heb->mapping = mapping;
+		list_add(&heb->entry, &he_dev->rbpl_outstanding);
+		he_dev->rbpl_virt[i] = heb;
+		set_bit(i, he_dev->rbpl_table);
+		new_tail->idx = i << RBP_IDX_OFFSET;
+		new_tail->phys = mapping + offsetof(struct he_buff, data);
+
+		he_dev->rbpl_tail = new_tail;
 		++moved;
 	} 
 
@@ -2007,33 +1943,6 @@
 }
 
 static void
-he_service_rbps(struct he_dev *he_dev, int group)
-{
-	struct he_rbp *newtail;
-	struct he_rbp *rbps_head;
-	int moved = 0;
-
-	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
-					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
-
-	for (;;) {
-		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
-						RBPS_MASK(he_dev->rbps_tail+1));
-
-		/* table 3.42 -- rbps_tail should never be set to rbps_head */
-		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
-			break;
-
-		newtail->status |= RBP_LOANED;
-		he_dev->rbps_tail = newtail;
-		++moved;
-	} 
-
-	if (moved)
-		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
-}
-
-static void
 he_tasklet(unsigned long data)
 {
 	unsigned long flags;
@@ -2055,10 +1964,8 @@
 				HPRINTK("rbrq%d threshold\n", group);
 				/* fall through */
 			case ITYPE_RBRQ_TIMER:
-				if (he_service_rbrq(he_dev, group)) {
+				if (he_service_rbrq(he_dev, group))
 					he_service_rbpl(he_dev, group);
-					he_service_rbps(he_dev, group);
-				}
 				break;
 			case ITYPE_TBRQ_THRESH:
 				HPRINTK("tbrq%d threshold\n", group);
@@ -2070,7 +1977,7 @@
 				he_service_rbpl(he_dev, group);
 				break;
 			case ITYPE_RBPS_THRESH:
-				he_service_rbps(he_dev, group);
+				/* shouldn't happen unless small buffers enabled */
 				break;
 			case ITYPE_PHY:
 				HPRINTK("phy interrupt\n");
@@ -2098,7 +2005,6 @@
 
 				he_service_rbrq(he_dev, 0);
 				he_service_rbpl(he_dev, 0);
-				he_service_rbps(he_dev, 0);
 				he_service_tbrq(he_dev, 0);
 				break;
 			default:
@@ -2252,7 +2158,7 @@
 		return -ENOMEM;
 	}
 
-	he_vcc->iov_tail = he_vcc->iov_head;
+	INIT_LIST_HEAD(&he_vcc->buffers);
 	he_vcc->pdu_len = 0;
 	he_vcc->rc_index = -1;
 
@@ -2406,8 +2312,8 @@
 			goto open_failed;
 		}
 
-		rsr1 = RSR1_GROUP(0);
-		rsr4 = RSR4_GROUP(0);
+		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
+		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? 
 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
 
diff --git a/drivers/atm/he.h b/drivers/atm/he.h
index c2983e0..110a27d 100644
--- a/drivers/atm/he.h
+++ b/drivers/atm/he.h
@@ -67,11 +67,6 @@
 #define CONFIG_RBPL_BUFSIZE	4096
 #define RBPL_MASK(x)		(((unsigned long)(x))&((CONFIG_RBPL_SIZE<<3)-1))
 
-#define CONFIG_RBPS_SIZE	1024
-#define CONFIG_RBPS_THRESH	64
-#define CONFIG_RBPS_BUFSIZE	128
-#define RBPS_MASK(x)		(((unsigned long)(x))&((CONFIG_RBPS_SIZE<<3)-1))
-
 /* 5.1.3 initialize connection memory */
 
 #define CONFIG_RSRA		0x00000
@@ -203,36 +198,37 @@
 	} group[HE_NUM_GROUPS];
 };
 
-/* figure 2.9 receive buffer pools */
+/*
+ * figure 2.9 receive buffer pools
+ *
+ * since a virtual address might be more than 32 bits, we store an index
+ * in the idx member of he_rbp.  NOTE: the lower six bits in the rbrq
+ * addr member are used for buffer status, further limiting us to 26 bits.
+ */
 
 struct he_rbp {
 	volatile u32 phys;
-	volatile u32 status;
+	volatile u32 idx;	/* virt */
 };
 
-/* NOTE: it is suggested that virt be the virtual address of the host
-   buffer.  on a 64-bit machine, this would not work.  Instead, we
-   store the real virtual address in another list, and store an index
-   (and buffer status) in the virt member.
-*/
+#define RBP_IDX_OFFSET 6
 
-#define RBP_INDEX_OFF	6
-#define RBP_INDEX(x)	(((long)(x) >> RBP_INDEX_OFF) & 0xffff)
-#define RBP_LOANED	0x80000000
-#define RBP_SMALLBUF	0x40000000
+/*
+ * the he dma engine will try to hold an extra 16 buffers in its local
+ * caches, so add a couple of buffers for safety.
+ */
 
-struct he_virt {
-	void *virt;
+#define RBPL_TABLE_SIZE (CONFIG_RBPL_SIZE + 16 + 2)
+
+struct he_buff {
+	struct list_head entry;
+	dma_addr_t mapping;
+	unsigned long len;
+	u8 data[];
 };
 
-#define RBPL_ALIGNMENT CONFIG_RBPL_SIZE
-#define RBPS_ALIGNMENT CONFIG_RBPS_SIZE
-
 #ifdef notyet
 struct he_group {
-	u32 rpbs_size, rpbs_qsize;
-	struct he_rbp rbps_ba;
-
 	u32 rpbl_size, rpbl_qsize;
 	struct he_rpb_entry *rbpl_ba;
 };
@@ -297,18 +293,15 @@
 	struct he_rbrq *rbrq_base, *rbrq_head;
 	int rbrq_peak;
 
+	struct he_buff **rbpl_virt;
+	unsigned long *rbpl_table;
+	unsigned long rbpl_hint;
 	struct pci_pool *rbpl_pool;
 	dma_addr_t rbpl_phys;
 	struct he_rbp *rbpl_base, *rbpl_tail;
-	struct he_virt *rbpl_virt;
+	struct list_head rbpl_outstanding;
 	int rbpl_peak;
 
-	struct pci_pool *rbps_pool;
-	dma_addr_t rbps_phys;
-	struct he_rbp *rbps_base, *rbps_tail;
-	struct he_virt *rbps_virt;
-	int rbps_peak;
-
 	dma_addr_t tbrq_phys;
 	struct he_tbrq *tbrq_base, *tbrq_head;
 	int tbrq_peak;
@@ -321,20 +314,12 @@
 	struct he_dev *next;
 };
 
-struct he_iovec
-{
-	u32 iov_base;
-	u32 iov_len;
-};
-
 #define HE_MAXIOV 20
 
 struct he_vcc
 {
-	struct he_iovec iov_head[HE_MAXIOV];
-	struct he_iovec *iov_tail;
+	struct list_head buffers;
 	int pdu_len;
-
 	int rc_index;
 
 	wait_queue_head_t rx_waitq;
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index b7473a6..59876c6 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -1,5 +1,4 @@
-/******************************************************************************
- *
+/*
  * nicstar.c
  *
  * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards.
@@ -16,12 +15,10 @@
  *
  *
  * (C) INESC 1999
- *
- *
- ******************************************************************************/
+ */
 
-
-/**** IMPORTANT INFORMATION ***************************************************
+/*
+ * IMPORTANT INFORMATION
  *
  * There are currently three types of spinlocks:
  *
@@ -31,9 +28,9 @@
  *
  * These must NEVER be grabbed in reverse order.
  *
- ******************************************************************************/
+ */
 
-/* Header files ***************************************************************/
+/* Header files */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -41,6 +38,7 @@
 #include <linux/atmdev.h>
 #include <linux/atm.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/delay.h>
@@ -50,6 +48,7 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
+#include <linux/idr.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
@@ -61,16 +60,11 @@
 #include "idt77105.h"
 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
 
-#if BITS_PER_LONG != 32
-#  error FIXME: this driver requires a 32-bit platform
-#endif
-
-/* Additional code ************************************************************/
+/* Additional code */
 
 #include "nicstarmac.c"
 
-
-/* Configurable parameters ****************************************************/
+/* Configurable parameters */
 
 #undef PHY_LOOPBACK
 #undef TX_DEBUG
@@ -78,11 +72,10 @@
 #undef GENERAL_DEBUG
 #undef EXTRA_DEBUG
 
-#undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
-                             you're going to use only raw ATM */
+#undef NS_USE_DESTRUCTORS	/* For now keep this undefined unless you know
+				   you're going to use only raw ATM */
 
-
-/* Do not touch these *********************************************************/
+/* Do not touch these */
 
 #ifdef TX_DEBUG
 #define TXPRINTK(args...) printk(args)
@@ -108,2908 +101,2786 @@
 #define XPRINTK(args...)
 #endif /* EXTRA_DEBUG */
 
-
-/* Macros *********************************************************************/
+/* Macros */
 
 #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)
 
 #define NS_DELAY mdelay(1)
 
-#define ALIGN_BUS_ADDR(addr, alignment) \
-        ((((u32) (addr)) + (((u32) (alignment)) - 1)) & ~(((u32) (alignment)) - 1))
-#define ALIGN_ADDRESS(addr, alignment) \
-        bus_to_virt(ALIGN_BUS_ADDR(virt_to_bus(addr), alignment))
-
-#undef CEIL
+#define PTR_DIFF(a, b)	((u32)((unsigned long)(a) - (unsigned long)(b)))
 
 #ifndef ATM_SKB
 #define ATM_SKB(s) (&(s)->atm)
 #endif
 
+#define scq_virt_to_bus(scq, p) \
+		(scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
 
-/* Function declarations ******************************************************/
+/* Function declarations */
 
-static u32 ns_read_sram(ns_dev *card, u32 sram_address);
-static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count);
+static u32 ns_read_sram(ns_dev * card, u32 sram_address);
+static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
+			  int count);
 static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
-static void __devinit ns_init_card_error(ns_dev *card, int error);
-static scq_info *get_scq(int size, u32 scd);
-static void free_scq(scq_info *scq, struct atm_vcc *vcc);
+static void __devinit ns_init_card_error(ns_dev * card, int error);
+static scq_info *get_scq(ns_dev *card, int size, u32 scd);
+static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
 static void push_rxbufs(ns_dev *, struct sk_buff *);
 static irqreturn_t ns_irq_handler(int irq, void *dev_id);
 static int ns_open(struct atm_vcc *vcc);
 static void ns_close(struct atm_vcc *vcc);
-static void fill_tst(ns_dev *card, int n, vc_map *vc);
+static void fill_tst(ns_dev * card, int n, vc_map * vc);
 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
-static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
-                     struct sk_buff *skb);
-static void process_tsq(ns_dev *card);
-static void drain_scq(ns_dev *card, scq_info *scq, int pos);
-static void process_rsq(ns_dev *card);
-static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe);
+static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
+		     struct sk_buff *skb);
+static void process_tsq(ns_dev * card);
+static void drain_scq(ns_dev * card, scq_info * scq, int pos);
+static void process_rsq(ns_dev * card);
+static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
 #ifdef NS_USE_DESTRUCTORS
 static void ns_sb_destructor(struct sk_buff *sb);
 static void ns_lb_destructor(struct sk_buff *lb);
 static void ns_hb_destructor(struct sk_buff *hb);
 #endif /* NS_USE_DESTRUCTORS */
-static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb);
-static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count);
-static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb);
-static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb);
-static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb);
-static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
-static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
-static void which_list(ns_dev *card, struct sk_buff *skb);
+static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
+static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
+static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
+static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb);
+static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb);
+static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page);
+static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
+#ifdef EXTRA_DEBUG
+static void which_list(ns_dev * card, struct sk_buff *skb);
+#endif
 static void ns_poll(unsigned long arg);
 static int ns_parse_mac(char *mac, unsigned char *esi);
 static short ns_h2i(char c);
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
-                       unsigned long addr);
+		       unsigned long addr);
 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
 
-
-
-/* Global variables ***********************************************************/
+/* Global variables */
 
 static struct ns_dev *cards[NS_MAX_CARDS];
 static unsigned num_cards;
-static struct atmdev_ops atm_ops =
-{
-   .open	= ns_open,
-   .close	= ns_close,
-   .ioctl	= ns_ioctl,
-   .send	= ns_send,
-   .phy_put	= ns_phy_put,
-   .phy_get	= ns_phy_get,
-   .proc_read	= ns_proc_read,
-   .owner	= THIS_MODULE,
+static struct atmdev_ops atm_ops = {
+	.open = ns_open,
+	.close = ns_close,
+	.ioctl = ns_ioctl,
+	.send = ns_send,
+	.phy_put = ns_phy_put,
+	.phy_get = ns_phy_get,
+	.proc_read = ns_proc_read,
+	.owner = THIS_MODULE,
 };
+
 static struct timer_list ns_timer;
 static char *mac[NS_MAX_CARDS];
 module_param_array(mac, charp, NULL, 0);
 MODULE_LICENSE("GPL");
 
-
-/* Functions*******************************************************************/
+/* Functions */
 
 static int __devinit nicstar_init_one(struct pci_dev *pcidev,
 				      const struct pci_device_id *ent)
 {
-   static int index = -1;
-   unsigned int error;
+	static int index = -1;
+	unsigned int error;
 
-   index++;
-   cards[index] = NULL;
+	index++;
+	cards[index] = NULL;
 
-   error = ns_init_card(index, pcidev);
-   if (error) {
-      cards[index--] = NULL;	/* don't increment index */
-      goto err_out;
-   }
+	error = ns_init_card(index, pcidev);
+	if (error) {
+		cards[index--] = NULL;	/* don't increment index */
+		goto err_out;
+	}
 
-   return 0;
+	return 0;
 err_out:
-   return -ENODEV;
+	return -ENODEV;
 }
 
-
-
 static void __devexit nicstar_remove_one(struct pci_dev *pcidev)
 {
-   int i, j;
-   ns_dev *card = pci_get_drvdata(pcidev);
-   struct sk_buff *hb;
-   struct sk_buff *iovb;
-   struct sk_buff *lb;
-   struct sk_buff *sb;
-   
-   i = card->index;
+	int i, j;
+	ns_dev *card = pci_get_drvdata(pcidev);
+	struct sk_buff *hb;
+	struct sk_buff *iovb;
+	struct sk_buff *lb;
+	struct sk_buff *sb;
 
-   if (cards[i] == NULL)
-      return;
+	i = card->index;
 
-   if (card->atmdev->phy && card->atmdev->phy->stop)
-      card->atmdev->phy->stop(card->atmdev);
+	if (cards[i] == NULL)
+		return;
 
-   /* Stop everything */
-   writel(0x00000000, card->membase + CFG);
+	if (card->atmdev->phy && card->atmdev->phy->stop)
+		card->atmdev->phy->stop(card->atmdev);
 
-   /* De-register device */
-   atm_dev_deregister(card->atmdev);
+	/* Stop everything */
+	writel(0x00000000, card->membase + CFG);
 
-   /* Disable PCI device */
-   pci_disable_device(pcidev);
-   
-   /* Free up resources */
-   j = 0;
-   PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
-   while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
-   {
-      dev_kfree_skb_any(hb);
-      j++;
-   }
-   PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
-   j = 0;
-   PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count);
-   while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
-   {
-      dev_kfree_skb_any(iovb);
-      j++;
-   }
-   PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
-   while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
-      dev_kfree_skb_any(lb);
-   while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
-      dev_kfree_skb_any(sb);
-   free_scq(card->scq0, NULL);
-   for (j = 0; j < NS_FRSCD_NUM; j++)
-   {
-      if (card->scd2vc[j] != NULL)
-         free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
-   }
-   kfree(card->rsq.org);
-   kfree(card->tsq.org);
-   free_irq(card->pcidev->irq, card);
-   iounmap(card->membase);
-   kfree(card);
+	/* De-register device */
+	atm_dev_deregister(card->atmdev);
+
+	/* Disable PCI device */
+	pci_disable_device(pcidev);
+
+	/* Free up resources */
+	j = 0;
+	PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
+	while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) {
+		dev_kfree_skb_any(hb);
+		j++;
+	}
+	PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
+	j = 0;
+	PRINTK("nicstar%d: freeing %d iovec buffers.\n", i,
+	       card->iovpool.count);
+	while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) {
+		dev_kfree_skb_any(iovb);
+		j++;
+	}
+	PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
+	while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
+		dev_kfree_skb_any(lb);
+	while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
+		dev_kfree_skb_any(sb);
+	free_scq(card, card->scq0, NULL);
+	for (j = 0; j < NS_FRSCD_NUM; j++) {
+		if (card->scd2vc[j] != NULL)
+			free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
+	}
+	idr_remove_all(&card->idr);
+	idr_destroy(&card->idr);
+	pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+			    card->rsq.org, card->rsq.dma);
+	pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+			    card->tsq.org, card->tsq.dma);
+	free_irq(card->pcidev->irq, card);
+	iounmap(card->membase);
+	kfree(card);
 }
 
-
-
-static struct pci_device_id nicstar_pci_tbl[] __devinitdata =
-{
+static struct pci_device_id nicstar_pci_tbl[] __devinitdata = {
 	{PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_IDT_IDT77201,
 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{0,}			/* terminate list */
 };
+
 MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);
 
-
-
 static struct pci_driver nicstar_driver = {
-	.name		= "nicstar",
-	.id_table	= nicstar_pci_tbl,
-	.probe		= nicstar_init_one,
-	.remove		= __devexit_p(nicstar_remove_one),
+	.name = "nicstar",
+	.id_table = nicstar_pci_tbl,
+	.probe = nicstar_init_one,
+	.remove = __devexit_p(nicstar_remove_one),
 };
 
-
-
 static int __init nicstar_init(void)
 {
-   unsigned error = 0;	/* Initialized to remove compile warning */
+	unsigned error = 0;	/* Initialized to remove compile warning */
 
-   XPRINTK("nicstar: nicstar_init() called.\n");
+	XPRINTK("nicstar: nicstar_init() called.\n");
 
-   error = pci_register_driver(&nicstar_driver);
-   
-   TXPRINTK("nicstar: TX debug enabled.\n");
-   RXPRINTK("nicstar: RX debug enabled.\n");
-   PRINTK("nicstar: General debug enabled.\n");
+	error = pci_register_driver(&nicstar_driver);
+
+	TXPRINTK("nicstar: TX debug enabled.\n");
+	RXPRINTK("nicstar: RX debug enabled.\n");
+	PRINTK("nicstar: General debug enabled.\n");
 #ifdef PHY_LOOPBACK
-   printk("nicstar: using PHY loopback.\n");
+	printk("nicstar: using PHY loopback.\n");
 #endif /* PHY_LOOPBACK */
-   XPRINTK("nicstar: nicstar_init() returned.\n");
+	XPRINTK("nicstar: nicstar_init() returned.\n");
 
-   if (!error) {
-      init_timer(&ns_timer);
-      ns_timer.expires = jiffies + NS_POLL_PERIOD;
-      ns_timer.data = 0UL;
-      ns_timer.function = ns_poll;
-      add_timer(&ns_timer);
-   }
-   
-   return error;
+	if (!error) {
+		init_timer(&ns_timer);
+		ns_timer.expires = jiffies + NS_POLL_PERIOD;
+		ns_timer.data = 0UL;
+		ns_timer.function = ns_poll;
+		add_timer(&ns_timer);
+	}
+
+	return error;
 }
 
-
-
 static void __exit nicstar_cleanup(void)
 {
-   XPRINTK("nicstar: nicstar_cleanup() called.\n");
+	XPRINTK("nicstar: nicstar_cleanup() called.\n");
 
-   del_timer(&ns_timer);
+	del_timer(&ns_timer);
 
-   pci_unregister_driver(&nicstar_driver);
+	pci_unregister_driver(&nicstar_driver);
 
-   XPRINTK("nicstar: nicstar_cleanup() returned.\n");
+	XPRINTK("nicstar: nicstar_cleanup() returned.\n");
 }
 
-
-
-static u32 ns_read_sram(ns_dev *card, u32 sram_address)
+static u32 ns_read_sram(ns_dev * card, u32 sram_address)
 {
-   unsigned long flags;
-   u32 data;
-   sram_address <<= 2;
-   sram_address &= 0x0007FFFC;	/* address must be dword aligned */
-   sram_address |= 0x50000000;	/* SRAM read command */
-   spin_lock_irqsave(&card->res_lock, flags);
-   while (CMD_BUSY(card));
-   writel(sram_address, card->membase + CMD);
-   while (CMD_BUSY(card));
-   data = readl(card->membase + DR0);
-   spin_unlock_irqrestore(&card->res_lock, flags);
-   return data;
+	unsigned long flags;
+	u32 data;
+	sram_address <<= 2;
+	sram_address &= 0x0007FFFC;	/* address must be dword aligned */
+	sram_address |= 0x50000000;	/* SRAM read command */
+	spin_lock_irqsave(&card->res_lock, flags);
+	while (CMD_BUSY(card)) ;
+	writel(sram_address, card->membase + CMD);
+	while (CMD_BUSY(card)) ;
+	data = readl(card->membase + DR0);
+	spin_unlock_irqrestore(&card->res_lock, flags);
+	return data;
 }
 
-
-   
-static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count)
+static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
+			  int count)
 {
-   unsigned long flags;
-   int i, c;
-   count--;	/* count range now is 0..3 instead of 1..4 */
-   c = count;
-   c <<= 2;	/* to use increments of 4 */
-   spin_lock_irqsave(&card->res_lock, flags);
-   while (CMD_BUSY(card));
-   for (i = 0; i <= c; i += 4)
-      writel(*(value++), card->membase + i);
-   /* Note: DR# registers are the first 4 dwords in nicstar's memspace,
-            so card->membase + DR0 == card->membase */
-   sram_address <<= 2;
-   sram_address &= 0x0007FFFC;
-   sram_address |= (0x40000000 | count);
-   writel(sram_address, card->membase + CMD);
-   spin_unlock_irqrestore(&card->res_lock, flags);
+	unsigned long flags;
+	int i, c;
+	count--;		/* count range now is 0..3 instead of 1..4 */
+	c = count;
+	c <<= 2;		/* to use increments of 4 */
+	spin_lock_irqsave(&card->res_lock, flags);
+	while (CMD_BUSY(card)) ;
+	for (i = 0; i <= c; i += 4)
+		writel(*(value++), card->membase + i);
+	/* Note: DR# registers are the first 4 dwords in nicstar's memspace,
+	   so card->membase + DR0 == card->membase */
+	sram_address <<= 2;
+	sram_address &= 0x0007FFFC;
+	sram_address |= (0x40000000 | count);
+	writel(sram_address, card->membase + CMD);
+	spin_unlock_irqrestore(&card->res_lock, flags);
 }
 
-
 static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 {
-   int j;
-   struct ns_dev *card = NULL;
-   unsigned char pci_latency;
-   unsigned error;
-   u32 data;
-   u32 u32d[4];
-   u32 ns_cfg_rctsize;
-   int bcount;
-   unsigned long membase;
+	int j;
+	struct ns_dev *card = NULL;
+	unsigned char pci_latency;
+	unsigned error;
+	u32 data;
+	u32 u32d[4];
+	u32 ns_cfg_rctsize;
+	int bcount;
+	unsigned long membase;
 
-   error = 0;
+	error = 0;
 
-   if (pci_enable_device(pcidev))
-   {
-      printk("nicstar%d: can't enable PCI device\n", i);
-      error = 2;
-      ns_init_card_error(card, error);
-      return error;
-   }
+	if (pci_enable_device(pcidev)) {
+		printk("nicstar%d: can't enable PCI device\n", i);
+		error = 2;
+		ns_init_card_error(card, error);
+		return error;
+	}
+	if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) ||
+	    (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) {
+		printk(KERN_WARNING
+		       "nicstar%d: No suitable DMA available.\n", i);
+		error = 2;
+		ns_init_card_error(card, error);
+		return error;
+	}
 
-   if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL)
-   {
-      printk("nicstar%d: can't allocate memory for device structure.\n", i);
-      error = 2;
-      ns_init_card_error(card, error);
-      return error;
-   }
-   cards[i] = card;
-   spin_lock_init(&card->int_lock);
-   spin_lock_init(&card->res_lock);
-      
-   pci_set_drvdata(pcidev, card);
-   
-   card->index = i;
-   card->atmdev = NULL;
-   card->pcidev = pcidev;
-   membase = pci_resource_start(pcidev, 1);
-   card->membase = ioremap(membase, NS_IOREMAP_SIZE);
-   if (!card->membase)
-   {
-      printk("nicstar%d: can't ioremap() membase.\n",i);
-      error = 3;
-      ns_init_card_error(card, error);
-      return error;
-   }
-   PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase);
+	if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) {
+		printk
+		    ("nicstar%d: can't allocate memory for device structure.\n",
+		     i);
+		error = 2;
+		ns_init_card_error(card, error);
+		return error;
+	}
+	cards[i] = card;
+	spin_lock_init(&card->int_lock);
+	spin_lock_init(&card->res_lock);
 
-   pci_set_master(pcidev);
+	pci_set_drvdata(pcidev, card);
 
-   if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0)
-   {
-      printk("nicstar%d: can't read PCI latency timer.\n", i);
-      error = 6;
-      ns_init_card_error(card, error);
-      return error;
-   }
+	card->index = i;
+	card->atmdev = NULL;
+	card->pcidev = pcidev;
+	membase = pci_resource_start(pcidev, 1);
+	card->membase = ioremap(membase, NS_IOREMAP_SIZE);
+	if (!card->membase) {
+		printk("nicstar%d: can't ioremap() membase.\n", i);
+		error = 3;
+		ns_init_card_error(card, error);
+		return error;
+	}
+	PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase);
+
+	pci_set_master(pcidev);
+
+	if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) {
+		printk("nicstar%d: can't read PCI latency timer.\n", i);
+		error = 6;
+		ns_init_card_error(card, error);
+		return error;
+	}
 #ifdef NS_PCI_LATENCY
-   if (pci_latency < NS_PCI_LATENCY)
-   {
-      PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
-      for (j = 1; j < 4; j++)
-      {
-         if (pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
-	    break;
-      }
-      if (j == 4)
-      {
-         printk("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
-         error = 7;
-         ns_init_card_error(card, error);
-	 return error;
-      }
-   }
+	if (pci_latency < NS_PCI_LATENCY) {
+		PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i,
+		       NS_PCI_LATENCY);
+		for (j = 1; j < 4; j++) {
+			if (pci_write_config_byte
+			    (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
+				break;
+		}
+		if (j == 4) {
+			printk
+			    ("nicstar%d: can't set PCI latency timer to %d.\n",
+			     i, NS_PCI_LATENCY);
+			error = 7;
+			ns_init_card_error(card, error);
+			return error;
+		}
+	}
 #endif /* NS_PCI_LATENCY */
-      
-   /* Clear timer overflow */
-   data = readl(card->membase + STAT);
-   if (data & NS_STAT_TMROF)
-      writel(NS_STAT_TMROF, card->membase + STAT);
 
-   /* Software reset */
-   writel(NS_CFG_SWRST, card->membase + CFG);
-   NS_DELAY;
-   writel(0x00000000, card->membase + CFG);
+	/* Clear timer overflow */
+	data = readl(card->membase + STAT);
+	if (data & NS_STAT_TMROF)
+		writel(NS_STAT_TMROF, card->membase + STAT);
 
-   /* PHY reset */
-   writel(0x00000008, card->membase + GP);
-   NS_DELAY;
-   writel(0x00000001, card->membase + GP);
-   NS_DELAY;
-   while (CMD_BUSY(card));
-   writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD);	/* Sync UTOPIA with SAR clock */
-   NS_DELAY;
-      
-   /* Detect PHY type */
-   while (CMD_BUSY(card));
-   writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
-   while (CMD_BUSY(card));
-   data = readl(card->membase + DR0);
-   switch(data) {
-      case 0x00000009:
-         printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
-         card->max_pcr = ATM_25_PCR;
-         while(CMD_BUSY(card));
-         writel(0x00000008, card->membase + DR0);
-         writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
-         /* Clear an eventual pending interrupt */
-         writel(NS_STAT_SFBQF, card->membase + STAT);
+	/* Software reset */
+	writel(NS_CFG_SWRST, card->membase + CFG);
+	NS_DELAY;
+	writel(0x00000000, card->membase + CFG);
+
+	/* PHY reset */
+	writel(0x00000008, card->membase + GP);
+	NS_DELAY;
+	writel(0x00000001, card->membase + GP);
+	NS_DELAY;
+	while (CMD_BUSY(card)) ;
+	writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD);	/* Sync UTOPIA with SAR clock */
+	NS_DELAY;
+
+	/* Detect PHY type */
+	while (CMD_BUSY(card)) ;
+	writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
+	while (CMD_BUSY(card)) ;
+	data = readl(card->membase + DR0);
+	switch (data) {
+	case 0x00000009:
+		printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
+		card->max_pcr = ATM_25_PCR;
+		while (CMD_BUSY(card)) ;
+		writel(0x00000008, card->membase + DR0);
+		writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
+		/* Clear an eventual pending interrupt */
+		writel(NS_STAT_SFBQF, card->membase + STAT);
 #ifdef PHY_LOOPBACK
-         while(CMD_BUSY(card));
-         writel(0x00000022, card->membase + DR0);
-         writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
+		while (CMD_BUSY(card)) ;
+		writel(0x00000022, card->membase + DR0);
+		writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
 #endif /* PHY_LOOPBACK */
-	 break;
-      case 0x00000030:
-      case 0x00000031:
-         printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
-         card->max_pcr = ATM_OC3_PCR;
+		break;
+	case 0x00000030:
+	case 0x00000031:
+		printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
+		card->max_pcr = ATM_OC3_PCR;
 #ifdef PHY_LOOPBACK
-         while(CMD_BUSY(card));
-         writel(0x00000002, card->membase + DR0);
-         writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
+		while (CMD_BUSY(card)) ;
+		writel(0x00000002, card->membase + DR0);
+		writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
 #endif /* PHY_LOOPBACK */
-	 break;
-      default:
-         printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
-         error = 8;
-         ns_init_card_error(card, error);
-         return error;
-   }
-   writel(0x00000000, card->membase + GP);
+		break;
+	default:
+		printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
+		error = 8;
+		ns_init_card_error(card, error);
+		return error;
+	}
+	writel(0x00000000, card->membase + GP);
 
-   /* Determine SRAM size */
-   data = 0x76543210;
-   ns_write_sram(card, 0x1C003, &data, 1);
-   data = 0x89ABCDEF;
-   ns_write_sram(card, 0x14003, &data, 1);
-   if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
-       ns_read_sram(card, 0x1C003) == 0x76543210)
-       card->sram_size = 128;
-   else
-      card->sram_size = 32;
-   PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
+	/* Determine SRAM size */
+	data = 0x76543210;
+	ns_write_sram(card, 0x1C003, &data, 1);
+	data = 0x89ABCDEF;
+	ns_write_sram(card, 0x14003, &data, 1);
+	if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
+	    ns_read_sram(card, 0x1C003) == 0x76543210)
+		card->sram_size = 128;
+	else
+		card->sram_size = 32;
+	PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
 
-   card->rct_size = NS_MAX_RCTSIZE;
+	card->rct_size = NS_MAX_RCTSIZE;
 
 #if (NS_MAX_RCTSIZE == 4096)
-   if (card->sram_size == 128)
-      printk("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i);
+	if (card->sram_size == 128)
+		printk
+		    ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n",
+		     i);
 #elif (NS_MAX_RCTSIZE == 16384)
-   if (card->sram_size == 32)
-   {
-      printk("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", i);
-      card->rct_size = 4096;
-   }
+	if (card->sram_size == 32) {
+		printk
+		    ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n",
+		     i);
+		card->rct_size = 4096;
+	}
 #else
 #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
 #endif
 
-   card->vpibits = NS_VPIBITS;
-   if (card->rct_size == 4096)
-      card->vcibits = 12 - NS_VPIBITS;
-   else /* card->rct_size == 16384 */
-      card->vcibits = 14 - NS_VPIBITS;
+	card->vpibits = NS_VPIBITS;
+	if (card->rct_size == 4096)
+		card->vcibits = 12 - NS_VPIBITS;
+	else			/* card->rct_size == 16384 */
+		card->vcibits = 14 - NS_VPIBITS;
 
-   /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
-   if (mac[i] == NULL)
-      nicstar_init_eprom(card->membase);
+	/* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
+	if (mac[i] == NULL)
+		nicstar_init_eprom(card->membase);
 
-   /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
-   writel(0x00000000, card->membase + VPM);
-      
-   /* Initialize TSQ */
-   card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL);
-   if (card->tsq.org == NULL)
-   {
-      printk("nicstar%d: can't allocate TSQ.\n", i);
-      error = 10;
-      ns_init_card_error(card, error);
-      return error;
-   }
-   card->tsq.base = (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT);
-   card->tsq.next = card->tsq.base;
-   card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
-   for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
-      ns_tsi_init(card->tsq.base + j);
-   writel(0x00000000, card->membase + TSQH);
-   writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB);
-   PRINTK("nicstar%d: TSQ base at 0x%x  0x%x  0x%x.\n", i, (u32) card->tsq.base,
-          (u32) virt_to_bus(card->tsq.base), readl(card->membase + TSQB));
-      
-   /* Initialize RSQ */
-   card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL);
-   if (card->rsq.org == NULL)
-   {
-      printk("nicstar%d: can't allocate RSQ.\n", i);
-      error = 11;
-      ns_init_card_error(card, error);
-      return error;
-   }
-   card->rsq.base = (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT);
-   card->rsq.next = card->rsq.base;
-   card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
-   for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
-      ns_rsqe_init(card->rsq.base + j);
-   writel(0x00000000, card->membase + RSQH);
-   writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB);
-   PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base);
-      
-   /* Initialize SCQ0, the only VBR SCQ used */
-   card->scq1 = NULL;
-   card->scq2 = NULL;
-   card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0);
-   if (card->scq0 == NULL)
-   {
-      printk("nicstar%d: can't get SCQ0.\n", i);
-      error = 12;
-      ns_init_card_error(card, error);
-      return error;
-   }
-   u32d[0] = (u32) virt_to_bus(card->scq0->base);
-   u32d[1] = (u32) 0x00000000;
-   u32d[2] = (u32) 0xffffffff;
-   u32d[3] = (u32) 0x00000000;
-   ns_write_sram(card, NS_VRSCD0, u32d, 4);
-   ns_write_sram(card, NS_VRSCD1, u32d, 4);	/* These last two won't be used */
-   ns_write_sram(card, NS_VRSCD2, u32d, 4);	/* but are initialized, just in case... */
-   card->scq0->scd = NS_VRSCD0;
-   PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, (u32) card->scq0->base);
+	/* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
+	writel(0x00000000, card->membase + VPM);
 
-   /* Initialize TSTs */
-   card->tst_addr = NS_TST0;
-   card->tst_free_entries = NS_TST_NUM_ENTRIES;
-   data = NS_TST_OPCODE_VARIABLE;
-   for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
-      ns_write_sram(card, NS_TST0 + j, &data, 1);
-   data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
-   ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
-   for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
-      ns_write_sram(card, NS_TST1 + j, &data, 1);
-   data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
-   ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
-   for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
-      card->tste2vc[j] = NULL;
-   writel(NS_TST0 << 2, card->membase + TSTB);
+	/* Initialize TSQ */
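+	/* Over-allocate by NS_TSQ_ALIGNMENT so that both the CPU pointer
+	   (PTR_ALIGN) and the bus address (ALIGN) can be rounded up. */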
+	card->tsq.org = pci_alloc_consistent(card->pcidev,
+					     NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+					     &card->tsq.dma);
+	if (card->tsq.org == NULL) {
+		printk("nicstar%d: can't allocate TSQ.\n", i);
+		error = 10;
+		ns_init_card_error(card, error);
+		return error;
+	}
+	card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT);
+	card->tsq.next = card->tsq.base;
+	card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
+	for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
+		ns_tsi_init(card->tsq.base + j);
+	writel(0x00000000, card->membase + TSQH);
+	writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB);
+	PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);
 
+	/* Initialize RSQ */
+	card->rsq.org = pci_alloc_consistent(card->pcidev,
+					     NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+					     &card->rsq.dma);
+	if (card->rsq.org == NULL) {
+		printk("nicstar%d: can't allocate RSQ.\n", i);
+		error = 11;
+		ns_init_card_error(card, error);
+		return error;
+	}
+	card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT);
+	card->rsq.next = card->rsq.base;
+	card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
+	for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
+		ns_rsqe_init(card->rsq.base + j);
+	writel(0x00000000, card->membase + RSQH);
+	writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB);
+	PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base);
 
-   /* Initialize RCT. AAL type is set on opening the VC. */
+	/* Initialize SCQ0, the only VBR SCQ used */
+	card->scq1 = NULL;
+	card->scq2 = NULL;
+	card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0);
+	if (card->scq0 == NULL) {
+		printk("nicstar%d: can't get SCQ0.\n", i);
+		error = 12;
+		ns_init_card_error(card, error);
+		return error;
+	}
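+	/* The first word written to the SCD below is the bus address of
+	   the SCQ base. */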
+	u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base);
+	u32d[1] = (u32) 0x00000000;
+	u32d[2] = (u32) 0xffffffff;
+	u32d[3] = (u32) 0x00000000;
+	ns_write_sram(card, NS_VRSCD0, u32d, 4);
+	ns_write_sram(card, NS_VRSCD1, u32d, 4);	/* These last two won't be used */
+	ns_write_sram(card, NS_VRSCD2, u32d, 4);	/* but are initialized, just in case... */
+	card->scq0->scd = NS_VRSCD0;
+	PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base);
+
+	/* Initialize TSTs */
+	card->tst_addr = NS_TST0;
+	card->tst_free_entries = NS_TST_NUM_ENTRIES;
+	data = NS_TST_OPCODE_VARIABLE;
+	for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
+		ns_write_sram(card, NS_TST0 + j, &data, 1);
+	data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
+	ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
+	for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
+		ns_write_sram(card, NS_TST1 + j, &data, 1);
+	data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
+	ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
+	for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
+		card->tste2vc[j] = NULL;
+	writel(NS_TST0 << 2, card->membase + TSTB);
+
+	/* Initialize RCT. AAL type is set on opening the VC. */
 #ifdef RCQ_SUPPORT
-   u32d[0] = NS_RCTE_RAWCELLINTEN;
+	u32d[0] = NS_RCTE_RAWCELLINTEN;
 #else
-   u32d[0] = 0x00000000;
+	u32d[0] = 0x00000000;
 #endif /* RCQ_SUPPORT */
-   u32d[1] = 0x00000000;
-   u32d[2] = 0x00000000;
-   u32d[3] = 0xFFFFFFFF;
-   for (j = 0; j < card->rct_size; j++)
-      ns_write_sram(card, j * 4, u32d, 4);      
-      
-   memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
-      
-   for (j = 0; j < NS_FRSCD_NUM; j++)
-      card->scd2vc[j] = NULL;
+	u32d[1] = 0x00000000;
+	u32d[2] = 0x00000000;
+	u32d[3] = 0xFFFFFFFF;
+	for (j = 0; j < card->rct_size; j++)
+		ns_write_sram(card, j * 4, u32d, 4);
 
-   /* Initialize buffer levels */
-   card->sbnr.min = MIN_SB;
-   card->sbnr.init = NUM_SB;
-   card->sbnr.max = MAX_SB;
-   card->lbnr.min = MIN_LB;
-   card->lbnr.init = NUM_LB;
-   card->lbnr.max = MAX_LB;
-   card->iovnr.min = MIN_IOVB;
-   card->iovnr.init = NUM_IOVB;
-   card->iovnr.max = MAX_IOVB;
-   card->hbnr.min = MIN_HB;
-   card->hbnr.init = NUM_HB;
-   card->hbnr.max = MAX_HB;
-   
-   card->sm_handle = 0x00000000;
-   card->sm_addr = 0x00000000;
-   card->lg_handle = 0x00000000;
-   card->lg_addr = 0x00000000;
-   
-   card->efbie = 1;	/* To prevent push_rxbufs from enabling the interrupt */
+	memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
 
-   /* Pre-allocate some huge buffers */
-   skb_queue_head_init(&card->hbpool.queue);
-   card->hbpool.count = 0;
-   for (j = 0; j < NUM_HB; j++)
-   {
-      struct sk_buff *hb;
-      hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
-      if (hb == NULL)
-      {
-         printk("nicstar%d: can't allocate %dth of %d huge buffers.\n",
-                i, j, NUM_HB);
-         error = 13;
-         ns_init_card_error(card, error);
-	 return error;
-      }
-      NS_SKB_CB(hb)->buf_type = BUF_NONE;
-      skb_queue_tail(&card->hbpool.queue, hb);
-      card->hbpool.count++;
-   }
+	for (j = 0; j < NS_FRSCD_NUM; j++)
+		card->scd2vc[j] = NULL;
 
+	/* Initialize buffer levels */
+	card->sbnr.min = MIN_SB;
+	card->sbnr.init = NUM_SB;
+	card->sbnr.max = MAX_SB;
+	card->lbnr.min = MIN_LB;
+	card->lbnr.init = NUM_LB;
+	card->lbnr.max = MAX_LB;
+	card->iovnr.min = MIN_IOVB;
+	card->iovnr.init = NUM_IOVB;
+	card->iovnr.max = MAX_IOVB;
+	card->hbnr.min = MIN_HB;
+	card->hbnr.init = NUM_HB;
+	card->hbnr.max = MAX_HB;
 
-   /* Allocate large buffers */
-   skb_queue_head_init(&card->lbpool.queue);
-   card->lbpool.count = 0;			/* Not used */
-   for (j = 0; j < NUM_LB; j++)
-   {
-      struct sk_buff *lb;
-      lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
-      if (lb == NULL)
-      {
-         printk("nicstar%d: can't allocate %dth of %d large buffers.\n",
-                i, j, NUM_LB);
-         error = 14;
-         ns_init_card_error(card, error);
-	 return error;
-      }
-      NS_SKB_CB(lb)->buf_type = BUF_LG;
-      skb_queue_tail(&card->lbpool.queue, lb);
-      skb_reserve(lb, NS_SMBUFSIZE);
-      push_rxbufs(card, lb);
-      /* Due to the implementation of push_rxbufs() this is 1, not 0 */
-      if (j == 1)
-      {
-         card->rcbuf = lb;
-         card->rawch = (u32) virt_to_bus(lb->data);
-      }
-   }
-   /* Test for strange behaviour which leads to crashes */
-   if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min)
-   {
-      printk("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
-             i, j, bcount);
-      error = 14;
-      ns_init_card_error(card, error);
-      return error;
-   }
-      
+	card->sm_handle = 0x00000000;
+	card->sm_addr = 0x00000000;
+	card->lg_handle = 0x00000000;
+	card->lg_addr = 0x00000000;
 
-   /* Allocate small buffers */
-   skb_queue_head_init(&card->sbpool.queue);
-   card->sbpool.count = 0;			/* Not used */
-   for (j = 0; j < NUM_SB; j++)
-   {
-      struct sk_buff *sb;
-      sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
-      if (sb == NULL)
-      {
-         printk("nicstar%d: can't allocate %dth of %d small buffers.\n",
-                i, j, NUM_SB);
-         error = 15;
-         ns_init_card_error(card, error);
-	 return error;
-      }
-      NS_SKB_CB(sb)->buf_type = BUF_SM;
-      skb_queue_tail(&card->sbpool.queue, sb);
-      skb_reserve(sb, NS_AAL0_HEADER);
-      push_rxbufs(card, sb);
-   }
-   /* Test for strange behaviour which leads to crashes */
-   if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
-   {
-      printk("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
-             i, j, bcount);
-      error = 15;
-      ns_init_card_error(card, error);
-      return error;
-   }
-      
+	card->efbie = 1;	/* To prevent push_rxbufs from enabling the interrupt */
 
-   /* Allocate iovec buffers */
-   skb_queue_head_init(&card->iovpool.queue);
-   card->iovpool.count = 0;
-   for (j = 0; j < NUM_IOVB; j++)
-   {
-      struct sk_buff *iovb;
-      iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
-      if (iovb == NULL)
-      {
-         printk("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
-                i, j, NUM_IOVB);
-         error = 16;
-         ns_init_card_error(card, error);
-	 return error;
-      }
-      NS_SKB_CB(iovb)->buf_type = BUF_NONE;
-      skb_queue_tail(&card->iovpool.queue, iovb);
-      card->iovpool.count++;
-   }
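+	/* idr maps the integer buffer ids handed to the card back to their
+	   sk_buffs; see push_rxbufs(). */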
+	idr_init(&card->idr);
 
-   /* Configure NICStAR */
-   if (card->rct_size == 4096)
-      ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
-   else /* (card->rct_size == 16384) */
-      ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
+	/* Pre-allocate some huge buffers */
+	skb_queue_head_init(&card->hbpool.queue);
+	card->hbpool.count = 0;
+	for (j = 0; j < NUM_HB; j++) {
+		struct sk_buff *hb;
+		hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
+		if (hb == NULL) {
+			printk
+			    ("nicstar%d: can't allocate %dth of %d huge buffers.\n",
+			     i, j, NUM_HB);
+			error = 13;
+			ns_init_card_error(card, error);
+			return error;
+		}
+		NS_PRV_BUFTYPE(hb) = BUF_NONE;
+		skb_queue_tail(&card->hbpool.queue, hb);
+		card->hbpool.count++;
+	}
 
-   card->efbie = 1;
+	/* Allocate large buffers */
+	skb_queue_head_init(&card->lbpool.queue);
+	card->lbpool.count = 0;	/* Not used */
+	for (j = 0; j < NUM_LB; j++) {
+		struct sk_buff *lb;
+		lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
+		if (lb == NULL) {
+			printk
+			    ("nicstar%d: can't allocate %dth of %d large buffers.\n",
+			     i, j, NUM_LB);
+			error = 14;
+			ns_init_card_error(card, error);
+			return error;
+		}
+		NS_PRV_BUFTYPE(lb) = BUF_LG;
+		skb_queue_tail(&card->lbpool.queue, lb);
+		skb_reserve(lb, NS_SMBUFSIZE);
+		push_rxbufs(card, lb);
+		/* Due to the implementation of push_rxbufs() this is 1, not 0 */
+		if (j == 1) {
+			card->rcbuf = lb;
+			card->rawcell = (struct ns_rcqe *) lb->data;
+			card->rawch = NS_PRV_DMA(lb);
+		}
+	}
+	/* Test for strange behaviour which leads to crashes */
+	if ((bcount =
+	     ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) {
+		printk
+		    ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
+		     i, j, bcount);
+		error = 14;
+		ns_init_card_error(card, error);
+		return error;
+	}
 
-   card->intcnt = 0;
-   if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0)
-   {
-      printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
-      error = 9;
-      ns_init_card_error(card, error);
-      return error;
-   }
+	/* Allocate small buffers */
+	skb_queue_head_init(&card->sbpool.queue);
+	card->sbpool.count = 0;	/* Not used */
+	for (j = 0; j < NUM_SB; j++) {
+		struct sk_buff *sb;
+		sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
+		if (sb == NULL) {
+			printk
+			    ("nicstar%d: can't allocate %dth of %d small buffers.\n",
+			     i, j, NUM_SB);
+			error = 15;
+			ns_init_card_error(card, error);
+			return error;
+		}
+		NS_PRV_BUFTYPE(sb) = BUF_SM;
+		skb_queue_tail(&card->sbpool.queue, sb);
+		skb_reserve(sb, NS_AAL0_HEADER);
+		push_rxbufs(card, sb);
+	}
+	/* Test for strange behaviour which leads to crashes */
+	if ((bcount =
+	     ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) {
+		printk
+		    ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
+		     i, j, bcount);
+		error = 15;
+		ns_init_card_error(card, error);
+		return error;
+	}
 
-   /* Register device */
-   card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL);
-   if (card->atmdev == NULL)
-   {
-      printk("nicstar%d: can't register device.\n", i);
-      error = 17;
-      ns_init_card_error(card, error);
-      return error;
-   }
-      
-   if (ns_parse_mac(mac[i], card->atmdev->esi)) {
-      nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
-                         card->atmdev->esi, 6);
-      if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
-         nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
-                         card->atmdev->esi, 6);
-      }
-   }
+	/* Allocate iovec buffers */
+	skb_queue_head_init(&card->iovpool.queue);
+	card->iovpool.count = 0;
+	for (j = 0; j < NUM_IOVB; j++) {
+		struct sk_buff *iovb;
+		iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
+		if (iovb == NULL) {
+			printk
+			    ("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
+			     i, j, NUM_IOVB);
+			error = 16;
+			ns_init_card_error(card, error);
+			return error;
+		}
+		NS_PRV_BUFTYPE(iovb) = BUF_NONE;
+		skb_queue_tail(&card->iovpool.queue, iovb);
+		card->iovpool.count++;
+	}
 
-   printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
+	/* Configure NICStAR */
+	if (card->rct_size == 4096)
+		ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
+	else			/* (card->rct_size == 16384) */
+		ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
 
-   card->atmdev->dev_data = card;
-   card->atmdev->ci_range.vpi_bits = card->vpibits;
-   card->atmdev->ci_range.vci_bits = card->vcibits;
-   card->atmdev->link_rate = card->max_pcr;
-   card->atmdev->phy = NULL;
+	card->efbie = 1;
+
+	card->intcnt = 0;
+	if (request_irq
+	    (pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED,
+	     "nicstar", card) != 0) {
+		printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
+		error = 9;
+		ns_init_card_error(card, error);
+		return error;
+	}
+
+	/* Register device */
+	card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL);
+	if (card->atmdev == NULL) {
+		printk("nicstar%d: can't register device.\n", i);
+		error = 17;
+		ns_init_card_error(card, error);
+		return error;
+	}
+
+	if (ns_parse_mac(mac[i], card->atmdev->esi)) {
+		nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
+				   card->atmdev->esi, 6);
+		if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
+		    0) {
+			nicstar_read_eprom(card->membase,
+					   NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
+					   card->atmdev->esi, 6);
+		}
+	}
+
+	printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
+
+	card->atmdev->dev_data = card;
+	card->atmdev->ci_range.vpi_bits = card->vpibits;
+	card->atmdev->ci_range.vci_bits = card->vcibits;
+	card->atmdev->link_rate = card->max_pcr;
+	card->atmdev->phy = NULL;
 
 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI
-   if (card->max_pcr == ATM_OC3_PCR)
-      suni_init(card->atmdev);
+	if (card->max_pcr == ATM_OC3_PCR)
+		suni_init(card->atmdev);
 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
 
 #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
-   if (card->max_pcr == ATM_25_PCR)
-      idt77105_init(card->atmdev);
+	if (card->max_pcr == ATM_25_PCR)
+		idt77105_init(card->atmdev);
 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
 
-   if (card->atmdev->phy && card->atmdev->phy->start)
-      card->atmdev->phy->start(card->atmdev);
+	if (card->atmdev->phy && card->atmdev->phy->start)
+		card->atmdev->phy->start(card->atmdev);
 
-   writel(NS_CFG_RXPATH |
-          NS_CFG_SMBUFSIZE |
-          NS_CFG_LGBUFSIZE |
-          NS_CFG_EFBIE |
-          NS_CFG_RSQSIZE |
-          NS_CFG_VPIBITS |
-          ns_cfg_rctsize |
-          NS_CFG_RXINT_NODELAY |
-          NS_CFG_RAWIE |		/* Only enabled if RCQ_SUPPORT */
-          NS_CFG_RSQAFIE |
-          NS_CFG_TXEN |
-          NS_CFG_TXIE |
-          NS_CFG_TSQFIE_OPT |		/* Only enabled if ENABLE_TSQFIE */ 
-          NS_CFG_PHYIE,
-          card->membase + CFG);
+	writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE |
+	       NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS |
+	       ns_cfg_rctsize | NS_CFG_RXINT_NODELAY |
+	       NS_CFG_RAWIE |		/* Only enabled if RCQ_SUPPORT */
+	       NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE |
+	       NS_CFG_TSQFIE_OPT |	/* Only enabled if ENABLE_TSQFIE */
+	       NS_CFG_PHYIE, card->membase + CFG);
 
-   num_cards++;
+	num_cards++;
 
-   return error;
+	return error;
 }
 
-
-
-static void __devinit ns_init_card_error(ns_dev *card, int error)
+static void __devinit ns_init_card_error(ns_dev *card, int error)
 {
-   if (error >= 17)
-   {
-      writel(0x00000000, card->membase + CFG);
-   }
-   if (error >= 16)
-   {
-      struct sk_buff *iovb;
-      while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
-         dev_kfree_skb_any(iovb);
-   }
-   if (error >= 15)
-   {
-      struct sk_buff *sb;
-      while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
-         dev_kfree_skb_any(sb);
-      free_scq(card->scq0, NULL);
-   }
-   if (error >= 14)
-   {
-      struct sk_buff *lb;
-      while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
-         dev_kfree_skb_any(lb);
-   }
-   if (error >= 13)
-   {
-      struct sk_buff *hb;
-      while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
-         dev_kfree_skb_any(hb);
-   }
-   if (error >= 12)
-   {
-      kfree(card->rsq.org);
-   }
-   if (error >= 11)
-   {
-      kfree(card->tsq.org);
-   }
-   if (error >= 10)
-   {
-      free_irq(card->pcidev->irq, card);
-   }
-   if (error >= 4)
-   {
-      iounmap(card->membase);
-   }
-   if (error >= 3)
-   {
-      pci_disable_device(card->pcidev);
-      kfree(card);
-   }
+	if (error >= 17) {
+		writel(0x00000000, card->membase + CFG);
+	}
+	if (error >= 16) {
+		struct sk_buff *iovb;
+		while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
+			dev_kfree_skb_any(iovb);
+	}
+	if (error >= 15) {
+		struct sk_buff *sb;
+		while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
+			dev_kfree_skb_any(sb);
+		free_scq(card, card->scq0, NULL);
+	}
+	if (error >= 14) {
+		struct sk_buff *lb;
+		while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
+			dev_kfree_skb_any(lb);
+	}
+	if (error >= 13) {
+		struct sk_buff *hb;
+		while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
+			dev_kfree_skb_any(hb);
+	}
+	if (error >= 12) {
+		pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+				    card->rsq.org, card->rsq.dma);
+	}
+	if (error >= 11) {
+		pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+				    card->tsq.org, card->tsq.dma);
+	}
+	if (error >= 10) {
+		free_irq(card->pcidev->irq, card);
+	}
+	if (error >= 4) {
+		iounmap(card->membase);
+	}
+	if (error >= 3) {
+		pci_disable_device(card->pcidev);
+		kfree(card);
+	}
 }
 
-
-
-static scq_info *get_scq(int size, u32 scd)
+static scq_info *get_scq(ns_dev *card, int size, u32 scd)
 {
-   scq_info *scq;
-   int i;
+	scq_info *scq;
+	int i;
 
-   if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
-      return NULL;
+	if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
+		return NULL;
 
-   scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
-   if (scq == NULL)
-      return NULL;
-   scq->org = kmalloc(2 * size, GFP_KERNEL);
-   if (scq->org == NULL)
-   {
-      kfree(scq);
-      return NULL;
-   }
-   scq->skb = kmalloc(sizeof(struct sk_buff *) *
-                                          (size / NS_SCQE_SIZE), GFP_KERNEL);
-   if (scq->skb == NULL)
-   {
-      kfree(scq->org);
-      kfree(scq);
-      return NULL;
-   }
-   scq->num_entries = size / NS_SCQE_SIZE;
-   scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size);
-   scq->next = scq->base;
-   scq->last = scq->base + (scq->num_entries - 1);
-   scq->tail = scq->last;
-   scq->scd = scd;
-   scq->num_entries = size / NS_SCQE_SIZE;
-   scq->tbd_count = 0;
-   init_waitqueue_head(&scq->scqfull_waitq);
-   scq->full = 0;
-   spin_lock_init(&scq->lock);
+	scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
+	if (!scq)
+		return NULL;
+	scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma);
+	if (!scq->org) {
+		kfree(scq);
+		return NULL;
+	}
+	scq->skb = kmalloc(sizeof(struct sk_buff *) *
+			   (size / NS_SCQE_SIZE), GFP_KERNEL);
+	if (!scq->skb) {
+		pci_free_consistent(card->pcidev, 2 * size, scq->org, scq->dma);
+		kfree(scq);
+		return NULL;
+	}
+	scq->num_entries = size / NS_SCQE_SIZE;
+	scq->base = PTR_ALIGN(scq->org, size);
+	scq->next = scq->base;
+	scq->last = scq->base + (scq->num_entries - 1);
+	scq->tail = scq->last;
+	scq->scd = scd;
+	scq->num_entries = size / NS_SCQE_SIZE;
+	scq->tbd_count = 0;
+	init_waitqueue_head(&scq->scqfull_waitq);
+	scq->full = 0;
+	spin_lock_init(&scq->lock);
 
-   for (i = 0; i < scq->num_entries; i++)
-      scq->skb[i] = NULL;
+	for (i = 0; i < scq->num_entries; i++)
+		scq->skb[i] = NULL;
 
-   return scq;
+	return scq;
 }
 
-
-
 /* For variable rate SCQ vcc must be NULL */
-static void free_scq(scq_info *scq, struct atm_vcc *vcc)
+static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
 {
-   int i;
+	int i;
 
-   if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
-      for (i = 0; i < scq->num_entries; i++)
-      {
-         if (scq->skb[i] != NULL)
-	 {
-            vcc = ATM_SKB(scq->skb[i])->vcc;
-            if (vcc->pop != NULL)
-	       vcc->pop(vcc, scq->skb[i]);
-	    else
-               dev_kfree_skb_any(scq->skb[i]);
-         }
-      }
-   else /* vcc must be != NULL */
-   {
-      if (vcc == NULL)
-      {
-         printk("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
-         for (i = 0; i < scq->num_entries; i++)
-            dev_kfree_skb_any(scq->skb[i]);
-      }
-      else
-         for (i = 0; i < scq->num_entries; i++)
-         {
-            if (scq->skb[i] != NULL)
-            {
-               if (vcc->pop != NULL)
-                  vcc->pop(vcc, scq->skb[i]);
-               else
-                  dev_kfree_skb_any(scq->skb[i]);
-            }
-         }
-   }
-   kfree(scq->skb);
-   kfree(scq->org);
-   kfree(scq);
+	if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
+		for (i = 0; i < scq->num_entries; i++) {
+			if (scq->skb[i] != NULL) {
+				vcc = ATM_SKB(scq->skb[i])->vcc;
+				if (vcc->pop != NULL)
+					vcc->pop(vcc, scq->skb[i]);
+				else
+					dev_kfree_skb_any(scq->skb[i]);
+			}
+	} else {		/* vcc must be != NULL */
+		if (vcc == NULL) {
+			printk
+			    ("nicstar: free_scq() called with vcc == NULL for fixed rate scq.\n");
+			for (i = 0; i < scq->num_entries; i++)
+				dev_kfree_skb_any(scq->skb[i]);
+		} else
+			for (i = 0; i < scq->num_entries; i++) {
+				if (scq->skb[i] != NULL) {
+					if (vcc->pop != NULL)
+						vcc->pop(vcc, scq->skb[i]);
+					else
+						dev_kfree_skb_any(scq->skb[i]);
+				}
+			}
+	}
+	kfree(scq->skb);
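+	/* Free the same 2 * size area that get_scq() allocated. */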
+	pci_free_consistent(card->pcidev,
+			    2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
+				 VBR_SCQSIZE : CBR_SCQSIZE),
+			    scq->org, scq->dma);
+	kfree(scq);
 }
 
-
-
 /* The handles passed must be pointers to the sk_buff containing the small
-   or large buffer(s) cast to u32. */
-static void push_rxbufs(ns_dev *card, struct sk_buff *skb)
+   or large buffer(s); the hardware is handed integer ids from card->idr. */
+static void push_rxbufs(ns_dev *card, struct sk_buff *skb)
 {
-   struct ns_skb_cb *cb = NS_SKB_CB(skb);
-   u32 handle1, addr1;
-   u32 handle2, addr2;
-   u32 stat;
-   unsigned long flags;
-   
-   /* *BARF* */
-   handle2 = addr2 = 0;
-   handle1 = (u32)skb;
-   addr1 = (u32)virt_to_bus(skb->data);
+	struct sk_buff *handle1, *handle2;
+	u32 id1 = 0, id2 = 0;
+	u32 addr1, addr2;
+	u32 stat;
+	unsigned long flags;
+	int err;
+
+	/* *BARF* */
+	handle2 = NULL;
+	addr2 = 0;
+	handle1 = skb;
+	addr1 = pci_map_single(card->pcidev,
+			       skb->data,
+			       (NS_PRV_BUFTYPE(skb) == BUF_SM
+				? NS_SMSKBSIZE : NS_LGSKBSIZE),
+			       PCI_DMA_TODEVICE);
+	NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */
 
 #ifdef GENERAL_DEBUG
-   if (!addr1)
-      printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index);
+	if (!addr1)
+		printk("nicstar%d: push_rxbufs called with addr1 = 0.\n",
+		       card->index);
 #endif /* GENERAL_DEBUG */
 
-   stat = readl(card->membase + STAT);
-   card->sbfqc = ns_stat_sfbqc_get(stat);
-   card->lbfqc = ns_stat_lfbqc_get(stat);
-   if (cb->buf_type == BUF_SM)
-   {
-      if (!addr2)
-      {
-         if (card->sm_addr)
-	 {
-	    addr2 = card->sm_addr;
-	    handle2 = card->sm_handle;
-	    card->sm_addr = 0x00000000;
-	    card->sm_handle = 0x00000000;
-	 }
-	 else /* (!sm_addr) */
-	 {
-	    card->sm_addr = addr1;
-	    card->sm_handle = handle1;
-	 }
-      }      
-   }
-   else /* buf_type == BUF_LG */
-   {
-      if (!addr2)
-      {
-         if (card->lg_addr)
-	 {
-	    addr2 = card->lg_addr;
-	    handle2 = card->lg_handle;
-	    card->lg_addr = 0x00000000;
-	    card->lg_handle = 0x00000000;
-	 }
-	 else /* (!lg_addr) */
-	 {
-	    card->lg_addr = addr1;
-	    card->lg_handle = handle1;
-	 }
-      }      
-   }
+	stat = readl(card->membase + STAT);
+	card->sbfqc = ns_stat_sfbqc_get(stat);
+	card->lbfqc = ns_stat_lfbqc_get(stat);
+	if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
+		if (!addr2) {
+			if (card->sm_addr) {
+				addr2 = card->sm_addr;
+				handle2 = card->sm_handle;
+				card->sm_addr = 0x00000000;
+				card->sm_handle = 0x00000000;
+			} else {	/* (!sm_addr) */
-   if (addr2)
-   {
-      if (cb->buf_type == BUF_SM)
-      {
-         if (card->sbfqc >= card->sbnr.max)
-         {
-            skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue);
-            dev_kfree_skb_any((struct sk_buff *) handle1);
-            skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue);
-            dev_kfree_skb_any((struct sk_buff *) handle2);
-            return;
-         }
-	 else
-            card->sbfqc += 2;
-      }
-      else /* (buf_type == BUF_LG) */
-      {
-         if (card->lbfqc >= card->lbnr.max)
-         {
-            skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue);
-            dev_kfree_skb_any((struct sk_buff *) handle1);
-            skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue);
-            dev_kfree_skb_any((struct sk_buff *) handle2);
-            return;
-         }
-         else
-            card->lbfqc += 2;
-      }
+				card->sm_addr = addr1;
+				card->sm_handle = handle1;
+			}
+		}
+	} else {		/* buf_type == BUF_LG */
-      spin_lock_irqsave(&card->res_lock, flags);
+		if (!addr2) {
+			if (card->lg_addr) {
+				addr2 = card->lg_addr;
+				handle2 = card->lg_handle;
+				card->lg_addr = 0x00000000;
+				card->lg_handle = 0x00000000;
+			} else {	/* (!lg_addr) */
-      while (CMD_BUSY(card));
-      writel(addr2, card->membase + DR3);
-      writel(handle2, card->membase + DR2);
-      writel(addr1, card->membase + DR1);
-      writel(handle1, card->membase + DR0);
-      writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD);
- 
-      spin_unlock_irqrestore(&card->res_lock, flags);
+				card->lg_addr = addr1;
+				card->lg_handle = handle1;
+			}
+		}
+	}
 
-      XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
-              (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2);
-   }
+	if (addr2) {
+		if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
+			if (card->sbfqc >= card->sbnr.max) {
+				skb_unlink(handle1, &card->sbpool.queue);
+				dev_kfree_skb_any(handle1);
+				skb_unlink(handle2, &card->sbpool.queue);
+				dev_kfree_skb_any(handle2);
+				return;
+			} else
+				card->sbfqc += 2;
+		} else {	/* (buf_type == BUF_LG) */
-   if (!card->efbie && card->sbfqc >= card->sbnr.min &&
-       card->lbfqc >= card->lbnr.min)
-   {
-      card->efbie = 1;
-      writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG);
-   }
+			if (card->lbfqc >= card->lbnr.max) {
+				skb_unlink(handle1, &card->lbpool.queue);
+				dev_kfree_skb_any(handle1);
+				skb_unlink(handle2, &card->lbpool.queue);
+				dev_kfree_skb_any(handle2);
+				return;
+			} else
+				card->lbfqc += 2;
+		}
 
-   return;
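+		/* Allocate ids for both buffers; the card hands these ids back
+		   with received data and they are resolved via card->idr. */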
+		do {
+			if (!idr_pre_get(&card->idr, GFP_ATOMIC)) {
+				printk(KERN_ERR
+				       "nicstar%d: no free memory for idr\n",
+				       card->index);
+				goto out;
+			}
+
+			err = 0;
+			if (!id1)
+				err = idr_get_new_above(&card->idr, handle1, 0, &id1);
+
+			if (!err && !id2)
+				err = idr_get_new_above(&card->idr, handle2, 0, &id2);
+
+		} while (err == -EAGAIN);
+
+		if (err)
+			goto out;
+
+		spin_lock_irqsave(&card->res_lock, flags);
+		while (CMD_BUSY(card)) ;
+		writel(addr2, card->membase + DR3);
+		writel(id2, card->membase + DR2);
+		writel(addr1, card->membase + DR1);
+		writel(id1, card->membase + DR0);
+		writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb),
+		       card->membase + CMD);
+		spin_unlock_irqrestore(&card->res_lock, flags);
+
+		XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n",
+			card->index,
+			(NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"),
+			addr1, addr2);
+	}
+
+	if (!card->efbie && card->sbfqc >= card->sbnr.min &&
+	    card->lbfqc >= card->lbnr.min) {
+		card->efbie = 1;
+		writel((readl(card->membase + CFG) | NS_CFG_EFBIE),
+		       card->membase + CFG);
+	}
+
+out:
+	return;
 }
 
-
-
 static irqreturn_t ns_irq_handler(int irq, void *dev_id)
 {
-   u32 stat_r;
-   ns_dev *card;
-   struct atm_dev *dev;
-   unsigned long flags;
+	u32 stat_r;
+	ns_dev *card;
+	struct atm_dev *dev;
+	unsigned long flags;
 
-   card = (ns_dev *) dev_id;
-   dev = card->atmdev;
-   card->intcnt++;
+	card = (ns_dev *) dev_id;
+	dev = card->atmdev;
+	card->intcnt++;
 
-   PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
+	PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
 
-   spin_lock_irqsave(&card->int_lock, flags);
-   
-   stat_r = readl(card->membase + STAT);
+	spin_lock_irqsave(&card->int_lock, flags);
 
-   /* Transmit Status Indicator has been written to T. S. Queue */
-   if (stat_r & NS_STAT_TSIF)
-   {
-      TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
-      process_tsq(card);
-      writel(NS_STAT_TSIF, card->membase + STAT);
-   }
-   
-   /* Incomplete CS-PDU has been transmitted */
-   if (stat_r & NS_STAT_TXICP)
-   {
-      writel(NS_STAT_TXICP, card->membase + STAT);
-      TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
-               card->index);
-   }
-   
-   /* Transmit Status Queue 7/8 full */
-   if (stat_r & NS_STAT_TSQF)
-   {
-      writel(NS_STAT_TSQF, card->membase + STAT);
-      PRINTK("nicstar%d: TSQ full.\n", card->index);
-      process_tsq(card);
-   }
-   
-   /* Timer overflow */
-   if (stat_r & NS_STAT_TMROF)
-   {
-      writel(NS_STAT_TMROF, card->membase + STAT);
-      PRINTK("nicstar%d: Timer overflow.\n", card->index);
-   }
-   
-   /* PHY device interrupt signal active */
-   if (stat_r & NS_STAT_PHYI)
-   {
-      writel(NS_STAT_PHYI, card->membase + STAT);
-      PRINTK("nicstar%d: PHY interrupt.\n", card->index);
-      if (dev->phy && dev->phy->interrupt) {
-         dev->phy->interrupt(dev);
-      }
-   }
+	stat_r = readl(card->membase + STAT);
 
-   /* Small Buffer Queue is full */
-   if (stat_r & NS_STAT_SFBQF)
-   {
-      writel(NS_STAT_SFBQF, card->membase + STAT);
-      printk("nicstar%d: Small free buffer queue is full.\n", card->index);
-   }
-   
-   /* Large Buffer Queue is full */
-   if (stat_r & NS_STAT_LFBQF)
-   {
-      writel(NS_STAT_LFBQF, card->membase + STAT);
-      printk("nicstar%d: Large free buffer queue is full.\n", card->index);
-   }
+	/* Transmit Status Indicator has been written to T. S. Queue */
+	if (stat_r & NS_STAT_TSIF) {
+		TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
+		process_tsq(card);
+		writel(NS_STAT_TSIF, card->membase + STAT);
+	}
 
-   /* Receive Status Queue is full */
-   if (stat_r & NS_STAT_RSQF)
-   {
-      writel(NS_STAT_RSQF, card->membase + STAT);
-      printk("nicstar%d: RSQ full.\n", card->index);
-      process_rsq(card);
-   }
+	/* Incomplete CS-PDU has been transmitted */
+	if (stat_r & NS_STAT_TXICP) {
+		writel(NS_STAT_TXICP, card->membase + STAT);
+		TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
+			 card->index);
+	}
 
-   /* Complete CS-PDU received */
-   if (stat_r & NS_STAT_EOPDU)
-   {
-      RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
-      process_rsq(card);
-      writel(NS_STAT_EOPDU, card->membase + STAT);
-   }
+	/* Transmit Status Queue 7/8 full */
+	if (stat_r & NS_STAT_TSQF) {
+		writel(NS_STAT_TSQF, card->membase + STAT);
+		PRINTK("nicstar%d: TSQ full.\n", card->index);
+		process_tsq(card);
+	}
 
-   /* Raw cell received */
-   if (stat_r & NS_STAT_RAWCF)
-   {
-      writel(NS_STAT_RAWCF, card->membase + STAT);
+	/* Timer overflow */
+	if (stat_r & NS_STAT_TMROF) {
+		writel(NS_STAT_TMROF, card->membase + STAT);
+		PRINTK("nicstar%d: Timer overflow.\n", card->index);
+	}
+
+	/* PHY device interrupt signal active */
+	if (stat_r & NS_STAT_PHYI) {
+		writel(NS_STAT_PHYI, card->membase + STAT);
+		PRINTK("nicstar%d: PHY interrupt.\n", card->index);
+		if (dev->phy && dev->phy->interrupt) {
+			dev->phy->interrupt(dev);
+		}
+	}
+
+	/* Small Buffer Queue is full */
+	if (stat_r & NS_STAT_SFBQF) {
+		writel(NS_STAT_SFBQF, card->membase + STAT);
+		printk("nicstar%d: Small free buffer queue is full.\n",
+		       card->index);
+	}
+
+	/* Large Buffer Queue is full */
+	if (stat_r & NS_STAT_LFBQF) {
+		writel(NS_STAT_LFBQF, card->membase + STAT);
+		printk("nicstar%d: Large free buffer queue is full.\n",
+		       card->index);
+	}
+
+	/* Receive Status Queue is full */
+	if (stat_r & NS_STAT_RSQF) {
+		writel(NS_STAT_RSQF, card->membase + STAT);
+		printk("nicstar%d: RSQ full.\n", card->index);
+		process_rsq(card);
+	}
+
+	/* Complete CS-PDU received */
+	if (stat_r & NS_STAT_EOPDU) {
+		RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
+		process_rsq(card);
+		writel(NS_STAT_EOPDU, card->membase + STAT);
+	}
+
+	/* Raw cell received */
+	if (stat_r & NS_STAT_RAWCF) {
+		writel(NS_STAT_RAWCF, card->membase + STAT);
 #ifndef RCQ_SUPPORT
-      printk("nicstar%d: Raw cell received and no support yet...\n",
-             card->index);
+		printk("nicstar%d: Raw cell received and no support yet...\n",
+		       card->index);
 #endif /* RCQ_SUPPORT */
-      /* NOTE: the following procedure may keep a raw cell pending until the
-               next interrupt. As this preliminary support is only meant to
-               avoid buffer leakage, this is not an issue. */
-      while (readl(card->membase + RAWCT) != card->rawch)
-      {
-         ns_rcqe *rawcell;
+		/* NOTE: the following procedure may keep a raw cell pending until the
+		   next interrupt. As this preliminary support is only meant to
+		   avoid buffer leakage, this is not an issue. */
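+		/* card->rawch holds the bus address of the cell being examined
+		   and card->rawcell its CPU pointer; they advance in step. */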
+		while (readl(card->membase + RAWCT) != card->rawch) {
 
-         rawcell = (ns_rcqe *) bus_to_virt(card->rawch);
-         if (ns_rcqe_islast(rawcell))
-         {
-            struct sk_buff *oldbuf;
+			if (ns_rcqe_islast(card->rawcell)) {
+				struct sk_buff *oldbuf;
 
-            oldbuf = card->rcbuf;
-            card->rcbuf = (struct sk_buff *) ns_rcqe_nextbufhandle(rawcell);
-            card->rawch = (u32) virt_to_bus(card->rcbuf->data);
-            recycle_rx_buf(card, oldbuf);
-         }
-         else
-            card->rawch += NS_RCQE_SIZE;
-      }
-   }
+				oldbuf = card->rcbuf;
+				card->rcbuf = idr_find(&card->idr,
+						       ns_rcqe_nextbufhandle(card->rawcell));
+				card->rawch = NS_PRV_DMA(card->rcbuf);
+				card->rawcell = (struct ns_rcqe *)
+						card->rcbuf->data;
+				recycle_rx_buf(card, oldbuf);
+			} else {
+				card->rawch += NS_RCQE_SIZE;
+				card->rawcell++;
+			}
+		}
+	}
 
-   /* Small buffer queue is empty */
-   if (stat_r & NS_STAT_SFBQE)
-   {
-      int i;
-      struct sk_buff *sb;
+	/* Small buffer queue is empty */
+	if (stat_r & NS_STAT_SFBQE) {
+		int i;
+		struct sk_buff *sb;
 
-      writel(NS_STAT_SFBQE, card->membase + STAT);
-      printk("nicstar%d: Small free buffer queue empty.\n",
-             card->index);
-      for (i = 0; i < card->sbnr.min; i++)
-      {
-         sb = dev_alloc_skb(NS_SMSKBSIZE);
-         if (sb == NULL)
-         {
-            writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
-            card->efbie = 0;
-            break;
-         }
-         NS_SKB_CB(sb)->buf_type = BUF_SM;
-         skb_queue_tail(&card->sbpool.queue, sb);
-         skb_reserve(sb, NS_AAL0_HEADER);
-         push_rxbufs(card, sb);
-      }
-      card->sbfqc = i;
-      process_rsq(card);
-   }
+		writel(NS_STAT_SFBQE, card->membase + STAT);
+		printk("nicstar%d: Small free buffer queue empty.\n",
+		       card->index);
+		for (i = 0; i < card->sbnr.min; i++) {
+			sb = dev_alloc_skb(NS_SMSKBSIZE);
+			if (sb == NULL) {
+				writel(readl(card->membase + CFG) &
+				       ~NS_CFG_EFBIE, card->membase + CFG);
+				card->efbie = 0;
+				break;
+			}
+			NS_PRV_BUFTYPE(sb) = BUF_SM;
+			skb_queue_tail(&card->sbpool.queue, sb);
+			skb_reserve(sb, NS_AAL0_HEADER);
+			push_rxbufs(card, sb);
+		}
+		card->sbfqc = i;
+		process_rsq(card);
+	}
 
-   /* Large buffer queue empty */
-   if (stat_r & NS_STAT_LFBQE)
-   {
-      int i;
-      struct sk_buff *lb;
+	/* Large buffer queue empty */
+	if (stat_r & NS_STAT_LFBQE) {
+		int i;
+		struct sk_buff *lb;
 
-      writel(NS_STAT_LFBQE, card->membase + STAT);
-      printk("nicstar%d: Large free buffer queue empty.\n",
-             card->index);
-      for (i = 0; i < card->lbnr.min; i++)
-      {
-         lb = dev_alloc_skb(NS_LGSKBSIZE);
-         if (lb == NULL)
-         {
-            writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
-            card->efbie = 0;
-            break;
-         }
-         NS_SKB_CB(lb)->buf_type = BUF_LG;
-         skb_queue_tail(&card->lbpool.queue, lb);
-         skb_reserve(lb, NS_SMBUFSIZE);
-         push_rxbufs(card, lb);
-      }
-      card->lbfqc = i;
-      process_rsq(card);
-   }
+		writel(NS_STAT_LFBQE, card->membase + STAT);
+		printk("nicstar%d: Large free buffer queue empty.\n",
+		       card->index);
+		for (i = 0; i < card->lbnr.min; i++) {
+			lb = dev_alloc_skb(NS_LGSKBSIZE);
+			if (lb == NULL) {
+				writel(readl(card->membase + CFG) &
+				       ~NS_CFG_EFBIE, card->membase + CFG);
+				card->efbie = 0;
+				break;
+			}
+			NS_PRV_BUFTYPE(lb) = BUF_LG;
+			skb_queue_tail(&card->lbpool.queue, lb);
+			skb_reserve(lb, NS_SMBUFSIZE);
+			push_rxbufs(card, lb);
+		}
+		card->lbfqc = i;
+		process_rsq(card);
+	}
 
-   /* Receive Status Queue is 7/8 full */
-   if (stat_r & NS_STAT_RSQAF)
-   {
-      writel(NS_STAT_RSQAF, card->membase + STAT);
-      RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
-      process_rsq(card);
-   }
-   
-   spin_unlock_irqrestore(&card->int_lock, flags);
-   PRINTK("nicstar%d: end of interrupt service\n", card->index);
-   return IRQ_HANDLED;
+	/* Receive Status Queue is 7/8 full */
+	if (stat_r & NS_STAT_RSQAF) {
+		writel(NS_STAT_RSQAF, card->membase + STAT);
+		RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
+		process_rsq(card);
+	}
+
+	spin_unlock_irqrestore(&card->int_lock, flags);
+	PRINTK("nicstar%d: end of interrupt service\n", card->index);
+	return IRQ_HANDLED;
 }
 
-
-
 static int ns_open(struct atm_vcc *vcc)
 {
-   ns_dev *card;
-   vc_map *vc;
-   unsigned long tmpl, modl;
-   int tcr, tcra;	/* target cell rate, and absolute value */
-   int n = 0;		/* Number of entries in the TST. Initialized to remove
-                           the compiler warning. */
-   u32 u32d[4];
-   int frscdi = 0;	/* Index of the SCD. Initialized to remove the compiler
-                           warning. How I wish compilers were clever enough to
-			   tell which variables can truly be used
-			   uninitialized... */
-   int inuse;		/* tx or rx vc already in use by another vcc */
-   short vpi = vcc->vpi;
-   int vci = vcc->vci;
+	ns_dev *card;
+	vc_map *vc;
+	unsigned long tmpl, modl;
+	int tcr, tcra;		/* target cell rate, and absolute value */
+	int n = 0;		/* Number of entries in the TST. Initialized to remove
+				   the compiler warning. */
+	u32 u32d[4];
+	int frscdi = 0;		/* Index of the SCD. Initialized to remove the compiler
+				   warning. How I wish compilers were clever enough to
+				   tell which variables can truly be used
+				   uninitialized... */
+	int inuse;		/* tx or rx vc already in use by another vcc */
+	short vpi = vcc->vpi;
+	int vci = vcc->vci;
 
-   card = (ns_dev *) vcc->dev->dev_data;
-   PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int) vpi, vci);
-   if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
-   {
-      PRINTK("nicstar%d: unsupported AAL.\n", card->index);
-      return -EINVAL;
-   }
+	card = (ns_dev *) vcc->dev->dev_data;
+	PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi,
+	       vci);
+	if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
+		PRINTK("nicstar%d: unsupported AAL.\n", card->index);
+		return -EINVAL;
+	}
 
-   vc = &(card->vcmap[vpi << card->vcibits | vci]);
-   vcc->dev_data = vc;
+	vc = &(card->vcmap[vpi << card->vcibits | vci]);
+	vcc->dev_data = vc;
 
-   inuse = 0;
-   if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
-      inuse = 1;
-   if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
-      inuse += 2;
-   if (inuse)
-   {
-      printk("nicstar%d: %s vci already in use.\n", card->index,
-             inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
-      return -EINVAL;
-   }
+	inuse = 0;
+	if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
+		inuse = 1;
+	if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
+		inuse += 2;
+	if (inuse) {
+		printk("nicstar%d: %s vci already in use.\n", card->index,
+		       inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
+		return -EINVAL;
+	}
 
-   set_bit(ATM_VF_ADDR,&vcc->flags);
+	set_bit(ATM_VF_ADDR, &vcc->flags);
 
-   /* NOTE: You are not allowed to modify an open connection's QOS. To change
-      that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
-      needed to do that. */
-   if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
-   {
-      scq_info *scq;
-      
-      set_bit(ATM_VF_PARTIAL,&vcc->flags);
-      if (vcc->qos.txtp.traffic_class == ATM_CBR)
-      {
-         /* Check requested cell rate and availability of SCD */
-         if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 &&
-             vcc->qos.txtp.min_pcr == 0)
-         {
-            PRINTK("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
-	           card->index);
-	    clear_bit(ATM_VF_PARTIAL,&vcc->flags);
-	    clear_bit(ATM_VF_ADDR,&vcc->flags);
-            return -EINVAL;
-         }
+	/* NOTE: You are not allowed to modify an open connection's QOS. To change
+	   that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
+	   needed to do that. */
+	if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
+		scq_info *scq;
 
-         tcr = atm_pcr_goal(&(vcc->qos.txtp));
-         tcra = tcr >= 0 ? tcr : -tcr;
-      
-         PRINTK("nicstar%d: target cell rate = %d.\n", card->index,
-                vcc->qos.txtp.max_pcr);
+		set_bit(ATM_VF_PARTIAL, &vcc->flags);
+		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
+			/* Check requested cell rate and availability of SCD */
+			if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0
+			    && vcc->qos.txtp.min_pcr == 0) {
+				PRINTK
+				    ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
+				     card->index);
+				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+				clear_bit(ATM_VF_ADDR, &vcc->flags);
+				return -EINVAL;
+			}
 
-         tmpl = (unsigned long)tcra * (unsigned long)NS_TST_NUM_ENTRIES;
-         modl = tmpl % card->max_pcr;
+			tcr = atm_pcr_goal(&(vcc->qos.txtp));
+			tcra = tcr >= 0 ? tcr : -tcr;
 
-         n = (int)(tmpl / card->max_pcr);
-         if (tcr > 0)
-         {
-            if (modl > 0) n++;
-         }
-         else if (tcr == 0)
-         {
-            if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0)
-	    {
-               PRINTK("nicstar%d: no CBR bandwidth free.\n", card->index);
-	       clear_bit(ATM_VF_PARTIAL,&vcc->flags);
-	       clear_bit(ATM_VF_ADDR,&vcc->flags);
-               return -EINVAL;
-            }
-         }
+			PRINTK("nicstar%d: target cell rate = %d.\n",
+			       card->index, vcc->qos.txtp.max_pcr);
 
-         if (n == 0)
-         {
-            printk("nicstar%d: selected bandwidth < granularity.\n", card->index);
-	    clear_bit(ATM_VF_PARTIAL,&vcc->flags);
-	    clear_bit(ATM_VF_ADDR,&vcc->flags);
-            return -EINVAL;
-         }
+			tmpl = (unsigned long)tcra *
+			       (unsigned long)NS_TST_NUM_ENTRIES;
+			modl = tmpl % card->max_pcr;
 
-         if (n > (card->tst_free_entries - NS_TST_RESERVED))
-         {
-            PRINTK("nicstar%d: not enough free CBR bandwidth.\n", card->index);
-	    clear_bit(ATM_VF_PARTIAL,&vcc->flags);
-	    clear_bit(ATM_VF_ADDR,&vcc->flags);
-            return -EINVAL;
-         }
-         else
-            card->tst_free_entries -= n;
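+			/* Number of TST entries needed for tcra out of
+			   NS_TST_NUM_ENTRIES, rounded up below when tcr > 0. */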
+			n = (int)(tmpl / card->max_pcr);
+			if (tcr > 0) {
+				if (modl > 0)
+					n++;
+			} else if (tcr == 0) {
+				if ((n =
+				     (card->tst_free_entries -
+				      NS_TST_RESERVED)) <= 0) {
+					PRINTK
+					    ("nicstar%d: no CBR bandwidth free.\n",
+					     card->index);
+					clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+					clear_bit(ATM_VF_ADDR, &vcc->flags);
+					return -EINVAL;
+				}
+			}
 
-         XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n);
-         for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++)
-         {
-            if (card->scd2vc[frscdi] == NULL)
-            {
-               card->scd2vc[frscdi] = vc;
-               break;
-	    }
-         }
-         if (frscdi == NS_FRSCD_NUM)
-         {
-            PRINTK("nicstar%d: no SCD available for CBR channel.\n", card->index);
-            card->tst_free_entries += n;
-	    clear_bit(ATM_VF_PARTIAL,&vcc->flags);
-	    clear_bit(ATM_VF_ADDR,&vcc->flags);
-	    return -EBUSY;
-         }
+			if (n == 0) {
+				printk
+				    ("nicstar%d: selected bandwidth < granularity.\n",
+				     card->index);
+				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+				clear_bit(ATM_VF_ADDR, &vcc->flags);
+				return -EINVAL;
+			}
 
-         vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
+			if (n > (card->tst_free_entries - NS_TST_RESERVED)) {
+				PRINTK
+				    ("nicstar%d: not enough free CBR bandwidth.\n",
+				     card->index);
+				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+				clear_bit(ATM_VF_ADDR, &vcc->flags);
+				return -EINVAL;
+			} else
+				card->tst_free_entries -= n;
 
-         scq = get_scq(CBR_SCQSIZE, vc->cbr_scd);
-         if (scq == NULL)
-         {
-            PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index);
-            card->scd2vc[frscdi] = NULL;
-            card->tst_free_entries += n;
-	    clear_bit(ATM_VF_PARTIAL,&vcc->flags);
-	    clear_bit(ATM_VF_ADDR,&vcc->flags);
-            return -ENOMEM;
-         }
-	 vc->scq = scq;
-         u32d[0] = (u32) virt_to_bus(scq->base);
-         u32d[1] = (u32) 0x00000000;
-         u32d[2] = (u32) 0xffffffff;
-         u32d[3] = (u32) 0x00000000;
-         ns_write_sram(card, vc->cbr_scd, u32d, 4);
-         
-	 fill_tst(card, n, vc);
-      }
-      else if (vcc->qos.txtp.traffic_class == ATM_UBR)
-      {
-         vc->cbr_scd = 0x00000000;
-	 vc->scq = card->scq0;
-      }
-      
-      if (vcc->qos.txtp.traffic_class != ATM_NONE)
-      {
-         vc->tx = 1;
-	 vc->tx_vcc = vcc;
-	 vc->tbd_count = 0;
-      }
-      if (vcc->qos.rxtp.traffic_class != ATM_NONE)
-      {
-         u32 status;
-      
-         vc->rx = 1;
-         vc->rx_vcc = vcc;
-         vc->rx_iov = NULL;
+			XPRINTK("nicstar%d: writing %d tst entries.\n",
+				card->index, n);
+			for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) {
+				if (card->scd2vc[frscdi] == NULL) {
+					card->scd2vc[frscdi] = vc;
+					break;
+				}
+			}
+			if (frscdi == NS_FRSCD_NUM) {
+				PRINTK
+				    ("nicstar%d: no SCD available for CBR channel.\n",
+				     card->index);
+				card->tst_free_entries += n;
+				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+				clear_bit(ATM_VF_ADDR, &vcc->flags);
+				return -EBUSY;
+			}
 
-	 /* Open the connection in hardware */
-	 if (vcc->qos.aal == ATM_AAL5)
-	    status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
-	 else /* vcc->qos.aal == ATM_AAL0 */
-	    status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
+			vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
+
+			scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd);
+			if (scq == NULL) {
+				PRINTK("nicstar%d: can't get fixed rate SCQ.\n",
+				       card->index);
+				card->scd2vc[frscdi] = NULL;
+				card->tst_free_entries += n;
+				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+				clear_bit(ATM_VF_ADDR, &vcc->flags);
+				return -ENOMEM;
+			}
+			vc->scq = scq;
+			u32d[0] = scq_virt_to_bus(scq, scq->base);
+			u32d[1] = (u32) 0x00000000;
+			u32d[2] = (u32) 0xffffffff;
+			u32d[3] = (u32) 0x00000000;
+			ns_write_sram(card, vc->cbr_scd, u32d, 4);
+
+			fill_tst(card, n, vc);
+		} else if (vcc->qos.txtp.traffic_class == ATM_UBR) {
+			vc->cbr_scd = 0x00000000;
+			vc->scq = card->scq0;
+		}
+
+		if (vcc->qos.txtp.traffic_class != ATM_NONE) {
+			vc->tx = 1;
+			vc->tx_vcc = vcc;
+			vc->tbd_count = 0;
+		}
+		if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
+			u32 status;
+
+			vc->rx = 1;
+			vc->rx_vcc = vcc;
+			vc->rx_iov = NULL;
+
+			/* Open the connection in hardware */
+			if (vcc->qos.aal == ATM_AAL5)
+				status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
+			else	/* vcc->qos.aal == ATM_AAL0 */
+				status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
 #ifdef RCQ_SUPPORT
-         status |= NS_RCTE_RAWCELLINTEN;
+			status |= NS_RCTE_RAWCELLINTEN;
 #endif /* RCQ_SUPPORT */
-         ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) *
-	               NS_RCT_ENTRY_SIZE, &status, 1);
-      }
-      
-   }
-   
-   set_bit(ATM_VF_READY,&vcc->flags);
-   return 0;
+			ns_write_sram(card,
+				      NS_RCT +
+				      (vpi << card->vcibits | vci) *
+				      NS_RCT_ENTRY_SIZE, &status, 1);
+		}
+
+	}
+
+	set_bit(ATM_VF_READY, &vcc->flags);
+	return 0;
 }
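
The CBR branch above reserves n of the NS_TST_NUM_ENTRIES transmit schedule
table slots in proportion to the requested cell rate (rounding up for a
positive PCR goal), and falls back to the remaining non-reserved slots when
atm_pcr_goal() returns 0.  A stand-alone sketch of that slot arithmetic
follows; the table size, link rate and helper name are illustrative, not the
driver's values:

#include <stdio.h>

/* Illustrative constants; the real ones come from nicstar.h and card setup. */
#define TST_ENTRIES	4096
#define LINK_PCR	350000		/* cells/s the link can schedule */

/* slots = ceil(rate * TST_ENTRIES / LINK_PCR), as in the hunk above */
static int cbr_tst_slots(int rate_cells_per_s)
{
	unsigned long tmpl = (unsigned long)rate_cells_per_s * TST_ENTRIES;
	int n = (int)(tmpl / LINK_PCR);

	if (tmpl % LINK_PCR)
		n++;
	return n;
}

int main(void)
{
	printf("slots for  10000 c/s: %d\n", cbr_tst_slots(10000));
	printf("slots for 175000 c/s: %d\n", cbr_tst_slots(175000));
	return 0;
}

The open is then refused when n works out to 0 (the requested rate is below
one slot's granularity) or exceeds card->tst_free_entries - NS_TST_RESERVED.
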
 
-
-
 static void ns_close(struct atm_vcc *vcc)
 {
-   vc_map *vc;
-   ns_dev *card;
-   u32 data;
-   int i;
-   
-   vc = vcc->dev_data;
-   card = vcc->dev->dev_data;
-   PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
-          (int) vcc->vpi, vcc->vci);
+	vc_map *vc;
+	ns_dev *card;
+	u32 data;
+	int i;
 
-   clear_bit(ATM_VF_READY,&vcc->flags);
-   
-   if (vcc->qos.rxtp.traffic_class != ATM_NONE)
-   {
-      u32 addr;
-      unsigned long flags;
-      
-      addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
-      spin_lock_irqsave(&card->res_lock, flags);
-      while(CMD_BUSY(card));
-      writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD);
-      spin_unlock_irqrestore(&card->res_lock, flags);
-      
-      vc->rx = 0;
-      if (vc->rx_iov != NULL)
-      {
-	 struct sk_buff *iovb;
-	 u32 stat;
-   
-         stat = readl(card->membase + STAT);
-         card->sbfqc = ns_stat_sfbqc_get(stat);   
-         card->lbfqc = ns_stat_lfbqc_get(stat);
+	vc = vcc->dev_data;
+	card = vcc->dev->dev_data;
+	PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
+	       (int)vcc->vpi, vcc->vci);
 
-         PRINTK("nicstar%d: closing a VC with pending rx buffers.\n",
-	        card->index);
-         iovb = vc->rx_iov;
-         recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
-	                       NS_SKB(iovb)->iovcnt);
-         NS_SKB(iovb)->iovcnt = 0;
-         NS_SKB(iovb)->vcc = NULL;
-         spin_lock_irqsave(&card->int_lock, flags);
-         recycle_iov_buf(card, iovb);
-         spin_unlock_irqrestore(&card->int_lock, flags);
-         vc->rx_iov = NULL;
-      }
-   }
+	clear_bit(ATM_VF_READY, &vcc->flags);
 
-   if (vcc->qos.txtp.traffic_class != ATM_NONE)
-   {
-      vc->tx = 0;
-   }
+	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
+		u32 addr;
+		unsigned long flags;
 
-   if (vcc->qos.txtp.traffic_class == ATM_CBR)
-   {
-      unsigned long flags;
-      ns_scqe *scqep;
-      scq_info *scq;
+		addr =
+		    NS_RCT +
+		    (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
+		spin_lock_irqsave(&card->res_lock, flags);
+		while (CMD_BUSY(card)) ;
+		writel(NS_CMD_CLOSE_CONNECTION | addr << 2,
+		       card->membase + CMD);
+		spin_unlock_irqrestore(&card->res_lock, flags);
 
-      scq = vc->scq;
+		vc->rx = 0;
+		if (vc->rx_iov != NULL) {
+			struct sk_buff *iovb;
+			u32 stat;
 
-      for (;;)
-      {
-         spin_lock_irqsave(&scq->lock, flags);
-         scqep = scq->next;
-         if (scqep == scq->base)
-            scqep = scq->last;
-         else
-            scqep--;
-         if (scqep == scq->tail)
-         {
-            spin_unlock_irqrestore(&scq->lock, flags);
-            break;
-         }
-         /* If the last entry is not a TSR, place one in the SCQ in order to
-            be able to completely drain it and then close. */
-         if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next)
-         {
-            ns_scqe tsr;
-            u32 scdi, scqi;
-            u32 data;
-            int index;
+			stat = readl(card->membase + STAT);
+			card->sbfqc = ns_stat_sfbqc_get(stat);
+			card->lbfqc = ns_stat_lfbqc_get(stat);
 
-            tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
-            scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
-            scqi = scq->next - scq->base;
-            tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
-            tsr.word_3 = 0x00000000;
-            tsr.word_4 = 0x00000000;
-            *scq->next = tsr;
-            index = (int) scqi;
-            scq->skb[index] = NULL;
-            if (scq->next == scq->last)
-               scq->next = scq->base;
-            else
-               scq->next++;
-            data = (u32) virt_to_bus(scq->next);
-            ns_write_sram(card, scq->scd, &data, 1);
-         }
-         spin_unlock_irqrestore(&scq->lock, flags);
-         schedule();
-      }
+			PRINTK
+			    ("nicstar%d: closing a VC with pending rx buffers.\n",
+			     card->index);
+			iovb = vc->rx_iov;
+			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+					      NS_PRV_IOVCNT(iovb));
+			NS_PRV_IOVCNT(iovb) = 0;
+			spin_lock_irqsave(&card->int_lock, flags);
+			recycle_iov_buf(card, iovb);
+			spin_unlock_irqrestore(&card->int_lock, flags);
+			vc->rx_iov = NULL;
+		}
+	}
 
-      /* Free all TST entries */
-      data = NS_TST_OPCODE_VARIABLE;
-      for (i = 0; i < NS_TST_NUM_ENTRIES; i++)
-      {
-         if (card->tste2vc[i] == vc)
-	 {
-            ns_write_sram(card, card->tst_addr + i, &data, 1);
-            card->tste2vc[i] = NULL;
-            card->tst_free_entries++;
-	 }
-      }
-      
-      card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
-      free_scq(vc->scq, vcc);
-   }
+	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
+		vc->tx = 0;
+	}
 
-   /* remove all references to vcc before deleting it */
-   if (vcc->qos.txtp.traffic_class != ATM_NONE)
-   {
-     unsigned long flags;
-     scq_info *scq = card->scq0;
+	if (vcc->qos.txtp.traffic_class == ATM_CBR) {
+		unsigned long flags;
+		ns_scqe *scqep;
+		scq_info *scq;
 
-     spin_lock_irqsave(&scq->lock, flags);
+		scq = vc->scq;
 
-     for(i = 0; i < scq->num_entries; i++) {
-       if(scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
-        ATM_SKB(scq->skb[i])->vcc = NULL;
-	atm_return(vcc, scq->skb[i]->truesize);
-        PRINTK("nicstar: deleted pending vcc mapping\n");
-       }
-     }
+		for (;;) {
+			spin_lock_irqsave(&scq->lock, flags);
+			scqep = scq->next;
+			if (scqep == scq->base)
+				scqep = scq->last;
+			else
+				scqep--;
+			if (scqep == scq->tail) {
+				spin_unlock_irqrestore(&scq->lock, flags);
+				break;
+			}
+			/* If the last entry is not a TSR, place one in the SCQ in order to
+			   be able to completely drain it and then close. */
+			if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) {
+				ns_scqe tsr;
+				u32 scdi, scqi;
+				u32 data;
+				int index;
 
-     spin_unlock_irqrestore(&scq->lock, flags);
-   }
+				tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
+				scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
+				scqi = scq->next - scq->base;
+				tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
+				tsr.word_3 = 0x00000000;
+				tsr.word_4 = 0x00000000;
+				*scq->next = tsr;
+				index = (int)scqi;
+				scq->skb[index] = NULL;
+				if (scq->next == scq->last)
+					scq->next = scq->base;
+				else
+					scq->next++;
+				data = scq_virt_to_bus(scq, scq->next);
+				ns_write_sram(card, scq->scd, &data, 1);
+			}
+			spin_unlock_irqrestore(&scq->lock, flags);
+			schedule();
+		}
 
-   vcc->dev_data = NULL;
-   clear_bit(ATM_VF_PARTIAL,&vcc->flags);
-   clear_bit(ATM_VF_ADDR,&vcc->flags);
+		/* Free all TST entries */
+		data = NS_TST_OPCODE_VARIABLE;
+		for (i = 0; i < NS_TST_NUM_ENTRIES; i++) {
+			if (card->tste2vc[i] == vc) {
+				ns_write_sram(card, card->tst_addr + i, &data,
+					      1);
+				card->tste2vc[i] = NULL;
+				card->tst_free_entries++;
+			}
+		}
+
+		card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
+		free_scq(card, vc->scq, vcc);
+	}
+
+	/* remove all references to vcc before deleting it */
+	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
+		unsigned long flags;
+		scq_info *scq = card->scq0;
+
+		spin_lock_irqsave(&scq->lock, flags);
+
+		for (i = 0; i < scq->num_entries; i++) {
+			if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
+				ATM_SKB(scq->skb[i])->vcc = NULL;
+				atm_return(vcc, scq->skb[i]->truesize);
+				PRINTK
+				    ("nicstar: deleted pending vcc mapping\n");
+			}
+		}
+
+		spin_unlock_irqrestore(&scq->lock, flags);
+	}
+
+	vcc->dev_data = NULL;
+	clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+	clear_bit(ATM_VF_ADDR, &vcc->flags);
 
 #ifdef RX_DEBUG
-   {
-      u32 stat, cfg;
-      stat = readl(card->membase + STAT);
-      cfg = readl(card->membase + CFG);
-      printk("STAT = 0x%08X  CFG = 0x%08X  \n", stat, cfg);
-      printk("TSQ: base = 0x%08X  next = 0x%08X  last = 0x%08X  TSQT = 0x%08X \n",
-             (u32) card->tsq.base, (u32) card->tsq.next,(u32) card->tsq.last,
-	     readl(card->membase + TSQT));
-      printk("RSQ: base = 0x%08X  next = 0x%08X  last = 0x%08X  RSQT = 0x%08X \n",
-             (u32) card->rsq.base, (u32) card->rsq.next,(u32) card->rsq.last,
-	     readl(card->membase + RSQT));
-      printk("Empty free buffer queue interrupt %s \n",
-             card->efbie ? "enabled" : "disabled");
-      printk("SBCNT = %d  count = %d   LBCNT = %d count = %d \n",
-             ns_stat_sfbqc_get(stat), card->sbpool.count,
-	     ns_stat_lfbqc_get(stat), card->lbpool.count);
-      printk("hbpool.count = %d  iovpool.count = %d \n",
-             card->hbpool.count, card->iovpool.count);
-   }
+	{
+		u32 stat, cfg;
+		stat = readl(card->membase + STAT);
+		cfg = readl(card->membase + CFG);
+		printk("STAT = 0x%08X  CFG = 0x%08X  \n", stat, cfg);
+		printk
+		    ("TSQ: base = 0x%p  next = 0x%p  last = 0x%p  TSQT = 0x%08X \n",
+		     card->tsq.base, card->tsq.next,
+		     card->tsq.last, readl(card->membase + TSQT));
+		printk
+		    ("RSQ: base = 0x%p  next = 0x%p  last = 0x%p  RSQT = 0x%08X \n",
+		     card->rsq.base, card->rsq.next,
+		     card->rsq.last, readl(card->membase + RSQT));
+		printk("Empty free buffer queue interrupt %s \n",
+		       card->efbie ? "enabled" : "disabled");
+		printk("SBCNT = %d  count = %d   LBCNT = %d count = %d \n",
+		       ns_stat_sfbqc_get(stat), card->sbpool.count,
+		       ns_stat_lfbqc_get(stat), card->lbpool.count);
+		printk("hbpool.count = %d  iovpool.count = %d \n",
+		       card->hbpool.count, card->iovpool.count);
+	}
 #endif /* RX_DEBUG */
 }
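
The drain loop in ns_close() keeps looking at the entry written just before
scq->next (wrapping from base back to last) and yields with schedule() until
that entry has become scq->tail, i.e. the card has consumed everything queued
so far.  A stand-alone illustration of that wrap-around "previous slot" step
on an index-based ring (ring size and positions are made-up demo values):

#include <stdio.h>

#define RING_SIZE 8

/* Slot written just before 'next', wrapping past the start of the ring. */
static int ring_prev(int next)
{
	return next == 0 ? RING_SIZE - 1 : next - 1;
}

int main(void)
{
	int tail = 5;
	int next;

	for (next = 0; next < RING_SIZE; next++)
		printf("next=%d prev=%d drained=%s\n", next, ring_prev(next),
		       ring_prev(next) == tail ? "yes" : "no");
	return 0;
}

In the driver the same test runs on ns_scqe pointers, and if the last queued
entry is not already a TSR one is appended first, per the comment in the hunk,
so the queue can be drained completely before the connection is closed.
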
 
-
-
-static void fill_tst(ns_dev *card, int n, vc_map *vc)
+static void fill_tst(ns_dev * card, int n, vc_map * vc)
 {
-   u32 new_tst;
-   unsigned long cl;
-   int e, r;
-   u32 data;
-      
-   /* It would be very complicated to keep the two TSTs synchronized while
-      assuring that writes are only made to the inactive TST. So, for now I
-      will use only one TST. If problems occur, I will change this again */
-   
-   new_tst = card->tst_addr;
+	u32 new_tst;
+	unsigned long cl;
+	int e, r;
+	u32 data;
 
-   /* Fill procedure */
+	/* It would be very complicated to keep the two TSTs synchronized while
+	   assuring that writes are only made to the inactive TST. So, for now I
+	   will use only one TST. If problems occur, I will change this again */
 
-   for (e = 0; e < NS_TST_NUM_ENTRIES; e++)
-   {
-      if (card->tste2vc[e] == NULL)
-         break;
-   }
-   if (e == NS_TST_NUM_ENTRIES) {
-      printk("nicstar%d: No free TST entries found. \n", card->index);
-      return;
-   }
+	new_tst = card->tst_addr;
 
-   r = n;
-   cl = NS_TST_NUM_ENTRIES;
-   data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
-      
-   while (r > 0)
-   {
-      if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL)
-      {
-         card->tste2vc[e] = vc;
-         ns_write_sram(card, new_tst + e, &data, 1);
-         cl -= NS_TST_NUM_ENTRIES;
-         r--;
-      }
+	/* Fill procedure */
 
-      if (++e == NS_TST_NUM_ENTRIES) {
-         e = 0;
-      }
-      cl += n;
-   }
-   
-   /* End of fill procedure */
-   
-   data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
-   ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
-   ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
-   card->tst_addr = new_tst;
+	for (e = 0; e < NS_TST_NUM_ENTRIES; e++) {
+		if (card->tste2vc[e] == NULL)
+			break;
+	}
+	if (e == NS_TST_NUM_ENTRIES) {
+		printk("nicstar%d: No free TST entries found. \n", card->index);
+		return;
+	}
+
+	r = n;
+	cl = NS_TST_NUM_ENTRIES;
+	data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
+
+	while (r > 0) {
+		if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) {
+			card->tste2vc[e] = vc;
+			ns_write_sram(card, new_tst + e, &data, 1);
+			cl -= NS_TST_NUM_ENTRIES;
+			r--;
+		}
+
+		if (++e == NS_TST_NUM_ENTRIES) {
+			e = 0;
+		}
+		cl += n;
+	}
+
+	/* End of fill procedure */
+
+	data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
+	ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
+	ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
+	card->tst_addr = new_tst;
 }
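
fill_tst() spreads the n reserved slots as evenly as it can across the table
with a Bresenham-style accumulator: every slot visited adds n to cl, and a
free slot is claimed whenever cl has accumulated a full NS_TST_NUM_ENTRIES,
which keeps the connection's cell emissions close to evenly spaced in time.
A stand-alone sketch of the same spreading rule (the 16-entry table and the
rate of 3 are arbitrary demo values):

#include <stdio.h>

#define TST_ENTRIES 16

/* Claim n slots spread evenly over the table, as fill_tst() does. */
static void spread(int n, char *slot)
{
	int e, cl = TST_ENTRIES, r = n;

	for (e = 0; r > 0; e = (e + 1) % TST_ENTRIES) {
		if (cl >= TST_ENTRIES && !slot[e]) {
			slot[e] = 1;	/* this slot sends one cell per pass */
			cl -= TST_ENTRIES;
			r--;
		}
		cl += n;
	}
}

int main(void)
{
	char slot[TST_ENTRIES] = { 0 };
	int i;

	spread(3, slot);
	for (i = 0; i < TST_ENTRIES; i++)
		putchar(slot[i] ? 'X' : '.');
	putchar('\n');		/* prints X.....X....X.... */
	return 0;
}
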
 
-
-
 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
 {
-   ns_dev *card;
-   vc_map *vc;
-   scq_info *scq;
-   unsigned long buflen;
-   ns_scqe scqe;
-   u32 flags;		/* TBD flags, not CPU flags */
-   
-   card = vcc->dev->dev_data;
-   TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
-   if ((vc = (vc_map *) vcc->dev_data) == NULL)
-   {
-      printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
-      atomic_inc(&vcc->stats->tx_err);
-      dev_kfree_skb_any(skb);
-      return -EINVAL;
-   }
-   
-   if (!vc->tx)
-   {
-      printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
-      atomic_inc(&vcc->stats->tx_err);
-      dev_kfree_skb_any(skb);
-      return -EINVAL;
-   }
-   
-   if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
-   {
-      printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
-      atomic_inc(&vcc->stats->tx_err);
-      dev_kfree_skb_any(skb);
-      return -EINVAL;
-   }
-   
-   if (skb_shinfo(skb)->nr_frags != 0)
-   {
-      printk("nicstar%d: No scatter-gather yet.\n", card->index);
-      atomic_inc(&vcc->stats->tx_err);
-      dev_kfree_skb_any(skb);
-      return -EINVAL;
-   }
-   
-   ATM_SKB(skb)->vcc = vcc;
+	ns_dev *card;
+	vc_map *vc;
+	scq_info *scq;
+	unsigned long buflen;
+	ns_scqe scqe;
+	u32 flags;		/* TBD flags, not CPU flags */
 
-   if (vcc->qos.aal == ATM_AAL5)
-   {
-      buflen = (skb->len + 47 + 8) / 48 * 48;	/* Multiple of 48 */
-      flags = NS_TBD_AAL5;
-      scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data));
-      scqe.word_3 = cpu_to_le32((u32) skb->len);
-      scqe.word_4 = ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
-                           ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 1 : 0);
-      flags |= NS_TBD_EOPDU;
-   }
-   else /* (vcc->qos.aal == ATM_AAL0) */
-   {
-      buflen = ATM_CELL_PAYLOAD;	/* i.e., 48 bytes */
-      flags = NS_TBD_AAL0;
-      scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER);
-      scqe.word_3 = cpu_to_le32(0x00000000);
-      if (*skb->data & 0x02)	/* Payload type 1 - end of pdu */
-         flags |= NS_TBD_EOPDU;
-      scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
-      /* Force the VPI/VCI to be the same as in VCC struct */
-      scqe.word_4 |= cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT |
-                                 ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) &
-                                 NS_TBD_VC_MASK);
-   }
+	card = vcc->dev->dev_data;
+	TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
+	if ((vc = (vc_map *) vcc->dev_data) == NULL) {
+		printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
+		       card->index);
+		atomic_inc(&vcc->stats->tx_err);
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
 
-   if (vcc->qos.txtp.traffic_class == ATM_CBR)
-   {
-      scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
-      scq = ((vc_map *) vcc->dev_data)->scq;
-   }
-   else
-   {
-      scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
-      scq = card->scq0;
-   }
+	if (!vc->tx) {
+		printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
+		       card->index);
+		atomic_inc(&vcc->stats->tx_err);
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
 
-   if (push_scqe(card, vc, scq, &scqe, skb) != 0)
-   {
-      atomic_inc(&vcc->stats->tx_err);
-      dev_kfree_skb_any(skb);
-      return -EIO;
-   }
-   atomic_inc(&vcc->stats->tx);
+	if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
+		printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
+		       card->index);
+		atomic_inc(&vcc->stats->tx_err);
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
 
-   return 0;
+	if (skb_shinfo(skb)->nr_frags != 0) {
+		printk("nicstar%d: No scatter-gather yet.\n", card->index);
+		atomic_inc(&vcc->stats->tx_err);
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
+
+	ATM_SKB(skb)->vcc = vcc;
+
+	NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
+					 skb->len, PCI_DMA_TODEVICE);
+
+	if (vcc->qos.aal == ATM_AAL5) {
+		buflen = (skb->len + 47 + 8) / 48 * 48;	/* Multiple of 48 */
+		flags = NS_TBD_AAL5;
+		scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb));
+		scqe.word_3 = cpu_to_le32(skb->len);
+		scqe.word_4 =
+		    ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
+				    ATM_SKB(skb)->
+				    atm_options & ATM_ATMOPT_CLP ? 1 : 0);
+		flags |= NS_TBD_EOPDU;
+	} else {		/* (vcc->qos.aal == ATM_AAL0) */
+
+		buflen = ATM_CELL_PAYLOAD;	/* i.e., 48 bytes */
+		flags = NS_TBD_AAL0;
+		scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER);
+		scqe.word_3 = cpu_to_le32(0x00000000);
+		if (*skb->data & 0x02)	/* Payload type 1 - end of pdu */
+			flags |= NS_TBD_EOPDU;
+		scqe.word_4 =
+		    cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
+		/* Force the VPI/VCI to be the same as in VCC struct */
+		scqe.word_4 |=
+		    cpu_to_le32((((u32) vcc->
+				  vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc->
+							      vci) <<
+				 NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK);
+	}
+
+	if (vcc->qos.txtp.traffic_class == ATM_CBR) {
+		scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
+		scq = ((vc_map *) vcc->dev_data)->scq;
+	} else {
+		scqe.word_1 =
+		    ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
+		scq = card->scq0;
+	}
+
+	if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
+		atomic_inc(&vcc->stats->tx_err);
+		dev_kfree_skb_any(skb);
+		return -EIO;
+	}
+	atomic_inc(&vcc->stats->tx);
+
+	return 0;
 }
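
For AAL5 the TBD above advertises a buffer length rounded up to whole cells:
the SDU plus the 8-byte AAL5 trailer, padded to a multiple of the 48-byte cell
payload, which is what (skb->len + 47 + 8) / 48 * 48 computes.  A quick
stand-alone check of that rounding (plain arithmetic, no driver structures):

#include <stdio.h>

#define CELL_PAYLOAD	48	/* ATM cell payload size */
#define AAL5_TRAILER	8

/* Same rounding as ns_send(): (len + 47 + 8) / 48 * 48 */
static unsigned long aal5_buflen(unsigned long sdu_len)
{
	return (sdu_len + CELL_PAYLOAD - 1 + AAL5_TRAILER) /
	    CELL_PAYLOAD * CELL_PAYLOAD;
}

int main(void)
{
	unsigned long len;

	for (len = 39; len <= 41; len++)
		printf("SDU %lu -> %lu payload bytes (%lu cells)\n",
		       len, aal5_buflen(len), aal5_buflen(len) / CELL_PAYLOAD);
	return 0;
}
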
 
-
-
-static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
-                     struct sk_buff *skb)
+static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
+		     struct sk_buff *skb)
 {
-   unsigned long flags;
-   ns_scqe tsr;
-   u32 scdi, scqi;
-   int scq_is_vbr;
-   u32 data;
-   int index;
-   
-   spin_lock_irqsave(&scq->lock, flags);
-   while (scq->tail == scq->next)
-   {
-      if (in_interrupt()) {
-         spin_unlock_irqrestore(&scq->lock, flags);
-         printk("nicstar%d: Error pushing TBD.\n", card->index);
-         return 1;
-      }
+	unsigned long flags;
+	ns_scqe tsr;
+	u32 scdi, scqi;
+	int scq_is_vbr;
+	u32 data;
+	int index;
 
-      scq->full = 1;
-      spin_unlock_irqrestore(&scq->lock, flags);
-      interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT);
-      spin_lock_irqsave(&scq->lock, flags);
+	spin_lock_irqsave(&scq->lock, flags);
+	while (scq->tail == scq->next) {
+		if (in_interrupt()) {
+			spin_unlock_irqrestore(&scq->lock, flags);
+			printk("nicstar%d: Error pushing TBD.\n", card->index);
+			return 1;
+		}
 
-      if (scq->full) {
-         spin_unlock_irqrestore(&scq->lock, flags);
-         printk("nicstar%d: Timeout pushing TBD.\n", card->index);
-         return 1;
-      }
-   }
-   *scq->next = *tbd;
-   index = (int) (scq->next - scq->base);
-   scq->skb[index] = skb;
-   XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n",
-           card->index, (u32) skb, index);
-   XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
-           card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
-           le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
-           (u32) scq->next);
-   if (scq->next == scq->last)
-      scq->next = scq->base;
-   else
-      scq->next++;
+		scq->full = 1;
+		spin_unlock_irqrestore(&scq->lock, flags);
+		interruptible_sleep_on_timeout(&scq->scqfull_waitq,
+					       SCQFULL_TIMEOUT);
+		spin_lock_irqsave(&scq->lock, flags);
 
-   vc->tbd_count++;
-   if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
-   {
-      scq->tbd_count++;
-      scq_is_vbr = 1;
-   }
-   else
-      scq_is_vbr = 0;
+		if (scq->full) {
+			spin_unlock_irqrestore(&scq->lock, flags);
+			printk("nicstar%d: Timeout pushing TBD.\n",
+			       card->index);
+			return 1;
+		}
+	}
+	*scq->next = *tbd;
+	index = (int)(scq->next - scq->base);
+	scq->skb[index] = skb;
+	XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n",
+		card->index, skb, index);
+	XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
+		card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
+		le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
+		scq->next);
+	if (scq->next == scq->last)
+		scq->next = scq->base;
+	else
+		scq->next++;
 
-   if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ)
-   {
-      int has_run = 0;
+	vc->tbd_count++;
+	if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) {
+		scq->tbd_count++;
+		scq_is_vbr = 1;
+	} else
+		scq_is_vbr = 0;
 
-      while (scq->tail == scq->next)
-      {
-         if (in_interrupt()) {
-            data = (u32) virt_to_bus(scq->next);
-            ns_write_sram(card, scq->scd, &data, 1);
-            spin_unlock_irqrestore(&scq->lock, flags);
-            printk("nicstar%d: Error pushing TSR.\n", card->index);
-            return 0;
-         }
+	if (vc->tbd_count >= MAX_TBD_PER_VC
+	    || scq->tbd_count >= MAX_TBD_PER_SCQ) {
+		int has_run = 0;
 
-         scq->full = 1;
-         if (has_run++) break;
-         spin_unlock_irqrestore(&scq->lock, flags);
-         interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT);
-         spin_lock_irqsave(&scq->lock, flags);
-      }
+		while (scq->tail == scq->next) {
+			if (in_interrupt()) {
+				data = scq_virt_to_bus(scq, scq->next);
+				ns_write_sram(card, scq->scd, &data, 1);
+				spin_unlock_irqrestore(&scq->lock, flags);
+				printk("nicstar%d: Error pushing TSR.\n",
+				       card->index);
+				return 0;
+			}
 
-      if (!scq->full)
-      {
-         tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
-         if (scq_is_vbr)
-            scdi = NS_TSR_SCDISVBR;
-         else
-            scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
-         scqi = scq->next - scq->base;
-         tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
-         tsr.word_3 = 0x00000000;
-         tsr.word_4 = 0x00000000;
+			scq->full = 1;
+			if (has_run++)
+				break;
+			spin_unlock_irqrestore(&scq->lock, flags);
+			interruptible_sleep_on_timeout(&scq->scqfull_waitq,
+						       SCQFULL_TIMEOUT);
+			spin_lock_irqsave(&scq->lock, flags);
+		}
 
-         *scq->next = tsr;
-         index = (int) scqi;
-         scq->skb[index] = NULL;
-         XPRINTK("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
-                 card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2),
-                 le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4),
-		 (u32) scq->next);
-         if (scq->next == scq->last)
-            scq->next = scq->base;
-         else
-            scq->next++;
-         vc->tbd_count = 0;
-         scq->tbd_count = 0;
-      }
-      else
-         PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index);
-   }
-   data = (u32) virt_to_bus(scq->next);
-   ns_write_sram(card, scq->scd, &data, 1);
-   
-   spin_unlock_irqrestore(&scq->lock, flags);
-   
-   return 0;
+		if (!scq->full) {
+			tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
+			if (scq_is_vbr)
+				scdi = NS_TSR_SCDISVBR;
+			else
+				scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
+			scqi = scq->next - scq->base;
+			tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
+			tsr.word_3 = 0x00000000;
+			tsr.word_4 = 0x00000000;
+
+			*scq->next = tsr;
+			index = (int)scqi;
+			scq->skb[index] = NULL;
+			XPRINTK
+			    ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
+			     card->index, le32_to_cpu(tsr.word_1),
+			     le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3),
+			     le32_to_cpu(tsr.word_4), scq->next);
+			if (scq->next == scq->last)
+				scq->next = scq->base;
+			else
+				scq->next++;
+			vc->tbd_count = 0;
+			scq->tbd_count = 0;
+		} else
+			PRINTK("nicstar%d: Timeout pushing TSR.\n",
+			       card->index);
+	}
+	data = scq_virt_to_bus(scq, scq->next);
+	ns_write_sram(card, scq->scd, &data, 1);
+
+	spin_unlock_irqrestore(&scq->lock, flags);
+
+	return 0;
 }
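
The slot index stored into scq->skb[] above comes from plain pointer
subtraction, scq->next - scq->base, which in C counts ns_scqe elements, not
bytes; the byte-offset form only appears when a queue position has to be
handed to a hardware register (see process_tsq() below).  A small stand-alone
illustration of the two kinds of difference (the struct is a stand-in, not
the driver's type):

#include <stdio.h>

struct entry {			/* stand-in for ns_scqe: four 32-bit words */
	unsigned int word[4];
};

int main(void)
{
	struct entry ring[64];
	struct entry *next = &ring[5];

	printf("element index: %td\n", next - ring);		/* 5  */
	printf("byte offset  : %td\n",
	       (char *)next - (char *)ring);			/* 80 */
	return 0;
}
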
 
-
-
-static void process_tsq(ns_dev *card)
+static void process_tsq(ns_dev * card)
 {
-   u32 scdi;
-   scq_info *scq;
-   ns_tsi *previous = NULL, *one_ahead, *two_ahead;
-   int serviced_entries;   /* flag indicating at least on entry was serviced */
-   
-   serviced_entries = 0;
-   
-   if (card->tsq.next == card->tsq.last)
-      one_ahead = card->tsq.base;
-   else
-      one_ahead = card->tsq.next + 1;
+	u32 scdi;
+	scq_info *scq;
+	ns_tsi *previous = NULL, *one_ahead, *two_ahead;
+	int serviced_entries;	/* flag indicating at least one entry was serviced */
 
-   if (one_ahead == card->tsq.last)
-      two_ahead = card->tsq.base;
-   else
-      two_ahead = one_ahead + 1;
-   
-   while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) ||
-          !ns_tsi_isempty(two_ahead))
-          /* At most two empty, as stated in the 77201 errata */
-   {
-      serviced_entries = 1;
-    
-      /* Skip the one or two possible empty entries */
-      while (ns_tsi_isempty(card->tsq.next)) {
-         if (card->tsq.next == card->tsq.last)
-            card->tsq.next = card->tsq.base;
-         else
-            card->tsq.next++;
-      }
-    
-      if (!ns_tsi_tmrof(card->tsq.next))
-      {
-         scdi = ns_tsi_getscdindex(card->tsq.next);
-	 if (scdi == NS_TSI_SCDISVBR)
-	    scq = card->scq0;
-	 else
-	 {
-	    if (card->scd2vc[scdi] == NULL)
-	    {
-	       printk("nicstar%d: could not find VC from SCD index.\n",
-	              card->index);
-               ns_tsi_init(card->tsq.next);
-               return;
-            }
-            scq = card->scd2vc[scdi]->scq;
-         }
-         drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
-         scq->full = 0;
-         wake_up_interruptible(&(scq->scqfull_waitq));
-      }
+	serviced_entries = 0;
 
-      ns_tsi_init(card->tsq.next);
-      previous = card->tsq.next;
-      if (card->tsq.next == card->tsq.last)
-         card->tsq.next = card->tsq.base;
-      else
-         card->tsq.next++;
+	if (card->tsq.next == card->tsq.last)
+		one_ahead = card->tsq.base;
+	else
+		one_ahead = card->tsq.next + 1;
 
-      if (card->tsq.next == card->tsq.last)
-         one_ahead = card->tsq.base;
-      else
-         one_ahead = card->tsq.next + 1;
+	if (one_ahead == card->tsq.last)
+		two_ahead = card->tsq.base;
+	else
+		two_ahead = one_ahead + 1;
 
-      if (one_ahead == card->tsq.last)
-         two_ahead = card->tsq.base;
-      else
-         two_ahead = one_ahead + 1;
-   }
+	while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) ||
+	       !ns_tsi_isempty(two_ahead))
+		/* At most two empty, as stated in the 77201 errata */
+	{
+		serviced_entries = 1;
 
-   if (serviced_entries) {
-      writel((((u32) previous) - ((u32) card->tsq.base)),
-             card->membase + TSQH);
-   }
+		/* Skip the one or two possible empty entries */
+		while (ns_tsi_isempty(card->tsq.next)) {
+			if (card->tsq.next == card->tsq.last)
+				card->tsq.next = card->tsq.base;
+			else
+				card->tsq.next++;
+		}
+
+		if (!ns_tsi_tmrof(card->tsq.next)) {
+			scdi = ns_tsi_getscdindex(card->tsq.next);
+			if (scdi == NS_TSI_SCDISVBR)
+				scq = card->scq0;
+			else {
+				if (card->scd2vc[scdi] == NULL) {
+					printk
+					    ("nicstar%d: could not find VC from SCD index.\n",
+					     card->index);
+					ns_tsi_init(card->tsq.next);
+					return;
+				}
+				scq = card->scd2vc[scdi]->scq;
+			}
+			drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
+			scq->full = 0;
+			wake_up_interruptible(&(scq->scqfull_waitq));
+		}
+
+		ns_tsi_init(card->tsq.next);
+		previous = card->tsq.next;
+		if (card->tsq.next == card->tsq.last)
+			card->tsq.next = card->tsq.base;
+		else
+			card->tsq.next++;
+
+		if (card->tsq.next == card->tsq.last)
+			one_ahead = card->tsq.base;
+		else
+			one_ahead = card->tsq.next + 1;
+
+		if (one_ahead == card->tsq.last)
+			two_ahead = card->tsq.base;
+		else
+			two_ahead = one_ahead + 1;
+	}
+
+	if (serviced_entries)
+		writel(PTR_DIFF(previous, card->tsq.base),
+		       card->membase + TSQH);
 }
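
The loop above keeps servicing the TSQ while the current entry or either of
the next two is non-empty; per the 77201 errata cited in the comment, up to
two entries can still read back as empty even though valid ones follow, so
such entries are skipped rather than taken as end-of-queue.  A stand-alone
sketch of that look-ahead test on a plain array ring (queue contents made up):

#include <stdio.h>

#define QSIZE 8

static int one_ahead(int i) { return (i + 1) % QSIZE; }
static int two_ahead(int i) { return (i + 2) % QSIZE; }

int main(void)
{
	/* 1 = valid status entry, 0 = empty; 2 and 3 are stale "empties" */
	int q[QSIZE] = { 1, 1, 0, 0, 1, 0, 0, 0 };
	int i = 0;

	while (q[i] || q[one_ahead(i)] || q[two_ahead(i)]) {
		printf("%s entry %d\n", q[i] ? "servicing" : "skipping", i);
		q[i] = 0;
		i = (i + 1) % QSIZE;
	}
	return 0;
}

Once the walk stops, a single writel() reports progress by giving TSQH the
byte offset of the last serviced entry; PTR_DIFF() (from the driver's header)
is what this patch uses to turn the pointer difference into that 32-bit
offset, replacing the old direct u32 casts of pointers.
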
 
-
-
-static void drain_scq(ns_dev *card, scq_info *scq, int pos)
+static void drain_scq(ns_dev * card, scq_info * scq, int pos)
 {
-   struct atm_vcc *vcc;
-   struct sk_buff *skb;
-   int i;
-   unsigned long flags;
-   
-   XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n",
-           card->index, (u32) scq, pos);
-   if (pos >= scq->num_entries)
-   {
-      printk("nicstar%d: Bad index on drain_scq().\n", card->index);
-      return;
-   }
+	struct atm_vcc *vcc;
+	struct sk_buff *skb;
+	int i;
+	unsigned long flags;
 
-   spin_lock_irqsave(&scq->lock, flags);
-   i = (int) (scq->tail - scq->base);
-   if (++i == scq->num_entries)
-      i = 0;
-   while (i != pos)
-   {
-      skb = scq->skb[i];
-      XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n",
-              card->index, (u32) skb, i);
-      if (skb != NULL)
-      {
-         vcc = ATM_SKB(skb)->vcc;
-	 if (vcc && vcc->pop != NULL) {
-	    vcc->pop(vcc, skb);
-	 } else {
-	    dev_kfree_skb_irq(skb);
-         }
-	 scq->skb[i] = NULL;
-      }
-      if (++i == scq->num_entries)
-         i = 0;
-   }
-   scq->tail = scq->base + pos;
-   spin_unlock_irqrestore(&scq->lock, flags);
+	XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n",
+		card->index, scq, pos);
+	if (pos >= scq->num_entries) {
+		printk("nicstar%d: Bad index on drain_scq().\n", card->index);
+		return;
+	}
+
+	spin_lock_irqsave(&scq->lock, flags);
+	i = (int)(scq->tail - scq->base);
+	if (++i == scq->num_entries)
+		i = 0;
+	while (i != pos) {
+		skb = scq->skb[i];
+		XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
+			card->index, skb, i);
+		if (skb != NULL) {
+			pci_unmap_single(card->pcidev,
+					 NS_PRV_DMA(skb),
+					 skb->len,
+					 PCI_DMA_TODEVICE);
+			vcc = ATM_SKB(skb)->vcc;
+			if (vcc && vcc->pop != NULL) {
+				vcc->pop(vcc, skb);
+			} else {
+				dev_kfree_skb_irq(skb);
+			}
+			scq->skb[i] = NULL;
+		}
+		if (++i == scq->num_entries)
+			i = 0;
+	}
+	scq->tail = scq->base + pos;
+	spin_unlock_irqrestore(&scq->lock, flags);
 }
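
The pci_unmap_single() added above has to mirror the pci_map_single() that
ns_send() now performs before building the TBD: same PCI device, the DMA
handle stashed in the skb's private area via NS_PRV_DMA(), and the same
length and direction.  Condensed from the two hunks (a sketch of the pairing
only, not compilable on its own, error handling omitted):

	/* submit path, in ns_send() */
	NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
					 skb->len, PCI_DMA_TODEVICE);
	/* ...the returned bus address is what goes into the TBD words... */

	/* completion path, in drain_scq() */
	pci_unmap_single(card->pcidev, NS_PRV_DMA(skb),
			 skb->len, PCI_DMA_TODEVICE);
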
 
-
-
-static void process_rsq(ns_dev *card)
+static void process_rsq(ns_dev * card)
 {
-   ns_rsqe *previous;
+	ns_rsqe *previous;
 
-   if (!ns_rsqe_valid(card->rsq.next))
-      return;
-   do {
-      dequeue_rx(card, card->rsq.next);
-      ns_rsqe_init(card->rsq.next);
-      previous = card->rsq.next;
-      if (card->rsq.next == card->rsq.last)
-         card->rsq.next = card->rsq.base;
-      else
-         card->rsq.next++;
-   } while (ns_rsqe_valid(card->rsq.next));
-   writel((((u32) previous) - ((u32) card->rsq.base)),
-          card->membase + RSQH);
+	if (!ns_rsqe_valid(card->rsq.next))
+		return;
+	do {
+		dequeue_rx(card, card->rsq.next);
+		ns_rsqe_init(card->rsq.next);
+		previous = card->rsq.next;
+		if (card->rsq.next == card->rsq.last)
+			card->rsq.next = card->rsq.base;
+		else
+			card->rsq.next++;
+	} while (ns_rsqe_valid(card->rsq.next));
+	writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH);
 }
 
-
-
-static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
+static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 {
-   u32 vpi, vci;
-   vc_map *vc;
-   struct sk_buff *iovb;
-   struct iovec *iov;
-   struct atm_vcc *vcc;
-   struct sk_buff *skb;
-   unsigned short aal5_len;
-   int len;
-   u32 stat;
+	u32 vpi, vci;
+	vc_map *vc;
+	struct sk_buff *iovb;
+	struct iovec *iov;
+	struct atm_vcc *vcc;
+	struct sk_buff *skb;
+	unsigned short aal5_len;
+	int len;
+	u32 stat;
+	u32 id;
 
-   stat = readl(card->membase + STAT);
-   card->sbfqc = ns_stat_sfbqc_get(stat);   
-   card->lbfqc = ns_stat_lfbqc_get(stat);
+	stat = readl(card->membase + STAT);
+	card->sbfqc = ns_stat_sfbqc_get(stat);
+	card->lbfqc = ns_stat_lfbqc_get(stat);
 
-   skb = (struct sk_buff *) le32_to_cpu(rsqe->buffer_handle);
-   vpi = ns_rsqe_vpi(rsqe);
-   vci = ns_rsqe_vci(rsqe);
-   if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits)
-   {
-      printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
-             card->index, vpi, vci);
-      recycle_rx_buf(card, skb);
-      return;
-   }
-   
-   vc = &(card->vcmap[vpi << card->vcibits | vci]);
-   if (!vc->rx)
-   {
-      RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
-             card->index, vpi, vci);
-      recycle_rx_buf(card, skb);
-      return;
-   }
+	id = le32_to_cpu(rsqe->buffer_handle);
+	skb = idr_find(&card->idr, id);
+	if (!skb) {
+		RXPRINTK(KERN_ERR
+			 "nicstar%d: idr_find() failed!\n", card->index);
+		return;
+	}
+	idr_remove(&card->idr, id);
+	pci_dma_sync_single_for_cpu(card->pcidev,
+				    NS_PRV_DMA(skb),
+				    (NS_PRV_BUFTYPE(skb) == BUF_SM
+				     ? NS_SMSKBSIZE : NS_LGSKBSIZE),
+				    PCI_DMA_FROMDEVICE);
+	pci_unmap_single(card->pcidev,
+			 NS_PRV_DMA(skb),
+			 (NS_PRV_BUFTYPE(skb) == BUF_SM
+			  ? NS_SMSKBSIZE : NS_LGSKBSIZE),
+			 PCI_DMA_FROMDEVICE);
+	vpi = ns_rsqe_vpi(rsqe);
+	vci = ns_rsqe_vci(rsqe);
+	if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {
+		printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
+		       card->index, vpi, vci);
+		recycle_rx_buf(card, skb);
+		return;
+	}
 
-   vcc = vc->rx_vcc;
+	vc = &(card->vcmap[vpi << card->vcibits | vci]);
+	if (!vc->rx) {
+		RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
+			 card->index, vpi, vci);
+		recycle_rx_buf(card, skb);
+		return;
+	}
 
-   if (vcc->qos.aal == ATM_AAL0)
-   {
-      struct sk_buff *sb;
-      unsigned char *cell;
-      int i;
+	vcc = vc->rx_vcc;
 
-      cell = skb->data;
-      for (i = ns_rsqe_cellcount(rsqe); i; i--)
-      {
-         if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL)
-         {
-            printk("nicstar%d: Can't allocate buffers for aal0.\n",
-                   card->index);
-            atomic_add(i,&vcc->stats->rx_drop);
-            break;
-         }
-         if (!atm_charge(vcc, sb->truesize))
-         {
-            RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
-                     card->index);
-            atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
-            dev_kfree_skb_any(sb);
-            break;
-         }
-         /* Rebuild the header */
-         *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
-                               (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
-         if (i == 1 && ns_rsqe_eopdu(rsqe))
-            *((u32 *) sb->data) |= 0x00000002;
-         skb_put(sb, NS_AAL0_HEADER);
-         memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
-         skb_put(sb, ATM_CELL_PAYLOAD);
-         ATM_SKB(sb)->vcc = vcc;
-	 __net_timestamp(sb);
-         vcc->push(vcc, sb);
-         atomic_inc(&vcc->stats->rx);
-         cell += ATM_CELL_PAYLOAD;
-      }
+	if (vcc->qos.aal == ATM_AAL0) {
+		struct sk_buff *sb;
+		unsigned char *cell;
+		int i;
 
-      recycle_rx_buf(card, skb);
-      return;
-   }
+		cell = skb->data;
+		for (i = ns_rsqe_cellcount(rsqe); i; i--) {
+			if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) {
+				printk
+				    ("nicstar%d: Can't allocate buffers for aal0.\n",
+				     card->index);
+				atomic_add(i, &vcc->stats->rx_drop);
+				break;
+			}
+			if (!atm_charge(vcc, sb->truesize)) {
+				RXPRINTK
+				    ("nicstar%d: atm_charge() dropped aal0 packets.\n",
+				     card->index);
+				atomic_add(i - 1, &vcc->stats->rx_drop);	/* already increased by 1 */
+				dev_kfree_skb_any(sb);
+				break;
+			}
+			/* Rebuild the header */
+			*((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
+			    (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
+			if (i == 1 && ns_rsqe_eopdu(rsqe))
+				*((u32 *) sb->data) |= 0x00000002;
+			skb_put(sb, NS_AAL0_HEADER);
+			memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
+			skb_put(sb, ATM_CELL_PAYLOAD);
+			ATM_SKB(sb)->vcc = vcc;
+			__net_timestamp(sb);
+			vcc->push(vcc, sb);
+			atomic_inc(&vcc->stats->rx);
+			cell += ATM_CELL_PAYLOAD;
+		}
 
-   /* To reach this point, the AAL layer can only be AAL5 */
+		recycle_rx_buf(card, skb);
+		return;
+	}
 
-   if ((iovb = vc->rx_iov) == NULL)
-   {
-      iovb = skb_dequeue(&(card->iovpool.queue));
-      if (iovb == NULL)		/* No buffers in the queue */
-      {
-         iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
-	 if (iovb == NULL)
-	 {
-	    printk("nicstar%d: Out of iovec buffers.\n", card->index);
-            atomic_inc(&vcc->stats->rx_drop);
-            recycle_rx_buf(card, skb);
-            return;
-	 }
-         NS_SKB_CB(iovb)->buf_type = BUF_NONE;
-      }
-      else
-         if (--card->iovpool.count < card->iovnr.min)
-	 {
-	    struct sk_buff *new_iovb;
-	    if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
-	    {
-               NS_SKB_CB(iovb)->buf_type = BUF_NONE;
-               skb_queue_tail(&card->iovpool.queue, new_iovb);
-               card->iovpool.count++;
-	    }
-	 }
-      vc->rx_iov = iovb;
-      NS_SKB(iovb)->iovcnt = 0;
-      iovb->len = 0;
-      iovb->data = iovb->head;
-      skb_reset_tail_pointer(iovb);
-      NS_SKB(iovb)->vcc = vcc;
-      /* IMPORTANT: a pointer to the sk_buff containing the small or large
-                    buffer is stored as iovec base, NOT a pointer to the 
-	            small or large buffer itself. */
-   }
-   else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
-   {
-      printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
-      atomic_inc(&vcc->stats->rx_err);
-      recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
-      NS_SKB(iovb)->iovcnt = 0;
-      iovb->len = 0;
-      iovb->data = iovb->head;
-      skb_reset_tail_pointer(iovb);
-      NS_SKB(iovb)->vcc = vcc;
-   }
-   iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++];
-   iov->iov_base = (void *) skb;
-   iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
-   iovb->len += iov->iov_len;
+	/* To reach this point, the AAL layer can only be AAL5 */
 
-   if (NS_SKB(iovb)->iovcnt == 1)
-   {
-      if (NS_SKB_CB(skb)->buf_type != BUF_SM)
-      {
-         printk("nicstar%d: Expected a small buffer, and this is not one.\n",
-	        card->index);
-         which_list(card, skb);
-         atomic_inc(&vcc->stats->rx_err);
-         recycle_rx_buf(card, skb);
-         vc->rx_iov = NULL;
-         recycle_iov_buf(card, iovb);
-         return;
-      }
-   }
-   else /* NS_SKB(iovb)->iovcnt >= 2 */
-   {
-      if (NS_SKB_CB(skb)->buf_type != BUF_LG)
-      {
-         printk("nicstar%d: Expected a large buffer, and this is not one.\n",
-	        card->index);
-         which_list(card, skb);
-         atomic_inc(&vcc->stats->rx_err);
-         recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
-	                       NS_SKB(iovb)->iovcnt);
-         vc->rx_iov = NULL;
-         recycle_iov_buf(card, iovb);
-	 return;
-      }
-   }
+	if ((iovb = vc->rx_iov) == NULL) {
+		iovb = skb_dequeue(&(card->iovpool.queue));
+		if (iovb == NULL) {	/* No buffers in the queue */
+			iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
+			if (iovb == NULL) {
+				printk("nicstar%d: Out of iovec buffers.\n",
+				       card->index);
+				atomic_inc(&vcc->stats->rx_drop);
+				recycle_rx_buf(card, skb);
+				return;
+			}
+			NS_PRV_BUFTYPE(iovb) = BUF_NONE;
+		} else if (--card->iovpool.count < card->iovnr.min) {
+			struct sk_buff *new_iovb;
+			if ((new_iovb =
+			     alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) {
+				NS_PRV_BUFTYPE(iovb) = BUF_NONE;
+				skb_queue_tail(&card->iovpool.queue, new_iovb);
+				card->iovpool.count++;
+			}
+		}
+		vc->rx_iov = iovb;
+		NS_PRV_IOVCNT(iovb) = 0;
+		iovb->len = 0;
+		iovb->data = iovb->head;
+		skb_reset_tail_pointer(iovb);
+		/* IMPORTANT: a pointer to the sk_buff containing the small or large
+		   buffer is stored as iovec base, NOT a pointer to the
+		   small or large buffer itself. */
+	} else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
+		printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
+		atomic_inc(&vcc->stats->rx_err);
+		recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+				      NS_MAX_IOVECS);
+		NS_PRV_IOVCNT(iovb) = 0;
+		iovb->len = 0;
+		iovb->data = iovb->head;
+		skb_reset_tail_pointer(iovb);
+	}
+	iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++];
+	iov->iov_base = (void *)skb;
+	iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
+	iovb->len += iov->iov_len;
 
-   if (ns_rsqe_eopdu(rsqe))
-   {
-      /* This works correctly regardless of the endianness of the host */
-      unsigned char *L1L2 = (unsigned char *)((u32)skb->data +
-                                              iov->iov_len - 6);
-      aal5_len = L1L2[0] << 8 | L1L2[1];
-      len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
-      if (ns_rsqe_crcerr(rsqe) ||
-          len + 8 > iovb->len || len + (47 + 8) < iovb->len)
-      {
-         printk("nicstar%d: AAL5 CRC error", card->index);
-         if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
-            printk(" - PDU size mismatch.\n");
-         else
-            printk(".\n");
-         atomic_inc(&vcc->stats->rx_err);
-         recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
-	   NS_SKB(iovb)->iovcnt);
-	 vc->rx_iov = NULL;
-         recycle_iov_buf(card, iovb);
-	 return;
-      }
-
-      /* By this point we (hopefully) have a complete SDU without errors. */
-
-      if (NS_SKB(iovb)->iovcnt == 1)	/* Just a small buffer */
-      {
-         /* skb points to a small buffer */
-         if (!atm_charge(vcc, skb->truesize))
-         {
-            push_rxbufs(card, skb);
-            atomic_inc(&vcc->stats->rx_drop);
-         }
-         else
-	 {
-            skb_put(skb, len);
-            dequeue_sm_buf(card, skb);
-#ifdef NS_USE_DESTRUCTORS
-            skb->destructor = ns_sb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
-            ATM_SKB(skb)->vcc = vcc;
-	    __net_timestamp(skb);
-            vcc->push(vcc, skb);
-            atomic_inc(&vcc->stats->rx);
-         }
-      }
-      else if (NS_SKB(iovb)->iovcnt == 2)	/* One small plus one large buffer */
-      {
-         struct sk_buff *sb;
-
-         sb = (struct sk_buff *) (iov - 1)->iov_base;
-         /* skb points to a large buffer */
-
-         if (len <= NS_SMBUFSIZE)
-	 {
-            if (!atm_charge(vcc, sb->truesize))
-            {
-               push_rxbufs(card, sb);
-               atomic_inc(&vcc->stats->rx_drop);
-            }
-            else
-	    {
-               skb_put(sb, len);
-               dequeue_sm_buf(card, sb);
-#ifdef NS_USE_DESTRUCTORS
-               sb->destructor = ns_sb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
-               ATM_SKB(sb)->vcc = vcc;
-	       __net_timestamp(sb);
-               vcc->push(vcc, sb);
-               atomic_inc(&vcc->stats->rx);
-            }
-
-            push_rxbufs(card, skb);
-
-	 }
-	 else			/* len > NS_SMBUFSIZE, the usual case */
-	 {
-            if (!atm_charge(vcc, skb->truesize))
-            {
-               push_rxbufs(card, skb);
-               atomic_inc(&vcc->stats->rx_drop);
-            }
-            else
-            {
-               dequeue_lg_buf(card, skb);
-#ifdef NS_USE_DESTRUCTORS
-               skb->destructor = ns_lb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
-               skb_push(skb, NS_SMBUFSIZE);
-               skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE);
-               skb_put(skb, len - NS_SMBUFSIZE);
-               ATM_SKB(skb)->vcc = vcc;
-	       __net_timestamp(skb);
-               vcc->push(vcc, skb);
-               atomic_inc(&vcc->stats->rx);
-            }
-
-            push_rxbufs(card, sb);
-
-         }
-	 
-      }
-      else				/* Must push a huge buffer */
-      {
-         struct sk_buff *hb, *sb, *lb;
-	 int remaining, tocopy;
-         int j;
-
-         hb = skb_dequeue(&(card->hbpool.queue));
-         if (hb == NULL)		/* No buffers in the queue */
-         {
-
-            hb = dev_alloc_skb(NS_HBUFSIZE);
-            if (hb == NULL)
-            {
-               printk("nicstar%d: Out of huge buffers.\n", card->index);
-               atomic_inc(&vcc->stats->rx_drop);
-               recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
-	                             NS_SKB(iovb)->iovcnt);
-               vc->rx_iov = NULL;
-               recycle_iov_buf(card, iovb);
-               return;
-            }
-            else if (card->hbpool.count < card->hbnr.min)
-	    {
-               struct sk_buff *new_hb;
-               if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
-               {
-                  skb_queue_tail(&card->hbpool.queue, new_hb);
-                  card->hbpool.count++;
-               }
-            }
-            NS_SKB_CB(hb)->buf_type = BUF_NONE;
-	 }
-	 else
-         if (--card->hbpool.count < card->hbnr.min)
-         {
-            struct sk_buff *new_hb;
-            if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
-            {
-               NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
-               skb_queue_tail(&card->hbpool.queue, new_hb);
-               card->hbpool.count++;
-            }
-            if (card->hbpool.count < card->hbnr.min)
-	    {
-               if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
-               {
-                  NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
-                  skb_queue_tail(&card->hbpool.queue, new_hb);
-                  card->hbpool.count++;
-               }
-            }
-         }
-
-         iov = (struct iovec *) iovb->data;
-
-         if (!atm_charge(vcc, hb->truesize))
-	 {
-            recycle_iovec_rx_bufs(card, iov, NS_SKB(iovb)->iovcnt);
-            if (card->hbpool.count < card->hbnr.max)
-            {
-               skb_queue_tail(&card->hbpool.queue, hb);
-               card->hbpool.count++;
-            }
-	    else
-	       dev_kfree_skb_any(hb);
-	    atomic_inc(&vcc->stats->rx_drop);
-         }
-         else
-	 {
-            /* Copy the small buffer to the huge buffer */
-            sb = (struct sk_buff *) iov->iov_base;
-            skb_copy_from_linear_data(sb, hb->data, iov->iov_len);
-            skb_put(hb, iov->iov_len);
-            remaining = len - iov->iov_len;
-            iov++;
-            /* Free the small buffer */
-            push_rxbufs(card, sb);
-
-            /* Copy all large buffers to the huge buffer and free them */
-            for (j = 1; j < NS_SKB(iovb)->iovcnt; j++)
-            {
-               lb = (struct sk_buff *) iov->iov_base;
-               tocopy = min_t(int, remaining, iov->iov_len);
-               skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy);
-               skb_put(hb, tocopy);
-               iov++;
-               remaining -= tocopy;
-               push_rxbufs(card, lb);
-            }
 #ifdef EXTRA_DEBUG
-            if (remaining != 0 || hb->len != len)
-               printk("nicstar%d: Huge buffer len mismatch.\n", card->index);
-#endif /* EXTRA_DEBUG */
-            ATM_SKB(hb)->vcc = vcc;
-#ifdef NS_USE_DESTRUCTORS
-            hb->destructor = ns_hb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
-	    __net_timestamp(hb);
-            vcc->push(vcc, hb);
-            atomic_inc(&vcc->stats->rx);
-         }
-      }
+	if (NS_PRV_IOVCNT(iovb) == 1) {
+		if (NS_PRV_BUFTYPE(skb) != BUF_SM) {
+			printk
+			    ("nicstar%d: Expected a small buffer, and this is not one.\n",
+			     card->index);
+			which_list(card, skb);
+			atomic_inc(&vcc->stats->rx_err);
+			recycle_rx_buf(card, skb);
+			vc->rx_iov = NULL;
+			recycle_iov_buf(card, iovb);
+			return;
+		}
+	} else {		/* NS_PRV_IOVCNT(iovb) >= 2 */
 
-      vc->rx_iov = NULL;
-      recycle_iov_buf(card, iovb);
-   }
+		if (NS_PRV_BUFTYPE(skb) != BUF_LG) {
+			printk
+			    ("nicstar%d: Expected a large buffer, and this is not one.\n",
+			     card->index);
+			which_list(card, skb);
+			atomic_inc(&vcc->stats->rx_err);
+			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+					      NS_PRV_IOVCNT(iovb));
+			vc->rx_iov = NULL;
+			recycle_iov_buf(card, iovb);
+			return;
+		}
+	}
+#endif /* EXTRA_DEBUG */
+
+	if (ns_rsqe_eopdu(rsqe)) {
+		/* This works correctly regardless of the endianness of the host */
+		unsigned char *L1L2 = (unsigned char *)
+						(skb->data + iov->iov_len - 6);
+		aal5_len = L1L2[0] << 8 | L1L2[1];
+		len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
+		if (ns_rsqe_crcerr(rsqe) ||
+		    len + 8 > iovb->len || len + (47 + 8) < iovb->len) {
+			printk("nicstar%d: AAL5 CRC error", card->index);
+			if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
+				printk(" - PDU size mismatch.\n");
+			else
+				printk(".\n");
+			atomic_inc(&vcc->stats->rx_err);
+			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+					      NS_PRV_IOVCNT(iovb));
+			vc->rx_iov = NULL;
+			recycle_iov_buf(card, iovb);
+			return;
+		}
+
+		/* By this point we (hopefully) have a complete SDU without errors. */
+
+		if (NS_PRV_IOVCNT(iovb) == 1) {	/* Just a small buffer */
+			/* skb points to a small buffer */
+			if (!atm_charge(vcc, skb->truesize)) {
+				push_rxbufs(card, skb);
+				atomic_inc(&vcc->stats->rx_drop);
+			} else {
+				skb_put(skb, len);
+				dequeue_sm_buf(card, skb);
+#ifdef NS_USE_DESTRUCTORS
+				skb->destructor = ns_sb_destructor;
+#endif /* NS_USE_DESTRUCTORS */
+				ATM_SKB(skb)->vcc = vcc;
+				__net_timestamp(skb);
+				vcc->push(vcc, skb);
+				atomic_inc(&vcc->stats->rx);
+			}
+		} else if (NS_PRV_IOVCNT(iovb) == 2) {	/* One small plus one large buffer */
+			struct sk_buff *sb;
+
+			sb = (struct sk_buff *)(iov - 1)->iov_base;
+			/* skb points to a large buffer */
+
+			if (len <= NS_SMBUFSIZE) {
+				if (!atm_charge(vcc, sb->truesize)) {
+					push_rxbufs(card, sb);
+					atomic_inc(&vcc->stats->rx_drop);
+				} else {
+					skb_put(sb, len);
+					dequeue_sm_buf(card, sb);
+#ifdef NS_USE_DESTRUCTORS
+					sb->destructor = ns_sb_destructor;
+#endif /* NS_USE_DESTRUCTORS */
+					ATM_SKB(sb)->vcc = vcc;
+					__net_timestamp(sb);
+					vcc->push(vcc, sb);
+					atomic_inc(&vcc->stats->rx);
+				}
+
+				push_rxbufs(card, skb);
+
+			} else {	/* len > NS_SMBUFSIZE, the usual case */
+
+				if (!atm_charge(vcc, skb->truesize)) {
+					push_rxbufs(card, skb);
+					atomic_inc(&vcc->stats->rx_drop);
+				} else {
+					dequeue_lg_buf(card, skb);
+#ifdef NS_USE_DESTRUCTORS
+					skb->destructor = ns_lb_destructor;
+#endif /* NS_USE_DESTRUCTORS */
+					skb_push(skb, NS_SMBUFSIZE);
+					skb_copy_from_linear_data(sb, skb->data,
+								  NS_SMBUFSIZE);
+					skb_put(skb, len - NS_SMBUFSIZE);
+					ATM_SKB(skb)->vcc = vcc;
+					__net_timestamp(skb);
+					vcc->push(vcc, skb);
+					atomic_inc(&vcc->stats->rx);
+				}
+
+				push_rxbufs(card, sb);
+
+			}
+
+		} else {	/* Must push a huge buffer */
+
+			struct sk_buff *hb, *sb, *lb;
+			int remaining, tocopy;
+			int j;
+
+			hb = skb_dequeue(&(card->hbpool.queue));
+			if (hb == NULL) {	/* No buffers in the queue */
+
+				hb = dev_alloc_skb(NS_HBUFSIZE);
+				if (hb == NULL) {
+					printk
+					    ("nicstar%d: Out of huge buffers.\n",
+					     card->index);
+					atomic_inc(&vcc->stats->rx_drop);
+					recycle_iovec_rx_bufs(card,
+							      (struct iovec *)
+							      iovb->data,
+							      NS_PRV_IOVCNT(iovb));
+					vc->rx_iov = NULL;
+					recycle_iov_buf(card, iovb);
+					return;
+				} else if (card->hbpool.count < card->hbnr.min) {
+					struct sk_buff *new_hb;
+					if ((new_hb =
+					     dev_alloc_skb(NS_HBUFSIZE)) !=
+					    NULL) {
+						skb_queue_tail(&card->hbpool.
+							       queue, new_hb);
+						card->hbpool.count++;
+					}
+				}
+				NS_PRV_BUFTYPE(hb) = BUF_NONE;
+			} else if (--card->hbpool.count < card->hbnr.min) {
+				struct sk_buff *new_hb;
+				if ((new_hb =
+				     dev_alloc_skb(NS_HBUFSIZE)) != NULL) {
+					NS_PRV_BUFTYPE(new_hb) = BUF_NONE;
+					skb_queue_tail(&card->hbpool.queue,
+						       new_hb);
+					card->hbpool.count++;
+				}
+				if (card->hbpool.count < card->hbnr.min) {
+					if ((new_hb =
+					     dev_alloc_skb(NS_HBUFSIZE)) !=
+					    NULL) {
+						NS_PRV_BUFTYPE(new_hb) =
+						    BUF_NONE;
+						skb_queue_tail(&card->hbpool.queue,
+							       new_hb);
+						card->hbpool.count++;
+					}
+				}
+			}
+
+			iov = (struct iovec *)iovb->data;
+
+			if (!atm_charge(vcc, hb->truesize)) {
+				recycle_iovec_rx_bufs(card, iov,
+						      NS_PRV_IOVCNT(iovb));
+				if (card->hbpool.count < card->hbnr.max) {
+					skb_queue_tail(&card->hbpool.queue, hb);
+					card->hbpool.count++;
+				} else
+					dev_kfree_skb_any(hb);
+				atomic_inc(&vcc->stats->rx_drop);
+			} else {
+				/* Copy the small buffer to the huge buffer */
+				sb = (struct sk_buff *)iov->iov_base;
+				skb_copy_from_linear_data(sb, hb->data,
+							  iov->iov_len);
+				skb_put(hb, iov->iov_len);
+				remaining = len - iov->iov_len;
+				iov++;
+				/* Free the small buffer */
+				push_rxbufs(card, sb);
+
+				/* Copy all large buffers to the huge buffer and free them */
+				for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) {
+					lb = (struct sk_buff *)iov->iov_base;
+					tocopy =
+					    min_t(int, remaining, iov->iov_len);
+					skb_copy_from_linear_data(lb,
+								  skb_tail_pointer(hb),
+								  tocopy);
+					skb_put(hb, tocopy);
+					iov++;
+					remaining -= tocopy;
+					push_rxbufs(card, lb);
+				}
+#ifdef EXTRA_DEBUG
+				if (remaining != 0 || hb->len != len)
+					printk
+					    ("nicstar%d: Huge buffer len mismatch.\n",
+					     card->index);
+#endif /* EXTRA_DEBUG */
+				ATM_SKB(hb)->vcc = vcc;
+#ifdef NS_USE_DESTRUCTORS
+				hb->destructor = ns_hb_destructor;
+#endif /* NS_USE_DESTRUCTORS */
+				__net_timestamp(hb);
+				vcc->push(vcc, hb);
+				atomic_inc(&vcc->stats->rx);
+			}
+		}
+
+		vc->rx_iov = NULL;
+		recycle_iov_buf(card, iovb);
+	}
 
 }
 
-
-
 #ifdef NS_USE_DESTRUCTORS
 
 static void ns_sb_destructor(struct sk_buff *sb)
 {
-   ns_dev *card;
-   u32 stat;
+	ns_dev *card;
+	u32 stat;
 
-   card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
-   stat = readl(card->membase + STAT);
-   card->sbfqc = ns_stat_sfbqc_get(stat);   
-   card->lbfqc = ns_stat_lfbqc_get(stat);
+	card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
+	stat = readl(card->membase + STAT);
+	card->sbfqc = ns_stat_sfbqc_get(stat);
+	card->lbfqc = ns_stat_lfbqc_get(stat);
 
-   do
-   {
-      sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
-      if (sb == NULL)
-         break;
-      NS_SKB_CB(sb)->buf_type = BUF_SM;
-      skb_queue_tail(&card->sbpool.queue, sb);
-      skb_reserve(sb, NS_AAL0_HEADER);
-      push_rxbufs(card, sb);
-   } while (card->sbfqc < card->sbnr.min);
+	do {
+		sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
+		if (sb == NULL)
+			break;
+		NS_PRV_BUFTYPE(sb) = BUF_SM;
+		skb_queue_tail(&card->sbpool.queue, sb);
+		skb_reserve(sb, NS_AAL0_HEADER);
+		push_rxbufs(card, sb);
+	} while (card->sbfqc < card->sbnr.min);
 }
 
-
-
 static void ns_lb_destructor(struct sk_buff *lb)
 {
-   ns_dev *card;
-   u32 stat;
+	ns_dev *card;
+	u32 stat;
 
-   card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
-   stat = readl(card->membase + STAT);
-   card->sbfqc = ns_stat_sfbqc_get(stat);   
-   card->lbfqc = ns_stat_lfbqc_get(stat);
+	card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
+	stat = readl(card->membase + STAT);
+	card->sbfqc = ns_stat_sfbqc_get(stat);
+	card->lbfqc = ns_stat_lfbqc_get(stat);
 
-   do
-   {
-      lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
-      if (lb == NULL)
-         break;
-      NS_SKB_CB(lb)->buf_type = BUF_LG;
-      skb_queue_tail(&card->lbpool.queue, lb);
-      skb_reserve(lb, NS_SMBUFSIZE);
-      push_rxbufs(card, lb);
-   } while (card->lbfqc < card->lbnr.min);
+	do {
+		lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
+		if (lb == NULL)
+			break;
+		NS_PRV_BUFTYPE(lb) = BUF_LG;
+		skb_queue_tail(&card->lbpool.queue, lb);
+		skb_reserve(lb, NS_SMBUFSIZE);
+		push_rxbufs(card, lb);
+	} while (card->lbfqc < card->lbnr.min);
 }
 
-
-
 static void ns_hb_destructor(struct sk_buff *hb)
 {
-   ns_dev *card;
+	ns_dev *card;
 
-   card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
+	card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
 
-   while (card->hbpool.count < card->hbnr.init)
-   {
-      hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
-      if (hb == NULL)
-         break;
-      NS_SKB_CB(hb)->buf_type = BUF_NONE;
-      skb_queue_tail(&card->hbpool.queue, hb);
-      card->hbpool.count++;
-   }
+	while (card->hbpool.count < card->hbnr.init) {
+		hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
+		if (hb == NULL)
+			break;
+		NS_PRV_BUFTYPE(hb) = BUF_NONE;
+		skb_queue_tail(&card->hbpool.queue, hb);
+		card->hbpool.count++;
+	}
 }
 
 #endif /* NS_USE_DESTRUCTORS */
 
-
-static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
+static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
 {
-	struct ns_skb_cb *cb = NS_SKB_CB(skb);
-
-	if (unlikely(cb->buf_type == BUF_NONE)) {
-		printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
+	if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
+		printk("nicstar%d: What kind of rx buffer is this?\n",
+		       card->index);
 		dev_kfree_skb_any(skb);
 	} else
 		push_rxbufs(card, skb);
 }
 
-
-static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
+static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
 {
 	while (count-- > 0)
-		recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base);
+		recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base);
 }
 
-
-static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
+static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
 {
-   if (card->iovpool.count < card->iovnr.max)
-   {
-      skb_queue_tail(&card->iovpool.queue, iovb);
-      card->iovpool.count++;
-   }
-   else
-      dev_kfree_skb_any(iovb);
+	if (card->iovpool.count < card->iovnr.max) {
+		skb_queue_tail(&card->iovpool.queue, iovb);
+		card->iovpool.count++;
+	} else
+		dev_kfree_skb_any(iovb);
 }
 
-
-
-static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
+static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 {
-   skb_unlink(sb, &card->sbpool.queue);
+	skb_unlink(sb, &card->sbpool.queue);
 #ifdef NS_USE_DESTRUCTORS
-   if (card->sbfqc < card->sbnr.min)
+	if (card->sbfqc < card->sbnr.min)
 #else
-   if (card->sbfqc < card->sbnr.init)
-   {
-      struct sk_buff *new_sb;
-      if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
-      {
-         NS_SKB_CB(new_sb)->buf_type = BUF_SM;
-         skb_queue_tail(&card->sbpool.queue, new_sb);
-         skb_reserve(new_sb, NS_AAL0_HEADER);
-         push_rxbufs(card, new_sb);
-      }
-   }
-   if (card->sbfqc < card->sbnr.init)
+	if (card->sbfqc < card->sbnr.init) {
+		struct sk_buff *new_sb;
+		if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
+			NS_PRV_BUFTYPE(new_sb) = BUF_SM;
+			skb_queue_tail(&card->sbpool.queue, new_sb);
+			skb_reserve(new_sb, NS_AAL0_HEADER);
+			push_rxbufs(card, new_sb);
+		}
+	}
+	if (card->sbfqc < card->sbnr.init)
 #endif /* NS_USE_DESTRUCTORS */
-   {
-      struct sk_buff *new_sb;
-      if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
-      {
-         NS_SKB_CB(new_sb)->buf_type = BUF_SM;
-         skb_queue_tail(&card->sbpool.queue, new_sb);
-         skb_reserve(new_sb, NS_AAL0_HEADER);
-         push_rxbufs(card, new_sb);
-      }
-   }
+	{
+		struct sk_buff *new_sb;
+		if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
+			NS_PRV_BUFTYPE(new_sb) = BUF_SM;
+			skb_queue_tail(&card->sbpool.queue, new_sb);
+			skb_reserve(new_sb, NS_AAL0_HEADER);
+			push_rxbufs(card, new_sb);
+		}
+	}
 }
 
-
-
-static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
+static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
 {
-   skb_unlink(lb, &card->lbpool.queue);
+	skb_unlink(lb, &card->lbpool.queue);
 #ifdef NS_USE_DESTRUCTORS
-   if (card->lbfqc < card->lbnr.min)
+	if (card->lbfqc < card->lbnr.min)
 #else
-   if (card->lbfqc < card->lbnr.init)
-   {
-      struct sk_buff *new_lb;
-      if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
-      {
-         NS_SKB_CB(new_lb)->buf_type = BUF_LG;
-         skb_queue_tail(&card->lbpool.queue, new_lb);
-         skb_reserve(new_lb, NS_SMBUFSIZE);
-         push_rxbufs(card, new_lb);
-      }
-   }
-   if (card->lbfqc < card->lbnr.init)
+	if (card->lbfqc < card->lbnr.init) {
+		struct sk_buff *new_lb;
+		if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
+			NS_PRV_BUFTYPE(new_lb) = BUF_LG;
+			skb_queue_tail(&card->lbpool.queue, new_lb);
+			skb_reserve(new_lb, NS_SMBUFSIZE);
+			push_rxbufs(card, new_lb);
+		}
+	}
+	if (card->lbfqc < card->lbnr.init)
 #endif /* NS_USE_DESTRUCTORS */
-   {
-      struct sk_buff *new_lb;
-      if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
-      {
-         NS_SKB_CB(new_lb)->buf_type = BUF_LG;
-         skb_queue_tail(&card->lbpool.queue, new_lb);
-         skb_reserve(new_lb, NS_SMBUFSIZE);
-         push_rxbufs(card, new_lb);
-      }
-   }
+	{
+		struct sk_buff *new_lb;
+		if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
+			NS_PRV_BUFTYPE(new_lb) = BUF_LG;
+			skb_queue_tail(&card->lbpool.queue, new_lb);
+			skb_reserve(new_lb, NS_SMBUFSIZE);
+			push_rxbufs(card, new_lb);
+		}
+	}
 }
 
-
-
-static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
+static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
 {
-   u32 stat;
-   ns_dev *card;
-   int left;
+	u32 stat;
+	ns_dev *card;
+	int left;
 
-   left = (int) *pos;
-   card = (ns_dev *) dev->dev_data;
-   stat = readl(card->membase + STAT);
-   if (!left--)
-      return sprintf(page, "Pool   count    min   init    max \n");
-   if (!left--)
-      return sprintf(page, "Small  %5d  %5d  %5d  %5d \n",
-                     ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init,
-		     card->sbnr.max);
-   if (!left--)
-      return sprintf(page, "Large  %5d  %5d  %5d  %5d \n",
-                     ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init,
-		     card->lbnr.max);
-   if (!left--)
-      return sprintf(page, "Huge   %5d  %5d  %5d  %5d \n", card->hbpool.count,
-                     card->hbnr.min, card->hbnr.init, card->hbnr.max);
-   if (!left--)
-      return sprintf(page, "Iovec  %5d  %5d  %5d  %5d \n", card->iovpool.count,
-                     card->iovnr.min, card->iovnr.init, card->iovnr.max);
-   if (!left--)
-   {
-      int retval;
-      retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt);
-      card->intcnt = 0;
-      return retval;
-   }
+	left = (int)*pos;
+	card = (ns_dev *) dev->dev_data;
+	stat = readl(card->membase + STAT);
+	if (!left--)
+		return sprintf(page, "Pool   count    min   init    max \n");
+	if (!left--)
+		return sprintf(page, "Small  %5d  %5d  %5d  %5d \n",
+			       ns_stat_sfbqc_get(stat), card->sbnr.min,
+			       card->sbnr.init, card->sbnr.max);
+	if (!left--)
+		return sprintf(page, "Large  %5d  %5d  %5d  %5d \n",
+			       ns_stat_lfbqc_get(stat), card->lbnr.min,
+			       card->lbnr.init, card->lbnr.max);
+	if (!left--)
+		return sprintf(page, "Huge   %5d  %5d  %5d  %5d \n",
+			       card->hbpool.count, card->hbnr.min,
+			       card->hbnr.init, card->hbnr.max);
+	if (!left--)
+		return sprintf(page, "Iovec  %5d  %5d  %5d  %5d \n",
+			       card->iovpool.count, card->iovnr.min,
+			       card->iovnr.init, card->iovnr.max);
+	if (!left--) {
+		int retval;
+		retval =
+		    sprintf(page, "Interrupt counter: %u \n", card->intcnt);
+		card->intcnt = 0;
+		return retval;
+	}
 #if 0
-   /* Dump 25.6 Mbps PHY registers */
-   /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it
-      here just in case it's needed for debugging. */
-   if (card->max_pcr == ATM_25_PCR && !left--)
-   {
-      u32 phy_regs[4];
-      u32 i;
+	/* Dump 25.6 Mbps PHY registers */
+	/* Now that there's a 25.6 Mbps PHY driver, this code isn't needed.
+	   I left it here just in case it's needed for debugging. */
+	if (card->max_pcr == ATM_25_PCR && !left--) {
+		u32 phy_regs[4];
+		u32 i;
 
-      for (i = 0; i < 4; i++)
-      {
-         while (CMD_BUSY(card));
-         writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD);
-         while (CMD_BUSY(card));
-         phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
-      }
+		for (i = 0; i < 4; i++) {
+			while (CMD_BUSY(card)) ;
+			writel(NS_CMD_READ_UTILITY | 0x00000200 | i,
+			       card->membase + CMD);
+			while (CMD_BUSY(card)) ;
+			phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
+		}
 
-      return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
-                     phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]);
-   }
+		return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
+			       phy_regs[0], phy_regs[1], phy_regs[2],
+			       phy_regs[3]);
+	}
 #endif /* 0 - Dump 25.6 Mbps PHY registers */
 #if 0
-   /* Dump TST */
-   if (left-- < NS_TST_NUM_ENTRIES)
-   {
-      if (card->tste2vc[left + 1] == NULL)
-         return sprintf(page, "%5d - VBR/UBR \n", left + 1);
-      else
-         return sprintf(page, "%5d - %d %d \n", left + 1,
-                        card->tste2vc[left + 1]->tx_vcc->vpi,
-                        card->tste2vc[left + 1]->tx_vcc->vci);
-   }
+	/* Dump TST */
+	if (left-- < NS_TST_NUM_ENTRIES) {
+		if (card->tste2vc[left + 1] == NULL)
+			return sprintf(page, "%5d - VBR/UBR \n", left + 1);
+		else
+			return sprintf(page, "%5d - %d %d \n", left + 1,
+				       card->tste2vc[left + 1]->tx_vcc->vpi,
+				       card->tste2vc[left + 1]->tx_vcc->vci);
+	}
 #endif /* 0 */
-   return 0;
+	return 0;
 }
 
-
-
-static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
+static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 {
-   ns_dev *card;
-   pool_levels pl;
-   long btype;
-   unsigned long flags;
+	ns_dev *card;
+	pool_levels pl;
+	long btype;
+	unsigned long flags;
 
-   card = dev->dev_data;
-   switch (cmd)
-   {
-      case NS_GETPSTAT:
-         if (get_user(pl.buftype, &((pool_levels __user *) arg)->buftype))
-	    return -EFAULT;
-         switch (pl.buftype)
-	 {
-	    case NS_BUFTYPE_SMALL:
-	       pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT));
-	       pl.level.min = card->sbnr.min;
-	       pl.level.init = card->sbnr.init;
-	       pl.level.max = card->sbnr.max;
-	       break;
+	card = dev->dev_data;
+	switch (cmd) {
+	case NS_GETPSTAT:
+		if (get_user(pl.buftype,
+			     &((pool_levels __user *) arg)->buftype))
+			return -EFAULT;
+		switch (pl.buftype) {
+		case NS_BUFTYPE_SMALL:
+			pl.count =
+			    ns_stat_sfbqc_get(readl(card->membase + STAT));
+			pl.level.min = card->sbnr.min;
+			pl.level.init = card->sbnr.init;
+			pl.level.max = card->sbnr.max;
+			break;
 
-	    case NS_BUFTYPE_LARGE:
-	       pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT));
-	       pl.level.min = card->lbnr.min;
-	       pl.level.init = card->lbnr.init;
-	       pl.level.max = card->lbnr.max;
-	       break;
+		case NS_BUFTYPE_LARGE:
+			pl.count =
+			    ns_stat_lfbqc_get(readl(card->membase + STAT));
+			pl.level.min = card->lbnr.min;
+			pl.level.init = card->lbnr.init;
+			pl.level.max = card->lbnr.max;
+			break;
 
-	    case NS_BUFTYPE_HUGE:
-	       pl.count = card->hbpool.count;
-	       pl.level.min = card->hbnr.min;
-	       pl.level.init = card->hbnr.init;
-	       pl.level.max = card->hbnr.max;
-	       break;
+		case NS_BUFTYPE_HUGE:
+			pl.count = card->hbpool.count;
+			pl.level.min = card->hbnr.min;
+			pl.level.init = card->hbnr.init;
+			pl.level.max = card->hbnr.max;
+			break;
 
-	    case NS_BUFTYPE_IOVEC:
-	       pl.count = card->iovpool.count;
-	       pl.level.min = card->iovnr.min;
-	       pl.level.init = card->iovnr.init;
-	       pl.level.max = card->iovnr.max;
-	       break;
+		case NS_BUFTYPE_IOVEC:
+			pl.count = card->iovpool.count;
+			pl.level.min = card->iovnr.min;
+			pl.level.init = card->iovnr.init;
+			pl.level.max = card->iovnr.max;
+			break;
 
-            default:
-	       return -ENOIOCTLCMD;
+		default:
+			return -ENOIOCTLCMD;
 
-	 }
-         if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
-	    return (sizeof(pl));
-	 else
-	    return -EFAULT;
+		}
+		if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
+			return (sizeof(pl));
+		else
+			return -EFAULT;
 
-      case NS_SETBUFLEV:
-         if (!capable(CAP_NET_ADMIN))
-	    return -EPERM;
-         if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
-	    return -EFAULT;
-	 if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max)
-	    return -EINVAL;
-	 if (pl.level.min == 0)
-	    return -EINVAL;
-         switch (pl.buftype)
-	 {
-	    case NS_BUFTYPE_SMALL:
-               if (pl.level.max > TOP_SB)
-	          return -EINVAL;
-	       card->sbnr.min = pl.level.min;
-	       card->sbnr.init = pl.level.init;
-	       card->sbnr.max = pl.level.max;
-	       break;
+	case NS_SETBUFLEV:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
+			return -EFAULT;
+		if (pl.level.min >= pl.level.init
+		    || pl.level.init >= pl.level.max)
+			return -EINVAL;
+		if (pl.level.min == 0)
+			return -EINVAL;
+		switch (pl.buftype) {
+		case NS_BUFTYPE_SMALL:
+			if (pl.level.max > TOP_SB)
+				return -EINVAL;
+			card->sbnr.min = pl.level.min;
+			card->sbnr.init = pl.level.init;
+			card->sbnr.max = pl.level.max;
+			break;
 
-	    case NS_BUFTYPE_LARGE:
-               if (pl.level.max > TOP_LB)
-	          return -EINVAL;
-	       card->lbnr.min = pl.level.min;
-	       card->lbnr.init = pl.level.init;
-	       card->lbnr.max = pl.level.max;
-	       break;
+		case NS_BUFTYPE_LARGE:
+			if (pl.level.max > TOP_LB)
+				return -EINVAL;
+			card->lbnr.min = pl.level.min;
+			card->lbnr.init = pl.level.init;
+			card->lbnr.max = pl.level.max;
+			break;
 
-	    case NS_BUFTYPE_HUGE:
-               if (pl.level.max > TOP_HB)
-	          return -EINVAL;
-	       card->hbnr.min = pl.level.min;
-	       card->hbnr.init = pl.level.init;
-	       card->hbnr.max = pl.level.max;
-	       break;
+		case NS_BUFTYPE_HUGE:
+			if (pl.level.max > TOP_HB)
+				return -EINVAL;
+			card->hbnr.min = pl.level.min;
+			card->hbnr.init = pl.level.init;
+			card->hbnr.max = pl.level.max;
+			break;
 
-	    case NS_BUFTYPE_IOVEC:
-               if (pl.level.max > TOP_IOVB)
-	          return -EINVAL;
-	       card->iovnr.min = pl.level.min;
-	       card->iovnr.init = pl.level.init;
-	       card->iovnr.max = pl.level.max;
-	       break;
+		case NS_BUFTYPE_IOVEC:
+			if (pl.level.max > TOP_IOVB)
+				return -EINVAL;
+			card->iovnr.min = pl.level.min;
+			card->iovnr.init = pl.level.init;
+			card->iovnr.max = pl.level.max;
+			break;
 
-            default:
-	       return -EINVAL;
+		default:
+			return -EINVAL;
 
-         }	 
-         return 0;
+		}
+		return 0;
 
-      case NS_ADJBUFLEV:
-         if (!capable(CAP_NET_ADMIN))
-	    return -EPERM;
-         btype = (long) arg;	/* a long is the same size as a pointer or bigger */
-         switch (btype)
-	 {
-	    case NS_BUFTYPE_SMALL:
-	       while (card->sbfqc < card->sbnr.init)
-	       {
-                  struct sk_buff *sb;
+	case NS_ADJBUFLEV:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		btype = (long)arg;	/* a long is the same size as a pointer or bigger */
+		switch (btype) {
+		case NS_BUFTYPE_SMALL:
+			while (card->sbfqc < card->sbnr.init) {
+				struct sk_buff *sb;
 
-                  sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
-                  if (sb == NULL)
-                     return -ENOMEM;
-                  NS_SKB_CB(sb)->buf_type = BUF_SM;
-                  skb_queue_tail(&card->sbpool.queue, sb);
-                  skb_reserve(sb, NS_AAL0_HEADER);
-                  push_rxbufs(card, sb);
-	       }
-	       break;
+				sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
+				if (sb == NULL)
+					return -ENOMEM;
+				NS_PRV_BUFTYPE(sb) = BUF_SM;
+				skb_queue_tail(&card->sbpool.queue, sb);
+				skb_reserve(sb, NS_AAL0_HEADER);
+				push_rxbufs(card, sb);
+			}
+			break;
 
-            case NS_BUFTYPE_LARGE:
-	       while (card->lbfqc < card->lbnr.init)
-	       {
-                  struct sk_buff *lb;
+		case NS_BUFTYPE_LARGE:
+			while (card->lbfqc < card->lbnr.init) {
+				struct sk_buff *lb;
 
-                  lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
-                  if (lb == NULL)
-                     return -ENOMEM;
-                  NS_SKB_CB(lb)->buf_type = BUF_LG;
-                  skb_queue_tail(&card->lbpool.queue, lb);
-                  skb_reserve(lb, NS_SMBUFSIZE);
-                  push_rxbufs(card, lb);
-	       }
-	       break;
+				lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
+				if (lb == NULL)
+					return -ENOMEM;
+				NS_PRV_BUFTYPE(lb) = BUF_LG;
+				skb_queue_tail(&card->lbpool.queue, lb);
+				skb_reserve(lb, NS_SMBUFSIZE);
+				push_rxbufs(card, lb);
+			}
+			break;
 
-            case NS_BUFTYPE_HUGE:
-               while (card->hbpool.count > card->hbnr.init)
-	       {
-                  struct sk_buff *hb;
+		case NS_BUFTYPE_HUGE:
+			while (card->hbpool.count > card->hbnr.init) {
+				struct sk_buff *hb;
 
-                  spin_lock_irqsave(&card->int_lock, flags);
-		  hb = skb_dequeue(&card->hbpool.queue);
-		  card->hbpool.count--;
-                  spin_unlock_irqrestore(&card->int_lock, flags);
-                  if (hb == NULL)
-		     printk("nicstar%d: huge buffer count inconsistent.\n",
-		            card->index);
-                  else
-		     dev_kfree_skb_any(hb);
-		  
-	       }
-               while (card->hbpool.count < card->hbnr.init)
-               {
-                  struct sk_buff *hb;
+				spin_lock_irqsave(&card->int_lock, flags);
+				hb = skb_dequeue(&card->hbpool.queue);
+				card->hbpool.count--;
+				spin_unlock_irqrestore(&card->int_lock, flags);
+				if (hb == NULL)
+					printk
+					    ("nicstar%d: huge buffer count inconsistent.\n",
+					     card->index);
+				else
+					dev_kfree_skb_any(hb);
 
-                  hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
-                  if (hb == NULL)
-                     return -ENOMEM;
-                  NS_SKB_CB(hb)->buf_type = BUF_NONE;
-                  spin_lock_irqsave(&card->int_lock, flags);
-                  skb_queue_tail(&card->hbpool.queue, hb);
-                  card->hbpool.count++;
-                  spin_unlock_irqrestore(&card->int_lock, flags);
-               }
-	       break;
+			}
+			while (card->hbpool.count < card->hbnr.init) {
+				struct sk_buff *hb;
 
-            case NS_BUFTYPE_IOVEC:
-	       while (card->iovpool.count > card->iovnr.init)
-	       {
-	          struct sk_buff *iovb;
+				hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
+				if (hb == NULL)
+					return -ENOMEM;
+				NS_PRV_BUFTYPE(hb) = BUF_NONE;
+				spin_lock_irqsave(&card->int_lock, flags);
+				skb_queue_tail(&card->hbpool.queue, hb);
+				card->hbpool.count++;
+				spin_unlock_irqrestore(&card->int_lock, flags);
+			}
+			break;
 
-                  spin_lock_irqsave(&card->int_lock, flags);
-		  iovb = skb_dequeue(&card->iovpool.queue);
-		  card->iovpool.count--;
-                  spin_unlock_irqrestore(&card->int_lock, flags);
-                  if (iovb == NULL)
-		     printk("nicstar%d: iovec buffer count inconsistent.\n",
-		            card->index);
-                  else
-		     dev_kfree_skb_any(iovb);
+		case NS_BUFTYPE_IOVEC:
+			while (card->iovpool.count > card->iovnr.init) {
+				struct sk_buff *iovb;
 
-	       }
-               while (card->iovpool.count < card->iovnr.init)
-	       {
-	          struct sk_buff *iovb;
+				spin_lock_irqsave(&card->int_lock, flags);
+				iovb = skb_dequeue(&card->iovpool.queue);
+				card->iovpool.count--;
+				spin_unlock_irqrestore(&card->int_lock, flags);
+				if (iovb == NULL)
+					printk
+					    ("nicstar%d: iovec buffer count inconsistent.\n",
+					     card->index);
+				else
+					dev_kfree_skb_any(iovb);
 
-                  iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
-                  if (iovb == NULL)
-                     return -ENOMEM;
-                  NS_SKB_CB(iovb)->buf_type = BUF_NONE;
-                  spin_lock_irqsave(&card->int_lock, flags);
-                  skb_queue_tail(&card->iovpool.queue, iovb);
-                  card->iovpool.count++;
-                  spin_unlock_irqrestore(&card->int_lock, flags);
-	       }
-	       break;
+			}
+			while (card->iovpool.count < card->iovnr.init) {
+				struct sk_buff *iovb;
 
-            default:
-	       return -EINVAL;
+				iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
+				if (iovb == NULL)
+					return -ENOMEM;
+				NS_PRV_BUFTYPE(iovb) = BUF_NONE;
+				spin_lock_irqsave(&card->int_lock, flags);
+				skb_queue_tail(&card->iovpool.queue, iovb);
+				card->iovpool.count++;
+				spin_unlock_irqrestore(&card->int_lock, flags);
+			}
+			break;
 
-	 }
-         return 0;
+		default:
+			return -EINVAL;
 
-      default:
-         if (dev->phy && dev->phy->ioctl) {
-            return dev->phy->ioctl(dev, cmd, arg);
-         }
-         else {
-            printk("nicstar%d: %s == NULL \n", card->index,
-                   dev->phy ? "dev->phy->ioctl" : "dev->phy");
-            return -ENOIOCTLCMD;
-         }
-   }
+		}
+		return 0;
+
+	default:
+		if (dev->phy && dev->phy->ioctl) {
+			return dev->phy->ioctl(dev, cmd, arg);
+		} else {
+			printk("nicstar%d: %s == NULL \n", card->index,
+			       dev->phy ? "dev->phy->ioctl" : "dev->phy");
+			return -ENOIOCTLCMD;
+		}
+	}
 }
 
-
-static void which_list(ns_dev *card, struct sk_buff *skb)
+#ifdef EXTRA_DEBUG
+static void which_list(ns_dev *card, struct sk_buff *skb)
 {
-	printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type);
+	printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb));
 }
-
+#endif /* EXTRA_DEBUG */
 
 static void ns_poll(unsigned long arg)
 {
-   int i;
-   ns_dev *card;
-   unsigned long flags;
-   u32 stat_r, stat_w;
+	int i;
+	ns_dev *card;
+	unsigned long flags;
+	u32 stat_r, stat_w;
 
-   PRINTK("nicstar: Entering ns_poll().\n");
-   for (i = 0; i < num_cards; i++)
-   {
-      card = cards[i];
-      if (spin_is_locked(&card->int_lock)) {
-      /* Probably it isn't worth spinning */
-         continue;
-      }
-      spin_lock_irqsave(&card->int_lock, flags);
+	PRINTK("nicstar: Entering ns_poll().\n");
+	for (i = 0; i < num_cards; i++) {
+		card = cards[i];
+		if (spin_is_locked(&card->int_lock)) {
+			/* Probably it isn't worth spinning */
+			continue;
+		}
+		spin_lock_irqsave(&card->int_lock, flags);
 
-      stat_w = 0;
-      stat_r = readl(card->membase + STAT);
-      if (stat_r & NS_STAT_TSIF)
-         stat_w |= NS_STAT_TSIF;
-      if (stat_r & NS_STAT_EOPDU)
-         stat_w |= NS_STAT_EOPDU;
+		stat_w = 0;
+		stat_r = readl(card->membase + STAT);
+		if (stat_r & NS_STAT_TSIF)
+			stat_w |= NS_STAT_TSIF;
+		if (stat_r & NS_STAT_EOPDU)
+			stat_w |= NS_STAT_EOPDU;
 
-      process_tsq(card);
-      process_rsq(card);
+		process_tsq(card);
+		process_rsq(card);
 
-      writel(stat_w, card->membase + STAT);
-      spin_unlock_irqrestore(&card->int_lock, flags);
-   }
-   mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
-   PRINTK("nicstar: Leaving ns_poll().\n");
+		writel(stat_w, card->membase + STAT);
+		spin_unlock_irqrestore(&card->int_lock, flags);
+	}
+	mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
+	PRINTK("nicstar: Leaving ns_poll().\n");
 }
 
-
-
 static int ns_parse_mac(char *mac, unsigned char *esi)
 {
-   int i, j;
-   short byte1, byte0;
+	int i, j;
+	short byte1, byte0;
 
-   if (mac == NULL || esi == NULL)
-      return -1;
-   j = 0;
-   for (i = 0; i < 6; i++)
-   {
-      if ((byte1 = ns_h2i(mac[j++])) < 0)
-         return -1;
-      if ((byte0 = ns_h2i(mac[j++])) < 0)
-         return -1;
-      esi[i] = (unsigned char) (byte1 * 16 + byte0);
-      if (i < 5)
-      {
-         if (mac[j++] != ':')
-            return -1;
-      }
-   }
-   return 0;
+	if (mac == NULL || esi == NULL)
+		return -1;
+	j = 0;
+	for (i = 0; i < 6; i++) {
+		if ((byte1 = ns_h2i(mac[j++])) < 0)
+			return -1;
+		if ((byte0 = ns_h2i(mac[j++])) < 0)
+			return -1;
+		esi[i] = (unsigned char)(byte1 * 16 + byte0);
+		if (i < 5) {
+			if (mac[j++] != ':')
+				return -1;
+		}
+	}
+	return 0;
 }
 
-
-
 static short ns_h2i(char c)
 {
-   if (c >= '0' && c <= '9')
-      return (short) (c - '0');
-   if (c >= 'A' && c <= 'F')
-      return (short) (c - 'A' + 10);
-   if (c >= 'a' && c <= 'f')
-      return (short) (c - 'a' + 10);
-   return -1;
+	if (c >= '0' && c <= '9')
+		return (short)(c - '0');
+	if (c >= 'A' && c <= 'F')
+		return (short)(c - 'A' + 10);
+	if (c >= 'a' && c <= 'f')
+		return (short)(c - 'a' + 10);
+	return -1;
 }
 
-
-
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
-                    unsigned long addr)
+		       unsigned long addr)
 {
-   ns_dev *card;
-   unsigned long flags;
+	ns_dev *card;
+	unsigned long flags;
 
-   card = dev->dev_data;
-   spin_lock_irqsave(&card->res_lock, flags);
-   while(CMD_BUSY(card));
-   writel((unsigned long) value, card->membase + DR0);
-   writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
-          card->membase + CMD);
-   spin_unlock_irqrestore(&card->res_lock, flags);
+	card = dev->dev_data;
+	spin_lock_irqsave(&card->res_lock, flags);
+	while (CMD_BUSY(card)) ;
+	writel((u32) value, card->membase + DR0);
+	writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
+	       card->membase + CMD);
+	spin_unlock_irqrestore(&card->res_lock, flags);
 }
 
-
-
 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr)
 {
-   ns_dev *card;
-   unsigned long flags;
-   unsigned long data;
+	ns_dev *card;
+	unsigned long flags;
+	u32 data;
 
-   card = dev->dev_data;
-   spin_lock_irqsave(&card->res_lock, flags);
-   while(CMD_BUSY(card));
-   writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
-          card->membase + CMD);
-   while(CMD_BUSY(card));
-   data = readl(card->membase + DR0) & 0x000000FF;
-   spin_unlock_irqrestore(&card->res_lock, flags);
-   return (unsigned char) data;
+	card = dev->dev_data;
+	spin_lock_irqsave(&card->res_lock, flags);
+	while (CMD_BUSY(card)) ;
+	writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
+	       card->membase + CMD);
+	while (CMD_BUSY(card)) ;
+	data = readl(card->membase + DR0) & 0x000000FF;
+	spin_unlock_irqrestore(&card->res_lock, flags);
+	return (unsigned char)data;
 }
 
-
-
 module_init(nicstar_init);
 module_exit(nicstar_cleanup);
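
The AAL5 reassembly path above only trusts the length field from the CPCS trailer after comparing it with the bytes actually gathered in the iovec: the PDU payload plus its 8-byte trailer must fit in the received cells, and AAL5 padding can never exceed 47 bytes. A minimal stand-alone sketch of that bound check, with illustrative names that are not taken from the driver (aal5_len is the trailer length field, rx_bytes the total cell payload, always a multiple of 48):

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: mirrors the sanity test used in the receive path above. */
static bool aal5_len_ok(uint32_t aal5_len, uint32_t rx_bytes)
{
	uint32_t len = (aal5_len == 0) ? 0x10000 : aal5_len;	/* 0 encodes 64 KiB */

	/* payload + 8-byte trailer must fit; padding is at most 47 bytes */
	return len + 8 <= rx_bytes && rx_bytes <= len + 8 + 47;
}
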
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h
index 6010e3d..9bc27ea 100644
--- a/drivers/atm/nicstar.h
+++ b/drivers/atm/nicstar.h
@@ -1,5 +1,4 @@
-/******************************************************************************
- *
+/*
  * nicstar.h
  *
  * Header file for the nicstar device driver.
@@ -8,29 +7,26 @@
  * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999
  *
  * (C) INESC 1998
- *
- ******************************************************************************/
-
+ */
 
 #ifndef _LINUX_NICSTAR_H_
 #define _LINUX_NICSTAR_H_
 
-
-/* Includes *******************************************************************/
+/* Includes */
 
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <linux/idr.h>
 #include <linux/uio.h>
 #include <linux/skbuff.h>
 #include <linux/atmdev.h>
 #include <linux/atm_nicstar.h>
 
-
-/* Options ********************************************************************/
+/* Options */
 
 #define NS_MAX_CARDS 4		/* Maximum number of NICStAR based cards
 				   controlled by the device driver. Must
-                                   be <= 5 */
+				   be <= 5 */
 
 #undef RCQ_SUPPORT		/* Do not define this for now */
 
@@ -43,7 +39,7 @@
 #define NS_VPIBITS 2		/* 0, 1, 2, or 8 */
 
 #define NS_MAX_RCTSIZE 4096	/* Number of entries. 4096 or 16384.
-                                   Define 4096 only if (all) your card(s)
+				   Define 4096 only if (all) your card(s)
 				   have 32K x 32bit SRAM, in which case
 				   setting this to 16384 will just waste a
 				   lot of memory.
@@ -51,33 +47,32 @@
 				   128K x 32bit SRAM will limit the maximum
 				   VCI. */
 
-/*#define NS_PCI_LATENCY 64*/	/* Must be a multiple of 32 */
+/*#define NS_PCI_LATENCY 64*/	/* Must be a multiple of 32 */
 
 	/* Number of buffers initially allocated */
-#define NUM_SB 32	/* Must be even */
-#define NUM_LB 24	/* Must be even */
-#define NUM_HB 8	/* Pre-allocated huge buffers */
-#define NUM_IOVB 48	/* Iovec buffers */
+#define NUM_SB 32		/* Must be even */
+#define NUM_LB 24		/* Must be even */
+#define NUM_HB 8		/* Pre-allocated huge buffers */
+#define NUM_IOVB 48		/* Iovec buffers */
 
 	/* Lower level for count of buffers */
-#define MIN_SB 8	/* Must be even */
-#define MIN_LB 8	/* Must be even */
+#define MIN_SB 8		/* Must be even */
+#define MIN_LB 8		/* Must be even */
 #define MIN_HB 6
 #define MIN_IOVB 8
 
 	/* Upper level for count of buffers */
-#define MAX_SB 64	/* Must be even, <= 508 */
-#define MAX_LB 48	/* Must be even, <= 508 */
+#define MAX_SB 64		/* Must be even, <= 508 */
+#define MAX_LB 48		/* Must be even, <= 508 */
 #define MAX_HB 10
 #define MAX_IOVB 80
 
 	/* These are the absolute maximum allowed for the ioctl() */
-#define TOP_SB 256	/* Must be even, <= 508 */
-#define TOP_LB 128	/* Must be even, <= 508 */
+#define TOP_SB 256		/* Must be even, <= 508 */
+#define TOP_LB 128		/* Must be even, <= 508 */
 #define TOP_HB 64
 #define TOP_IOVB 256
 
-
 #define MAX_TBD_PER_VC 1	/* Number of TBDs before a TSR */
 #define MAX_TBD_PER_SCQ 10	/* Only meaningful for variable rate SCQs */
 
@@ -89,15 +84,12 @@
 
 #define PCR_TOLERANCE (1.0001)
 
-
-
-/* ESI stuff ******************************************************************/
+/* ESI stuff */
 
 #define NICSTAR_EPROM_MAC_ADDR_OFFSET 0x6C
 #define NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT 0xF6
 
-
-/* #defines *******************************************************************/
+/* #defines */
 
 #define NS_IOREMAP_SIZE 4096
 
@@ -123,22 +115,19 @@
 #define NS_SMSKBSIZE (NS_SMBUFSIZE + NS_AAL0_HEADER)
 #define NS_LGSKBSIZE (NS_SMBUFSIZE + NS_LGBUFSIZE)
 
+/* NICStAR structures located in host memory */
 
-/* NICStAR structures located in host memory **********************************/
-
-
-
-/* RSQ - Receive Status Queue 
+/*
+ * RSQ - Receive Status Queue
  *
  * Written by the NICStAR, read by the device driver.
  */
 
-typedef struct ns_rsqe
-{
-   u32 word_1;
-   u32 buffer_handle;
-   u32 final_aal5_crc32;
-   u32 word_4;
+typedef struct ns_rsqe {
+	u32 word_1;
+	u32 buffer_handle;
+	u32 final_aal5_crc32;
+	u32 word_4;
 } ns_rsqe;
 
 #define ns_rsqe_vpi(ns_rsqep) \
@@ -175,30 +164,27 @@
 #define ns_rsqe_cellcount(ns_rsqep) \
         (le32_to_cpu((ns_rsqep)->word_4) & 0x000001FF)
 #define ns_rsqe_init(ns_rsqep) \
-        ((ns_rsqep)->word_4 = cpu_to_le32(0x00000000)) 
+        ((ns_rsqep)->word_4 = cpu_to_le32(0x00000000))
 
 #define NS_RSQ_NUM_ENTRIES (NS_RSQSIZE / 16)
 #define NS_RSQ_ALIGNMENT NS_RSQSIZE
 
-
-
-/* RCQ - Raw Cell Queue
+/*
+ * RCQ - Raw Cell Queue
  *
  * Written by the NICStAR, read by the device driver.
  */
 
-typedef struct cell_payload
-{
-   u32 word[12];
+typedef struct cell_payload {
+	u32 word[12];
 } cell_payload;
 
-typedef struct ns_rcqe
-{
-   u32 word_1;
-   u32 word_2;
-   u32 word_3;
-   u32 word_4;
-   cell_payload payload;
+typedef struct ns_rcqe {
+	u32 word_1;
+	u32 word_2;
+	u32 word_3;
+	u32 word_4;
+	cell_payload payload;
 } ns_rcqe;
 
 #define NS_RCQE_SIZE 64		/* bytes */
@@ -210,28 +196,25 @@
 #define ns_rcqe_nextbufhandle(ns_rcqep) \
         (le32_to_cpu((ns_rcqep)->word_2))
 
-
-
-/* SCQ - Segmentation Channel Queue 
+/*
+ * SCQ - Segmentation Channel Queue
  *
  * Written by the device driver, read by the NICStAR.
  */
 
-typedef struct ns_scqe
-{
-   u32 word_1;
-   u32 word_2;
-   u32 word_3;
-   u32 word_4;
+typedef struct ns_scqe {
+	u32 word_1;
+	u32 word_2;
+	u32 word_3;
+	u32 word_4;
 } ns_scqe;
 
    /* NOTE: SCQ entries can be either a TBD (Transmit Buffer Descriptors)
-            or TSR (Transmit Status Requests) */
+      or TSR (Transmit Status Requests) */
 
 #define NS_SCQE_TYPE_TBD 0x00000000
 #define NS_SCQE_TYPE_TSR 0x80000000
 
-
 #define NS_TBD_EOPDU 0x40000000
 #define NS_TBD_AAL0  0x00000000
 #define NS_TBD_AAL34 0x04000000
@@ -253,10 +236,9 @@
 #define ns_tbd_mkword_4(gfc, vpi, vci, pt, clp) \
       (cpu_to_le32((gfc) << 28 | (vpi) << 20 | (vci) << 4 | (pt) << 1 | (clp)))
 
-
 #define NS_TSR_INTENABLE 0x20000000
 
-#define NS_TSR_SCDISVBR 0xFFFF		/* Use as scdi for VBR SCD */
+#define NS_TSR_SCDISVBR 0xFFFF	/* Use as scdi for VBR SCD */
 
 #define ns_tsr_mkword_1(flags) \
         (cpu_to_le32(NS_SCQE_TYPE_TSR | (flags)))
@@ -273,22 +255,20 @@
 
 #define NS_SCQE_SIZE 16
 
-
-
-/* TSQ - Transmit Status Queue
+/*
+ * TSQ - Transmit Status Queue
  *
  * Written by the NICStAR, read by the device driver.
  */
 
-typedef struct ns_tsi
-{
-   u32 word_1;
-   u32 word_2;
+typedef struct ns_tsi {
+	u32 word_1;
+	u32 word_2;
 } ns_tsi;
 
    /* NOTE: The first word can be a status word copied from the TSR which
-            originated the TSI, or a timer overflow indicator. In this last
-	    case, the value of the first word is all zeroes. */
+      originated the TSI, or a timer overflow indicator. In this last
+      case, the value of the first word is all zeroes. */
 
 #define NS_TSI_EMPTY          0x80000000
 #define NS_TSI_TIMESTAMP_MASK 0x00FFFFFF
@@ -301,12 +281,10 @@
 #define ns_tsi_init(ns_tsip) \
         ((ns_tsip)->word_2 = cpu_to_le32(NS_TSI_EMPTY))
 
-
 #define NS_TSQSIZE 8192
 #define NS_TSQ_NUM_ENTRIES 1024
 #define NS_TSQ_ALIGNMENT 8192
 
-
 #define NS_TSI_SCDISVBR NS_TSR_SCDISVBR
 
 #define ns_tsi_tmrof(ns_tsip) \
@@ -316,26 +294,22 @@
 #define ns_tsi_getscqpos(ns_tsip) \
         (le32_to_cpu((ns_tsip)->word_1) & 0x00007FFF)
 
+/* NICStAR structures located in local SRAM */
 
-
-/* NICStAR structures located in local SRAM ***********************************/
-
-
-
-/* RCT - Receive Connection Table
+/*
+ * RCT - Receive Connection Table
  *
  * Written by both the NICStAR and the device driver.
  */
 
-typedef struct ns_rcte
-{
-   u32 word_1;
-   u32 buffer_handle;
-   u32 dma_address;
-   u32 aal5_crc32;
+typedef struct ns_rcte {
+	u32 word_1;
+	u32 buffer_handle;
+	u32 dma_address;
+	u32 aal5_crc32;
 } ns_rcte;
 
-#define NS_RCTE_BSFB            0x00200000  /* Rev. D only */
+#define NS_RCTE_BSFB            0x00200000	/* Rev. D only */
 #define NS_RCTE_NZGFC           0x00100000
 #define NS_RCTE_CONNECTOPEN     0x00080000
 #define NS_RCTE_AALMASK         0x00070000
@@ -358,25 +332,21 @@
 #define NS_RCT_ENTRY_SIZE 4	/* Number of dwords */
 
    /* NOTE: We could make macros to construct the first word of the RCTE,
-            but that doesn't seem to make much sense... */
+      but that doesn't seem to make much sense... */
 
-
-
-/* FBD - Free Buffer Descriptor
+/*
+ * FBD - Free Buffer Descriptor
  *
  * Written by the device driver via the command register.
  */
 
-typedef struct ns_fbd
-{
-   u32 buffer_handle;
-   u32 dma_address;
+typedef struct ns_fbd {
+	u32 buffer_handle;
+	u32 dma_address;
 } ns_fbd;
 
-
-
-
-/* TST - Transmit Schedule Table
+/*
+ * TST - Transmit Schedule Table
  *
  * Written by the device driver.
  */
@@ -385,40 +355,38 @@
 
 #define NS_TST_OPCODE_MASK 0x60000000
 
-#define NS_TST_OPCODE_NULL     0x00000000 /* Insert null cell */
-#define NS_TST_OPCODE_FIXED    0x20000000 /* Cell from a fixed rate channel */
+#define NS_TST_OPCODE_NULL     0x00000000	/* Insert null cell */
+#define NS_TST_OPCODE_FIXED    0x20000000	/* Cell from a fixed rate channel */
 #define NS_TST_OPCODE_VARIABLE 0x40000000
-#define NS_TST_OPCODE_END      0x60000000 /* Jump */
+#define NS_TST_OPCODE_END      0x60000000	/* Jump */
 
 #define ns_tste_make(opcode, sramad) (opcode | sramad)
 
    /* NOTE:
 
       - When the opcode is FIXED, sramad specifies the SRAM address of the
-        SCD for that fixed rate channel.
+      SCD for that fixed rate channel.
       - When the opcode is END, sramad specifies the SRAM address of the
-        location of the next TST entry to read.
+      location of the next TST entry to read.
     */
 
-
-
-/* SCD - Segmentation Channel Descriptor
+/*
+ * SCD - Segmentation Channel Descriptor
  *
  * Written by both the device driver and the NICStAR
  */
 
-typedef struct ns_scd
-{
-   u32 word_1;
-   u32 word_2;
-   u32 partial_aal5_crc;
-   u32 reserved;
-   ns_scqe cache_a;
-   ns_scqe cache_b;
+typedef struct ns_scd {
+	u32 word_1;
+	u32 word_2;
+	u32 partial_aal5_crc;
+	u32 reserved;
+	ns_scqe cache_a;
+	ns_scqe cache_b;
 } ns_scd;
 
-#define NS_SCD_BASE_MASK_VAR 0xFFFFE000		/* Variable rate */
-#define NS_SCD_BASE_MASK_FIX 0xFFFFFC00		/* Fixed rate */
+#define NS_SCD_BASE_MASK_VAR 0xFFFFE000	/* Variable rate */
+#define NS_SCD_BASE_MASK_FIX 0xFFFFFC00	/* Fixed rate */
 #define NS_SCD_TAIL_MASK_VAR 0x00001FF0
 #define NS_SCD_TAIL_MASK_FIX 0x000003F0
 #define NS_SCD_HEAD_MASK_VAR 0x00001FF0
@@ -426,13 +394,9 @@
 #define NS_SCD_XMITFOREVER   0x02000000
 
    /* NOTE: There are other fields in word 2 of the SCD, but as they should
-            not be needed in the device driver they are not defined here. */
+      not be needed in the device driver they are not defined here. */
 
-
-
-
-/* NICStAR local SRAM memory map **********************************************/
-
+/* NICStAR local SRAM memory map */
 
 #define NS_RCT           0x00000
 #define NS_RCT_32_END    0x03FFF
@@ -455,100 +419,93 @@
 #define NS_LGFBQ         0x1FC00
 #define NS_LGFBQ_END     0x1FFFF
 
-
-
-/* NISCtAR operation registers ************************************************/
-
+/* NICStAR operation registers */
 
 /* See Section 3.4 of `IDT77211 NICStAR User Manual' from www.idt.com */
 
-enum ns_regs
-{
-   DR0   = 0x00,      /* Data Register 0 R/W*/
-   DR1   = 0x04,      /* Data Register 1 W */
-   DR2   = 0x08,      /* Data Register 2 W */
-   DR3   = 0x0C,      /* Data Register 3 W */
-   CMD   = 0x10,      /* Command W */
-   CFG   = 0x14,      /* Configuration R/W */
-   STAT  = 0x18,      /* Status R/W */
-   RSQB  = 0x1C,      /* Receive Status Queue Base W */
-   RSQT  = 0x20,      /* Receive Status Queue Tail R */
-   RSQH  = 0x24,      /* Receive Status Queue Head W */
-   CDC   = 0x28,      /* Cell Drop Counter R/clear */
-   VPEC  = 0x2C,      /* VPI/VCI Lookup Error Count R/clear */
-   ICC   = 0x30,      /* Invalid Cell Count R/clear */
-   RAWCT = 0x34,      /* Raw Cell Tail R */
-   TMR   = 0x38,      /* Timer R */
-   TSTB  = 0x3C,      /* Transmit Schedule Table Base R/W */
-   TSQB  = 0x40,      /* Transmit Status Queue Base W */
-   TSQT  = 0x44,      /* Transmit Status Queue Tail R */
-   TSQH  = 0x48,      /* Transmit Status Queue Head W */
-   GP    = 0x4C,      /* General Purpose R/W */
-   VPM   = 0x50       /* VPI/VCI Mask W */
+enum ns_regs {
+	DR0 = 0x00,		/* Data Register 0 R/W */
+	DR1 = 0x04,		/* Data Register 1 W */
+	DR2 = 0x08,		/* Data Register 2 W */
+	DR3 = 0x0C,		/* Data Register 3 W */
+	CMD = 0x10,		/* Command W */
+	CFG = 0x14,		/* Configuration R/W */
+	STAT = 0x18,		/* Status R/W */
+	RSQB = 0x1C,		/* Receive Status Queue Base W */
+	RSQT = 0x20,		/* Receive Status Queue Tail R */
+	RSQH = 0x24,		/* Receive Status Queue Head W */
+	CDC = 0x28,		/* Cell Drop Counter R/clear */
+	VPEC = 0x2C,		/* VPI/VCI Lookup Error Count R/clear */
+	ICC = 0x30,		/* Invalid Cell Count R/clear */
+	RAWCT = 0x34,		/* Raw Cell Tail R */
+	TMR = 0x38,		/* Timer R */
+	TSTB = 0x3C,		/* Transmit Schedule Table Base R/W */
+	TSQB = 0x40,		/* Transmit Status Queue Base W */
+	TSQT = 0x44,		/* Transmit Status Queue Tail R */
+	TSQH = 0x48,		/* Transmit Status Queue Head W */
+	GP = 0x4C,		/* General Purpose R/W */
+	VPM = 0x50		/* VPI/VCI Mask W */
 };
 
-
-/* NICStAR commands issued to the CMD register ********************************/
-
+/* NICStAR commands issued to the CMD register */
 
 /* Top 4 bits are command opcode, lower 28 are parameters. */
 
 #define NS_CMD_NO_OPERATION         0x00000000
-        /* params always 0 */
+	/* params always 0 */
 
 #define NS_CMD_OPENCLOSE_CONNECTION 0x20000000
-        /* b19{1=open,0=close} b18-2{SRAM addr} */
+	/* b19{1=open,0=close} b18-2{SRAM addr} */
 
 #define NS_CMD_WRITE_SRAM           0x40000000
-        /* b18-2{SRAM addr} b1-0{burst size} */
+	/* b18-2{SRAM addr} b1-0{burst size} */
 
 #define NS_CMD_READ_SRAM            0x50000000
-        /* b18-2{SRAM addr} */
+	/* b18-2{SRAM addr} */
 
 #define NS_CMD_WRITE_FREEBUFQ       0x60000000
-        /* b0{large buf indicator} */
+	/* b0{large buf indicator} */
 
 #define NS_CMD_READ_UTILITY         0x80000000
-        /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */
+	/* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */
 
 #define NS_CMD_WRITE_UTILITY        0x90000000
-        /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */
+	/* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */
 
 #define NS_CMD_OPEN_CONNECTION (NS_CMD_OPENCLOSE_CONNECTION | 0x00080000)
 #define NS_CMD_CLOSE_CONNECTION NS_CMD_OPENCLOSE_CONNECTION
 
+/* NICStAR configuration bits */
 
-/* NICStAR configuration bits *************************************************/
-
-#define NS_CFG_SWRST          0x80000000    /* Software Reset */
-#define NS_CFG_RXPATH         0x20000000    /* Receive Path Enable */
-#define NS_CFG_SMBUFSIZE_MASK 0x18000000    /* Small Receive Buffer Size */
-#define NS_CFG_LGBUFSIZE_MASK 0x06000000    /* Large Receive Buffer Size */
-#define NS_CFG_EFBIE          0x01000000    /* Empty Free Buffer Queue
-                                               Interrupt Enable */
-#define NS_CFG_RSQSIZE_MASK   0x00C00000    /* Receive Status Queue Size */
-#define NS_CFG_ICACCEPT       0x00200000    /* Invalid Cell Accept */
-#define NS_CFG_IGNOREGFC      0x00100000    /* Ignore General Flow Control */
-#define NS_CFG_VPIBITS_MASK   0x000C0000    /* VPI/VCI Bits Size Select */
-#define NS_CFG_RCTSIZE_MASK   0x00030000    /* Receive Connection Table Size */
-#define NS_CFG_VCERRACCEPT    0x00008000    /* VPI/VCI Error Cell Accept */
-#define NS_CFG_RXINT_MASK     0x00007000    /* End of Receive PDU Interrupt
-                                               Handling */
-#define NS_CFG_RAWIE          0x00000800    /* Raw Cell Qu' Interrupt Enable */
-#define NS_CFG_RSQAFIE        0x00000400    /* Receive Queue Almost Full
-                                               Interrupt Enable */
-#define NS_CFG_RXRM           0x00000200    /* Receive RM Cells */
-#define NS_CFG_TMRROIE        0x00000080    /* Timer Roll Over Interrupt
-                                               Enable */
-#define NS_CFG_TXEN           0x00000020    /* Transmit Operation Enable */
-#define NS_CFG_TXIE           0x00000010    /* Transmit Status Interrupt
-                                               Enable */
-#define NS_CFG_TXURIE         0x00000008    /* Transmit Under-run Interrupt
-                                               Enable */
-#define NS_CFG_UMODE          0x00000004    /* Utopia Mode (cell/byte) Select */
-#define NS_CFG_TSQFIE         0x00000002    /* Transmit Status Queue Full
-                                               Interrupt Enable */
-#define NS_CFG_PHYIE          0x00000001    /* PHY Interrupt Enable */
+#define NS_CFG_SWRST          0x80000000	/* Software Reset */
+#define NS_CFG_RXPATH         0x20000000	/* Receive Path Enable */
+#define NS_CFG_SMBUFSIZE_MASK 0x18000000	/* Small Receive Buffer Size */
+#define NS_CFG_LGBUFSIZE_MASK 0x06000000	/* Large Receive Buffer Size */
+#define NS_CFG_EFBIE          0x01000000	/* Empty Free Buffer Queue
+						   Interrupt Enable */
+#define NS_CFG_RSQSIZE_MASK   0x00C00000	/* Receive Status Queue Size */
+#define NS_CFG_ICACCEPT       0x00200000	/* Invalid Cell Accept */
+#define NS_CFG_IGNOREGFC      0x00100000	/* Ignore General Flow Control */
+#define NS_CFG_VPIBITS_MASK   0x000C0000	/* VPI/VCI Bits Size Select */
+#define NS_CFG_RCTSIZE_MASK   0x00030000	/* Receive Connection Table Size */
+#define NS_CFG_VCERRACCEPT    0x00008000	/* VPI/VCI Error Cell Accept */
+#define NS_CFG_RXINT_MASK     0x00007000	/* End of Receive PDU Interrupt
+						   Handling */
+#define NS_CFG_RAWIE          0x00000800	/* Raw Cell Qu' Interrupt Enable */
+#define NS_CFG_RSQAFIE        0x00000400	/* Receive Queue Almost Full
+						   Interrupt Enable */
+#define NS_CFG_RXRM           0x00000200	/* Receive RM Cells */
+#define NS_CFG_TMRROIE        0x00000080	/* Timer Roll Over Interrupt
+						   Enable */
+#define NS_CFG_TXEN           0x00000020	/* Transmit Operation Enable */
+#define NS_CFG_TXIE           0x00000010	/* Transmit Status Interrupt
+						   Enable */
+#define NS_CFG_TXURIE         0x00000008	/* Transmit Under-run Interrupt
+						   Enable */
+#define NS_CFG_UMODE          0x00000004	/* Utopia Mode (cell/byte) Select */
+#define NS_CFG_TSQFIE         0x00000002	/* Transmit Status Queue Full
+						   Interrupt Enable */
+#define NS_CFG_PHYIE          0x00000001	/* PHY Interrupt Enable */
 
 #define NS_CFG_SMBUFSIZE_48    0x00000000
 #define NS_CFG_SMBUFSIZE_96    0x08000000
@@ -579,33 +536,29 @@
 #define NS_CFG_RXINT_624US   0x00003000
 #define NS_CFG_RXINT_899US   0x00004000
 
+/* NICStAR STATus bits */
 
-/* NICStAR STATus bits ********************************************************/
-
-#define NS_STAT_SFBQC_MASK 0xFF000000   /* hi 8 bits Small Buffer Queue Count */
-#define NS_STAT_LFBQC_MASK 0x00FF0000   /* hi 8 bits Large Buffer Queue Count */
-#define NS_STAT_TSIF       0x00008000   /* Transmit Status Queue Indicator */
-#define NS_STAT_TXICP      0x00004000   /* Transmit Incomplete PDU */
-#define NS_STAT_TSQF       0x00001000   /* Transmit Status Queue Full */
-#define NS_STAT_TMROF      0x00000800   /* Timer Overflow */
-#define NS_STAT_PHYI       0x00000400   /* PHY Device Interrupt */
-#define NS_STAT_CMDBZ      0x00000200   /* Command Busy */
-#define NS_STAT_SFBQF      0x00000100   /* Small Buffer Queue Full */
-#define NS_STAT_LFBQF      0x00000080   /* Large Buffer Queue Full */
-#define NS_STAT_RSQF       0x00000040   /* Receive Status Queue Full */
-#define NS_STAT_EOPDU      0x00000020   /* End of PDU */
-#define NS_STAT_RAWCF      0x00000010   /* Raw Cell Flag */
-#define NS_STAT_SFBQE      0x00000008   /* Small Buffer Queue Empty */
-#define NS_STAT_LFBQE      0x00000004   /* Large Buffer Queue Empty */
-#define NS_STAT_RSQAF      0x00000002   /* Receive Status Queue Almost Full */
+#define NS_STAT_SFBQC_MASK 0xFF000000	/* hi 8 bits Small Buffer Queue Count */
+#define NS_STAT_LFBQC_MASK 0x00FF0000	/* hi 8 bits Large Buffer Queue Count */
+#define NS_STAT_TSIF       0x00008000	/* Transmit Status Queue Indicator */
+#define NS_STAT_TXICP      0x00004000	/* Transmit Incomplete PDU */
+#define NS_STAT_TSQF       0x00001000	/* Transmit Status Queue Full */
+#define NS_STAT_TMROF      0x00000800	/* Timer Overflow */
+#define NS_STAT_PHYI       0x00000400	/* PHY Device Interrupt */
+#define NS_STAT_CMDBZ      0x00000200	/* Command Busy */
+#define NS_STAT_SFBQF      0x00000100	/* Small Buffer Queue Full */
+#define NS_STAT_LFBQF      0x00000080	/* Large Buffer Queue Full */
+#define NS_STAT_RSQF       0x00000040	/* Receive Status Queue Full */
+#define NS_STAT_EOPDU      0x00000020	/* End of PDU */
+#define NS_STAT_RAWCF      0x00000010	/* Raw Cell Flag */
+#define NS_STAT_SFBQE      0x00000008	/* Small Buffer Queue Empty */
+#define NS_STAT_LFBQE      0x00000004	/* Large Buffer Queue Empty */
+#define NS_STAT_RSQAF      0x00000002	/* Receive Status Queue Almost Full */
 
 #define ns_stat_sfbqc_get(stat) (((stat) & NS_STAT_SFBQC_MASK) >> 23)
 #define ns_stat_lfbqc_get(stat) (((stat) & NS_STAT_LFBQC_MASK) >> 15)
 
-
-
-/* #defines which depend on other #defines ************************************/
-
+/* #defines which depend on other #defines */
 
 #define NS_TST0 NS_TST_FRSCD
 #define NS_TST1 (NS_TST_FRSCD + NS_TST_NUM_ENTRIES + 1)
@@ -672,8 +625,7 @@
 #define NS_CFG_TSQFIE_OPT 0x00000000
 #endif /* ENABLE_TSQFIE */
 
-
-/* PCI stuff ******************************************************************/
+/* PCI stuff */
 
 #ifndef PCI_VENDOR_ID_IDT
 #define PCI_VENDOR_ID_IDT 0x111D
@@ -683,138 +635,124 @@
 #define PCI_DEVICE_ID_IDT_IDT77201 0x0001
 #endif /* PCI_DEVICE_ID_IDT_IDT77201 */
 
+/* Device driver structures */
 
-
-/* Device driver structures ***************************************************/
-
-
-struct ns_skb_cb {
-	u32 buf_type;			/* BUF_SM/BUF_LG/BUF_NONE */
+struct ns_skb_prv {
+	u32 buf_type;		/* BUF_SM/BUF_LG/BUF_NONE */
+	u32 dma;
+	int iovcnt;
 };
 
-#define NS_SKB_CB(skb)	((struct ns_skb_cb *)((skb)->cb))
+#define NS_PRV_BUFTYPE(skb)   \
+        (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->buf_type)
+#define NS_PRV_DMA(skb) \
+        (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->dma)
+#define NS_PRV_IOVCNT(skb) \
+        (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->iovcnt)
 
-typedef struct tsq_info
-{
-   void *org;
-   ns_tsi *base;
-   ns_tsi *next;
-   ns_tsi *last;
+typedef struct tsq_info {
+	void *org;
+	dma_addr_t dma;
+	ns_tsi *base;
+	ns_tsi *next;
+	ns_tsi *last;
 } tsq_info;
 
-
-typedef struct scq_info
-{
-   void *org;
-   ns_scqe *base;
-   ns_scqe *last;
-   ns_scqe *next;
-   volatile ns_scqe *tail;		/* Not related to the nicstar register */
-   unsigned num_entries;
-   struct sk_buff **skb;		/* Pointer to an array of pointers
-                                           to the sk_buffs used for tx */
-   u32 scd;				/* SRAM address of the corresponding
-                                           SCD */
-   int tbd_count;			/* Only meaningful on variable rate */
-   wait_queue_head_t scqfull_waitq;
-   volatile char full;			/* SCQ full indicator */
-   spinlock_t lock;			/* SCQ spinlock */
+typedef struct scq_info {
+	void *org;
+	dma_addr_t dma;
+	ns_scqe *base;
+	ns_scqe *last;
+	ns_scqe *next;
+	volatile ns_scqe *tail;	/* Not related to the nicstar register */
+	unsigned num_entries;
+	struct sk_buff **skb;	/* Pointer to an array of pointers
+				   to the sk_buffs used for tx */
+	u32 scd;		/* SRAM address of the corresponding
+				   SCD */
+	int tbd_count;		/* Only meaningful on variable rate */
+	wait_queue_head_t scqfull_waitq;
+	volatile char full;	/* SCQ full indicator */
+	spinlock_t lock;	/* SCQ spinlock */
 } scq_info;
 
-
-
-typedef struct rsq_info
-{
-   void *org;
-   ns_rsqe *base;
-   ns_rsqe *next;
-   ns_rsqe *last;
+typedef struct rsq_info {
+	void *org;
+	dma_addr_t dma;
+	ns_rsqe *base;
+	ns_rsqe *next;
+	ns_rsqe *last;
 } rsq_info;
 
-
-typedef struct skb_pool
-{
-   volatile int count;			/* number of buffers in the queue */
-   struct sk_buff_head queue;
+typedef struct skb_pool {
+	volatile int count;	/* number of buffers in the queue */
+	struct sk_buff_head queue;
 } skb_pool;
 
 /* NOTE: for small and large buffer pools, the count is not used, as the
          actual value used for buffer management is the one read from the
 	 card. */
 
-
-typedef struct vc_map
-{
-   volatile unsigned int tx:1;				/* TX vc? */
-   volatile unsigned int rx:1;				/* RX vc? */
-   struct atm_vcc *tx_vcc, *rx_vcc;
-   struct sk_buff *rx_iov;		/* RX iovector skb */
-   scq_info *scq;			/* To keep track of the SCQ */
-   u32 cbr_scd;				/* SRAM address of the corresponding
-               				   SCD. 0x00000000 for UBR/VBR/ABR */
-   int tbd_count;
+typedef struct vc_map {
+	volatile unsigned int tx:1;	/* TX vc? */
+	volatile unsigned int rx:1;	/* RX vc? */
+	struct atm_vcc *tx_vcc, *rx_vcc;
+	struct sk_buff *rx_iov;	/* RX iovector skb */
+	scq_info *scq;		/* To keep track of the SCQ */
+	u32 cbr_scd;		/* SRAM address of the corresponding
+				   SCD. 0x00000000 for UBR/VBR/ABR */
+	int tbd_count;
 } vc_map;
 
-
-struct ns_skb_data
-{
-	struct atm_vcc *vcc;
-	int iovcnt;
-};
-
-#define NS_SKB(skb) (((struct ns_skb_data *) (skb)->cb))
-
-
-typedef struct ns_dev
-{
-   int index;				/* Card ID to the device driver */
-   int sram_size;			/* In k x 32bit words. 32 or 128 */
-   void __iomem *membase;		/* Card's memory base address */
-   unsigned long max_pcr;
-   int rct_size;			/* Number of entries */
-   int vpibits;
-   int vcibits;
-   struct pci_dev *pcidev;
-   struct atm_dev *atmdev;
-   tsq_info tsq;
-   rsq_info rsq;
-   scq_info *scq0, *scq1, *scq2;	/* VBR SCQs */
-   skb_pool sbpool;			/* Small buffers */
-   skb_pool lbpool;			/* Large buffers */
-   skb_pool hbpool;			/* Pre-allocated huge buffers */
-   skb_pool iovpool;			/* iovector buffers */
-   volatile int efbie;			/* Empty free buf. queue int. enabled */
-   volatile u32 tst_addr;		/* SRAM address of the TST in use */
-   volatile int tst_free_entries;
-   vc_map vcmap[NS_MAX_RCTSIZE];
-   vc_map *tste2vc[NS_TST_NUM_ENTRIES];
-   vc_map *scd2vc[NS_FRSCD_NUM];
-   buf_nr sbnr;
-   buf_nr lbnr;
-   buf_nr hbnr;
-   buf_nr iovnr;
-   int sbfqc;
-   int lbfqc;
-   u32 sm_handle;
-   u32 sm_addr;
-   u32 lg_handle;
-   u32 lg_addr;
-   struct sk_buff *rcbuf;		/* Current raw cell buffer */
-   u32 rawch;				/* Raw cell queue head */
-   unsigned intcnt;			/* Interrupt counter */
-   spinlock_t int_lock;		/* Interrupt lock */
-   spinlock_t res_lock;		/* Card resource lock */
+typedef struct ns_dev {
+	int index;		/* Card ID to the device driver */
+	int sram_size;		/* In k x 32bit words. 32 or 128 */
+	void __iomem *membase;	/* Card's memory base address */
+	unsigned long max_pcr;
+	int rct_size;		/* Number of entries */
+	int vpibits;
+	int vcibits;
+	struct pci_dev *pcidev;
+	struct idr idr;
+	struct atm_dev *atmdev;
+	tsq_info tsq;
+	rsq_info rsq;
+	scq_info *scq0, *scq1, *scq2;	/* VBR SCQs */
+	skb_pool sbpool;	/* Small buffers */
+	skb_pool lbpool;	/* Large buffers */
+	skb_pool hbpool;	/* Pre-allocated huge buffers */
+	skb_pool iovpool;	/* iovector buffers */
+	volatile int efbie;	/* Empty free buf. queue int. enabled */
+	volatile u32 tst_addr;	/* SRAM address of the TST in use */
+	volatile int tst_free_entries;
+	vc_map vcmap[NS_MAX_RCTSIZE];
+	vc_map *tste2vc[NS_TST_NUM_ENTRIES];
+	vc_map *scd2vc[NS_FRSCD_NUM];
+	buf_nr sbnr;
+	buf_nr lbnr;
+	buf_nr hbnr;
+	buf_nr iovnr;
+	int sbfqc;
+	int lbfqc;
+	struct sk_buff *sm_handle;
+	u32 sm_addr;
+	struct sk_buff *lg_handle;
+	u32 lg_addr;
+	struct sk_buff *rcbuf;	/* Current raw cell buffer */
+	struct ns_rcqe *rawcell;
+	u32 rawch;		/* Raw cell queue head */
+	unsigned intcnt;	/* Interrupt counter */
+	spinlock_t int_lock;	/* Interrupt lock */
+	spinlock_t res_lock;	/* Card resource lock */
 } ns_dev;
 
-
    /* NOTE: Each tste2vc entry relates a given TST entry to the corresponding
-            CBR vc. If the entry is not allocated, it must be NULL.
-	    
-	    There are two TSTs so the driver can modify them on the fly
-	    without stopping the transmission.
-	    
-	    scd2vc allows us to find out unused fixed rate SCDs, because
-	    they must have a NULL pointer here. */
+      CBR vc. If the entry is not allocated, it must be NULL.
 
+      There are two TSTs so the driver can modify them on the fly
+      without stopping the transmission.
+
+      scd2vc allows us to find out unused fixed rate SCDs, because
+      they must have a NULL pointer here. */
 
 #endif /* _LINUX_NICSTAR_H_ */
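The NOTE above encodes a simple convention: an scd2vc[] slot holding NULL marks an unused fixed rate SCD. A minimal sketch of a lookup following that convention (illustrative only; the helper name is hypothetical, types and constants are the ones declared in this header):

/* Illustrative sketch: locate an unused fixed rate SCD by scanning
 * scd2vc[], which holds NULL for unallocated entries. */
static int find_free_frscd(ns_dev *card)
{
	int i;

	for (i = 0; i < NS_FRSCD_NUM; i++)
		if (card->scd2vc[i] == NULL)
			return i;
	return -1;		/* every fixed rate SCD is in use */
}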
diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c
index 842e26c..f594526 100644
--- a/drivers/atm/nicstarmac.c
+++ b/drivers/atm/nicstarmac.c
@@ -13,15 +13,15 @@
 
 #define CYCLE_DELAY 5
 
-/* This was the original definition
+/*
+   This was the original definition
 #define osp_MicroDelay(microsec) \
     do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
 */
 #define osp_MicroDelay(microsec) {unsigned long useconds = (microsec); \
                                   udelay((useconds));}
-
-
-/* The following tables represent the timing diagrams found in
+/*
+ * The following tables represent the timing diagrams found in
  * the Data Sheet for the Xicor X25020 EEProm.  The #defines below
  * represent the bits in the NICStAR's General Purpose register
  * that must be toggled for the corresponding actions on the EEProm
@@ -31,86 +31,80 @@
 /* Write Data To EEProm from SI line on rising edge of CLK */
 /* Read Data From EEProm on falling edge of CLK */
 
-#define CS_HIGH		0x0002		/* Chip select high */
-#define CS_LOW		0x0000		/* Chip select low (active low)*/
-#define CLK_HIGH	0x0004		/* Clock high */
-#define CLK_LOW		0x0000		/* Clock low  */
-#define SI_HIGH		0x0001		/* Serial input data high */
-#define SI_LOW		0x0000		/* Serial input data low */
+#define CS_HIGH		0x0002	/* Chip select high */
+#define CS_LOW		0x0000	/* Chip select low (active low) */
+#define CLK_HIGH	0x0004	/* Clock high */
+#define CLK_LOW		0x0000	/* Clock low  */
+#define SI_HIGH		0x0001	/* Serial input data high */
+#define SI_LOW		0x0000	/* Serial input data low */
 
 /* Read Status Register = 0000 0101b */
 #if 0
-static u_int32_t rdsrtab[] =
-{
-    CS_HIGH | CLK_HIGH, 
-    CS_LOW | CLK_LOW, 
-    CLK_HIGH,             /* 0 */
-    CLK_LOW, 
-    CLK_HIGH,             /* 0 */
-    CLK_LOW, 
-    CLK_HIGH,             /* 0 */
-    CLK_LOW,
-    CLK_HIGH,             /* 0 */
-    CLK_LOW, 
-    CLK_HIGH,             /* 0 */
-    CLK_LOW | SI_HIGH, 
-    CLK_HIGH | SI_HIGH,   /* 1 */
-    CLK_LOW | SI_LOW, 
-    CLK_HIGH,             /* 0 */
-    CLK_LOW | SI_HIGH, 
-    CLK_HIGH | SI_HIGH   /* 1 */
+static u_int32_t rdsrtab[] = {
+	CS_HIGH | CLK_HIGH,
+	CS_LOW | CLK_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW | SI_HIGH,
+	CLK_HIGH | SI_HIGH,	/* 1 */
+	CLK_LOW | SI_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW | SI_HIGH,
+	CLK_HIGH | SI_HIGH	/* 1 */
 };
-#endif  /*  0  */
-
+#endif /*  0  */
 
 /* Read from EEPROM = 0000 0011b */
-static u_int32_t readtab[] =
-{
-    /*
-    CS_HIGH | CLK_HIGH, 
-    */
-    CS_LOW | CLK_LOW, 
-    CLK_HIGH,             /* 0 */
-    CLK_LOW, 
-    CLK_HIGH,             /* 0 */
-    CLK_LOW, 
-    CLK_HIGH,             /* 0 */
-    CLK_LOW,
-    CLK_HIGH,             /* 0 */
-    CLK_LOW, 
-    CLK_HIGH,             /* 0 */
-    CLK_LOW, 
-    CLK_HIGH,             /* 0 */
-    CLK_LOW | SI_HIGH, 
-    CLK_HIGH | SI_HIGH,   /* 1 */
-    CLK_LOW | SI_HIGH, 
-    CLK_HIGH | SI_HIGH    /* 1 */
+static u_int32_t readtab[] = {
+	/*
+	   CS_HIGH | CLK_HIGH,
+	 */
+	CS_LOW | CLK_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW,
+	CLK_HIGH,		/* 0 */
+	CLK_LOW | SI_HIGH,
+	CLK_HIGH | SI_HIGH,	/* 1 */
+	CLK_LOW | SI_HIGH,
+	CLK_HIGH | SI_HIGH	/* 1 */
 };
 
-
 /* Clock to read from/write to the eeprom */
-static u_int32_t clocktab[] =
-{	
-    CLK_LOW,
-    CLK_HIGH,
-    CLK_LOW, 
-    CLK_HIGH,
-    CLK_LOW,
-    CLK_HIGH,
-    CLK_LOW, 
-    CLK_HIGH,
-    CLK_LOW, 
-    CLK_HIGH,
-    CLK_LOW, 
-    CLK_HIGH, 
-    CLK_LOW, 
-    CLK_HIGH,
-    CLK_LOW, 
-    CLK_HIGH,
-    CLK_LOW 
+static u_int32_t clocktab[] = {
+	CLK_LOW,
+	CLK_HIGH,
+	CLK_LOW,
+	CLK_HIGH,
+	CLK_LOW,
+	CLK_HIGH,
+	CLK_LOW,
+	CLK_HIGH,
+	CLK_LOW,
+	CLK_HIGH,
+	CLK_LOW,
+	CLK_HIGH,
+	CLK_LOW,
+	CLK_HIGH,
+	CLK_LOW,
+	CLK_HIGH,
+	CLK_LOW
 };
 
-
 #define NICSTAR_REG_WRITE(bs, reg, val) \
 	while ( readl(bs + STAT) & 0x0200 ) ; \
 	writel((val),(base)+(reg))
@@ -124,153 +118,131 @@
  * register.  
  */
 #if 0
-u_int32_t
-nicstar_read_eprom_status( virt_addr_t base )
+u_int32_t nicstar_read_eprom_status(virt_addr_t base)
 {
-   u_int32_t	val;
-   u_int32_t	rbyte;
-   int32_t	i, j;
+	u_int32_t val;
+	u_int32_t rbyte;
+	int32_t i, j;
 
-   /* Send read instruction */
-   val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0;
+	/* Send read instruction */
+	val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;
 
-   for (i=0; i<ARRAY_SIZE(rdsrtab); i++)
-   {
-	NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
-		(val | rdsrtab[i]) );
-        osp_MicroDelay( CYCLE_DELAY );
-   }
+	for (i = 0; i < ARRAY_SIZE(rdsrtab); i++) {
+		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+				  (val | rdsrtab[i]));
+		osp_MicroDelay(CYCLE_DELAY);
+	}
 
-   /* Done sending instruction - now pull data off of bit 16, MSB first */
-   /* Data clocked out of eeprom on falling edge of clock */
+	/* Done sending instruction - now pull data off of bit 16, MSB first */
+	/* Data clocked out of eeprom on falling edge of clock */
 
-   rbyte = 0;
-   for (i=7, j=0; i>=0; i--)
-   {
-	NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
-		(val | clocktab[j++]) );
-        rbyte |= (((NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE)
-			& 0x00010000) >> 16) << i);
-	NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
-		(val | clocktab[j++]) );
-        osp_MicroDelay( CYCLE_DELAY );
-   }
-   NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, 2 );
-   osp_MicroDelay( CYCLE_DELAY );
-   return rbyte;
+	rbyte = 0;
+	for (i = 7, j = 0; i >= 0; i--) {
+		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+				  (val | clocktab[j++]));
+		rbyte |= (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE)
+			    & 0x00010000) >> 16) << i);
+		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+				  (val | clocktab[j++]));
+		osp_MicroDelay(CYCLE_DELAY);
+	}
+	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2);
+	osp_MicroDelay(CYCLE_DELAY);
+	return rbyte;
 }
-#endif  /*  0  */
-
+#endif /*  0  */
 
 /*
  * This routine will clock the Read_data function into the X2520
  * eeprom, followed by the address to read from, through the NicSTaR's General
  * Purpose register.  
  */
- 
-static u_int8_t 
-read_eprom_byte(virt_addr_t base, u_int8_t offset)
+
+static u_int8_t read_eprom_byte(virt_addr_t base, u_int8_t offset)
 {
-   u_int32_t val = 0;
-   int i,j=0;
-   u_int8_t tempread = 0;
+	u_int32_t val = 0;
+	int i, j = 0;
+	u_int8_t tempread = 0;
 
-   val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0;
+	val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;
 
-   /* Send READ instruction */
-   for (i=0; i<ARRAY_SIZE(readtab); i++)
-   {
-	NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
-		(val | readtab[i]) );
-        osp_MicroDelay( CYCLE_DELAY );
-   }
+	/* Send READ instruction */
+	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
+		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+				  (val | readtab[i]));
+		osp_MicroDelay(CYCLE_DELAY);
+	}
 
-   /* Next, we need to send the byte address to read from */
-   for (i=7; i>=0; i--)
-   {
-      NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
-      		(val | clocktab[j++] | ((offset >> i) & 1) ) );
-      osp_MicroDelay(CYCLE_DELAY);
-      NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
-      		(val | clocktab[j++] | ((offset >> i) & 1) ) );
-      osp_MicroDelay( CYCLE_DELAY );
-   }
+	/* Next, we need to send the byte address to read from */
+	for (i = 7; i >= 0; i--) {
+		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+				  (val | clocktab[j++] | ((offset >> i) & 1)));
+		osp_MicroDelay(CYCLE_DELAY);
+		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+				  (val | clocktab[j++] | ((offset >> i) & 1)));
+		osp_MicroDelay(CYCLE_DELAY);
+	}
 
-   j = 0;
-   
-   /* Now, we can read data from the eeprom by clocking it in */
-   for (i=7; i>=0; i--)
-   {
-      NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
-      		(val | clocktab[j++]) );
-      osp_MicroDelay( CYCLE_DELAY );
-      tempread |= (((NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE )
-		& 0x00010000) >> 16) << i);
-      NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
-      		(val | clocktab[j++]) );
-      osp_MicroDelay( CYCLE_DELAY );
-   }
+	j = 0;
 
-   NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, 2 );
-   osp_MicroDelay( CYCLE_DELAY );
-   return tempread;
+	/* Now, we can read data from the eeprom by clocking it in */
+	for (i = 7; i >= 0; i--) {
+		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+				  (val | clocktab[j++]));
+		osp_MicroDelay(CYCLE_DELAY);
+		tempread |=
+		    (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE)
+		       & 0x00010000) >> 16) << i);
+		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+				  (val | clocktab[j++]));
+		osp_MicroDelay(CYCLE_DELAY);
+	}
+
+	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2);
+	osp_MicroDelay(CYCLE_DELAY);
+	return tempread;
 }
 
-
-static void
-nicstar_init_eprom( virt_addr_t base )
+static void nicstar_init_eprom(virt_addr_t base)
 {
-    u_int32_t val;
+	u_int32_t val;
 
-    /*
-     * turn chip select off
-     */
-    val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;
+	/*
+	 * turn chip select off
+	 */
+	val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;
 
-    NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
-    	(val | CS_HIGH | CLK_HIGH));
-    osp_MicroDelay( CYCLE_DELAY );
+	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+			  (val | CS_HIGH | CLK_HIGH));
+	osp_MicroDelay(CYCLE_DELAY);
 
-    NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
-    	(val | CS_HIGH | CLK_LOW));
-    osp_MicroDelay( CYCLE_DELAY );
+	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+			  (val | CS_HIGH | CLK_LOW));
+	osp_MicroDelay(CYCLE_DELAY);
 
-    NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
-    	(val | CS_HIGH | CLK_HIGH));
-    osp_MicroDelay( CYCLE_DELAY );
+	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+			  (val | CS_HIGH | CLK_HIGH));
+	osp_MicroDelay(CYCLE_DELAY);
 
-    NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
-    	(val | CS_HIGH | CLK_LOW));
-    osp_MicroDelay( CYCLE_DELAY );
+	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+			  (val | CS_HIGH | CLK_LOW));
+	osp_MicroDelay(CYCLE_DELAY);
 }
 
-
 /*
  * This routine will be the interface to the ReadPromByte function
  * above.
- */ 
+ */
 
 static void
-nicstar_read_eprom(
-    virt_addr_t	base,
-    u_int8_t	prom_offset,
-    u_int8_t	*buffer,
-    u_int32_t	nbytes )
+nicstar_read_eprom(virt_addr_t base,
+		   u_int8_t prom_offset, u_int8_t *buffer, u_int32_t nbytes)
 {
-    u_int		i;
-    
-    for (i=0; i<nbytes; i++)
-    {
-	buffer[i] = read_eprom_byte( base, prom_offset );
-	++prom_offset;
- 	osp_MicroDelay( CYCLE_DELAY );
-    }
+	u_int i;
+
+	for (i = 0; i < nbytes; i++) {
+		buffer[i] = read_eprom_byte(base, prom_offset);
+		++prom_offset;
+		osp_MicroDelay(CYCLE_DELAY);
+	}
 }
-
-
-/*
-void osp_MicroDelay(int x) {
-    
-}
-*/
-
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 9630fbd..38bbbd0 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1819,3 +1819,67 @@
 	spin_unlock(&devices_kset->list_lock);
 	async_synchronize_full();
 }
+
+/*
+ * Device logging functions
+ */
+
+#ifdef CONFIG_PRINTK
+
+static int __dev_printk(const char *level, const struct device *dev,
+			struct va_format *vaf)
+{
+	if (!dev)
+		return printk("%s(NULL device *): %pV", level, vaf);
+
+	return printk("%s%s %s: %pV",
+		      level, dev_driver_string(dev), dev_name(dev), vaf);
+}
+
+int dev_printk(const char *level, const struct device *dev,
+	       const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int r;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	r = __dev_printk(level, dev, &vaf);
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(dev_printk);
+
+#define define_dev_printk_level(func, kern_level)		\
+int func(const struct device *dev, const char *fmt, ...)	\
+{								\
+	struct va_format vaf;					\
+	va_list args;						\
+	int r;							\
+								\
+	va_start(args, fmt);					\
+								\
+	vaf.fmt = fmt;						\
+	vaf.va = &args;						\
+								\
+	r = __dev_printk(kern_level, dev, &vaf);		\
+	va_end(args);						\
+								\
+	return r;						\
+}								\
+EXPORT_SYMBOL(func);
+
+define_dev_printk_level(dev_emerg, KERN_EMERG);
+define_dev_printk_level(dev_alert, KERN_ALERT);
+define_dev_printk_level(dev_crit, KERN_CRIT);
+define_dev_printk_level(dev_err, KERN_ERR);
+define_dev_printk_level(dev_warn, KERN_WARNING);
+define_dev_printk_level(dev_notice, KERN_NOTICE);
+define_dev_printk_level(_dev_info, KERN_INFO);
+
+#endif
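The block above centralizes device logging: __dev_printk() prefixes the message with the driver and device name, and define_dev_printk_level() expands one wrapper per log level, dev_emerg() through _dev_info(). A minimal usage sketch (illustrative only; example_report() is a hypothetical caller, and a NULL dev is also handled by __dev_printk):

/* Hypothetical caller of the generated logging helpers. */
static void example_report(struct device *dev, int err)
{
	if (err)
		dev_err(dev, "transfer failed: %d\n", err);
	else
		dev_info(dev, "transfer complete\n");
}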
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 0b926e4..a5ea1bc 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -215,7 +215,7 @@
 
 	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
 	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-		neigh_event_send(rt->u.dst.neighbour, NULL);
+		neigh_event_send(rt->dst.neighbour, NULL);
 		ret = -ENODATA;
 		if (neigh)
 			goto release;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index ebfb117..abd683e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1364,7 +1364,7 @@
 		       __func__);
 		goto reject;
 	}
-	dst = &rt->u.dst;
+	dst = &rt->dst;
 	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
 	if (!l2t) {
 		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
@@ -1932,7 +1932,7 @@
 		err = -EHOSTUNREACH;
 		goto fail3;
 	}
-	ep->dst = &rt->u.dst;
+	ep->dst = &rt->dst;
 
 	/* get a l2t entry */
 	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 30ce0a8..8b693c8 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1364,7 +1364,7 @@
 		       __func__);
 		goto reject;
 	}
-	dst = &rt->u.dst;
+	dst = &rt->dst;
 	if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
 		pdev = ip_dev_find(&init_net, peer_ip);
 		BUG_ON(!pdev);
@@ -1938,7 +1938,7 @@
 		err = -EHOSTUNREACH;
 		goto fail3;
 	}
-	ep->dst = &rt->u.dst;
+	ep->dst = &rt->dst;
 
 	/* get a l2t entry */
 	if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 986d6f3..d876d04 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1146,7 +1146,7 @@
 	}
 
 	if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
-		neigh_event_send(rt->u.dst.neighbour, NULL);
+		neigh_event_send(rt->dst.neighbour, NULL);
 
 	ip_rt_put(rt);
 	return rc;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 5cc0a9a..42e7aad 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1567,6 +1567,12 @@
 }
 
 
+static int nes_netdev_set_flags(struct net_device *netdev, u32 flags)
+{
+	return ethtool_op_set_flags(netdev, flags, ETH_FLAG_LRO);
+}
+
+
 static const struct ethtool_ops nes_ethtool_ops = {
 	.get_link = ethtool_op_get_link,
 	.get_settings = nes_netdev_get_settings,
@@ -1588,7 +1594,7 @@
 	.get_tso = ethtool_op_get_tso,
 	.set_tso = ethtool_op_set_tso,
 	.get_flags = ethtool_op_get_flags,
-	.set_flags = ethtool_op_set_flags,
+	.set_flags = nes_netdev_set_flags,
 };
 
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 40e8584..1a1657c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -147,6 +147,11 @@
 	data[index++] = priv->lro.lro_mgr.stats.no_desc;
 }
 
+static int ipoib_set_flags(struct net_device *dev, u32 flags)
+{
+	return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO);
+}
+
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo		= ipoib_get_drvinfo,
 	.get_rx_csum		= ipoib_get_rx_csum,
@@ -154,7 +159,7 @@
 	.get_coalesce		= ipoib_get_coalesce,
 	.set_coalesce		= ipoib_set_coalesce,
 	.get_flags		= ethtool_op_get_flags,
-	.set_flags		= ethtool_op_set_flags,
+	.set_flags		= ipoib_set_flags,
 	.get_strings		= ipoib_get_strings,
 	.get_sset_count		= ipoib_get_sset_count,
 	.get_ethtool_stats	= ipoib_get_ethtool_stats,
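nes_netdev_set_flags() and ipoib_set_flags() exist because ethtool_op_set_flags() now takes a third argument naming the flags the driver supports and refuses anything outside that mask, so userspace can only toggle LRO on these devices. As a hedged illustration (not from this patch; ETH_FLAG_RXHASH is used purely as an example of a second supported flag), a driver with more offloads would simply widen the mask:

/* Illustrative sketch: let ethtool toggle both LRO and RX hashing. */
static int example_set_flags(struct net_device *netdev, u32 flags)
{
	return ethtool_op_set_flags(netdev, flags,
				    ETH_FLAG_LRO | ETH_FLAG_RXHASH);
}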
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index f37b8f6..8c46bae 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -449,14 +449,9 @@
 
 	/* uprog.len is unsigned short, so no overflow here */
 	len = uprog.len * sizeof(struct sock_filter);
-	code = kmalloc(len, GFP_KERNEL);
-	if (code == NULL)
-		return -ENOMEM;
-
-	if (copy_from_user(code, uprog.filter, len)) {
-		kfree(code);
-		return -EFAULT;
-	}
+	code = memdup_user(uprog.filter, len);
+	if (IS_ERR(code))
+		return PTR_ERR(code);
 
 	err = sk_chk_filter(code, uprog.len);
 	if (err) {
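This hunk, and the pcbit and sc ioctl hunks below, all replace an open-coded kmalloc() plus copy_from_user() pair with memdup_user(), which allocates and copies in one step and returns an ERR_PTR on failure. A minimal sketch of the idiom (illustrative only; the helper name is hypothetical):

/* Illustrative sketch of the memdup_user() pattern used in these hunks. */
static int example_copy_from_user(void __user *uptr, size_t len)
{
	void *buf;

	buf = memdup_user(uptr, len);	/* kmalloc() + copy_from_user() */
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT, nothing to free */

	/* ... consume buf ... */

	kfree(buf);
	return 0;
}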
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index fc8454d..51dc60d 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -2636,12 +2636,6 @@
 		if ((info->flags & ISDN_ASYNC_CLOSING) || (!info->tty)) {
 			return;
 		}
-#ifdef CONFIG_ISDN_AUDIO
-		if ( !info->vonline )
-			tty_ldisc_flush(info->tty);
-#else
-		tty_ldisc_flush(info->tty);
-#endif
 		if ((info->flags & ISDN_ASYNC_CHECK_CD) &&
 		    (!((info->flags & ISDN_ASYNC_CALLOUT_ACTIVE) &&
 		       (info->flags & ISDN_ASYNC_CALLOUT_NOHUP)))) {
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c
index 123c1d6..1507d2e 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/isdn/pcbit/drv.c
@@ -411,14 +411,10 @@
 			return -EINVAL;
 		}
 
-		cbuf = kmalloc(len, GFP_KERNEL);
-		if (!cbuf)
-			return -ENOMEM;
+		cbuf = memdup_user(buf, len);
+		if (IS_ERR(cbuf))
+			return PTR_ERR(cbuf);
 
-		if (copy_from_user(cbuf, buf, len)) {
-			kfree(cbuf);
-			return -EFAULT;
-		}
 		memcpy_toio(dev->sh_mem, cbuf, len);
 		kfree(cbuf);
 		return len;
diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c
index 1081091..43c5dc35 100644
--- a/drivers/isdn/sc/ioctl.c
+++ b/drivers/isdn/sc/ioctl.c
@@ -215,19 +215,13 @@
 		pr_debug("%s: DCBIOSETSPID: ioctl received\n",
 				sc_adapter[card]->devicename);
 
-		spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
-		if(!spid) {
-			kfree(rcvmsg);
-			return -ENOMEM;
-		}
-
 		/*
 		 * Get the spid from user space
 		 */
-		if (copy_from_user(spid, data->dataptr, SCIOC_SPIDSIZE)) {
+		spid = memdup_user(data->dataptr, SCIOC_SPIDSIZE);
+		if (IS_ERR(spid)) {
 			kfree(rcvmsg);
-			kfree(spid);
-			return -EFAULT;
+			return PTR_ERR(spid);
 		}
 
 		pr_debug("%s: SCIOCSETSPID: setting channel %d spid to %s\n", 
@@ -296,18 +290,13 @@
 		pr_debug("%s: SCIOSETDN: ioctl received\n",
 				sc_adapter[card]->devicename);
 
-		dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL);
-		if (!dn) {
-			kfree(rcvmsg);
-			return -ENOMEM;
-		}
 		/*
 		 * Get the spid from user space
 		 */
-		if (copy_from_user(dn, data->dataptr, SCIOC_DNSIZE)) {
+		dn = memdup_user(data->dataptr, SCIOC_DNSIZE);
+		if (IS_ERR(dn)) {
 			kfree(rcvmsg);
-			kfree(dn);
-			return -EFAULT;
+			return PTR_ERR(dn);
 		}
 
 		pr_debug("%s: SCIOCSETDN: setting channel %d dn to %s\n", 
diff --git a/drivers/net/3c527.h b/drivers/net/3c527.h
index 75e28fe..d693b8d 100644
--- a/drivers/net/3c527.h
+++ b/drivers/net/3c527.h
@@ -34,7 +34,7 @@
 {
  	u16 mbox;
  	u16 data[1];
-} __attribute((packed));
+} __packed;
 
 struct skb_header
 {
@@ -43,7 +43,7 @@
 	u16 next;	/* Do not change! */
 	u16 length;
 	u32 data;
-} __attribute((packed));
+} __packed;
 
 struct mc32_stats
 {
@@ -68,7 +68,7 @@
 	u32 dataA[6];
 	u16 dataB[5];
 	u32 dataC[14];
-} __attribute((packed));
+} __packed;
 
 #define STATUS_MASK	0x0F
 #define COMPLETED	(1<<7)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index d75803e..069a03f 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -435,7 +435,6 @@
    First the windows.  There are eight register windows, with the command
    and status registers available in each.
    */
-#define EL3WINDOW(win_num) iowrite16(SelectWindow + (win_num), ioaddr + EL3_CMD)
 #define EL3_CMD 0x0e
 #define EL3_STATUS 0x0e
 
@@ -645,10 +644,51 @@
 	u16 deferred;						/* Resend these interrupts when we
 										 * bale from the ISR */
 	u16 io_size;						/* Size of PCI region (for release_region) */
-	spinlock_t lock;					/* Serialise access to device & its vortex_private */
-	struct mii_if_info mii;				/* MII lib hooks/info */
+
+	/* Serialises access to hardware other than MII and variables below.
+	 * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */
+	spinlock_t lock;
+
+	spinlock_t mii_lock;		/* Serialises access to MII */
+	struct mii_if_info mii;		/* MII lib hooks/info */
+	spinlock_t window_lock;		/* Serialises access to windowed regs */
+	int window;			/* Register window */
 };
 
+static void window_set(struct vortex_private *vp, int window)
+{
+	if (window != vp->window) {
+		iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
+		vp->window = window;
+	}
+}
+
+#define DEFINE_WINDOW_IO(size)						\
+static u ## size							\
+window_read ## size(struct vortex_private *vp, int window, int addr)	\
+{									\
+	unsigned long flags;						\
+	u ## size ret;							\
+	spin_lock_irqsave(&vp->window_lock, flags);			\
+	window_set(vp, window);						\
+	ret = ioread ## size(vp->ioaddr + addr);			\
+	spin_unlock_irqrestore(&vp->window_lock, flags);		\
+	return ret;							\
+}									\
+static void								\
+window_write ## size(struct vortex_private *vp, u ## size value,	\
+		     int window, int addr)				\
+{									\
+	unsigned long flags;						\
+	spin_lock_irqsave(&vp->window_lock, flags);			\
+	window_set(vp, window);						\
+	iowrite ## size(value, vp->ioaddr + addr);			\
+	spin_unlock_irqrestore(&vp->window_lock, flags);		\
+}
+DEFINE_WINDOW_IO(8)
+DEFINE_WINDOW_IO(16)
+DEFINE_WINDOW_IO(32)
+
 #ifdef CONFIG_PCI
 #define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
 #else
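window_set() and the DEFINE_WINDOW_IO() accessors above replace the open-coded EL3WINDOW() plus ioread/iowrite sequences removed throughout the rest of this diff; each accessor selects the register window under window_lock, per the lock hierarchy documented in vortex_private. A hedged usage sketch (illustrative only, assuming a valid struct vortex_private *vp; the register names and the 0x20 full-duplex bit are taken from later hunks):

/* Illustrative sketch: windowed register access via the new helpers.
 * window_read16(vp, window, addr) and window_write16(vp, value, window, addr)
 * select the window and serialise on vp->window_lock internally. */
static void example_window_access(struct vortex_private *vp)
{
	u16 media = window_read16(vp, 4, Wn4_Media);
	u16 mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);

	if (media & Media_LnkBeat)	/* link is up: set full duplex */
		window_write16(vp, mac_ctrl | 0x20, 3, Wn3_MAC_Ctrl);
}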
@@ -711,7 +751,7 @@
 static int vortex_up(struct net_device *dev);
 static void vortex_down(struct net_device *dev, int final);
 static int vortex_open(struct net_device *dev);
-static void mdio_sync(void __iomem *ioaddr, int bits);
+static void mdio_sync(struct vortex_private *vp, int bits);
 static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
 static void vortex_timer(unsigned long arg);
@@ -1119,6 +1159,7 @@
 	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
 	vp->io_size = vci->io_size;
 	vp->card_idx = card_idx;
+	vp->window = -1;
 
 	/* module list only for Compaq device */
 	if (gendev == NULL) {
@@ -1154,6 +1195,8 @@
 	}
 
 	spin_lock_init(&vp->lock);
+	spin_lock_init(&vp->mii_lock);
+	spin_lock_init(&vp->window_lock);
 	vp->gendev = gendev;
 	vp->mii.dev = dev;
 	vp->mii.mdio_read = mdio_read;
@@ -1205,7 +1248,6 @@
 	vp->mii.force_media = vp->full_duplex;
 	vp->options = option;
 	/* Read the station address from the EEPROM. */
-	EL3WINDOW(0);
 	{
 		int base;
 
@@ -1218,14 +1260,15 @@
 
 		for (i = 0; i < 0x40; i++) {
 			int timer;
-			iowrite16(base + i, ioaddr + Wn0EepromCmd);
+			window_write16(vp, base + i, 0, Wn0EepromCmd);
 			/* Pause for at least 162 us. for the read to take place. */
 			for (timer = 10; timer >= 0; timer--) {
 				udelay(162);
-				if ((ioread16(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+				if ((window_read16(vp, 0, Wn0EepromCmd) &
+				     0x8000) == 0)
 					break;
 			}
-			eeprom[i] = ioread16(ioaddr + Wn0EepromData);
+			eeprom[i] = window_read16(vp, 0, Wn0EepromData);
 		}
 	}
 	for (i = 0; i < 0x18; i++)
@@ -1250,9 +1293,8 @@
 		pr_err("*** EEPROM MAC address is invalid.\n");
 		goto free_ring;	/* With every pack */
 	}
-	EL3WINDOW(2);
 	for (i = 0; i < 6; i++)
-		iowrite8(dev->dev_addr[i], ioaddr + i);
+		window_write8(vp, dev->dev_addr[i], 2, i);
 
 	if (print_info)
 		pr_cont(", IRQ %d\n", dev->irq);
@@ -1261,8 +1303,7 @@
 		pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n",
 			   dev->irq);
 
-	EL3WINDOW(4);
-	step = (ioread8(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
+	step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
 	if (print_info) {
 		pr_info("  product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
 			eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
@@ -1285,17 +1326,15 @@
 				(unsigned long long)pci_resource_start(pdev, 2),
 				vp->cb_fn_base);
 		}
-		EL3WINDOW(2);
 
-		n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010;
+		n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
 		if (vp->drv_flags & INVERT_LED_PWR)
 			n |= 0x10;
 		if (vp->drv_flags & INVERT_MII_PWR)
 			n |= 0x4000;
-		iowrite16(n, ioaddr + Wn2_ResetOptions);
+		window_write16(vp, n, 2, Wn2_ResetOptions);
 		if (vp->drv_flags & WNO_XCVR_PWR) {
-			EL3WINDOW(0);
-			iowrite16(0x0800, ioaddr);
+			window_write16(vp, 0x0800, 0, 0);
 		}
 	}
 
@@ -1313,14 +1352,13 @@
 	{
 		static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
 		unsigned int config;
-		EL3WINDOW(3);
-		vp->available_media = ioread16(ioaddr + Wn3_Options);
+		vp->available_media = window_read16(vp, 3, Wn3_Options);
 		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
 			vp->available_media = 0x40;
-		config = ioread32(ioaddr + Wn3_Config);
+		config = window_read32(vp, 3, Wn3_Config);
 		if (print_info) {
 			pr_debug("  Internal config register is %4.4x, transceivers %#x.\n",
-				config, ioread16(ioaddr + Wn3_Options));
+				config, window_read16(vp, 3, Wn3_Options));
 			pr_info("  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
 				   8 << RAM_SIZE(config),
 				   RAM_WIDTH(config) ? "word" : "byte",
@@ -1346,7 +1384,6 @@
 	if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
 		dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
 		int phy, phy_idx = 0;
-		EL3WINDOW(4);
 		mii_preamble_required++;
 		if (vp->drv_flags & EXTRA_PREAMBLE)
 			mii_preamble_required++;
@@ -1478,18 +1515,17 @@
 vortex_set_duplex(struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	void __iomem *ioaddr = vp->ioaddr;
 
 	pr_info("%s:  setting %s-duplex.\n",
 		dev->name, (vp->full_duplex) ? "full" : "half");
 
-	EL3WINDOW(3);
 	/* Set the full-duplex bit. */
-	iowrite16(((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
-		 	(vp->large_frames ? 0x40 : 0) |
-			((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
-					0x100 : 0),
-			ioaddr + Wn3_MAC_Ctrl);
+	window_write16(vp,
+		       ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
+		       (vp->large_frames ? 0x40 : 0) |
+		       ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
+			0x100 : 0),
+		       3, Wn3_MAC_Ctrl);
 }
 
 static void vortex_check_media(struct net_device *dev, unsigned int init)
@@ -1529,8 +1565,7 @@
 	}
 
 	/* Before initializing select the active media port. */
-	EL3WINDOW(3);
-	config = ioread32(ioaddr + Wn3_Config);
+	config = window_read32(vp, 3, Wn3_Config);
 
 	if (vp->media_override != 7) {
 		pr_info("%s: Media override to transceiver %d (%s).\n",
@@ -1577,10 +1612,9 @@
 	config = BFINS(config, dev->if_port, 20, 4);
 	if (vortex_debug > 6)
 		pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
-	iowrite32(config, ioaddr + Wn3_Config);
+	window_write32(vp, config, 3, Wn3_Config);
 
 	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
-		EL3WINDOW(4);
 		mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
 		mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
 		vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
@@ -1601,51 +1635,46 @@
 	iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
 
 	if (vortex_debug > 1) {
-		EL3WINDOW(4);
 		pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
-			   dev->name, dev->irq, ioread16(ioaddr + Wn4_Media));
+			   dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
 	}
 
 	/* Set the station address and mask in window 2 each time opened. */
-	EL3WINDOW(2);
 	for (i = 0; i < 6; i++)
-		iowrite8(dev->dev_addr[i], ioaddr + i);
+		window_write8(vp, dev->dev_addr[i], 2, i);
 	for (; i < 12; i+=2)
-		iowrite16(0, ioaddr + i);
+		window_write16(vp, 0, 2, i);
 
 	if (vp->cb_fn_base) {
-		unsigned short n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010;
+		unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
 		if (vp->drv_flags & INVERT_LED_PWR)
 			n |= 0x10;
 		if (vp->drv_flags & INVERT_MII_PWR)
 			n |= 0x4000;
-		iowrite16(n, ioaddr + Wn2_ResetOptions);
+		window_write16(vp, n, 2, Wn2_ResetOptions);
 	}
 
 	if (dev->if_port == XCVR_10base2)
 		/* Start the thinnet transceiver. We should really wait 50ms...*/
 		iowrite16(StartCoax, ioaddr + EL3_CMD);
 	if (dev->if_port != XCVR_NWAY) {
-		EL3WINDOW(4);
-		iowrite16((ioread16(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
-			 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+		window_write16(vp,
+			       (window_read16(vp, 4, Wn4_Media) &
+				~(Media_10TP|Media_SQE)) |
+			       media_tbl[dev->if_port].media_bits,
+			       4, Wn4_Media);
 	}
 
 	/* Switch to the stats window, and clear all stats by reading. */
 	iowrite16(StatsDisable, ioaddr + EL3_CMD);
-	EL3WINDOW(6);
 	for (i = 0; i < 10; i++)
-		ioread8(ioaddr + i);
-	ioread16(ioaddr + 10);
-	ioread16(ioaddr + 12);
+		window_read8(vp, 6, i);
+	window_read16(vp, 6, 10);
+	window_read16(vp, 6, 12);
 	/* New: On the Vortex we must also clear the BadSSD counter. */
-	EL3WINDOW(4);
-	ioread8(ioaddr + 12);
+	window_read8(vp, 4, 12);
 	/* ..and on the Boomerang we enable the extra statistics bits. */
-	iowrite16(0x0040, ioaddr + Wn4_NetDiag);
-
-	/* Switch to register set 7 for normal use. */
-	EL3WINDOW(7);
+	window_write16(vp, 0x0040, 4, Wn4_NetDiag);
 
 	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
 		vp->cur_rx = vp->dirty_rx = 0;
@@ -1763,7 +1792,7 @@
 	void __iomem *ioaddr = vp->ioaddr;
 	int next_tick = 60*HZ;
 	int ok = 0;
-	int media_status, old_window;
+	int media_status;
 
 	if (vortex_debug > 2) {
 		pr_debug("%s: Media selection timer tick happened, %s.\n",
@@ -1771,10 +1800,7 @@
 		pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
 	}
 
-	disable_irq_lockdep(dev->irq);
-	old_window = ioread16(ioaddr + EL3_CMD) >> 13;
-	EL3WINDOW(4);
-	media_status = ioread16(ioaddr + Wn4_Media);
+	media_status = window_read16(vp, 4, Wn4_Media);
 	switch (dev->if_port) {
 	case XCVR_10baseT:  case XCVR_100baseTx:  case XCVR_100baseFx:
 		if (media_status & Media_LnkBeat) {
@@ -1794,10 +1820,7 @@
 	case XCVR_MII: case XCVR_NWAY:
 		{
 			ok = 1;
-			/* Interrupts are already disabled */
-			spin_lock(&vp->lock);
 			vortex_check_media(dev, 0);
-			spin_unlock(&vp->lock);
 		}
 		break;
 	  default:					/* Other media types handled by Tx timeouts. */
@@ -1816,6 +1839,8 @@
 	if (!ok) {
 		unsigned int config;
 
+		spin_lock_irq(&vp->lock);
+
 		do {
 			dev->if_port = media_tbl[dev->if_port].next;
 		} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
@@ -1830,19 +1855,22 @@
 					   dev->name, media_tbl[dev->if_port].name);
 			next_tick = media_tbl[dev->if_port].wait;
 		}
-		iowrite16((media_status & ~(Media_10TP|Media_SQE)) |
-			 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+		window_write16(vp,
+			       (media_status & ~(Media_10TP|Media_SQE)) |
+			       media_tbl[dev->if_port].media_bits,
+			       4, Wn4_Media);
 
-		EL3WINDOW(3);
-		config = ioread32(ioaddr + Wn3_Config);
+		config = window_read32(vp, 3, Wn3_Config);
 		config = BFINS(config, dev->if_port, 20, 4);
-		iowrite32(config, ioaddr + Wn3_Config);
+		window_write32(vp, config, 3, Wn3_Config);
 
 		iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
 			 ioaddr + EL3_CMD);
 		if (vortex_debug > 1)
 			pr_debug("wrote 0x%08x to Wn3_Config\n", config);
 		/* AKPM: FIXME: Should reset Rx & Tx here.  P60 of 3c90xc.pdf */
+
+		spin_unlock_irq(&vp->lock);
 	}
 
 leave_media_alone:
@@ -1850,8 +1878,6 @@
 	  pr_debug("%s: Media selection timer finished, %s.\n",
 			 dev->name, media_tbl[dev->if_port].name);
 
-	EL3WINDOW(old_window);
-	enable_irq_lockdep(dev->irq);
 	mod_timer(&vp->timer, RUN_AT(next_tick));
 	if (vp->deferred)
 		iowrite16(FakeIntr, ioaddr + EL3_CMD);
@@ -1865,12 +1891,11 @@
 	pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
 		   dev->name, ioread8(ioaddr + TxStatus),
 		   ioread16(ioaddr + EL3_STATUS));
-	EL3WINDOW(4);
 	pr_err("  diagnostics: net %04x media %04x dma %08x fifo %04x\n",
-			ioread16(ioaddr + Wn4_NetDiag),
-			ioread16(ioaddr + Wn4_Media),
+			window_read16(vp, 4, Wn4_NetDiag),
+			window_read16(vp, 4, Wn4_Media),
 			ioread32(ioaddr + PktStatus),
-			ioread16(ioaddr + Wn4_FIFODiag));
+			window_read16(vp, 4, Wn4_FIFODiag));
 	/* Slight code bloat to be user friendly. */
 	if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
 		pr_err("%s: Transmitter encountered 16 collisions --"
@@ -1917,9 +1942,6 @@
 	/* Issue Tx Enable */
 	iowrite16(TxEnable, ioaddr + EL3_CMD);
 	dev->trans_start = jiffies; /* prevent tx timeout */
-
-	/* Switch to register set 7 for normal use. */
-	EL3WINDOW(7);
 }
 
 /*
@@ -1980,10 +2002,10 @@
 			ioread16(ioaddr + EL3_STATUS) & StatsFull) {
 			pr_warning("%s: Updating statistics failed, disabling "
 				   "stats as an interrupt source.\n", dev->name);
-			EL3WINDOW(5);
-			iowrite16(SetIntrEnb | (ioread16(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
+			iowrite16(SetIntrEnb |
+				  (window_read16(vp, 5, 10) & ~StatsFull),
+				  ioaddr + EL3_CMD);
 			vp->intr_enable &= ~StatsFull;
-			EL3WINDOW(7);
 			DoneDidThat++;
 		}
 	}
@@ -1993,8 +2015,7 @@
 	}
 	if (status & HostError) {
 		u16 fifo_diag;
-		EL3WINDOW(4);
-		fifo_diag = ioread16(ioaddr + Wn4_FIFODiag);
+		fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
 		pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
 			   dev->name, fifo_diag);
 		/* Adapter failure requires Tx/Rx reset and reinit. */
@@ -2043,9 +2064,13 @@
 	if (vp->bus_master) {
 		/* Set the bus-master controller to transfer the packet. */
 		int len = (skb->len + 3) & ~3;
-		iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
-				ioaddr + Wn7_MasterAddr);
+		vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
+						PCI_DMA_TODEVICE);
+		spin_lock_irq(&vp->window_lock);
+		window_set(vp, 7);
+		iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
 		iowrite16(len, ioaddr + Wn7_MasterLen);
+		spin_unlock_irq(&vp->window_lock);
 		vp->tx_skb = skb;
 		iowrite16(StartDMADown, ioaddr + EL3_CMD);
 		/* netif_wake_queue() will be called at the DMADone interrupt. */
@@ -2217,6 +2242,9 @@
 		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
 			   dev->name, status, ioread8(ioaddr + Timer));
 
+	spin_lock(&vp->window_lock);
+	window_set(vp, 7);
+
 	do {
 		if (vortex_debug > 5)
 				pr_debug("%s: In interrupt loop, status %4.4x.\n",
@@ -2275,6 +2303,8 @@
 		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
 	} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
 
+	spin_unlock(&vp->window_lock);
+
 	if (vortex_debug > 4)
 		pr_debug("%s: exiting interrupt, status %4.4x.\n",
 			   dev->name, status);
@@ -2760,85 +2790,58 @@
 static void update_stats(void __iomem *ioaddr, struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	int old_window = ioread16(ioaddr + EL3_CMD);
 
-	if (old_window == 0xffff)	/* Chip suspended or ejected. */
-		return;
 	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
 	/* Switch to the stats window, and read everything. */
-	EL3WINDOW(6);
-	dev->stats.tx_carrier_errors		+= ioread8(ioaddr + 0);
-	dev->stats.tx_heartbeat_errors		+= ioread8(ioaddr + 1);
-	dev->stats.tx_window_errors		+= ioread8(ioaddr + 4);
-	dev->stats.rx_fifo_errors		+= ioread8(ioaddr + 5);
-	dev->stats.tx_packets			+= ioread8(ioaddr + 6);
-	dev->stats.tx_packets			+= (ioread8(ioaddr + 9)&0x30) << 4;
-	/* Rx packets	*/			ioread8(ioaddr + 7);   /* Must read to clear */
+	dev->stats.tx_carrier_errors		+= window_read8(vp, 6, 0);
+	dev->stats.tx_heartbeat_errors		+= window_read8(vp, 6, 1);
+	dev->stats.tx_window_errors		+= window_read8(vp, 6, 4);
+	dev->stats.rx_fifo_errors		+= window_read8(vp, 6, 5);
+	dev->stats.tx_packets			+= window_read8(vp, 6, 6);
+	dev->stats.tx_packets			+= (window_read8(vp, 6, 9) &
+						    0x30) << 4;
+	/* Rx packets	*/			window_read8(vp, 6, 7);   /* Must read to clear */
 	/* Don't bother with register 9, an extension of registers 6&7.
 	   If we do use the 6&7 values the atomic update assumption above
 	   is invalid. */
-	dev->stats.rx_bytes 			+= ioread16(ioaddr + 10);
-	dev->stats.tx_bytes 			+= ioread16(ioaddr + 12);
+	dev->stats.rx_bytes 			+= window_read16(vp, 6, 10);
+	dev->stats.tx_bytes 			+= window_read16(vp, 6, 12);
 	/* Extra stats for get_ethtool_stats() */
-	vp->xstats.tx_multiple_collisions	+= ioread8(ioaddr + 2);
-	vp->xstats.tx_single_collisions         += ioread8(ioaddr + 3);
-	vp->xstats.tx_deferred			+= ioread8(ioaddr + 8);
-	EL3WINDOW(4);
-	vp->xstats.rx_bad_ssd			+= ioread8(ioaddr + 12);
+	vp->xstats.tx_multiple_collisions	+= window_read8(vp, 6, 2);
+	vp->xstats.tx_single_collisions         += window_read8(vp, 6, 3);
+	vp->xstats.tx_deferred			+= window_read8(vp, 6, 8);
+	vp->xstats.rx_bad_ssd			+= window_read8(vp, 4, 12);
 
 	dev->stats.collisions = vp->xstats.tx_multiple_collisions
 		+ vp->xstats.tx_single_collisions
 		+ vp->xstats.tx_max_collisions;
 
 	{
-		u8 up = ioread8(ioaddr + 13);
+		u8 up = window_read8(vp, 4, 13);
 		dev->stats.rx_bytes += (up & 0x0f) << 16;
 		dev->stats.tx_bytes += (up & 0xf0) << 12;
 	}
-
-	EL3WINDOW(old_window >> 13);
 }
 
 static int vortex_nway_reset(struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	void __iomem *ioaddr = vp->ioaddr;
-	unsigned long flags;
-	int rc;
 
-	spin_lock_irqsave(&vp->lock, flags);
-	EL3WINDOW(4);
-	rc = mii_nway_restart(&vp->mii);
-	spin_unlock_irqrestore(&vp->lock, flags);
-	return rc;
+	return mii_nway_restart(&vp->mii);
 }
 
 static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	void __iomem *ioaddr = vp->ioaddr;
-	unsigned long flags;
-	int rc;
 
-	spin_lock_irqsave(&vp->lock, flags);
-	EL3WINDOW(4);
-	rc = mii_ethtool_gset(&vp->mii, cmd);
-	spin_unlock_irqrestore(&vp->lock, flags);
-	return rc;
+	return mii_ethtool_gset(&vp->mii, cmd);
 }
 
 static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	void __iomem *ioaddr = vp->ioaddr;
-	unsigned long flags;
-	int rc;
 
-	spin_lock_irqsave(&vp->lock, flags);
-	EL3WINDOW(4);
-	rc = mii_ethtool_sset(&vp->mii, cmd);
-	spin_unlock_irqrestore(&vp->lock, flags);
-	return rc;
+	return mii_ethtool_sset(&vp->mii, cmd);
 }
 
 static u32 vortex_get_msglevel(struct net_device *dev)
@@ -2930,7 +2933,6 @@
 {
 	int err;
 	struct vortex_private *vp = netdev_priv(dev);
-	void __iomem *ioaddr = vp->ioaddr;
 	unsigned long flags;
 	pci_power_t state = 0;
 
@@ -2942,7 +2944,6 @@
 	if(state != 0)
 		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
 	spin_lock_irqsave(&vp->lock, flags);
-	EL3WINDOW(4);
 	err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
 	spin_unlock_irqrestore(&vp->lock, flags);
 	if(state != 0)
@@ -2985,8 +2986,6 @@
 static void set_8021q_mode(struct net_device *dev, int enable)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	void __iomem *ioaddr = vp->ioaddr;
-	int old_window = ioread16(ioaddr + EL3_CMD);
 	int mac_ctrl;
 
 	if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
@@ -2997,28 +2996,23 @@
 		if (enable)
 			max_pkt_size += 4;	/* 802.1Q VLAN tag */
 
-		EL3WINDOW(3);
-		iowrite16(max_pkt_size, ioaddr+Wn3_MaxPktSize);
+		window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);
 
 		/* set VlanEtherType to let the hardware checksumming
 		   treat tagged frames correctly */
-		EL3WINDOW(7);
-		iowrite16(VLAN_ETHER_TYPE, ioaddr+Wn7_VlanEtherType);
+		window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
 	} else {
 		/* on older cards we have to enable large frames */
 
 		vp->large_frames = dev->mtu > 1500 || enable;
 
-		EL3WINDOW(3);
-		mac_ctrl = ioread16(ioaddr+Wn3_MAC_Ctrl);
+		mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
 		if (vp->large_frames)
 			mac_ctrl |= 0x40;
 		else
 			mac_ctrl &= ~0x40;
-		iowrite16(mac_ctrl, ioaddr+Wn3_MAC_Ctrl);
+		window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
 	}
-
-	EL3WINDOW(old_window);
 }
 #else
 
@@ -3037,7 +3031,10 @@
 /* The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
    met by back-to-back PCI I/O cycles, but we insert a delay to avoid
    "overclocking" issues. */
-#define mdio_delay() ioread32(mdio_addr)
+static void mdio_delay(struct vortex_private *vp)
+{
+	window_read32(vp, 4, Wn4_PhysicalMgmt);
+}
 
 #define MDIO_SHIFT_CLK	0x01
 #define MDIO_DIR_WRITE	0x04
@@ -3048,16 +3045,15 @@
 
 /* Generate the preamble required for initial synchronization and
    a few older transceivers. */
-static void mdio_sync(void __iomem *ioaddr, int bits)
+static void mdio_sync(struct vortex_private *vp, int bits)
 {
-	void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
-
 	/* Establish sync by sending at least 32 logic ones. */
 	while (-- bits >= 0) {
-		iowrite16(MDIO_DATA_WRITE1, mdio_addr);
-		mdio_delay();
-		iowrite16(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
-		mdio_delay();
+		window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
 	}
 }
 
@@ -3065,59 +3061,70 @@
 {
 	int i;
 	struct vortex_private *vp = netdev_priv(dev);
-	void __iomem *ioaddr = vp->ioaddr;
 	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
 	unsigned int retval = 0;
-	void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+	spin_lock_bh(&vp->mii_lock);
 
 	if (mii_preamble_required)
-		mdio_sync(ioaddr, 32);
+		mdio_sync(vp, 32);
 
 	/* Shift the read command bits out. */
 	for (i = 14; i >= 0; i--) {
 		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
-		iowrite16(dataval, mdio_addr);
-		mdio_delay();
-		iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr);
-		mdio_delay();
+		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, dataval | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
 	}
 	/* Read the two transition, 16 data, and wire-idle bits. */
 	for (i = 19; i > 0; i--) {
-		iowrite16(MDIO_ENB_IN, mdio_addr);
-		mdio_delay();
-		retval = (retval << 1) | ((ioread16(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
-		iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
-		mdio_delay();
+		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		retval = (retval << 1) |
+			((window_read16(vp, 4, Wn4_PhysicalMgmt) &
+			  MDIO_DATA_READ) ? 1 : 0);
+		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
 	}
+
+	spin_unlock_bh(&vp->mii_lock);
+
 	return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
 }
 
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	void __iomem *ioaddr = vp->ioaddr;
 	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
-	void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
 	int i;
 
+	spin_lock_bh(&vp->mii_lock);
+
 	if (mii_preamble_required)
-		mdio_sync(ioaddr, 32);
+		mdio_sync(vp, 32);
 
 	/* Shift the command bits out. */
 	for (i = 31; i >= 0; i--) {
 		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
-		iowrite16(dataval, mdio_addr);
-		mdio_delay();
-		iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr);
-		mdio_delay();
+		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, dataval | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
 	}
 	/* Leave the interface idle. */
 	for (i = 1; i >= 0; i--) {
-		iowrite16(MDIO_ENB_IN, mdio_addr);
-		mdio_delay();
-		iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
-		mdio_delay();
+		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
 	}
+
+	spin_unlock_bh(&vp->mii_lock);
 }
 
 /* ACPI: Advanced Configuration and Power Interface. */
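For reference, mdio_read() above shifts out read_cmd from bit 14 down to bit 0: the start and read-opcode bits contributed by the 0xf6 pattern, then the 5-bit PHY address and the 5-bit register number. A small worked example of the frame value (illustrative only; PHY address 24 is arbitrary, MII_BMSR comes from linux/mii.h):

/* Illustrative: composing the MDIO read command exactly as mdio_read() does. */
int read_cmd = (0xf6 << 10) | (24 << 5) | MII_BMSR;	/* 0x3db01: read BMSR from PHY 24 */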
@@ -3131,8 +3138,7 @@
 
 	if (vp->enable_wol) {
 		/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
-		EL3WINDOW(7);
-		iowrite16(2, ioaddr + 0x0c);
+		window_write16(vp, 2, 7, 0x0c);
 		/* The RxFilter must accept the WOL frames. */
 		iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
 		iowrite16(RxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 284a5f4..4a4f6b8 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -322,7 +322,7 @@
 	__le32			rx_ok_mcast;
 	__le16			tx_abort;
 	__le16			tx_underrun;
-} __attribute__((packed));
+} __packed;
 
 struct cp_extra_stats {
 	unsigned long		rx_frags;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 97d8068..f5166dc 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -662,7 +662,7 @@
 /* read MMIO register */
 #define RTL_R8(reg)		ioread8 (ioaddr + (reg))
 #define RTL_R16(reg)		ioread16 (ioaddr + (reg))
-#define RTL_R32(reg)		((unsigned long) ioread32 (ioaddr + (reg)))
+#define RTL_R32(reg)		ioread32 (ioaddr + (reg))
 
 
 static const u16 rtl8139_intr_mask =
@@ -862,7 +862,7 @@
 	/* if unknown chip, assume array element #0, original RTL-8139 in this case */
 	i = 0;
 	dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
-	dev_dbg(&pdev->dev, "TxConfig = 0x%lx\n", RTL_R32 (TxConfig));
+	dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig));
 	tp->chipset = 0;
 
 match:
@@ -1643,7 +1643,7 @@
 	netdev_dbg(dev, "Tx queue start entry %ld  dirty entry %ld\n",
 		   tp->cur_tx, tp->dirty_tx);
 	for (i = 0; i < NUM_TX_DESC; i++)
-		netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
+		netdev_dbg(dev, "Tx descriptor %d is %08x%s\n",
 			   i, RTL_R32(TxStatus0 + (i * 4)),
 			   i == tp->dirty_tx % NUM_TX_DESC ?
 			   " (queue head)" : "");
@@ -2487,7 +2487,7 @@
 	int rx_mode;
 	u32 tmp;
 
-	netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08lx\n",
+	netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n",
 		   dev->flags, RTL_R32(RxConfig));
 
 	/* Note: do not reorder, GCC is clever about common statements. */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ce2fcdd..f65857e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -530,14 +530,15 @@
 	depends on SUPERH && \
 		(CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
 		 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
-		 CPU_SUBTYPE_SH7724)
+		 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7757)
 	select CRC32
 	select MII
 	select MDIO_BITBANG
 	select PHYLIB
 	help
 	  Renesas SuperH Ethernet device driver.
-	  This driver support SH7710, SH7712, SH7763, SH7619, and SH7724.
+	  This driver supports the following CPUs:
+		- SH7710, SH7712, SH7763, SH7619, SH7724, and SH7757.
 
 config SUNLANCE
 	tristate "Sun LANCE support"
@@ -1659,6 +1660,7 @@
 	depends on NET_PCI && PCI
 	select CRC32
 	select MII
+	select PHYLIB
 	help
 	  This is a driver for the R6040 Fast Ethernet MACs found in
 	  the RDC R-321x System-on-chips.
@@ -2601,6 +2603,29 @@
 	  To compile this driver as a module choose M here; the module
 	  will be called cxgb4.
 
+config CHELSIO_T4VF_DEPENDS
+	tristate
+	depends on PCI && INET
+	default y
+
+config CHELSIO_T4VF
+	tristate "Chelsio Communications T4 Virtual Function Ethernet support"
+	depends on CHELSIO_T4VF_DEPENDS
+	help
+	  This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
+	  adapters with PCI-E SR-IOV Virtual Functions.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.htm>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module choose M here; the module
+	  will be called cxgb4vf.
+
 config EHEA
 	tristate "eHEA Ethernet support"
 	depends on IBMEBUS && INET && SPARSEMEM
@@ -2614,7 +2639,6 @@
 config ENIC
 	tristate "Cisco VIC Ethernet NIC Support"
 	depends on PCI && INET
-	select INET_LRO
 	help
 	  This enables the support for the Cisco VIC Ethernet card.
 
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 0a0512a..ce55581 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_CHELSIO_T1) += chelsio/
 obj-$(CONFIG_CHELSIO_T3) += cxgb3/
 obj-$(CONFIG_CHELSIO_T4) += cxgb4/
+obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
 obj-$(CONFIG_EHEA) += ehea/
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_BONDING) += bonding/
@@ -275,7 +276,7 @@
 obj-$(CONFIG_USB_ZD1201)        += usb/
 obj-$(CONFIG_USB_IPHETH)        += usb/
 
-obj-y += wireless/
+obj-$(CONFIG_WLAN) += wireless/
 obj-$(CONFIG_NET_TULIP) += tulip/
 obj-$(CONFIG_HAMRADIO) += hamradio/
 obj-$(CONFIG_IRDA) += irda/
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 355797f..42fce91 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -37,69 +37,6 @@
 
 #define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n"
 
-
-static void rx(struct net_device *dev, int bufnum,
-	       struct archdr *pkthdr, int length);
-static int build_header(struct sk_buff *skb,
-			struct net_device *dev,
-			unsigned short type,
-			uint8_t daddr);
-static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
-		      int bufnum);
-static int ack_tx(struct net_device *dev, int acked);
-
-
-static struct ArcProto capmode_proto =
-{
-	'r',
-	XMTU,
-	0,
-       	rx,
-	build_header,
-	prepare_tx,
-	NULL,
-	ack_tx
-};
-
-
-static void arcnet_cap_init(void)
-{
-	int count;
-
-	for (count = 1; count <= 8; count++)
-		if (arc_proto_map[count] == arc_proto_default)
-			arc_proto_map[count] = &capmode_proto;
-
-	/* for cap mode, we only set the bcast proto if there's no better one */
-	if (arc_bcast_proto == arc_proto_default)
-		arc_bcast_proto = &capmode_proto;
-
-	arc_proto_default = &capmode_proto;
-	arc_raw_proto = &capmode_proto;
-}
-
-
-#ifdef MODULE
-
-static int __init capmode_module_init(void)
-{
-	printk(VERSION);
-	arcnet_cap_init();
-	return 0;
-}
-
-static void __exit capmode_module_exit(void)
-{
-	arcnet_unregister_proto(&capmode_proto);
-}
-module_init(capmode_module_init);
-module_exit(capmode_module_exit);
-
-MODULE_LICENSE("GPL");
-#endif				/* MODULE */
-
-
-
 /* packet receiver */
 static void rx(struct net_device *dev, int bufnum,
 	       struct archdr *pkthdr, int length)
@@ -231,65 +168,107 @@
 	BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
 	       length,ofs);
 
-	// Copy the arcnet-header + the protocol byte down:
+	/* Copy the arcnet-header + the protocol byte down: */
 	lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
 	lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto,
 			    sizeof(pkt->soft.cap.proto));
 
-	// Skip the extra integer we have written into it as a cookie
-	// but write the rest of the message:
+	/* Skip the extra integer we have written into it as a cookie
+	   but write the rest of the message: */
 	lp->hw.copy_to_card(dev, bufnum, ofs+1,
 			    ((unsigned char*)&pkt->soft.cap.mes),length-1);
 
 	lp->lastload_dest = hard->dest;
 
-	return 1;		/* done */
+	return 1;	/* done */
 }
 
-
 static int ack_tx(struct net_device *dev, int acked)
 {
-  struct arcnet_local *lp = netdev_priv(dev);
-  struct sk_buff *ackskb;
-  struct archdr *ackpkt;
-  int length=sizeof(struct arc_cap);
+	struct arcnet_local *lp = netdev_priv(dev);
+	struct sk_buff *ackskb;
+	struct archdr *ackpkt;
+	int length = sizeof(struct arc_cap);
 
-  BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n",
-	 lp->outgoing.skb->protocol, acked);
+	BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n",
+		lp->outgoing.skb->protocol, acked);
 
-  BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
+	BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
 
-  /* Now alloc a skb to send back up through the layers: */
-  ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC);
-  if (ackskb == NULL) {
-	  BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n");
-	  goto free_outskb;
-  }
+	/* Now alloc a skb to send back up through the layers: */
+	ackskb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
+	if (ackskb == NULL) {
+		BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n");
+		goto free_outskb;
+	}
 
-  skb_put(ackskb, length + ARC_HDR_SIZE );
-  ackskb->dev = dev;
+	skb_put(ackskb, length + ARC_HDR_SIZE);
+	ackskb->dev = dev;
 
-  skb_reset_mac_header(ackskb);
-  ackpkt = (struct archdr *)skb_mac_header(ackskb);
-  /* skb_pull(ackskb, ARC_HDR_SIZE); */
+	skb_reset_mac_header(ackskb);
+	ackpkt = (struct archdr *)skb_mac_header(ackskb);
+	/* skb_pull(ackskb, ARC_HDR_SIZE); */
 
+	skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
+				  ARC_HDR_SIZE + sizeof(struct arc_cap));
+	ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */
+	ackpkt->soft.cap.mes.ack = acked;
 
-  skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
-		ARC_HDR_SIZE + sizeof(struct arc_cap));
-  ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */
-  ackpkt->soft.cap.mes.ack=acked;
+	BUGMSG(D_PROTO, "Acknowledge for cap packet %x.\n",
+			*((int*)&ackpkt->soft.cap.cookie[0]));
 
-  BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n",
-	 *((int*)&ackpkt->soft.cap.cookie[0]));
+	ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
 
-  ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
+	BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
+	netif_rx(ackskb);
 
-  BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
-  netif_rx(ackskb);
+free_outskb:
+	dev_kfree_skb_irq(lp->outgoing.skb);
+	lp->outgoing.proto = NULL; /* We are always finished when in this protocol */
 
- free_outskb:
-  dev_kfree_skb_irq(lp->outgoing.skb);
-  lp->outgoing.proto = NULL; /* We are always finished when in this protocol */
-
-  return 0;
+	return 0;
 }
+
+static struct ArcProto capmode_proto =
+{
+	'r',
+	XMTU,
+	0,
+	rx,
+	build_header,
+	prepare_tx,
+	NULL,
+	ack_tx
+};
+
+static void arcnet_cap_init(void)
+{
+	int count;
+
+	for (count = 1; count <= 8; count++)
+		if (arc_proto_map[count] == arc_proto_default)
+			arc_proto_map[count] = &capmode_proto;
+
+	/* for cap mode, we only set the bcast proto if there's no better one */
+	if (arc_bcast_proto == arc_proto_default)
+		arc_bcast_proto = &capmode_proto;
+
+	arc_proto_default = &capmode_proto;
+	arc_raw_proto = &capmode_proto;
+}
+
+static int __init capmode_module_init(void)
+{
+	printk(VERSION);
+	arcnet_cap_init();
+	return 0;
+}
+
+static void __exit capmode_module_exit(void)
+{
+	arcnet_unregister_proto(&capmode_proto);
+}
+module_init(capmode_module_init);
+module_exit(capmode_module_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 24df032..ee2f842 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -738,6 +738,17 @@
 	struct netdev_hw_addr *ha;
 	u8 diffs[ETH_ALEN], *addr;
 	int i;
+	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+	if (dev->flags & IFF_ALLMULTI) {
+		for (i = 0; i < ETH_ALEN; i++) {
+			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
+			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
+		}
+		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
+			&port->regs->rx_control[0]);
+		return;
+	}
 
 	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
 		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
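A note on the IFF_ALLMULTI branch added above: every multicast Ethernet address has the group bit (bit 0 of the first octet) set, so programming the address/mask pair 01:00:00:00:00:00 into the filter accepts all multicast frames. The sketch below is illustrative only; the helper name and the assumption that the hardware accepts a frame when the masked address bytes match the masked filter bytes are not taken from the driver.

	#include <stdbool.h>
	#include <stdint.h>

	/* Accept a frame if the masked address equals the masked filter value.
	 * With filter = mask = 01:00:00:00:00:00 this reduces to "group bit set",
	 * i.e. every multicast (and broadcast) frame matches. */
	static bool hw_filter_match(const uint8_t *frame_addr,
				    const uint8_t *filt_addr,
				    const uint8_t *filt_mask)
	{
		int i;

		for (i = 0; i < 6; i++)
			if ((frame_addr[i] & filt_mask[i]) !=
			    (filt_addr[i] & filt_mask[i]))
				return false;
		return true;
	}
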
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 84ae905..52abbbd 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -73,7 +73,8 @@
 #define FULL_DUPLEX        2
 
 #define AT_RX_BUF_SIZE		(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MAX_JUMBO_FRAME_SIZE 	(9*1024)
+#define MAX_JUMBO_FRAME_SIZE	(6*1024)
+#define MAX_TSO_FRAME_SIZE      (7*1024)
 #define MAX_TX_OFFLOAD_THRESH	(9*1024)
 
 #define AT_MAX_RECEIVE_QUEUE    4
@@ -87,10 +88,11 @@
 #define AT_MAX_INT_WORK		5
 #define AT_TWSI_EEPROM_TIMEOUT 	100
 #define AT_HW_MAX_IDLE_DELAY 	10
-#define AT_SUSPEND_LINK_TIMEOUT 28
+#define AT_SUSPEND_LINK_TIMEOUT 100
 
 #define AT_ASPM_L0S_TIMER	6
 #define AT_ASPM_L1_TIMER	12
+#define AT_LCKDET_TIMER		12
 
 #define ATL1C_PCIE_L0S_L1_DISABLE 	0x01
 #define ATL1C_PCIE_PHY_RESET		0x02
@@ -316,6 +318,7 @@
 	athr_l2c_b,
 	athr_l2c_b2,
 	athr_l1d,
+	athr_l1d_2,
 };
 
 enum atl1c_trans_queue {
@@ -392,6 +395,8 @@
 	u16 subsystem_id;
 	u16 subsystem_vendor_id;
 	u8 revision_id;
+	u16 phy_id1;
+	u16 phy_id2;
 
 	u32 intr_mask;
 	u8 dmaw_dly_cnt;
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index f1389d6..d8501f0 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -37,6 +37,9 @@
 	if (data & TWSI_DEBUG_DEV_EXIST)
 		return 1;
 
+	AT_READ_REG(hw, REG_MASTER_CTRL, &data);
+	if (data & MASTER_CTRL_OTP_SEL)
+		return 1;
 	return 0;
 }
 
@@ -69,6 +72,8 @@
 	u32 i;
 	u32 otp_ctrl_data;
 	u32 twsi_ctrl_data;
+	u32 ltssm_ctrl_data;
+	u32 wol_data;
 	u8  eth_addr[ETH_ALEN];
 	u16 phy_data;
 	bool raise_vol = false;
@@ -104,6 +109,15 @@
 			udelay(20);
 			raise_vol = true;
 		}
+		/* close the write-enable bit for read-only registers */
+		AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &ltssm_ctrl_data);
+		ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO;
+		AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data);
+
+		/* clear any WOL settings */
+		AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+		AT_READ_REG(hw, REG_WOL_CTRL, &wol_data);
+
 
 		AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
 		twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
@@ -119,17 +133,15 @@
 	}
 	/* Disable OTP_CLK */
 	if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
-		if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
-			otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
-			AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
-			AT_WRITE_FLUSH(hw);
-			msleep(1);
-		}
+		otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
+		AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+		msleep(1);
 	}
 	if (raise_vol) {
 		if (hw->nic_type == athr_l2c_b ||
 		    hw->nic_type == athr_l2c_b2 ||
-		    hw->nic_type == athr_l1d) {
+		    hw->nic_type == athr_l1d ||
+		    hw->nic_type == athr_l1d_2) {
 			atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
 			if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
 				goto out;
@@ -456,14 +468,22 @@
 
 	if (hw->nic_type == athr_l2c_b ||
 	    hw->nic_type == athr_l2c_b2 ||
-	    hw->nic_type == athr_l1d) {
+	    hw->nic_type == athr_l1d ||
+	    hw->nic_type == athr_l1d_2) {
 		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
 		atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
 		atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
 		msleep(20);
 	}
-
-	/*Enable PHY LinkChange Interrupt */
+	if (hw->nic_type == athr_l1d) {
+		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
+		atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D);
+	}
+	if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c ||
+		hw->nic_type == athr_l2c_b2) {
+		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
+		atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
+	}
 	err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
 	if (err) {
 		if (netif_msg_hw(adapter))
@@ -482,12 +502,10 @@
 	struct pci_dev *pdev = adapter->pdev;
 	int ret_val;
 	u16 mii_bmcr_data = BMCR_RESET;
-	u16 phy_id1, phy_id2;
 
-	if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &phy_id1) != 0) ||
-		(atl1c_read_phy_reg(hw, MII_PHYSID2, &phy_id2) != 0)) {
-			if (netif_msg_link(adapter))
-				dev_err(&pdev->dev, "Error get phy ID\n");
+	if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
+		(atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
+		dev_err(&pdev->dev, "Error get phy ID\n");
 		return -1;
 	}
 	switch (hw->media_type) {
@@ -572,6 +590,65 @@
 	return 0;
 }
 
+int atl1c_phy_power_saving(struct atl1c_hw *hw)
+{
+	struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+	struct pci_dev *pdev = adapter->pdev;
+	int ret = 0;
+	u16 autoneg_advertised = ADVERTISED_10baseT_Half;
+	u16 save_autoneg_advertised;
+	u16 phy_data;
+	u16 mii_lpa_data;
+	u16 speed = SPEED_0;
+	u16 duplex = FULL_DUPLEX;
+	int i;
+
+	atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+	atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+	if (phy_data & BMSR_LSTATUS) {
+		atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data);
+		if (mii_lpa_data & LPA_10FULL)
+			autoneg_advertised = ADVERTISED_10baseT_Full;
+		else if (mii_lpa_data & LPA_10HALF)
+			autoneg_advertised = ADVERTISED_10baseT_Half;
+		else if (mii_lpa_data & LPA_100HALF)
+			autoneg_advertised = ADVERTISED_100baseT_Half;
+		else if (mii_lpa_data & LPA_100FULL)
+			autoneg_advertised = ADVERTISED_100baseT_Full;
+
+		save_autoneg_advertised = hw->autoneg_advertised;
+		hw->phy_configured = false;
+		hw->autoneg_advertised = autoneg_advertised;
+		if (atl1c_restart_autoneg(hw) != 0) {
+			dev_dbg(&pdev->dev, "phy autoneg failed\n");
+			ret = -1;
+		}
+		hw->autoneg_advertised = save_autoneg_advertised;
+
+		if (mii_lpa_data) {
+			for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
+				mdelay(100);
+				atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+				atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+				if (phy_data & BMSR_LSTATUS) {
+					if (atl1c_get_speed_and_duplex(hw, &speed,
+									&duplex) != 0)
+						dev_dbg(&pdev->dev,
+							"get speed and duplex failed\n");
+					break;
+				}
+			}
+		}
+	} else {
+		speed = SPEED_10;
+		duplex = HALF_DUPLEX;
+	}
+	adapter->link_speed = speed;
+	adapter->link_duplex = duplex;
+
+	return ret;
+}
+
 int atl1c_restart_autoneg(struct atl1c_hw *hw)
 {
 	int err = 0;
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 1eeb3ed..3dd6759 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -42,7 +42,7 @@
 int atl1c_phy_init(struct atl1c_hw *hw);
 int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
 int atl1c_restart_autoneg(struct atl1c_hw *hw);
-
+int atl1c_phy_power_saving(struct atl1c_hw *hw);
 /* register definition */
 #define REG_DEVICE_CAP              	0x5C
 #define DEVICE_CAP_MAX_PAYLOAD_MASK     0x7
@@ -120,6 +120,12 @@
 #define REG_PCIE_PHYMISC	    	0x1000
 #define PCIE_PHYMISC_FORCE_RCV_DET	0x4
 
+#define REG_PCIE_PHYMISC2		0x1004
+#define PCIE_PHYMISC2_SERDES_CDR_MASK	0x3
+#define PCIE_PHYMISC2_SERDES_CDR_SHIFT	16
+#define PCIE_PHYMISC2_SERDES_TH_MASK	0x3
+#define PCIE_PHYMISC2_SERDES_TH_SHIFT	18
+
 #define REG_TWSI_DEBUG			0x1108
 #define TWSI_DEBUG_DEV_EXIST		0x20000000
 
@@ -150,24 +156,28 @@
 #define PM_CTRL_ASPM_L0S_EN		0x00001000
 #define PM_CTRL_CLK_SWH_L1		0x00002000
 #define PM_CTRL_CLK_PWM_VER1_1		0x00004000
-#define PM_CTRL_PCIE_RECV		0x00008000
+#define PM_CTRL_RCVR_WT_TIMER		0x00008000
 #define PM_CTRL_L1_ENTRY_TIMER_MASK	0xF
 #define PM_CTRL_L1_ENTRY_TIMER_SHIFT	16
 #define PM_CTRL_PM_REQ_TIMER_MASK	0xF
 #define PM_CTRL_PM_REQ_TIMER_SHIFT	20
-#define PM_CTRL_LCKDET_TIMER_MASK	0x3F
+#define PM_CTRL_LCKDET_TIMER_MASK	0xF
 #define PM_CTRL_LCKDET_TIMER_SHIFT	24
 #define PM_CTRL_EN_BUFS_RX_L0S		0x10000000
 #define PM_CTRL_SA_DLY_EN		0x20000000
 #define PM_CTRL_MAC_ASPM_CHK		0x40000000
 #define PM_CTRL_HOTRST			0x80000000
 
+#define REG_LTSSM_ID_CTRL		0x12FC
+#define LTSSM_ID_EN_WRO			0x1000
 /* Selene Master Control Register */
 #define REG_MASTER_CTRL			0x1400
 #define MASTER_CTRL_SOFT_RST            0x1
 #define MASTER_CTRL_TEST_MODE_MASK	0x3
 #define MASTER_CTRL_TEST_MODE_SHIFT	2
 #define MASTER_CTRL_BERT_START		0x10
+#define MASTER_CTRL_OOB_DIS_OFF		0x40
+#define MASTER_CTRL_SA_TIMER_EN		0x80
 #define MASTER_CTRL_MTIMER_EN           0x100
 #define MASTER_CTRL_MANUAL_INT          0x200
 #define MASTER_CTRL_TX_ITIMER_EN	0x400
@@ -220,6 +230,12 @@
 		GPHY_CTRL_PWDOWN_HW	|\
 		GPHY_CTRL_PHY_IDDQ)
 
+#define GPHY_CTRL_POWER_SAVING (	\
+		GPHY_CTRL_SEL_ANA_RST	|\
+		GPHY_CTRL_HIB_EN	|\
+		GPHY_CTRL_HIB_PULSE	|\
+		GPHY_CTRL_PWDOWN_HW	|\
+		GPHY_CTRL_PHY_IDDQ)
 /* Block IDLE Status Register */
 #define REG_IDLE_STATUS  		0x1410
 #define IDLE_STATUS_MASK		0x00FF
@@ -287,6 +303,14 @@
 #define SERDES_LOCK_DETECT          	0x1  /* SerDes lock detected. This signal
 					      * comes from Analog SerDes */
 #define SERDES_LOCK_DETECT_EN       	0x2  /* 1: Enable SerDes Lock detect function */
+#define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE
+#define SERDES_LOCK_STS_SELFB_PLL_MASK  0x3
+#define SERDES_OVCLK_18_25		0x0
+#define SERDES_OVCLK_12_18		0x1
+#define SERDES_OVCLK_0_4		0x2
+#define SERDES_OVCLK_4_12		0x3
+#define SERDES_MAC_CLK_SLOWDOWN		0x20000
+#define SERDES_PYH_CLK_SLOWDOWN		0x40000
 
 /* MAC Control Register  */
 #define REG_MAC_CTRL         		0x1480
@@ -693,6 +717,21 @@
 #define REG_MAC_TX_STATUS_BIN 		0x1760
 #define REG_MAC_TX_STATUS_END 		0x17c0
 
+#define REG_CLK_GATING_CTRL		0x1814
+#define CLK_GATING_DMAW_EN		0x0001
+#define CLK_GATING_DMAR_EN		0x0002
+#define CLK_GATING_TXQ_EN		0x0004
+#define CLK_GATING_RXQ_EN		0x0008
+#define CLK_GATING_TXMAC_EN		0x0010
+#define CLK_GATING_RXMAC_EN		0x0020
+
+#define CLK_GATING_EN_ALL	(CLK_GATING_DMAW_EN |\
+				 CLK_GATING_DMAR_EN |\
+				 CLK_GATING_TXQ_EN  |\
+				 CLK_GATING_RXQ_EN  |\
+				 CLK_GATING_TXMAC_EN|\
+				 CLK_GATING_RXMAC_EN)
+
 /* DEBUG ADDR */
 #define REG_DEBUG_DATA0 		0x1900
 #define REG_DEBUG_DATA1 		0x1904
@@ -734,6 +773,10 @@
 
 #define MII_PHYSID1			0x02
 #define MII_PHYSID2			0x03
+#define L1D_MPW_PHYID1			0xD01C  /* V7 */
+#define L1D_MPW_PHYID2			0xD01D  /* V1-V6 */
+#define L1D_MPW_PHYID3			0xD01E  /* V8 */
+
 
 /* Autoneg Advertisement Register */
 #define MII_ADVERTISE			0x04
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 1c3c046..c7b8ef5 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -21,7 +21,7 @@
 
 #include "atl1c.h"
 
-#define ATL1C_DRV_VERSION "1.0.0.2-NAPI"
+#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
 char atl1c_driver_name[] = "atl1c";
 char atl1c_driver_version[] = ATL1C_DRV_VERSION;
 #define PCI_DEVICE_ID_ATTANSIC_L2C      0x1062
@@ -29,7 +29,7 @@
 #define PCI_DEVICE_ID_ATHEROS_L2C_B	0x2060 /* AR8152 v1.1 Fast 10/100 */
 #define PCI_DEVICE_ID_ATHEROS_L2C_B2	0x2062 /* AR8152 v2.0 Fast 10/100 */
 #define PCI_DEVICE_ID_ATHEROS_L1D	0x1073 /* AR8151 v1.0 Gigabit 1000 */
-
+#define PCI_DEVICE_ID_ATHEROS_L1D_2_0	0x1083 /* AR8151 v2.0 Gigabit 1000 */
 #define L2CB_V10			0xc0
 #define L2CB_V11			0xc1
 
@@ -97,7 +97,28 @@
 
 static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
+static void atl1c_pcie_patch(struct atl1c_hw *hw)
+{
+	u32 data;
 
+	AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
+	data |= PCIE_PHYMISC_FORCE_RCV_DET;
+	AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
+
+	if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
+		AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
+
+		data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK <<
+			PCIE_PHYMISC2_SERDES_CDR_SHIFT);
+		data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
+		data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
+			PCIE_PHYMISC2_SERDES_TH_SHIFT);
+		data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
+		AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
+	}
+}
+
+/* FIXME: no longer needed? */
 /*
  * atl1c_init_pcie - init PCIE module
  */
@@ -127,6 +148,11 @@
 	data &= ~PCIE_UC_SERVRITY_FCP;
 	AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
 
+	AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
+	data &= ~LTSSM_ID_EN_WRO;
+	AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data);
+
+	atl1c_pcie_patch(hw);
 	if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
 		atl1c_disable_l0s_l1(hw);
 	if (flag & ATL1C_PCIE_PHY_RESET)
@@ -135,7 +161,7 @@
 		AT_WRITE_REG(hw, REG_GPHY_CTRL,
 			GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
 
-	msleep(1);
+	msleep(5);
 }
 
 /*
@@ -159,6 +185,7 @@
 {
 	atomic_inc(&adapter->irq_sem);
 	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
+	AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
 	AT_WRITE_FLUSH(&adapter->hw);
 	synchronize_irq(adapter->pdev->irq);
 }
@@ -231,15 +258,15 @@
 
 	if ((phy_data & BMSR_LSTATUS) == 0) {
 		/* link down */
-		if (netif_carrier_ok(netdev)) {
-			hw->hibernate = true;
-			if (atl1c_stop_mac(hw) != 0)
-				if (netif_msg_hw(adapter))
-					dev_warn(&pdev->dev,
-						"stop mac failed\n");
-			atl1c_set_aspm(hw, false);
-		}
+		hw->hibernate = true;
+		if (atl1c_stop_mac(hw) != 0)
+			if (netif_msg_hw(adapter))
+				dev_warn(&pdev->dev, "stop mac failed\n");
+		atl1c_set_aspm(hw, false);
 		netif_carrier_off(netdev);
+		netif_stop_queue(netdev);
+		atl1c_phy_reset(hw);
+		atl1c_phy_init(&adapter->hw);
 	} else {
 		/* Link Up */
 		hw->hibernate = false;
@@ -308,6 +335,7 @@
 	netdev = adapter->netdev;
 
 	if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
+		adapter->work_event &= ~ATL1C_WORK_EVENT_RESET;
 		netif_device_detach(netdev);
 		atl1c_down(adapter);
 		atl1c_up(adapter);
@@ -315,8 +343,11 @@
 		return;
 	}
 
-	if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE)
+	if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) {
+		adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE;
 		atl1c_check_link_status(adapter);
+	}
+	return;
 }
 
 
@@ -476,6 +507,13 @@
 		netdev->mtu = new_mtu;
 		adapter->hw.max_frame_size = new_mtu;
 		atl1c_set_rxbufsize(adapter, netdev);
+		if (new_mtu > MAX_TSO_FRAME_SIZE) {
+			adapter->netdev->features &= ~NETIF_F_TSO;
+			adapter->netdev->features &= ~NETIF_F_TSO6;
+		} else {
+			adapter->netdev->features |= NETIF_F_TSO;
+			adapter->netdev->features |= NETIF_F_TSO6;
+		}
 		atl1c_down(adapter);
 		atl1c_up(adapter);
 		clear_bit(__AT_RESETTING, &adapter->flags);
@@ -613,6 +651,9 @@
 	case PCI_DEVICE_ID_ATHEROS_L1D:
 		hw->nic_type = athr_l1d;
 		break;
+	case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
+		hw->nic_type = athr_l1d_2;
+		break;
 	default:
 		break;
 	}
@@ -627,9 +668,7 @@
 	AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
 	AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
 
-	hw->ctrl_flags = ATL1C_INTR_CLEAR_ON_READ |
-			 ATL1C_INTR_MODRT_ENABLE  |
-			 ATL1C_RX_IPV6_CHKSUM	  |
+	hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE  |
 			 ATL1C_TXQ_MODE_ENHANCE;
 	if (link_ctrl_data & LINK_CTRL_L0S_EN)
 		hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
@@ -637,12 +676,12 @@
 		hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
 	if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
 		hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
+	hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
 
 	if (hw->nic_type == athr_l1c ||
-	    hw->nic_type == athr_l1d) {
-		hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
+	    hw->nic_type == athr_l1d ||
+	    hw->nic_type == athr_l1d_2)
 		hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
-	}
 	return 0;
 }
 /*
@@ -657,6 +696,8 @@
 {
 	struct atl1c_hw *hw   = &adapter->hw;
 	struct pci_dev	*pdev = adapter->pdev;
+	u32 revision;
+
 
 	adapter->wol = 0;
 	adapter->link_speed = SPEED_0;
@@ -669,7 +710,8 @@
 	hw->device_id = pdev->device;
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_id = pdev->subsystem_device;
-
+	AT_READ_REG(hw, PCI_CLASS_REVISION, &revision);
+	hw->revision_id = revision & 0xFF;
 	/* before link up, we assume hibernate is true */
 	hw->hibernate = true;
 	hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
@@ -974,6 +1016,7 @@
 	struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
 	struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
 	int i;
+	u32 data;
 
 	/* TPD */
 	AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
@@ -1017,6 +1060,23 @@
 			(u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
 	AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
 			(u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
+	if (hw->nic_type == athr_l2c_b) {
+		AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
+		AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
+		AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L);
+		AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L);
+		AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L);
+		AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L);
+		AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0);	/* TX watermark, to enter l1 state. */
+		AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0);		/* RXD threshold. */
+	}
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) {
+		/* Power saving for L2c_B and L1D_2 */
+		AT_READ_REG(hw, REG_SERDES_LOCK, &data);
+		data |= SERDES_MAC_CLK_SLOWDOWN;
+		data |= SERDES_PYH_CLK_SLOWDOWN;
+		AT_WRITE_REG(hw, REG_SERDES_LOCK, data);
+	}
 	/* Load all of base address above */
 	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
 }
@@ -1029,6 +1089,7 @@
 	u16 tx_offload_thresh;
 	u32 txq_ctrl_data;
 	u32 extra_size = 0;     /* Jumbo frame threshold in QWORD unit */
+	u32 max_pay_load_data;
 
 	extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
 	tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
@@ -1046,8 +1107,11 @@
 			TXQ_NUM_TPD_BURST_SHIFT;
 	if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
 		txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
-	txq_ctrl_data |= (atl1c_pay_load_size[hw->dmar_block] &
+	max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] &
 			TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2)
+		max_pay_load_data >>= 1;
+	txq_ctrl_data |= max_pay_load_data;
 
 	AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
 }
@@ -1078,7 +1142,7 @@
 	rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
 			RSS_HASH_BITS_SHIFT;
 	if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
-		rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_100M &
+		rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
 			ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
 
 	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
@@ -1198,21 +1262,23 @@
 {
 	struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
 	struct pci_dev *pdev = adapter->pdev;
-	int ret;
+	u32 master_ctrl_data = 0;
 
 	AT_WRITE_REG(hw, REG_IMR, 0);
 	AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
 
-	ret = atl1c_stop_mac(hw);
-	if (ret)
-		return ret;
+	atl1c_stop_mac(hw);
 	/*
 	 * Issue Soft Reset to the MAC.  This will reset the chip's
 	 * transmit, receive, DMA.  It will not effect
 	 * the current PCI configuration.  The global reset bit is self-
 	 * clearing, and should clear within a microsecond.
 	 */
-	AT_WRITE_REGW(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
+	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
+	master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF;
+	AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST)
+			& 0xFFFF));
+
 	AT_WRITE_FLUSH(hw);
 	msleep(10);
 	/* Wait at least 10ms for All module to be Idle */
@@ -1253,42 +1319,39 @@
 {
 	u32 pm_ctrl_data;
 	u32 link_ctrl_data;
+	u32 link_l1_timer = 0xF;
 
 	AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
 	AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
-	pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
 
+	pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
 	pm_ctrl_data &=  ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
 			PM_CTRL_L1_ENTRY_TIMER_SHIFT);
 	pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
-			  PM_CTRL_LCKDET_TIMER_SHIFT);
+			PM_CTRL_LCKDET_TIMER_SHIFT);
+	pm_ctrl_data |= AT_LCKDET_TIMER	<< PM_CTRL_LCKDET_TIMER_SHIFT;
 
-	pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
-	pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
-	pm_ctrl_data |= PM_CTRL_RBER_EN;
-	pm_ctrl_data |= PM_CTRL_SDES_EN;
-
-	if (hw->nic_type == athr_l2c_b ||
-	    hw->nic_type == athr_l1d ||
-	    hw->nic_type == athr_l2c_b2) {
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
+		hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
 		link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
 		if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
-			if (hw->nic_type == athr_l2c_b &&
-			    hw->revision_id == L2CB_V10)
+			if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10)
 				link_ctrl_data |= LINK_CTRL_EXT_SYNC;
 		}
 
 		AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
 
-		pm_ctrl_data |= PM_CTRL_PCIE_RECV;
-		pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT;
-		pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S;
+		pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER;
+		pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK <<
+			PM_CTRL_PM_REQ_TIMER_SHIFT);
+		pm_ctrl_data |= AT_ASPM_L1_TIMER <<
+			PM_CTRL_PM_REQ_TIMER_SHIFT;
 		pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
 		pm_ctrl_data &= ~PM_CTRL_HOTRST;
 		pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
 		pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
 	}
-
+	pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
 	if (linkup) {
 		pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
 		pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
@@ -1297,27 +1360,26 @@
 		if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
 			pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
 
-		if (hw->nic_type == athr_l2c_b ||
-		    hw->nic_type == athr_l1d ||
-		    hw->nic_type == athr_l2c_b2) {
+		if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
+			hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
 			if (hw->nic_type == athr_l2c_b)
 				if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
-					pm_ctrl_data &= PM_CTRL_ASPM_L0S_EN;
+					pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
 			pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
 			pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
 			pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
 			pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
-			if (hw->adapter->link_speed == SPEED_100 ||
-			    hw->adapter->link_speed == SPEED_1000) {
-				pm_ctrl_data &=
-					~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
-					  PM_CTRL_L1_ENTRY_TIMER_SHIFT);
-				if (hw->nic_type == athr_l1d)
-					pm_ctrl_data |= 0xF <<
-						PM_CTRL_L1_ENTRY_TIMER_SHIFT;
-				else
-					pm_ctrl_data |= 7 <<
-						PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+			if (hw->adapter->link_speed == SPEED_100 ||
+			    hw->adapter->link_speed == SPEED_1000) {
+				pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
+					PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+				if (hw->nic_type == athr_l2c_b)
+					link_l1_timer = 7;
+				else if (hw->nic_type == athr_l2c_b2 ||
+					hw->nic_type == athr_l1d_2)
+					link_l1_timer = 4;
+				pm_ctrl_data |= link_l1_timer <<
+					PM_CTRL_L1_ENTRY_TIMER_SHIFT;
 			}
 		} else {
 			pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
@@ -1326,24 +1388,12 @@
 			pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
 			pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
 			pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
-		}
-		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
-		if (hw->adapter->link_speed == SPEED_10)
-			if (hw->nic_type == athr_l1d)
-				atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0xB69D);
-			else
-				atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
-		else if (hw->adapter->link_speed == SPEED_100)
-			atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD);
-		else
-			atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD);
 
+		}
 	} else {
-		pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
 		pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
 		pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
 		pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
-
 		pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
 
 		if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
@@ -1351,8 +1401,9 @@
 		else
 			pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
 	}
-
 	AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
+
+	return;
 }
 
 static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
@@ -1391,7 +1442,8 @@
 		mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
 
 	mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
-	if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) {
+	if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 ||
+	    hw->nic_type == athr_l1d_2) {
 		mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
 		mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
 	}
@@ -1409,6 +1461,7 @@
 	struct atl1c_hw *hw = &adapter->hw;
 	u32 master_ctrl_data = 0;
 	u32 intr_modrt_data;
+	u32 data;
 
 	/* clear interrupt status */
 	AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
@@ -1418,6 +1471,15 @@
 	 * HW will enable self to assert interrupt event to system after
 	 * waiting x-time for software to notify it accept interrupt.
 	 */
+
+	data = CLK_GATING_EN_ALL;
+	if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) {
+		if (hw->nic_type == athr_l2c_b)
+			data &= ~CLK_GATING_RXMAC_EN;
+	} else
+		data = 0;
+	AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data);
+
 	AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
 		hw->ict & INT_RETRIG_TIMER_MASK);
 
@@ -1436,6 +1498,7 @@
 	if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
 		master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
 
+	master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
 	AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
 
 	if (hw->ctrl_flags & ATL1C_CMB_ENABLE) {
@@ -1624,11 +1687,9 @@
 					"atl1c hardware error (status = 0x%x)\n",
 					status & ISR_ERROR);
 			/* reset MAC */
-			hw->intr_mask &= ~ISR_ERROR;
-			AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
 			adapter->work_event |= ATL1C_WORK_EVENT_RESET;
 			schedule_work(&adapter->common_task);
-			break;
+			return IRQ_HANDLED;
 		}
 
 		if (status & ISR_OVER)
@@ -2303,7 +2364,6 @@
 	napi_disable(&adapter->napi);
 	atl1c_irq_disable(adapter);
 	atl1c_free_irq(adapter);
-	AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
 	/* reset MAC to disable all RX/TX */
 	atl1c_reset_mac(&adapter->hw);
 	msleep(1);
@@ -2387,79 +2447,68 @@
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 	struct atl1c_hw *hw = &adapter->hw;
-	u32 ctrl;
-	u32 mac_ctrl_data;
-	u32 master_ctrl_data;
+	u32 mac_ctrl_data = 0;
+	u32 master_ctrl_data = 0;
 	u32 wol_ctrl_data = 0;
-	u16 mii_bmsr_data;
-	u16 save_autoneg_advertised;
-	u16 mii_intr_status_data;
+	u16 mii_intr_status_data = 0;
 	u32 wufc = adapter->wol;
-	u32 i;
 	int retval = 0;
 
+	atl1c_disable_l0s_l1(hw);
 	if (netif_running(netdev)) {
 		WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
 		atl1c_down(adapter);
 	}
 	netif_device_detach(netdev);
-	atl1c_disable_l0s_l1(hw);
 	retval = pci_save_state(pdev);
 	if (retval)
 		return retval;
-	if (wufc) {
-		AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
-		master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
 
-		/* get link status */
-		atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
-		atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
-		save_autoneg_advertised = hw->autoneg_advertised;
-		hw->autoneg_advertised = ADVERTISED_10baseT_Half;
-		if (atl1c_restart_autoneg(hw) != 0)
-			if (netif_msg_link(adapter))
-				dev_warn(&pdev->dev, "phy autoneg failed\n");
-		hw->phy_configured = false; /* re-init PHY when resume */
-		hw->autoneg_advertised = save_autoneg_advertised;
+	if (wufc)
+		if (atl1c_phy_power_saving(hw) != 0)
+			dev_dbg(&pdev->dev, "phy power saving failed\n");
+
+	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
+	AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
+
+	master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
+	mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT);
+	mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
+			MAC_CTRL_PRMLEN_MASK) <<
+			MAC_CTRL_PRMLEN_SHIFT);
+	mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT);
+	mac_ctrl_data &= ~MAC_CTRL_DUPLX;
+
+	if (wufc) {
+		mac_ctrl_data |= MAC_CTRL_RX_EN;
+		if (adapter->link_speed == SPEED_1000 ||
+			adapter->link_speed == SPEED_0) {
+			mac_ctrl_data |= atl1c_mac_speed_1000 <<
+					MAC_CTRL_SPEED_SHIFT;
+			mac_ctrl_data |= MAC_CTRL_DUPLX;
+		} else
+			mac_ctrl_data |= atl1c_mac_speed_10_100 <<
+					MAC_CTRL_SPEED_SHIFT;
+
+		if (adapter->link_duplex == DUPLEX_FULL)
+			mac_ctrl_data |= MAC_CTRL_DUPLX;
+
 		/* turn on magic packet wol */
 		if (wufc & AT_WUFC_MAG)
-			wol_ctrl_data = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
+			wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
 
 		if (wufc & AT_WUFC_LNKC) {
-			for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
-				msleep(100);
-				atl1c_read_phy_reg(hw, MII_BMSR,
-					(u16 *)&mii_bmsr_data);
-				if (mii_bmsr_data & BMSR_LSTATUS)
-					break;
-			}
-			if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
-				if (netif_msg_link(adapter))
-					dev_warn(&pdev->dev,
-						"%s: Link may change"
-						"when suspend\n",
-						atl1c_driver_name);
 			wol_ctrl_data |=  WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
 			/* only link up can wake up */
 			if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
-				if (netif_msg_link(adapter))
-					dev_err(&pdev->dev,
-						"%s: read write phy "
-						"register failed.\n",
-						atl1c_driver_name);
-				goto wol_dis;
+				dev_dbg(&pdev->dev, "%s: read/write phy "
+						  "register failed.\n",
+						  atl1c_driver_name);
 			}
 		}
 		/* clear phy interrupt */
 		atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
 		/* Config MAC Ctrl register */
-		mac_ctrl_data = MAC_CTRL_RX_EN;
-		/* set to 10/100M halt duplex */
-		mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
-		mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
-				 MAC_CTRL_PRMLEN_MASK) <<
-				 MAC_CTRL_PRMLEN_SHIFT);
-
 		if (adapter->vlgrp)
 			mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
 
@@ -2467,37 +2516,30 @@
 		if (wufc & AT_WUFC_MAG)
 			mac_ctrl_data |= MAC_CTRL_BC_EN;
 
-		if (netif_msg_hw(adapter))
-			dev_dbg(&pdev->dev,
-				"%s: suspend MAC=0x%x\n",
-				atl1c_driver_name, mac_ctrl_data);
+		dev_dbg(&pdev->dev,
+			"%s: suspend MAC=0x%x\n",
+			atl1c_driver_name, mac_ctrl_data);
 		AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
 		AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
 		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
 
 		/* pcie patch */
-		AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
-		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
-		AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+		device_set_wakeup_enable(&pdev->dev, 1);
 
-		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-		goto suspend_exit;
+		AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
+			GPHY_CTRL_EXT_RESET);
+		pci_prepare_to_sleep(pdev);
+	} else {
+		AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
+		master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
+		mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
+		mac_ctrl_data |= MAC_CTRL_DUPLX;
+		AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
+		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+		AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+		hw->phy_configured = false; /* re-init PHY when resume */
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
 	}
-wol_dis:
-
-	/* WOL disabled */
-	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
-
-	/* pcie patch */
-	AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
-	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
-	AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
-
-	atl1c_phy_disable(hw);
-	hw->phy_configured = false; /* re-init PHY when resume */
-
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-suspend_exit:
 
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -2516,9 +2558,19 @@
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
 	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
+	atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
+			ATL1C_PCIE_PHY_RESET);
 
 	atl1c_phy_reset(&adapter->hw);
 	atl1c_reset_mac(&adapter->hw);
+	atl1c_phy_init(&adapter->hw);
+
+#if 0
+	AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
+	pm_data &= ~PM_CTRLSTAT_PME_EN;
+	AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
+#endif
+
 	netif_device_attach(netdev);
 	if (netif_running(netdev))
 		atl1c_up(adapter);
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 146372f..9c0ddb2 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -436,8 +436,8 @@
 	__le16 buf_len;		/* Size of the receive buffer in host memory */
 	u16 coalese;		/* Update consumer index to host after the
 				 * reception of this frame */
-	/* __attribute__ ((packed)) is required */
-} __attribute__ ((packed));
+	/* __packed is required */
+} __packed;
 
 /*
  * The L1 transmit packet descriptor is comprised of four 32-bit words.
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 293f9c1..3d52538 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -150,9 +150,8 @@
 						unsigned long offset,
 						enum dma_data_direction dir)
 {
-	ssb_dma_sync_single_range_for_device(sdev, dma_base,
-					     offset & dma_desc_align_mask,
-					     dma_desc_sync_size, dir);
+	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
+				   dma_desc_sync_size, dir);
 }
 
 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
@@ -160,9 +159,8 @@
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 {
-	ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
-					  offset & dma_desc_align_mask,
-					  dma_desc_sync_size, dir);
+	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
+				dma_desc_sync_size, dir);
 }
 
 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
@@ -608,10 +606,10 @@
 
 		BUG_ON(skb == NULL);
 
-		ssb_dma_unmap_single(bp->sdev,
-				     rp->mapping,
-				     skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev,
+				 rp->mapping,
+				 skb->len,
+				 DMA_TO_DEVICE);
 		rp->skb = NULL;
 		dev_kfree_skb_irq(skb);
 	}
@@ -648,29 +646,29 @@
 	if (skb == NULL)
 		return -ENOMEM;
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data,
-				     RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+				 RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	   to/from anything above 1GB :-( */
-	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 		/* Sigh... */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping,
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping,
 					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
-		mapping = ssb_dma_map_single(bp->sdev, skb->data,
-					     RX_PKT_BUF_SZ,
-					     DMA_FROM_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
-			mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+					 RX_PKT_BUF_SZ,
+					 DMA_FROM_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
@@ -745,9 +743,9 @@
 					     dest_idx * sizeof(*dest_desc),
 					     DMA_BIDIRECTIONAL);
 
-	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
-				       RX_PKT_BUF_SZ,
-				       DMA_FROM_DEVICE);
+	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
+				   RX_PKT_BUF_SZ,
+				   DMA_FROM_DEVICE);
 }
 
 static int b44_rx(struct b44 *bp, int budget)
@@ -767,9 +765,9 @@
 		struct rx_header *rh;
 		u16 len;
 
-		ssb_dma_sync_single_for_cpu(bp->sdev, map,
-					    RX_PKT_BUF_SZ,
-					    DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+					RX_PKT_BUF_SZ,
+					DMA_FROM_DEVICE);
 		rh = (struct rx_header *) skb->data;
 		len = le16_to_cpu(rh->len);
 		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
@@ -801,8 +799,8 @@
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
 				goto drop_it;
-			ssb_dma_unmap_single(bp->sdev, map,
-					     skb_size, DMA_FROM_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, map,
+					 skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
 			skb_put(skb, len + RX_PKT_OFFSET);
 			skb_pull(skb, RX_PKT_OFFSET);
@@ -954,24 +952,24 @@
 		goto err_out;
 	}
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
-	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 		struct sk_buff *bounce_skb;
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping, len,
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 					     DMA_TO_DEVICE);
 
 		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
-		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
-					     len, DMA_TO_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping,
+		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
+					 len, DMA_TO_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping,
 						     len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
@@ -1068,8 +1066,8 @@
 
 		if (rp->skb == NULL)
 			continue;
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1080,8 +1078,8 @@
 
 		if (rp->skb == NULL)
 			continue;
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
+				 DMA_TO_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1103,14 +1101,12 @@
 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_BIDIRECTIONAL);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 
 	if (bp->flags & B44_FLAG_TX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_TO_DEVICE);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
 
 	for (i = 0; i < bp->rx_pending; i++) {
 		if (b44_alloc_rx_skb(bp, -1, i) < 0)
@@ -1130,27 +1126,23 @@
 	bp->tx_buffers = NULL;
 	if (bp->rx_ring) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_BIDIRECTIONAL);
+			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 			kfree(bp->rx_ring);
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->rx_ring, bp->rx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
 	}
 	if (bp->tx_ring) {
 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_TO_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
 			kfree(bp->tx_ring);
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->tx_ring, bp->tx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->tx_ring, bp->tx_ring_dma);
 		bp->tx_ring = NULL;
 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
 	}
@@ -1175,7 +1167,8 @@
 		goto out_err;
 
 	size = DMA_TABLE_BYTES;
-	bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
+	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->rx_ring_dma, gfp);
 	if (!bp->rx_ring) {
 		/* Allocation may have failed due to pci_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1187,11 +1180,11 @@
 		if (!rx_ring)
 			goto out_err;
 
-		rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
-						 DMA_TABLE_BYTES,
-						 DMA_BIDIRECTIONAL);
+		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_BIDIRECTIONAL);
 
-		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
 			rx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(rx_ring);
 			goto out_err;
@@ -1202,7 +1195,8 @@
 		bp->flags |= B44_FLAG_RX_RING_HACK;
 	}
 
-	bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
+	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->tx_ring_dma, gfp);
 	if (!bp->tx_ring) {
 		/* Allocation may have failed due to ssb_dma_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1214,11 +1208,11 @@
 		if (!tx_ring)
 			goto out_err;
 
-		tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
-			                    DMA_TABLE_BYTES,
-			                    DMA_TO_DEVICE);
+		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_TO_DEVICE);
 
-		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
 			tx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(tx_ring);
 			goto out_err;
@@ -2176,12 +2170,14 @@
 			"Failed to powerup the bus\n");
 		goto err_out_free_dev;
 	}
-	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
-	if (err) {
+
+	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
+	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
 		dev_err(sdev->dev,
 			"Required 30BIT DMA mask unsupported by the system\n");
 		goto err_out_powerdown;
 	}
+
 	err = b44_get_invariants(bp);
 	if (err) {
 		dev_err(sdev->dev,
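The b44.c hunks above are one mechanical conversion: each ssb_dma_*() wrapper is replaced by the corresponding generic DMA API call on sdev->dma_dev, and ssb_dma_free_consistent()'s gfp argument disappears because dma_free_coherent() takes none. A condensed sketch of the resulting map/check pattern follows; the helper name is invented and only stands in for the open-coded call sites in the driver.

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/ssb/ssb.h>

	/* Map a buffer for the device behind an SSB core and report failure
	 * the way the converted driver does: via dma_mapping_error(). */
	static int b44_map_buf(struct ssb_device *sdev, void *buf, size_t len,
			       enum dma_data_direction dir, dma_addr_t *mapping)
	{
		*mapping = dma_map_single(sdev->dma_dev, buf, len, dir);
		if (dma_mapping_error(sdev->dma_dev, *mapping))
			return -ENOMEM;	/* nothing mapped, nothing to unmap */
		return 0;
	}
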
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index b46be49..1a0d2d0 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -282,6 +282,7 @@
 	int link_speed;
 	u8 port_type;
 	u8 transceiver;
+	u8 autoneg;
 	u8 generation;		/* BladeEngine ASIC generation */
 	u32 flash_status;
 	struct completion flash_compl;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index b9ad799..344e062 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -25,6 +25,8 @@
 
 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
 	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+
+	wmb();
 	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
 }
 
@@ -186,7 +188,7 @@
 
 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
 {
-	int cnt = 0, wait = 5;
+	int msecs = 0;
 	u32 ready;
 
 	do {
@@ -201,15 +203,14 @@
 		if (ready)
 			break;
 
-		if (cnt > 4000000) {
+		if (msecs > 4000) {
 			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
 			return -1;
 		}
 
-		if (cnt > 50)
-			wait = 200;
-		cnt += wait;
-		udelay(wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(msecs_to_jiffies(1));
+		msecs++;
 	} while (true);
 
 	return 0;
@@ -1694,3 +1695,38 @@
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
+
+int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_phy_info *req;
+	struct be_sge *sge;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = cmd->va;
+	sge = nonembedded_sgl(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+				OPCODE_COMMON_GET_PHY_DETAILS);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_GET_PHY_DETAILS,
+			sizeof(*req));
+
+	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
+	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
+	sge->len = cpu_to_le32(cmd->size);
+
+	status = be_mcc_notify_wait(adapter);
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
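The be_mbox_db_ready_wait() change above replaces a udelay() busy-wait with a sleeping poll: the caller now yields for roughly one millisecond per iteration and gives up after about four seconds. A generic sketch of that pattern, assuming process context; the ready-check is passed in as a callback because the mailbox register layout is driver-specific.

	#include <linux/sched.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Poll 'ready(ctx)' about once per millisecond, sleeping in between,
	 * and fail with -ETIMEDOUT after timeout_ms milliseconds. */
	static int wait_for_ready(bool (*ready)(void *ctx), void *ctx,
				  int timeout_ms)
	{
		int waited = 0;

		while (!ready(ctx)) {
			if (waited++ > timeout_ms)
				return -ETIMEDOUT;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(msecs_to_jiffies(1));
		}
		return 0;
	}
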
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 763dc19..912a058 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -144,6 +144,7 @@
 #define OPCODE_COMMON_ENABLE_DISABLE_BEACON		69
 #define OPCODE_COMMON_GET_BEACON_STATE			70
 #define OPCODE_COMMON_READ_TRANSRECV_DATA		73
+#define OPCODE_COMMON_GET_PHY_DETAILS			102
 
 #define OPCODE_ETH_ACPI_CONFIG				2
 #define OPCODE_ETH_PROMISCUOUS				3
@@ -869,6 +870,30 @@
 	u8 seeprom_data[BE_READ_SEEPROM_LEN];
 };
 
+enum {
+	PHY_TYPE_CX4_10GB = 0,
+	PHY_TYPE_XFP_10GB,
+	PHY_TYPE_SFP_1GB,
+	PHY_TYPE_SFP_PLUS_10GB,
+	PHY_TYPE_KR_10GB,
+	PHY_TYPE_KX4_10GB,
+	PHY_TYPE_BASET_10GB,
+	PHY_TYPE_BASET_1GB,
+	PHY_TYPE_DISABLED = 255
+};
+
+struct be_cmd_req_get_phy_info {
+	struct be_cmd_req_hdr hdr;
+	u8 rsvd0[24];
+};
+struct be_cmd_resp_get_phy_info {
+	struct be_cmd_req_hdr hdr;
+	u16 phy_type;
+	u16 interface_type;
+	u32 misc_params;
+	u32 future_use[4];
+};
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -947,4 +972,6 @@
 				struct be_dma_mem *nonemb_cmd);
 extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 				u8 loopback_type, u8 enable);
+extern int be_cmd_get_phy_info(struct be_adapter *adapter,
+		struct be_dma_mem *cmd);
 
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 200e985..c0ade24 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -314,10 +314,13 @@
 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
-	u8 mac_speed = 0, connector = 0;
+	struct be_dma_mem phy_cmd;
+	struct be_cmd_resp_get_phy_info *resp;
+	u8 mac_speed = 0;
 	u16 link_speed = 0;
 	bool link_up = false;
 	int status;
+	u16 intf_type;
 
 	if (adapter->link_speed < 0) {
 		status = be_cmd_link_status_query(adapter, &link_up,
@@ -337,40 +340,57 @@
 			}
 		}
 
-		status = be_cmd_read_port_type(adapter, adapter->port_num,
-						&connector);
+		phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
+		phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size,
+					&phy_cmd.dma);
+		if (!phy_cmd.va) {
+			dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+			return -ENOMEM;
+		}
+		status = be_cmd_get_phy_info(adapter, &phy_cmd);
 		if (!status) {
-			switch (connector) {
-			case 7:
+			resp = (struct be_cmd_resp_get_phy_info *) phy_cmd.va;
+			intf_type = le16_to_cpu(resp->interface_type);
+
+			switch (intf_type) {
+			case PHY_TYPE_XFP_10GB:
+			case PHY_TYPE_SFP_1GB:
+			case PHY_TYPE_SFP_PLUS_10GB:
 				ecmd->port = PORT_FIBRE;
-				ecmd->transceiver = XCVR_EXTERNAL;
-				break;
-			case 0:
-				ecmd->port = PORT_TP;
-				ecmd->transceiver = XCVR_EXTERNAL;
 				break;
 			default:
 				ecmd->port = PORT_TP;
-				ecmd->transceiver = XCVR_INTERNAL;
 				break;
 			}
-		} else {
-			ecmd->port = PORT_AUI;
+
+			switch (intf_type) {
+			case PHY_TYPE_KR_10GB:
+			case PHY_TYPE_KX4_10GB:
+				ecmd->autoneg = AUTONEG_ENABLE;
 			ecmd->transceiver = XCVR_INTERNAL;
+				break;
+			default:
+				ecmd->autoneg = AUTONEG_DISABLE;
+				ecmd->transceiver = XCVR_EXTERNAL;
+				break;
+			}
 		}
 
 		/* Save for future use */
 		adapter->link_speed = ecmd->speed;
 		adapter->port_type = ecmd->port;
 		adapter->transceiver = ecmd->transceiver;
+		adapter->autoneg = ecmd->autoneg;
+		pci_free_consistent(adapter->pdev, phy_cmd.size,
+					phy_cmd.va, phy_cmd.dma);
 	} else {
 		ecmd->speed = adapter->link_speed;
 		ecmd->port = adapter->port_type;
 		ecmd->transceiver = adapter->transceiver;
+		ecmd->autoneg = adapter->autoneg;
 	}
 
 	ecmd->duplex = DUPLEX_FULL;
-	ecmd->autoneg = AUTONEG_DISABLE;
 	ecmd->phy_address = adapter->port_num;
 	switch (ecmd->port) {
 	case PORT_FIBRE:
@@ -384,6 +404,13 @@
 		break;
 	}
 
+	if (ecmd->autoneg) {
+		ecmd->supported |= SUPPORTED_1000baseT_Full;
+		ecmd->supported |= SUPPORTED_Autoneg;
+		ecmd->advertising |= (ADVERTISED_10000baseT_Full |
+				ADVERTISED_1000baseT_Full);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 063026d..0683967 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -192,7 +192,7 @@
 	u8 event;
 	u8 crc;
 	u8 forward;
-	u8 ipsec;
+	u8 lso6;
 	u8 mgmt;
 	u8 ipcs;
 	u8 udpcs;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 54b1427..b636879 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -89,6 +89,8 @@
 	u32 val = 0;
 	val |= qid & DB_RQ_RING_ID_MASK;
 	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
+
+	wmb();
 	iowrite32(val, adapter->db + DB_RQ_OFFSET);
 }
 
@@ -97,6 +99,8 @@
 	u32 val = 0;
 	val |= qid & DB_TXULP_RING_ID_MASK;
 	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
+
+	wmb();
 	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
 }
 
@@ -373,10 +377,12 @@
 
 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
 
-	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
+	if (skb_is_gso(skb)) {
 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
 			hdr, skb_shinfo(skb)->gso_size);
+		if (skb_is_gso_v6(skb))
+			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		if (is_tcp_pkt(skb))
 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -971,6 +977,7 @@
 	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
 		return NULL;
 
+	rmb();
 	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
 
 	queue_tail_inc(&adapter->rx_obj.cq);
@@ -1064,6 +1071,7 @@
 	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
 		return NULL;
 
+	rmb();
 	be_dws_le_to_cpu(txcp, sizeof(*txcp));
 
 	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
@@ -1111,6 +1119,7 @@
 	if (!eqe->evt)
 		return NULL;
 
+	rmb();
 	eqe->evt = le32_to_cpu(eqe->evt);
 	queue_tail_inc(&eq_obj->q);
 	return eqe;
@@ -1735,6 +1744,44 @@
 	adapter->isr_registered = false;
 }
 
+static int be_close(struct net_device *netdev)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	struct be_eq_obj *rx_eq = &adapter->rx_eq;
+	struct be_eq_obj *tx_eq = &adapter->tx_eq;
+	int vec;
+
+	cancel_delayed_work_sync(&adapter->work);
+
+	be_async_mcc_disable(adapter);
+
+	netif_stop_queue(netdev);
+	netif_carrier_off(netdev);
+	adapter->link_up = false;
+
+	be_intr_set(adapter, false);
+
+	if (adapter->msix_enabled) {
+		vec = be_msix_vec_get(adapter, tx_eq->q.id);
+		synchronize_irq(vec);
+		vec = be_msix_vec_get(adapter, rx_eq->q.id);
+		synchronize_irq(vec);
+	} else {
+		synchronize_irq(netdev->irq);
+	}
+	be_irq_unregister(adapter);
+
+	napi_disable(&rx_eq->napi);
+	napi_disable(&tx_eq->napi);
+
+	/* Wait for all pending tx completions to arrive so that
+	 * all tx skbs are freed.
+	 */
+	be_tx_compl_clean(adapter);
+
+	return 0;
+}
+
 static int be_open(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
@@ -1765,27 +1812,29 @@
 	/* Now that interrupts are on we can process async mcc */
 	be_async_mcc_enable(adapter);
 
+	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+
 	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
 			&link_speed);
 	if (status)
-		goto ret_sts;
+		goto err;
 	be_link_status_update(adapter, link_up);
 
-	if (be_physfn(adapter))
-		status = be_vid_config(adapter);
-	if (status)
-		goto ret_sts;
-
 	if (be_physfn(adapter)) {
+		status = be_vid_config(adapter);
+		if (status)
+			goto err;
+
 		status = be_cmd_set_flow_control(adapter,
 				adapter->tx_fc, adapter->rx_fc);
 		if (status)
-			goto ret_sts;
+			goto err;
 	}
 
-	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-ret_sts:
-	return status;
+	return 0;
+err:
+	be_close(adapter->netdev);
+	return -EIO;
 }
 
 static int be_setup_wol(struct be_adapter *adapter, bool enable)
@@ -1913,43 +1962,6 @@
 	return 0;
 }
 
-static int be_close(struct net_device *netdev)
-{
-	struct be_adapter *adapter = netdev_priv(netdev);
-	struct be_eq_obj *rx_eq = &adapter->rx_eq;
-	struct be_eq_obj *tx_eq = &adapter->tx_eq;
-	int vec;
-
-	cancel_delayed_work_sync(&adapter->work);
-
-	be_async_mcc_disable(adapter);
-
-	netif_stop_queue(netdev);
-	netif_carrier_off(netdev);
-	adapter->link_up = false;
-
-	be_intr_set(adapter, false);
-
-	if (adapter->msix_enabled) {
-		vec = be_msix_vec_get(adapter, tx_eq->q.id);
-		synchronize_irq(vec);
-		vec = be_msix_vec_get(adapter, rx_eq->q.id);
-		synchronize_irq(vec);
-	} else {
-		synchronize_irq(netdev->irq);
-	}
-	be_irq_unregister(adapter);
-
-	napi_disable(&rx_eq->napi);
-	napi_disable(&tx_eq->napi);
-
-	/* Wait for all pending tx completions to arrive so that
-	 * all tx skbs are freed.
-	 */
-	be_tx_compl_clean(adapter);
-
-	return 0;
-}
 
 #define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
 char flash_cookie[2][16] =	{"*** SE FLAS",
@@ -2183,7 +2195,7 @@
 
 	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
 		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
-		NETIF_F_GRO;
+		NETIF_F_GRO | NETIF_F_TSO6;
 
 	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
 
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 368f333..012613f 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -922,61 +922,73 @@
 # define bfin_tx_hwtstamp(dev, skb)
 #endif
 
-static void adjust_tx_list(void)
+static inline void _tx_reclaim_skb(void)
 {
-	int timeout_cnt = MAX_TIMEOUT_CNT;
-
-	if (tx_list_head->status.status_word != 0 &&
-	    current_tx_ptr != tx_list_head) {
-		goto adjust_head;	/* released something, just return; */
-	}
-
-	/*
-	 * if nothing released, check wait condition
-	 * current's next can not be the head,
-	 * otherwise the dma will not stop as we want
-	 */
-	if (current_tx_ptr->next->next == tx_list_head) {
-		while (tx_list_head->status.status_word == 0) {
-			udelay(10);
-			if (tx_list_head->status.status_word != 0 ||
-			    !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
-				goto adjust_head;
-			}
-			if (timeout_cnt-- < 0) {
-				printk(KERN_ERR DRV_NAME
-				": wait for adjust tx list head timeout\n");
-				break;
-			}
-		}
-		if (tx_list_head->status.status_word != 0) {
-			goto adjust_head;
-		}
-	}
-
-	return;
-
-adjust_head:
 	do {
 		tx_list_head->desc_a.config &= ~DMAEN;
 		tx_list_head->status.status_word = 0;
 		if (tx_list_head->skb) {
 			dev_kfree_skb(tx_list_head->skb);
 			tx_list_head->skb = NULL;
-		} else {
-			printk(KERN_ERR DRV_NAME
-			       ": no sk_buff in a transmitted frame!\n");
 		}
 		tx_list_head = tx_list_head->next;
-	} while (tx_list_head->status.status_word != 0 &&
-		 current_tx_ptr != tx_list_head);
-	return;
 
+	} while (tx_list_head->status.status_word != 0);
+}
+
+static void tx_reclaim_skb(struct bfin_mac_local *lp)
+{
+	int timeout_cnt = MAX_TIMEOUT_CNT;
+
+	if (tx_list_head->status.status_word != 0)
+		_tx_reclaim_skb();
+
+	if (current_tx_ptr->next == tx_list_head) {
+		while (tx_list_head->status.status_word == 0) {
+			/* slow down polling to avoid too many queue stops. */
+			udelay(10);
+			/* reclaim skb if DMA is not running. */
+			if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
+				break;
+			if (timeout_cnt-- < 0)
+				break;
+		}
+
+		if (timeout_cnt >= 0)
+			_tx_reclaim_skb();
+		else
+			netif_stop_queue(lp->ndev);
+	}
+
+	if (current_tx_ptr->next != tx_list_head &&
+		netif_queue_stopped(lp->ndev))
+		netif_wake_queue(lp->ndev);
+
+	if (tx_list_head != current_tx_ptr) {
+		/* shorten the timer interval if tx queue is stopped */
+		if (netif_queue_stopped(lp->ndev))
+			lp->tx_reclaim_timer.expires =
+				jiffies + (TX_RECLAIM_JIFFIES >> 4);
+		else
+			lp->tx_reclaim_timer.expires =
+				jiffies + TX_RECLAIM_JIFFIES;
+
+		mod_timer(&lp->tx_reclaim_timer,
+			lp->tx_reclaim_timer.expires);
+	}
+
+	return;
+}
+
+static void tx_reclaim_skb_timeout(unsigned long lp)
+{
+	tx_reclaim_skb((struct bfin_mac_local *)lp);
 }
 
 static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
 				struct net_device *dev)
 {
+	struct bfin_mac_local *lp = netdev_priv(dev);
 	u16 *data;
 	u32 data_align = (unsigned long)(skb->data) & 0x3;
 	union skb_shared_tx *shtx = skb_tx(skb);
@@ -1009,8 +1021,6 @@
 			skb->len);
 		current_tx_ptr->desc_a.start_addr =
 			(u32)current_tx_ptr->packet;
-		if (current_tx_ptr->status.status_word != 0)
-			current_tx_ptr->status.status_word = 0;
 		blackfin_dcache_flush_range(
 			(u32)current_tx_ptr->packet,
 			(u32)(current_tx_ptr->packet + skb->len + 2));
@@ -1022,6 +1032,9 @@
 	 */
 	SSYNC();
 
+	/* always clear status buffer before starting tx dma */
+	current_tx_ptr->status.status_word = 0;
+
 	/* enable this packet's dma */
 	current_tx_ptr->desc_a.config |= DMAEN;
 
@@ -1037,13 +1050,14 @@
 	bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
 
 out:
-	adjust_tx_list();
-
 	bfin_tx_hwtstamp(dev, skb);
 
 	current_tx_ptr = current_tx_ptr->next;
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += (skb->len);
+
+	tx_reclaim_skb(lp);
+
 	return NETDEV_TX_OK;
 }
 
@@ -1167,8 +1181,11 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void bfin_mac_poll(struct net_device *dev)
 {
+	struct bfin_mac_local *lp = netdev_priv(dev);
+
 	disable_irq(IRQ_MAC_RX);
 	bfin_mac_interrupt(IRQ_MAC_RX, dev);
+	tx_reclaim_skb(lp);
 	enable_irq(IRQ_MAC_RX);
 }
 #endif				/* CONFIG_NET_POLL_CONTROLLER */
@@ -1232,12 +1249,27 @@
 /* Our watchdog timed out. Called by the networking layer */
 static void bfin_mac_timeout(struct net_device *dev)
 {
+	struct bfin_mac_local *lp = netdev_priv(dev);
+
 	pr_debug("%s: %s\n", dev->name, __func__);
 
 	bfin_mac_disable();
 
-	/* reset tx queue */
-	tx_list_tail = tx_list_head->next;
+	del_timer(&lp->tx_reclaim_timer);
+
+	/* reset tx queue and free skb */
+	while (tx_list_head != current_tx_ptr) {
+		tx_list_head->desc_a.config &= ~DMAEN;
+		tx_list_head->status.status_word = 0;
+		if (tx_list_head->skb) {
+			dev_kfree_skb(tx_list_head->skb);
+			tx_list_head->skb = NULL;
+		}
+		tx_list_head = tx_list_head->next;
+	}
+
+	if (netif_queue_stopped(lp->ndev))
+		netif_wake_queue(lp->ndev);
 
 	bfin_mac_enable();
 
@@ -1430,6 +1462,7 @@
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 	platform_set_drvdata(pdev, ndev);
 	lp = netdev_priv(ndev);
+	lp->ndev = ndev;
 
 	/* Grab the MAC address in the MAC */
 	*(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
@@ -1485,6 +1518,10 @@
 	ndev->netdev_ops = &bfin_mac_netdev_ops;
 	ndev->ethtool_ops = &bfin_mac_ethtool_ops;
 
+	init_timer(&lp->tx_reclaim_timer);
+	lp->tx_reclaim_timer.data = (unsigned long)lp;
+	lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
+
 	spin_lock_init(&lp->lock);
 
 	/* now, enable interrupts */
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 1ae7b82..04e4050 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -13,9 +13,12 @@
 #include <linux/net_tstamp.h>
 #include <linux/clocksource.h>
 #include <linux/timecompare.h>
+#include <linux/timer.h>
 
 #define BFIN_MAC_CSUM_OFFLOAD
 
+#define TX_RECLAIM_JIFFIES (HZ / 5)
+
 struct dma_descriptor {
 	struct dma_descriptor *next_dma_desc;
 	unsigned long start_addr;
@@ -68,6 +71,8 @@
 
 	int wol;		/* Wake On Lan */
 	int irq_wake_requested;
+	struct timer_list tx_reclaim_timer;
+	struct net_device *ndev;
 
 	/* MII and PHY stuffs */
 	int old_link;          /* used by bf537_adjust_link */
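
The bfin_mac changes above replace the busy-wait in adjust_tx_list() with tx_reclaim_skb(), which frees every completed descriptor and re-arms a timer whose interval shrinks while the queue is stopped. A standalone sketch of the reclaim walk over a mock descriptor ring, with made-up field names:

/* Sketch of the descriptor-reclaim walk: free every entry whose status
 * word is non-zero and stop at the first incomplete one.  The ring and
 * fields are illustrative only. */
#include <stdio.h>

#define RING_SIZE 4

struct mock_desc {
	unsigned int status_word;   /* non-zero once DMA has completed */
	int has_skb;
};

static struct mock_desc ring[RING_SIZE] = {
	{ 1, 1 }, { 1, 1 }, { 0, 1 }, { 0, 0 },
};
static int head;

static void reclaim(void)
{
	while (ring[head].status_word != 0) {
		ring[head].status_word = 0;
		if (ring[head].has_skb) {
			printf("freeing skb at slot %d\n", head);
			ring[head].has_skb = 0;
		}
		head = (head + 1) % RING_SIZE;
	}
}

int main(void)
{
	reclaim();
	printf("head now at slot %d\n", head);   /* expect slot 2 */
	return 0;
}
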
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 1174322..22fa1e9 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,8 +58,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME		"bnx2"
-#define DRV_MODULE_VERSION	"2.0.15"
-#define DRV_MODULE_RELDATE	"May 4, 2010"
+#define DRV_MODULE_VERSION	"2.0.16"
+#define DRV_MODULE_RELDATE	"July 2, 2010"
 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j15.fw"
@@ -1446,7 +1446,8 @@
 static void
 bnx2_enable_forced_2g5(struct bnx2 *bp)
 {
-	u32 bmcr;
+	u32 uninitialized_var(bmcr);
+	int err;
 
 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
 		return;
@@ -1456,22 +1457,28 @@
 
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
-		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
-		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
-		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
-		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
+			val |= MII_BNX2_SD_MISC1_FORCE |
+				MII_BNX2_SD_MISC1_FORCE_2_5G;
+			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		}
 
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
-		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
-		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
-		bmcr |= BCM5708S_BMCR_FORCE_2500;
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		if (!err)
+			bmcr |= BCM5708S_BMCR_FORCE_2500;
 	} else {
 		return;
 	}
 
+	if (err)
+		return;
+
 	if (bp->autoneg & AUTONEG_SPEED) {
 		bmcr &= ~BMCR_ANENABLE;
 		if (bp->req_duplex == DUPLEX_FULL)
@@ -1483,7 +1490,8 @@
 static void
 bnx2_disable_forced_2g5(struct bnx2 *bp)
 {
-	u32 bmcr;
+	u32 uninitialized_var(bmcr);
+	int err;
 
 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
 		return;
@@ -1493,21 +1501,26 @@
 
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
-		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
-		val &= ~MII_BNX2_SD_MISC1_FORCE;
-		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+			val &= ~MII_BNX2_SD_MISC1_FORCE;
+			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		}
 
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
-		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
-		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
-		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		if (!err)
+			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
 	} else {
 		return;
 	}
 
+	if (err)
+		return;
+
 	if (bp->autoneg & AUTONEG_SPEED)
 		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
@@ -3206,6 +3219,10 @@
 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 		}
+		if ((bp->dev->features & NETIF_F_RXHASH) &&
+		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
+		     L2_FHDR_STATUS_USE_RXHASH))
+			skb->rxhash = rx_hdr->l2_fhdr_hash;
 
 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
 
@@ -6172,7 +6189,7 @@
 	bp->irq_nvecs = 1;
 	bp->irq_tbl[0].vector = bp->pdev->irq;
 
-	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
+	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
 		bnx2_enable_msix(bp, msix_vecs);
 
 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
@@ -6296,9 +6313,14 @@
 bnx2_dump_state(struct bnx2 *bp)
 {
 	struct net_device *dev = bp->dev;
-	u32 mcp_p0, mcp_p1;
+	u32 mcp_p0, mcp_p1, val1, val2;
 
-	netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
+	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
+	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
+		   atomic_read(&bp->intr_sem), val1);
+	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
+	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
+	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
 		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
 		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
@@ -7545,6 +7567,12 @@
 		return (ethtool_op_set_tx_csum(dev, data));
 }
 
+static int
+bnx2_set_flags(struct net_device *dev, u32 data)
+{
+	return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
+}
+
 static const struct ethtool_ops bnx2_ethtool_ops = {
 	.get_settings		= bnx2_get_settings,
 	.set_settings		= bnx2_set_settings,
@@ -7574,6 +7602,8 @@
 	.phys_id		= bnx2_phys_id,
 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
 	.get_sset_count		= bnx2_get_sset_count,
+	.set_flags		= bnx2_set_flags,
+	.get_flags		= ethtool_op_get_flags,
 };
 
 /* Called with rtnl_lock */
@@ -8320,7 +8350,8 @@
 	memcpy(dev->dev_addr, bp->mac_addr, 6);
 	memcpy(dev->perm_addr, bp->mac_addr, 6);
 
-	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
+	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
+			 NETIF_F_RXHASH;
 	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
 		dev->features |= NETIF_F_IPV6_CSUM;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index ddaa3fc..b9af6bc 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -295,6 +295,9 @@
 		#define L2_FHDR_ERRORS_TCP_XSUM		(1<<28)
 		#define L2_FHDR_ERRORS_UDP_XSUM		(1<<31)
 
+		#define L2_FHDR_STATUS_USE_RXHASH	\
+			(L2_FHDR_STATUS_TCP_SEGMENT | L2_FHDR_STATUS_RSS_HASH)
+
 	u32 l2_fhdr_hash;
 #if defined(__BIG_ENDIAN)
 	u16 l2_fhdr_pkt_len;
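
The bnx2 hunks above copy the hardware RSS hash into skb->rxhash only when NETIF_F_RXHASH is enabled and both status bits covered by L2_FHDR_STATUS_USE_RXHASH are set. A small sketch of that double-bit test, using invented bit values in place of the real L2_FHDR_STATUS_* masks:

#include <stdio.h>
#include <stdint.h>

#define STATUS_TCP_SEGMENT  (1u << 0)   /* illustrative values */
#define STATUS_RSS_HASH     (1u << 1)
#define STATUS_USE_RXHASH   (STATUS_TCP_SEGMENT | STATUS_RSS_HASH)

/* Use the hardware hash only if the feature is on and both bits agree. */
static int want_rxhash(uint32_t features_rxhash, uint32_t status)
{
	return features_rxhash &&
	       (status & STATUS_USE_RXHASH) == STATUS_USE_RXHASH;
}

int main(void)
{
	printf("%d\n", want_rxhash(1, STATUS_USE_RXHASH));    /* 1 */
	printf("%d\n", want_rxhash(1, STATUS_RSS_HASH));      /* 0: both bits required */
	printf("%d\n", want_rxhash(0, STATUS_USE_RXHASH));    /* 0: feature disabled */
	return 0;
}
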
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index ff70be8..0383e30 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -4266,14 +4266,16 @@
 					       MDIO_PMA_REG_10G_CTRL2, 0x0008);
 			}
 
-			/* Set 2-wire transfer rate to 400Khz since 100Khz
-			is not operational */
+			/* Set 2-wire transfer rate of SFP+ module EEPROM
+			 * to 100kHz since some DACs (direct attached cables)
+			 * do not work at 400kHz.
+			 */
 			bnx2x_cl45_write(bp, params->port,
 				       ext_phy_type,
 				       ext_phy_addr,
 				       MDIO_PMA_DEVAD,
 				       MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
-				       0xa101);
+				       0xa001);
 
 			/* Set TX PreEmphasis if needed */
 			if ((params->feature_config_flags &
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 57ff5b3..51b7883 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1545,6 +1545,20 @@
 	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
 }
 
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
+					struct sk_buff *skb)
+{
+	/* Set Toeplitz hash from CQE */
+	if ((bp->dev->features & NETIF_F_RXHASH) &&
+	    (cqe->fast_path_cqe.status_flags &
+	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+		skb->rxhash =
+		le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
+}
+
 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
 	struct bnx2x *bp = fp->bp;
@@ -1582,7 +1596,7 @@
 		struct sw_rx_bd *rx_buf = NULL;
 		struct sk_buff *skb;
 		union eth_rx_cqe *cqe;
-		u8 cqe_fp_flags, cqe_fp_status_flags;
+		u8 cqe_fp_flags;
 		u16 len, pad;
 
 		comp_ring_cons = RCQ_BD(sw_comp_cons);
@@ -1598,7 +1612,6 @@
 
 		cqe = &fp->rx_comp_ring[comp_ring_cons];
 		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
-		cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
 
 		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
 		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
@@ -1634,6 +1647,10 @@
 
 					bnx2x_tpa_start(fp, queue, skb,
 							bd_cons, bd_prod);
+
+					/* Set Toeplitz hash for an LRO skb */
+					bnx2x_set_skb_rxhash(bp, cqe, skb);
+
 					goto next_rx;
 				}
 
@@ -1726,11 +1743,8 @@
 
 			skb->protocol = eth_type_trans(skb, bp->dev);
 
-			if ((bp->dev->features & NETIF_F_RXHASH) &&
-			    (cqe_fp_status_flags &
-			     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
-				skb->rxhash = le32_to_cpu(
-				    cqe->fast_path_cqe.rss_hash_result);
+			/* Set Toeplitz hash for a non-LRO skb */
+			bnx2x_set_skb_rxhash(bp, cqe, skb);
 
 			skb->ip_summed = CHECKSUM_NONE;
 			if (bp->rx_csum) {
@@ -10982,6 +10996,9 @@
 	int changed = 0;
 	int rc = 0;
 
+	if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
+		return -EINVAL;
+
 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
 		return -EAGAIN;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index df48307..3662d6e 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -233,34 +233,27 @@
 	_unlock_tx_hashtbl(bond);
 }
 
+static long long compute_gap(struct slave *slave)
+{
+	return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */
+	       (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
+}
+
 /* Caller must hold bond lock for read */
 static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 {
 	struct slave *slave, *least_loaded;
-	s64 max_gap;
-	int i, found = 0;
+	long long max_gap;
+	int i;
 
-	/* Find the first enabled slave */
-	bond_for_each_slave(bond, slave, i) {
-		if (SLAVE_IS_OK(slave)) {
-			found = 1;
-			break;
-		}
-	}
-
-	if (!found) {
-		return NULL;
-	}
-
-	least_loaded = slave;
-	max_gap = (s64)(slave->speed << 20) - /* Convert to Megabit per sec */
-			(s64)(SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
+	least_loaded = NULL;
+	max_gap = LLONG_MIN;
 
 	/* Find the slave with the largest gap */
-	bond_for_each_slave_from(bond, slave, i, least_loaded) {
+	bond_for_each_slave(bond, slave, i) {
 		if (SLAVE_IS_OK(slave)) {
-			s64 gap = (s64)(slave->speed << 20) -
-					(s64)(SLAVE_TLB_INFO(slave).load << 3);
+			long long gap = compute_gap(slave);
+
 			if (max_gap < gap) {
 				least_loaded = slave;
 				max_gap = gap;
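
tlb_get_least_loaded_slave() is rewritten above to seed max_gap with LLONG_MIN and compute each slave's gap as capacity (Mbit/s shifted to bits) minus load (bytes shifted to bits), so an empty or all-down slave list naturally yields NULL. A standalone sketch of that selection over a mock slave array:

#include <stdio.h>
#include <limits.h>

struct mock_slave {
	long long speed;   /* Mbit/s */
	long long load;    /* bytes queued */
	int ok;            /* usable? */
};

static long long compute_gap(const struct mock_slave *s)
{
	return (s->speed << 20) - (s->load << 3);   /* Mbit/s -> bits, bytes -> bits */
}

static const struct mock_slave *least_loaded(const struct mock_slave *s, int n)
{
	const struct mock_slave *best = NULL;
	long long max_gap = LLONG_MIN;
	int i;

	for (i = 0; i < n; i++) {
		if (!s[i].ok)
			continue;
		if (compute_gap(&s[i]) > max_gap) {
			max_gap = compute_gap(&s[i]);
			best = &s[i];
		}
	}
	return best;                /* NULL when no slave is usable */
}

int main(void)
{
	struct mock_slave slaves[] = {
		{ 1000,  50000, 1 },
		{  100,    100, 1 },
		{ 1000,      0, 0 },   /* down: ignored */
	};
	const struct mock_slave *s = least_loaded(slaves, 3);

	printf("picked slave with speed %lld\n", s ? s->speed : -1);
	return 0;
}
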
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c3d98dd..8228088 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -90,6 +90,7 @@
 #define BOND_LINK_ARP_INTERV	0
 
 static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
+static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
 static int num_grat_arp = 1;
 static int num_unsol_na = 1;
 static int miimon	= BOND_LINK_MON_INTERV;
@@ -106,10 +107,13 @@
 static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
 static char *arp_validate;
 static char *fail_over_mac;
+static int all_slaves_active = 0;
 static struct bond_params bonding_defaults;
 
 module_param(max_bonds, int, 0);
 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
+module_param(tx_queues, int, 0);
+MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
 module_param(num_grat_arp, int, 0644);
 MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event");
 module_param(num_unsol_na, int, 0644);
@@ -155,6 +159,10 @@
 MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
 module_param(fail_over_mac, charp, 0);
 MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC.  none (default), active or follow");
+module_param(all_slaves_active, int, 0);
+MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
+				     "by setting active flag for all slaves.  "
+				     "0 for never (default), 1 for always.");
 
 /*----------------------------- Global variables ----------------------------*/
 
@@ -1522,16 +1530,32 @@
 		}
 	}
 
+	/* If this is the first slave, then we need to set the master's hardware
+	 * address to be the same as the slave's. */
+	if (bond->slave_cnt == 0)
+		memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
+		       slave_dev->addr_len);
+
+
 	new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
 	if (!new_slave) {
 		res = -ENOMEM;
 		goto err_undo_flags;
 	}
 
-	/* save slave's original flags before calling
-	 * netdev_set_master and dev_open
+	/*
+	 * Set the new_slave's queue_id to be zero.  Queue ID mapping
+	 * is set via sysfs or module option if desired.
 	 */
-	new_slave->original_flags = slave_dev->flags;
+	new_slave->queue_id = 0;
+
+	/* Save slave's original mtu and then set it to match the bond */
+	new_slave->original_mtu = slave_dev->mtu;
+	res = dev_set_mtu(slave_dev, bond->dev->mtu);
+	if (res) {
+		pr_debug("Error %d calling dev_set_mtu\n", res);
+		goto err_free;
+	}
 
 	/*
 	 * Save slave's original ("permanent") mac address for modes
@@ -1550,7 +1574,7 @@
 		res = dev_set_mac_address(slave_dev, &addr);
 		if (res) {
 			pr_debug("Error %d calling set_mac_address\n", res);
-			goto err_free;
+			goto err_restore_mtu;
 		}
 	}
 
@@ -1793,6 +1817,9 @@
 		dev_set_mac_address(slave_dev, &addr);
 	}
 
+err_restore_mtu:
+	dev_set_mtu(slave_dev, new_slave->original_mtu);
+
 err_free:
 	kfree(new_slave);
 
@@ -1980,6 +2007,8 @@
 		dev_set_mac_address(slave_dev, &addr);
 	}
 
+	dev_set_mtu(slave_dev, slave->original_mtu);
+
 	slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
 				   IFF_SLAVE_INACTIVE | IFF_BONDING |
 				   IFF_SLAVE_NEEDARP);
@@ -2566,7 +2595,7 @@
 		/*
 		 * This target is not on a VLAN
 		 */
-		if (rt->u.dst.dev == bond->dev) {
+		if (rt->dst.dev == bond->dev) {
 			ip_rt_put(rt);
 			pr_debug("basa: rtdev == bond->dev: arp_send\n");
 			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
@@ -2577,7 +2606,7 @@
 		vlan_id = 0;
 		list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 			vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
-			if (vlan_dev == rt->u.dst.dev) {
+			if (vlan_dev == rt->dst.dev) {
 				vlan_id = vlan->vlan_id;
 				pr_debug("basa: vlan match on %s %d\n",
 				       vlan_dev->name, vlan_id);
@@ -2595,7 +2624,7 @@
 		if (net_ratelimit()) {
 			pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
 				   bond->dev->name, &fl.fl4_dst,
-				   rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
+				   rt->dst.dev ? rt->dst.dev->name : "NULL");
 		}
 		ip_rt_put(rt);
 	}
@@ -3276,6 +3305,7 @@
 		else
 			seq_puts(seq, "Aggregator ID: N/A\n");
 	}
+	seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
 }
 
 static int bond_info_seq_show(struct seq_file *seq, void *v)
@@ -3785,50 +3815,49 @@
 	return 0;
 }
 
-static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
+static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+						struct rtnl_link_stats64 *stats)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
-	struct net_device_stats *stats = &bond_dev->stats;
-	struct net_device_stats local_stats;
+	struct rtnl_link_stats64 temp;
 	struct slave *slave;
 	int i;
 
-	memset(&local_stats, 0, sizeof(struct net_device_stats));
+	memset(stats, 0, sizeof(*stats));
 
 	read_lock_bh(&bond->lock);
 
 	bond_for_each_slave(bond, slave, i) {
-		const struct net_device_stats *sstats = dev_get_stats(slave->dev);
+		const struct rtnl_link_stats64 *sstats =
+			dev_get_stats(slave->dev, &temp);
 
-		local_stats.rx_packets += sstats->rx_packets;
-		local_stats.rx_bytes += sstats->rx_bytes;
-		local_stats.rx_errors += sstats->rx_errors;
-		local_stats.rx_dropped += sstats->rx_dropped;
+		stats->rx_packets += sstats->rx_packets;
+		stats->rx_bytes += sstats->rx_bytes;
+		stats->rx_errors += sstats->rx_errors;
+		stats->rx_dropped += sstats->rx_dropped;
 
-		local_stats.tx_packets += sstats->tx_packets;
-		local_stats.tx_bytes += sstats->tx_bytes;
-		local_stats.tx_errors += sstats->tx_errors;
-		local_stats.tx_dropped += sstats->tx_dropped;
+		stats->tx_packets += sstats->tx_packets;
+		stats->tx_bytes += sstats->tx_bytes;
+		stats->tx_errors += sstats->tx_errors;
+		stats->tx_dropped += sstats->tx_dropped;
 
-		local_stats.multicast += sstats->multicast;
-		local_stats.collisions += sstats->collisions;
+		stats->multicast += sstats->multicast;
+		stats->collisions += sstats->collisions;
 
-		local_stats.rx_length_errors += sstats->rx_length_errors;
-		local_stats.rx_over_errors += sstats->rx_over_errors;
-		local_stats.rx_crc_errors += sstats->rx_crc_errors;
-		local_stats.rx_frame_errors += sstats->rx_frame_errors;
-		local_stats.rx_fifo_errors += sstats->rx_fifo_errors;
-		local_stats.rx_missed_errors += sstats->rx_missed_errors;
+		stats->rx_length_errors += sstats->rx_length_errors;
+		stats->rx_over_errors += sstats->rx_over_errors;
+		stats->rx_crc_errors += sstats->rx_crc_errors;
+		stats->rx_frame_errors += sstats->rx_frame_errors;
+		stats->rx_fifo_errors += sstats->rx_fifo_errors;
+		stats->rx_missed_errors += sstats->rx_missed_errors;
 
-		local_stats.tx_aborted_errors += sstats->tx_aborted_errors;
-		local_stats.tx_carrier_errors += sstats->tx_carrier_errors;
-		local_stats.tx_fifo_errors += sstats->tx_fifo_errors;
-		local_stats.tx_heartbeat_errors += sstats->tx_heartbeat_errors;
-		local_stats.tx_window_errors += sstats->tx_window_errors;
+		stats->tx_aborted_errors += sstats->tx_aborted_errors;
+		stats->tx_carrier_errors += sstats->tx_carrier_errors;
+		stats->tx_fifo_errors += sstats->tx_fifo_errors;
+		stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
+		stats->tx_window_errors += sstats->tx_window_errors;
 	}
 
-	memcpy(stats, &local_stats, sizeof(struct net_device_stats));
-
 	read_unlock_bh(&bond->lock);
 
 	return stats;
@@ -4412,9 +4441,59 @@
 	}
 }
 
+/*
+ * Lookup the slave that corresponds to a qid
+ */
+static inline int bond_slave_override(struct bonding *bond,
+				      struct sk_buff *skb)
+{
+	int i, res = 1;
+	struct slave *slave = NULL;
+	struct slave *check_slave;
+
+	read_lock(&bond->lock);
+
+	if (!BOND_IS_OK(bond) || !skb->queue_mapping)
+		goto out;
+
+	/* Find out if any slaves have the same mapping as this skb. */
+	bond_for_each_slave(bond, check_slave, i) {
+		if (check_slave->queue_id == skb->queue_mapping) {
+			slave = check_slave;
+			break;
+		}
+	}
+
+	/* If the slave isn't UP, use default transmit policy. */
+	if (slave && slave->queue_id && IS_UP(slave->dev) &&
+	    (slave->link == BOND_LINK_UP)) {
+		res = bond_dev_queue_xmit(bond, skb, slave->dev);
+	}
+
+out:
+	read_unlock(&bond->lock);
+	return res;
+}
+
+static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	/*
+	 * This helper function exists to help dev_pick_tx get the correct
+	 * destination queue.  Using a helper function skips a call to
+	 * skb_tx_hash and will put the skbs in the queue we expect on their
+	 * way down to the bonding driver.
+	 */
+	return skb->queue_mapping;
+}
+
 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	const struct bonding *bond = netdev_priv(dev);
+	struct bonding *bond = netdev_priv(dev);
+
+	if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
+		if (!bond_slave_override(bond, skb))
+			return NETDEV_TX_OK;
+	}
 
 	switch (bond->params.mode) {
 	case BOND_MODE_ROUNDROBIN:
@@ -4499,7 +4578,8 @@
 	.ndo_open		= bond_open,
 	.ndo_stop		= bond_close,
 	.ndo_start_xmit		= bond_start_xmit,
-	.ndo_get_stats		= bond_get_stats,
+	.ndo_select_queue	= bond_select_queue,
+	.ndo_get_stats64	= bond_get_stats,
 	.ndo_do_ioctl		= bond_do_ioctl,
 	.ndo_set_multicast_list	= bond_set_multicast_list,
 	.ndo_change_mtu		= bond_change_mtu,
@@ -4767,6 +4847,20 @@
 		}
 	}
 
+	if (tx_queues < 1 || tx_queues > 255) {
+		pr_warning("Warning: tx_queues (%d) should be between "
+			   "1 and 255, resetting to %d\n",
+			   tx_queues, BOND_DEFAULT_TX_QUEUES);
+		tx_queues = BOND_DEFAULT_TX_QUEUES;
+	}
+
+	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
+		pr_warning("Warning: all_slaves_active module parameter (%d) "
+			   "is not a valid value (0/1), so it was set to "
+			   "0\n", all_slaves_active);
+		all_slaves_active = 0;
+	}
+
 	/* reset values for TLB/ALB */
 	if ((bond_mode == BOND_MODE_TLB) ||
 	    (bond_mode == BOND_MODE_ALB)) {
@@ -4937,6 +5031,8 @@
 	params->primary[0] = 0;
 	params->primary_reselect = primary_reselect_value;
 	params->fail_over_mac = fail_over_mac_value;
+	params->tx_queues = tx_queues;
+	params->all_slaves_active = all_slaves_active;
 
 	if (primary) {
 		strncpy(params->primary, primary, IFNAMSIZ);
@@ -5023,8 +5119,8 @@
 
 	rtnl_lock();
 
-	bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
-				bond_setup);
+	bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "",
+				bond_setup, tx_queues);
 	if (!bond_dev) {
 		pr_err("%s: eek! can't alloc netdev!\n", name);
 		rtnl_unlock();
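
The bonding changes above add transmit-queue override: bond_select_queue() simply echoes skb->queue_mapping, and bond_slave_override() transmits on whichever up slave carries that non-zero queue_id, falling back to the normal transmit policy otherwise. A userspace sketch of the lookup, with illustrative structures:

#include <stdio.h>

struct mock_slave {
	const char *name;
	unsigned short queue_id;
	int up;
};

/* A non-zero queue id selects the matching, usable slave; id 0 or no
 * match means "use the default bonding policy". */
static const struct mock_slave *pick_override(const struct mock_slave *s,
					      int n, unsigned short qid)
{
	int i;

	if (!qid)
		return NULL;
	for (i = 0; i < n; i++)
		if (s[i].queue_id == qid && s[i].up)
			return &s[i];
	return NULL;
}

int main(void)
{
	struct mock_slave slaves[] = {
		{ "eth0", 1, 1 },
		{ "eth1", 2, 0 },            /* configured but link down */
	};
	const struct mock_slave *s;

	s = pick_override(slaves, 2, 1);
	printf("qid 1 -> %s\n", s ? s->name : "default policy");
	s = pick_override(slaves, 2, 2);
	printf("qid 2 -> %s\n", s ? s->name : "default policy");
	return 0;
}
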
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b8bec08..f9a0343 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -211,7 +211,8 @@
 /*
  * Set the slaves in the current bond.  The bond interface must be
  * up for this to succeed.
- * This function is largely the same flow as bonding_update_bonds().
+ * This is supposed to be only a thin wrapper for bond_enslave and
+ * bond_release.  All hard work should be done there.
  */
 static ssize_t bonding_store_slaves(struct device *d,
 				    struct device_attribute *attr,
@@ -219,10 +220,8 @@
 {
 	char command[IFNAMSIZ + 1] = { 0, };
 	char *ifname;
-	int i, res, found, ret = count;
-	u32 original_mtu;
-	struct slave *slave;
-	struct net_device *dev = NULL;
+	int res, ret = count;
+	struct net_device *dev;
 	struct bonding *bond = to_bond(d);
 
 	/* Quick sanity check -- is the bond interface up? */
@@ -231,8 +230,6 @@
 			   bond->dev->name);
 	}
 
-	/* Note:  We can't hold bond->lock here, as bond_create grabs it. */
-
 	if (!rtnl_trylock())
 		return restart_syscall();
 
@@ -242,90 +239,32 @@
 	    !dev_valid_name(ifname))
 		goto err_no_cmd;
 
-	if (command[0] == '+') {
+	dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+	if (!dev) {
+		pr_info("%s: Interface %s does not exist!\n",
+			bond->dev->name, ifname);
+		ret = -ENODEV;
+		goto out;
+	}
 
-		/* Got a slave name in ifname.  Is it already in the list? */
-		found = 0;
-
-		dev = __dev_get_by_name(dev_net(bond->dev), ifname);
-		if (!dev) {
-			pr_info("%s: Interface %s does not exist!\n",
-				bond->dev->name, ifname);
-			ret = -ENODEV;
-			goto out;
-		}
-
-		if (dev->flags & IFF_UP) {
-			pr_err("%s: Error: Unable to enslave %s because it is already up.\n",
-			       bond->dev->name, dev->name);
-			ret = -EPERM;
-			goto out;
-		}
-
-		read_lock(&bond->lock);
-		bond_for_each_slave(bond, slave, i)
-			if (slave->dev == dev) {
-				pr_err("%s: Interface %s is already enslaved!\n",
-				       bond->dev->name, ifname);
-				ret = -EPERM;
-				read_unlock(&bond->lock);
-				goto out;
-			}
-		read_unlock(&bond->lock);
-
-		pr_info("%s: Adding slave %s.\n", bond->dev->name, ifname);
-
-		/* If this is the first slave, then we need to set
-		   the master's hardware address to be the same as the
-		   slave's. */
-		if (is_zero_ether_addr(bond->dev->dev_addr))
-			memcpy(bond->dev->dev_addr, dev->dev_addr,
-			       dev->addr_len);
-
-		/* Set the slave's MTU to match the bond */
-		original_mtu = dev->mtu;
-		res = dev_set_mtu(dev, bond->dev->mtu);
-		if (res) {
-			ret = res;
-			goto out;
-		}
-
+	switch (command[0]) {
+	case '+':
+		pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
 		res = bond_enslave(bond->dev, dev);
-		bond_for_each_slave(bond, slave, i)
-			if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0)
-				slave->original_mtu = original_mtu;
-		if (res)
-			ret = res;
+		break;
 
-		goto out;
+	case '-':
+		pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
+		res = bond_release(bond->dev, dev);
+		break;
+
+	default:
+		goto err_no_cmd;
 	}
 
-	if (command[0] == '-') {
-		dev = NULL;
-		original_mtu = 0;
-		bond_for_each_slave(bond, slave, i)
-			if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
-				dev = slave->dev;
-				original_mtu = slave->original_mtu;
-				break;
-			}
-		if (dev) {
-			pr_info("%s: Removing slave %s\n",
-				bond->dev->name, dev->name);
-				res = bond_release(bond->dev, dev);
-			if (res) {
-				ret = res;
-				goto out;
-			}
-			/* set the slave MTU to the default */
-			dev_set_mtu(dev, original_mtu);
-		} else {
-			pr_err("unable to remove non-existent slave %s for bond %s.\n",
-			       ifname, bond->dev->name);
-			ret = -ENODEV;
-		}
-		goto out;
-	}
+	if (res)
+		ret = res;
+	goto out;
 
 err_no_cmd:
 	pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
@@ -1472,7 +1411,173 @@
 }
 static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
 
+/*
+ * Show the queue_ids of the slaves in the current bond.
+ */
+static ssize_t bonding_show_queue_id(struct device *d,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct slave *slave;
+	int i, res = 0;
+	struct bonding *bond = to_bond(d);
 
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	read_lock(&bond->lock);
+	bond_for_each_slave(bond, slave, i) {
+		if (res > (PAGE_SIZE - 6)) {
+			/* not enough space for another interface name */
+			if ((PAGE_SIZE - res) > 10)
+				res = PAGE_SIZE - 10;
+			res += sprintf(buf + res, "++more++ ");
+			break;
+		}
+		res += sprintf(buf + res, "%s:%d ",
+			       slave->dev->name, slave->queue_id);
+	}
+	read_unlock(&bond->lock);
+	if (res)
+		buf[res-1] = '\n'; /* eat the leftover space */
+	rtnl_unlock();
+	return res;
+}
+
+/*
+ * Set the queue_ids of the slaves in the current bond.  The bond
+ * interface must be enslaved for this to work.
+ */
+static ssize_t bonding_store_queue_id(struct device *d,
+				      struct device_attribute *attr,
+				      const char *buffer, size_t count)
+{
+	struct slave *slave, *update_slave;
+	struct bonding *bond = to_bond(d);
+	u16 qid;
+	int i, ret = count;
+	char *delim;
+	struct net_device *sdev = NULL;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	/* delim will point to queue id if successful */
+	delim = strchr(buffer, ':');
+	if (!delim)
+		goto err_no_cmd;
+
+	/*
+	 * Terminate string that points to device name and bump it
+	 * up one, so we can read the queue id there.
+	 */
+	*delim = '\0';
+	if (sscanf(++delim, "%hd\n", &qid) != 1)
+		goto err_no_cmd;
+
+	/* Check buffer length, valid ifname and queue id */
+	if (strlen(buffer) > IFNAMSIZ ||
+	    !dev_valid_name(buffer) ||
+	    qid > bond->params.tx_queues)
+		goto err_no_cmd;
+
+	/* Get the pointer to that interface if it exists */
+	sdev = __dev_get_by_name(dev_net(bond->dev), buffer);
+	if (!sdev)
+		goto err_no_cmd;
+
+	read_lock(&bond->lock);
+
+	/* Search for the slave and check for duplicate qids */
+	update_slave = NULL;
+	bond_for_each_slave(bond, slave, i) {
+		if (sdev == slave->dev)
+			/*
+			 * We don't need to check the matching
+			 * slave for dups, since we're overwriting it
+			 */
+			update_slave = slave;
+		else if (qid && qid == slave->queue_id) {
+			goto err_no_cmd_unlock;
+		}
+	}
+
+	if (!update_slave)
+		goto err_no_cmd_unlock;
+
+	/* Actually set the qids for the slave */
+	update_slave->queue_id = qid;
+
+	read_unlock(&bond->lock);
+out:
+	rtnl_unlock();
+	return ret;
+
+err_no_cmd_unlock:
+	read_unlock(&bond->lock);
+err_no_cmd:
+	pr_info("invalid input for queue_id set for %s.\n",
+		bond->dev->name);
+	ret = -EPERM;
+	goto out;
+}
+
+static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
+		   bonding_store_queue_id);
+
+
+/*
+ * Show and set the all_slaves_active flag.
+ */
+static ssize_t bonding_show_slaves_active(struct device *d,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct bonding *bond = to_bond(d);
+
+	return sprintf(buf, "%d\n", bond->params.all_slaves_active);
+}
+
+static ssize_t bonding_store_slaves_active(struct device *d,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	int i, new_value, ret = count;
+	struct bonding *bond = to_bond(d);
+	struct slave *slave;
+
+	if (sscanf(buf, "%d", &new_value) != 1) {
+		pr_err("%s: no all_slaves_active value specified.\n",
+		       bond->dev->name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (new_value == bond->params.all_slaves_active)
+		goto out;
+
+	if ((new_value == 0) || (new_value == 1)) {
+		bond->params.all_slaves_active = new_value;
+	} else {
+		pr_info("%s: Ignoring invalid all_slaves_active value %d.\n",
+			bond->dev->name, new_value);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	bond_for_each_slave(bond, slave, i) {
+		if (slave->state == BOND_STATE_BACKUP) {
+			if (new_value)
+				slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE;
+			else
+				slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
+		}
+	}
+out:
+	return count;
+}
+static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
+		   bonding_show_slaves_active, bonding_store_slaves_active);
 
 static struct attribute *per_bond_attrs[] = {
 	&dev_attr_slaves.attr,
@@ -1499,6 +1604,8 @@
 	&dev_attr_ad_actor_key.attr,
 	&dev_attr_ad_partner_key.attr,
 	&dev_attr_ad_partner_mac.attr,
+	&dev_attr_queue_id.attr,
+	&dev_attr_all_slaves_active.attr,
 	NULL,
 };
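
bonding_store_queue_id() above parses sysfs input of the form "ifname:qid", splitting on ':' and rejecting ids above tx_queues. A standalone sketch of that parsing; the buffer sizes and the queue limit here are illustrative, not the driver's values:

#include <stdio.h>
#include <string.h>

static int parse_queue_id(const char *input, int tx_queues,
			  char *ifname, size_t ifname_len,
			  unsigned short *qid)
{
	char buf[64];
	char *delim;

	snprintf(buf, sizeof(buf), "%s", input);
	delim = strchr(buf, ':');          /* delim points at the queue id */
	if (!delim)
		return -1;
	*delim = '\0';                     /* terminate the ifname part */
	if (sscanf(delim + 1, "%hu", qid) != 1)
		return -1;
	if (strlen(buf) >= ifname_len || *qid > tx_queues)
		return -1;
	snprintf(ifname, ifname_len, "%s", buf);
	return 0;
}

int main(void)
{
	char ifname[16];
	unsigned short qid;

	if (!parse_queue_id("eth0:3", 16, ifname, sizeof(ifname), &qid))
		printf("slave %s -> queue %hu\n", ifname, qid);
	if (parse_queue_id("eth0-3", 16, ifname, sizeof(ifname), &qid))
		printf("rejected malformed input\n");
	return 0;
}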
 
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 2aa3367..c6fdd85 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -23,8 +23,8 @@
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
-#define DRV_VERSION	"3.6.0"
-#define DRV_RELDATE	"September 26, 2009"
+#define DRV_VERSION	"3.7.0"
+#define DRV_RELDATE	"June 2, 2010"
 #define DRV_NAME	"bonding"
 #define DRV_DESCRIPTION	"Ethernet Channel Bonding Driver"
 
@@ -60,6 +60,9 @@
 		 ((mode) == BOND_MODE_TLB)          ||	\
 		 ((mode) == BOND_MODE_ALB))
 
+#define TX_QUEUE_OVERRIDE(mode)				\
+			(((mode) == BOND_MODE_ACTIVEBACKUP) ||	\
+			 ((mode) == BOND_MODE_ROUNDROBIN))
 /*
  * Less bad way to call ioctl from within the kernel; this needs to be
  * done some other way to get the call out of interrupt context.
@@ -131,6 +134,8 @@
 	char primary[IFNAMSIZ];
 	int primary_reselect;
 	__be32 arp_targets[BOND_MAX_ARP_TARGETS];
+	int tx_queues;
+	int all_slaves_active;
 };
 
 struct bond_parm_tbl {
@@ -159,12 +164,12 @@
 	s8     link;    /* one of BOND_LINK_XXXX */
 	s8     new_link;
 	s8     state;   /* one of BOND_STATE_XXXX */
-	u32    original_flags;
 	u32    original_mtu;
 	u32    link_failure_count;
 	u8     perm_hwaddr[ETH_ALEN];
 	u16    speed;
 	u8     duplex;
+	u16    queue_id;
 	struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
 	struct tlb_slave_info tlb_info;
 };
@@ -291,7 +296,8 @@
 	struct bonding *bond = netdev_priv(slave->dev->master);
 	if (!bond_is_lb(bond))
 		slave->state = BOND_STATE_BACKUP;
-	slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
+	if (!bond->params.all_slaves_active)
+		slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
 	if (slave_do_arp_validate(bond, slave))
 		slave->dev->priv_flags |= IFF_SLAVE_NEEDARP;
 }
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 0b28e01..631a624 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -2,16 +2,32 @@
 # CAIF physical drivers
 #
 
-if CAIF
-
 comment "CAIF transport drivers"
 
 config CAIF_TTY
 	tristate "CAIF TTY transport driver"
+	depends on CAIF
 	default n
 	---help---
 	The CAIF TTY transport driver is a Line Discipline (ldisc)
 	identified as N_CAIF. When this ldisc is opened from user space
 	it will redirect the TTY's traffic into the CAIF stack.
 
-endif # CAIF
+config CAIF_SPI_SLAVE
+	tristate "CAIF SPI transport driver for slave interface"
+	depends on CAIF
+	default n
+	---help---
+	The CAIF Link layer SPI Protocol driver for Slave SPI interface.
+	This driver implements a platform driver to accommodate a
+	platform specific SPI device. A sample CAIF SPI Platform device is
+	provided in Documentation/networking/caif/spi_porting.txt
+
+config CAIF_SPI_SYNC
+	bool "Next command and length in start of frame"
+	depends on CAIF_SPI_SLAVE
+	default n
+	---help---
+	Putting the next command and length at the start of the frame can
+	help to synchronize to the next transfer in case of over or under-runs.
+	This option also needs to be enabled on the modem.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 52b6d1f..3a11d61 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -1,12 +1,10 @@
-ifeq ($(CONFIG_CAIF_DEBUG),1)
-CAIF_DBG_FLAGS := -DDEBUG
+ifeq ($(CONFIG_CAIF_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
 endif
 
-KBUILD_EXTRA_SYMBOLS=net/caif/Module.symvers
-
-ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
-clean-dirs:= .tmp_versions
-clean-files:= Module.symvers modules.order *.cmd *~ \
-
 # Serial interface
 obj-$(CONFIG_CAIF_TTY) += caif_serial.o
+
+# SPI slave physical interfaces module
+cfspi_slave-objs := caif_spi.o caif_spi_slave.o
+obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 09257ca..3df0c0f 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -174,6 +174,7 @@
 	struct ser_device *ser;
 	int ret;
 	u8 *p;
+
 	ser = tty->disc_data;
 
 	/*
@@ -221,6 +222,7 @@
 	struct tty_struct *tty;
 	struct sk_buff *skb;
 	int tty_wr, len, room;
+
 	tty = ser->tty;
 	ser->tx_started = true;
 
@@ -281,6 +283,7 @@
 static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ser_device *ser;
+
 	BUG_ON(dev == NULL);
 	ser = netdev_priv(dev);
 
@@ -299,6 +302,7 @@
 static void ldisc_tx_wakeup(struct tty_struct *tty)
 {
 	struct ser_device *ser;
+
 	ser = tty->disc_data;
 	BUG_ON(ser == NULL);
 	BUG_ON(ser->tty != tty);
@@ -348,6 +352,7 @@
 	struct ser_device *ser = tty->disc_data;
 	/* Remove may be called inside or outside of rtnl_lock */
 	int islocked = rtnl_is_locked();
+
 	if (!islocked)
 		rtnl_lock();
 	/* device is freed automagically by net-sysfs */
@@ -374,6 +379,7 @@
 static int register_ldisc(void)
 {
 	int result;
+
 	result = tty_register_ldisc(N_CAIF, &caif_ldisc);
 	if (result < 0) {
 		pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
@@ -391,12 +397,12 @@
 static void caifdev_setup(struct net_device *dev)
 {
 	struct ser_device *serdev = netdev_priv(dev);
+
 	dev->features = 0;
 	dev->netdev_ops = &netdev_ops;
 	dev->type = ARPHRD_CAIF;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
 	dev->mtu = CAIF_MAX_MTU;
-	dev->hard_header_len = CAIF_NEEDED_HEADROOM;
 	dev->tx_queue_len = 0;
 	dev->destructor = free_netdev;
 	skb_queue_head_init(&serdev->head);
@@ -410,8 +416,6 @@
 
 static int caif_net_open(struct net_device *dev)
 {
-	struct ser_device *ser;
-	ser = netdev_priv(dev);
 	netif_wake_queue(dev);
 	return 0;
 }
@@ -425,6 +429,7 @@
 static int __init caif_ser_init(void)
 {
 	int ret;
+
 	ret = register_ldisc();
 	debugfsdir = debugfs_create_dir("caif_serial", NULL);
 	return ret;
@@ -435,6 +440,7 @@
 	struct ser_device *ser = NULL;
 	struct list_head *node;
 	struct list_head *_tmp;
+
 	list_for_each_safe(node, _tmp, &ser_list) {
 		ser = list_entry(node, struct ser_device, node);
 		dev_close(ser->dev);
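
The new caif_spi.c added below pads each uplink frame twice: a head pad, whose length is stored in the first byte, aligns the payload start, and a tail pad aligns the frame end; both are computed by masking against the spi_up_*_align module parameters. A minimal sketch of that arithmetic, with made-up alignment and length values:

#include <stdio.h>

static int head_align = 3;   /* stand-in for spi_up_head_align */
static int tail_align = 3;   /* stand-in for spi_up_tail_align */

int main(void)
{
	int hdr_len = 5;     /* CAIF header bytes before the payload */
	int frm_len = 42;    /* CAIF frame length */
	int spad = 0, epad;

	/* Head pad: one length byte plus enough filler to align the payload,
	 * computed as in cfspi_xmitfrm(), treating the parameter as a mask. */
	if (head_align)
		spad = 1 + ((hdr_len + 1) & head_align);

	/* Tail pad: filler to align the end of the whole frame. */
	epad = (frm_len + spad) & tail_align;

	printf("head pad %d, tail pad %d, total on wire %d\n",
	       spad, epad, spad + frm_len + epad);
	return 0;
}
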
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
new file mode 100644
index 0000000..03049e8
--- /dev/null
+++ b/drivers/net/caif/caif_spi.c
@@ -0,0 +1,847 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author:  Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <linux/if_arp.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/caif_spi.h>
+
+#ifndef CONFIG_CAIF_SPI_SYNC
+#define FLAVOR "Flavour: Vanilla.\n"
+#else
+#define FLAVOR "Flavour: Master CMD&LEN at start.\n"
+#endif /* CONFIG_CAIF_SPI_SYNC */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
+MODULE_DESCRIPTION("CAIF SPI driver");
+
+static int spi_loop;
+module_param(spi_loop, bool, S_IRUGO);
+MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
+
+/* SPI frame alignment. */
+module_param(spi_frm_align, int, S_IRUGO);
+MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
+
+/* SPI padding options. */
+module_param(spi_up_head_align, int, S_IRUGO);
+MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
+
+module_param(spi_up_tail_align, int, S_IRUGO);
+MODULE_PARM_DESC(spi_up_tail_align, "SPI uplink tail alignment.");
+
+module_param(spi_down_head_align, int, S_IRUGO);
+MODULE_PARM_DESC(spi_down_head_align, "SPI downlink head alignment.");
+
+module_param(spi_down_tail_align, int, S_IRUGO);
+MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
+
+#ifdef CONFIG_ARM
+#define BYTE_HEX_FMT "%02X"
+#else
+#define BYTE_HEX_FMT "%02hhX"
+#endif
+
+#define SPI_MAX_PAYLOAD_SIZE 4096
+/*
+ * Threshold values for the SPI packet queue. Flowcontrol will be asserted
+ * when the number of packets exceeds HIGH_WATER_MARK. It will not be
+ * deasserted before the number of packets drops below LOW_WATER_MARK.
+ */
+#define LOW_WATER_MARK   100
+#define HIGH_WATER_MARK  (LOW_WATER_MARK*5)
+
+#ifdef CONFIG_UML
+
+/*
+ * We sometimes use UML for debugging, but it cannot handle
+ * dma_alloc_coherent so we have to wrap it.
+ */
+static inline void *dma_alloc(dma_addr_t *daddr)
+{
+	return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
+}
+
+static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+{
+	kfree(cpu_addr);
+}
+
+#else
+
+static inline void *dma_alloc(dma_addr_t *daddr)
+{
+	return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr,
+				GFP_KERNEL);
+}
+
+static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+{
+	dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle);
+}
+#endif	/* CONFIG_UML */
+
+#ifdef CONFIG_DEBUG_FS
+
+#define DEBUGFS_BUF_SIZE	4096
+
+static struct dentry *dbgfs_root;
+
+static inline void driver_debugfs_create(void)
+{
+	dbgfs_root = debugfs_create_dir(cfspi_spi_driver.driver.name, NULL);
+}
+
+static inline void driver_debugfs_remove(void)
+{
+	debugfs_remove(dbgfs_root);
+}
+
+static inline void dev_debugfs_rem(struct cfspi *cfspi)
+{
+	debugfs_remove(cfspi->dbgfs_frame);
+	debugfs_remove(cfspi->dbgfs_state);
+	debugfs_remove(cfspi->dbgfs_dir);
+}
+
+static int dbgfs_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	char *buf;
+	int len = 0;
+	ssize_t size;
+	struct cfspi *cfspi = (struct cfspi *)file->private_data;
+
+	buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	/* Print out debug information. */
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"CAIF SPI debug information:\n");
+
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
+
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"STATE: %d\n", cfspi->dbg_state);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Previous CMD: 0x%x\n", cfspi->pcmd);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Current CMD: 0x%x\n", cfspi->cmd);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Previous TX len: %d\n", cfspi->tx_ppck_len);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Previous RX len: %d\n", cfspi->rx_ppck_len);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Current TX len: %d\n", cfspi->tx_cpck_len);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Current RX len: %d\n", cfspi->rx_cpck_len);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Next TX len: %d\n", cfspi->tx_npck_len);
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Next RX len: %d\n", cfspi->rx_npck_len);
+
+	size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return size;
+}
+
+static ssize_t print_frame(char *buf, size_t size, char *frm,
+			   size_t count, size_t cut)
+{
+	int len = 0;
+	int i;
+	for (i = 0; i < count; i++) {
+		len += snprintf((buf + len), (size - len),
+					"[0x" BYTE_HEX_FMT "]",
+					frm[i]);
+		if ((i == cut) && (count > (cut * 2))) {
+			/* Fast forward. */
+			i = count - cut;
+			len += snprintf((buf + len), (size - len),
+					"--- %u bytes skipped ---\n",
+					(int)(count - (cut * 2)));
+		}
+
+		if ((!(i % 10)) && i) {
+			len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+					"\n");
+		}
+	}
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
+	return len;
+}
+
+static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	char *buf;
+	int len = 0;
+	ssize_t size;
+	struct cfspi *cfspi;
+
+	cfspi = (struct cfspi *)file->private_data;
+	buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	/* Print out debug information. */
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Current frame:\n");
+
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Tx data (Len: %d):\n", cfspi->tx_cpck_len);
+
+	len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
+			   cfspi->xfer.va_tx,
+			   (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
+
+	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+			"Rx data (Len: %d):\n", cfspi->rx_cpck_len);
+
+	len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
+			   cfspi->xfer.va_rx,
+			   (cfspi->rx_cpck_len + SPI_CMD_SZ), 100);
+
+	size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return size;
+}
+
+static const struct file_operations dbgfs_state_fops = {
+	.open = dbgfs_open,
+	.read = dbgfs_state,
+	.owner = THIS_MODULE
+};
+
+static const struct file_operations dbgfs_frame_fops = {
+	.open = dbgfs_open,
+	.read = dbgfs_frame,
+	.owner = THIS_MODULE
+};
+
+static inline void dev_debugfs_add(struct cfspi *cfspi)
+{
+	cfspi->dbgfs_dir = debugfs_create_dir(cfspi->pdev->name, dbgfs_root);
+	cfspi->dbgfs_state = debugfs_create_file("state", S_IRUGO,
+						 cfspi->dbgfs_dir, cfspi,
+						 &dbgfs_state_fops);
+	cfspi->dbgfs_frame = debugfs_create_file("frame", S_IRUGO,
+						 cfspi->dbgfs_dir, cfspi,
+						 &dbgfs_frame_fops);
+}
+
+inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
+{
+	cfspi->dbg_state = state;
+};
+#else
+
+static inline void driver_debugfs_create(void)
+{
+}
+
+static inline void driver_debugfs_remove(void)
+{
+}
+
+static inline void dev_debugfs_add(struct cfspi *cfspi)
+{
+}
+
+static inline void dev_debugfs_rem(struct cfspi *cfspi)
+{
+}
+
+inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
+{
+}
+#endif				/* CONFIG_DEBUG_FS */
+
+static LIST_HEAD(cfspi_list);
+static spinlock_t cfspi_list_lock;
+
+/* SPI uplink head alignment. */
+static ssize_t show_up_head_align(struct device_driver *driver, char *buf)
+{
+	return sprintf(buf, "%d\n", spi_up_head_align);
+}
+
+static DRIVER_ATTR(up_head_align, S_IRUSR, show_up_head_align, NULL);
+
+/* SPI uplink tail alignment. */
+static ssize_t show_up_tail_align(struct device_driver *driver, char *buf)
+{
+	return sprintf(buf, "%d\n", spi_up_tail_align);
+}
+
+static DRIVER_ATTR(up_tail_align, S_IRUSR, show_up_tail_align, NULL);
+
+/* SPI downlink head alignment. */
+static ssize_t show_down_head_align(struct device_driver *driver, char *buf)
+{
+	return sprintf(buf, "%d\n", spi_down_head_align);
+}
+
+static DRIVER_ATTR(down_head_align, S_IRUSR, show_down_head_align, NULL);
+
+/* SPI downlink tail alignment. */
+static ssize_t show_down_tail_align(struct device_driver *driver, char *buf)
+{
+	return sprintf(buf, "%d\n", spi_down_tail_align);
+}
+
+static DRIVER_ATTR(down_tail_align, S_IRUSR, show_down_tail_align, NULL);
+
+/* SPI frame alignment. */
+static ssize_t show_frame_align(struct device_driver *driver, char *buf)
+{
+	return sprintf(buf, "%d\n", spi_frm_align);
+}
+
+static DRIVER_ATTR(frame_align, S_IRUSR, show_frame_align, NULL);
+
+int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
+{
+	u8 *dst = buf;
+	caif_assert(buf);
+
+	do {
+		struct sk_buff *skb;
+		struct caif_payload_info *info;
+		int spad = 0;
+		int epad;
+
+		skb = skb_dequeue(&cfspi->chead);
+		if (!skb)
+			break;
+
+		/*
+		 * Calculate length of frame including SPI padding.
+		 * The payload position is found in the control buffer.
+		 */
+		info = (struct caif_payload_info *)&skb->cb;
+
+		/*
+		 * Compute head offset i.e. number of bytes to add to
+		 * get the start of the payload aligned.
+		 */
+		if (spi_up_head_align) {
+			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+			*dst = (u8)(spad - 1);
+			dst += spad;
+		}
+
+		/* Copy in CAIF frame. */
+		skb_copy_bits(skb, 0, dst, skb->len);
+		dst += skb->len;
+		cfspi->ndev->stats.tx_packets++;
+		cfspi->ndev->stats.tx_bytes += skb->len;
+
+		/*
+		 * Compute tail offset i.e. number of bytes to add to
+		 * get the complete CAIF frame aligned.
+		 */
+		epad = (skb->len + spad) & spi_up_tail_align;
+		dst += epad;
+
+		dev_kfree_skb(skb);
+
+	} while ((dst - buf) < len);
+
+	return dst - buf;
+}
+
+int cfspi_xmitlen(struct cfspi *cfspi)
+{
+	struct sk_buff *skb = NULL;
+	int frm_len = 0;
+	int pkts = 0;
+
+	/*
+	 * Decommit previously committed frames.
+	 * skb_queue_splice_tail(&cfspi->chead,&cfspi->qhead)
+	 */
+	while (skb_peek(&cfspi->chead)) {
+		skb = skb_dequeue_tail(&cfspi->chead);
+		skb_queue_head(&cfspi->qhead, skb);
+	}
+
+	do {
+		struct caif_payload_info *info = NULL;
+		int spad = 0;
+		int epad = 0;
+
+		skb = skb_dequeue(&cfspi->qhead);
+		if (!skb)
+			break;
+
+		/*
+		 * Calculate length of frame including SPI padding.
+		 * The payload position is found in the control buffer.
+		 */
+		info = (struct caif_payload_info *)&skb->cb;
+
+		/*
+		 * Compute head offset i.e. number of bytes to add to
+		 * get the start of the payload aligned.
+		 */
+		if (spi_up_head_align)
+			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+
+		/*
+		 * Compute tail offset i.e. number of bytes to add to
+		 * get the complete CAIF frame aligned.
+		 */
+		epad = (skb->len + spad) & spi_up_tail_align;
+
+		if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
+			skb_queue_tail(&cfspi->chead, skb);
+			pkts++;
+			frm_len += skb->len + spad + epad;
+		} else {
+			/* Put back packet. */
+			skb_queue_head(&cfspi->qhead, skb);
+		}
+	} while (pkts <= CAIF_MAX_SPI_PKTS);
+
+	/*
+	 * Send flow on if previously sent flow off
+	 * and now go below the low water mark
+	 */
+	if (cfspi->flow_off_sent && cfspi->qhead.qlen < cfspi->qd_low_mark &&
+		cfspi->cfdev.flowctrl) {
+		cfspi->flow_off_sent = 0;
+		cfspi->cfdev.flowctrl(cfspi->ndev, 1);
+	}
+
+	return frm_len;
+}
+
+static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
+{
+	struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+
+	if (!in_interrupt())
+		spin_lock(&cfspi->lock);
+	if (assert) {
+		set_bit(SPI_SS_ON, &cfspi->state);
+		set_bit(SPI_XFER, &cfspi->state);
+	} else {
+		set_bit(SPI_SS_OFF, &cfspi->state);
+	}
+	if (!in_interrupt())
+		spin_unlock(&cfspi->lock);
+
+	/* Wake up the xfer thread. */
+	wake_up_interruptible(&cfspi->wait);
+}
+
+static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
+{
+	struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+
+	/* Transfer done, complete work queue */
+	complete(&cfspi->comp);
+}
+
+static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct cfspi *cfspi = NULL;
+	unsigned long flags;
+	if (!dev)
+		return -EINVAL;
+
+	cfspi = netdev_priv(dev);
+
+	skb_queue_tail(&cfspi->qhead, skb);
+
+	spin_lock_irqsave(&cfspi->lock, flags);
+	if (!test_and_set_bit(SPI_XFER, &cfspi->state)) {
+		/* Wake up xfer thread. */
+		wake_up_interruptible(&cfspi->wait);
+	}
+	spin_unlock_irqrestore(&cfspi->lock, flags);
+
+	/* Send flow off if number of bytes is above high water mark */
+	if (!cfspi->flow_off_sent &&
+		cfspi->qhead.qlen > cfspi->qd_high_mark &&
+		cfspi->cfdev.flowctrl) {
+		cfspi->flow_off_sent = 1;
+		cfspi->cfdev.flowctrl(cfspi->ndev, 0);
+	}
+
+	return 0;
+}
+
+int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
+{
+	u8 *src = buf;
+
+	caif_assert(buf != NULL);
+
+	do {
+		int res;
+		struct sk_buff *skb = NULL;
+		int spad = 0;
+		int epad = 0;
+		u8 *dst = NULL;
+		int pkt_len = 0;
+
+		/*
+		 * Compute head offset i.e. number of bytes added to
+		 * get the start of the payload aligned.
+		 */
+		if (spi_down_head_align) {
+			spad = 1 + *src;
+			src += spad;
+		}
+
+		/* Read length of CAIF frame (little endian). */
+		pkt_len = *src;
+		pkt_len |= ((*(src+1)) << 8) & 0xFF00;
+		pkt_len += 2;	/* Add FCS fields. */
+
+		/* Get a suitable caif packet and copy in data. */
+
+		skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1);
+		caif_assert(skb != NULL);
+
+		dst = skb_put(skb, pkt_len);
+		memcpy(dst, src, pkt_len);
+		src += pkt_len;
+
+		skb->protocol = htons(ETH_P_CAIF);
+		skb_reset_mac_header(skb);
+		skb->dev = cfspi->ndev;
+
+		/*
+		 * Push received packet up the stack.
+		 */
+		if (!spi_loop)
+			res = netif_rx_ni(skb);
+		else
+			res = cfspi_xmit(skb, cfspi->ndev);
+
+		if (!res) {
+			cfspi->ndev->stats.rx_packets++;
+			cfspi->ndev->stats.rx_bytes += pkt_len;
+		} else
+			cfspi->ndev->stats.rx_dropped++;
+
+		/*
+		 * Compute tail offset i.e. number of bytes added to
+		 * get the complete CAIF frame aligned.
+		 */
+		epad = (pkt_len + spad) & spi_down_tail_align;
+		src += epad;
+	} while ((src - buf) < len);
+
+	return src - buf;
+}
+
+static int cfspi_open(struct net_device *dev)
+{
+	netif_wake_queue(dev);
+	return 0;
+}
+
+static int cfspi_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+static const struct net_device_ops cfspi_ops = {
+	.ndo_open = cfspi_open,
+	.ndo_stop = cfspi_close,
+	.ndo_start_xmit = cfspi_xmit
+};
+
+static void cfspi_setup(struct net_device *dev)
+{
+	struct cfspi *cfspi = netdev_priv(dev);
+	dev->features = 0;
+	dev->netdev_ops = &cfspi_ops;
+	dev->type = ARPHRD_CAIF;
+	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+	dev->tx_queue_len = 0;
+	dev->mtu = SPI_MAX_PAYLOAD_SIZE;
+	dev->destructor = free_netdev;
+	skb_queue_head_init(&cfspi->qhead);
+	skb_queue_head_init(&cfspi->chead);
+	cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+	cfspi->cfdev.use_frag = false;
+	cfspi->cfdev.use_stx = false;
+	cfspi->cfdev.use_fcs = false;
+	cfspi->ndev = dev;
+}
+
+int cfspi_spi_probe(struct platform_device *pdev)
+{
+	struct cfspi *cfspi = NULL;
+	struct net_device *ndev;
+	struct cfspi_dev *dev;
+	int res;
+	dev = (struct cfspi_dev *)pdev->dev.platform_data;
+
+	ndev = alloc_netdev(sizeof(struct cfspi),
+			"cfspi%d", cfspi_setup);
+	if (!dev)
+		return -ENODEV;
+
+	cfspi = netdev_priv(ndev);
+	netif_stop_queue(ndev);
+	cfspi->ndev = ndev;
+	cfspi->pdev = pdev;
+
+	/* Set flow info */
+	cfspi->flow_off_sent = 0;
+	cfspi->qd_low_mark = LOW_WATER_MARK;
+	cfspi->qd_high_mark = HIGH_WATER_MARK;
+
+	/* Assign the SPI device. */
+	cfspi->dev = dev;
+	/* Assign the device ifc to this SPI interface. */
+	dev->ifc = &cfspi->ifc;
+
+	/* Allocate DMA buffers. */
+	cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
+	if (!cfspi->xfer.va_tx) {
+		printk(KERN_WARNING
+		       "CFSPI: failed to allocate dma TX buffer.\n");
+		res = -ENODEV;
+		goto err_dma_alloc_tx;
+	}
+
+	cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
+
+	if (!cfspi->xfer.va_rx) {
+		printk(KERN_WARNING
+		       "CFSPI: failed to allocate dma RX buffer.\n");
+		res = -ENODEV;
+		goto err_dma_alloc_rx;
+	}
+
+	/* Initialize the work item. */
+	INIT_WORK(&cfspi->work, cfspi_xfer);
+
+	/* Initialize spin locks. */
+	spin_lock_init(&cfspi->lock);
+
+	/* Initialize flow control state. */
+	cfspi->flow_stop = false;
+
+	/* Initialize wait queue. */
+	init_waitqueue_head(&cfspi->wait);
+
+	/* Create work thread. */
+	cfspi->wq = create_singlethread_workqueue(dev->name);
+	if (!cfspi->wq) {
+		printk(KERN_WARNING "CFSPI: failed to create work queue.\n");
+		res = -ENODEV;
+		goto err_create_wq;
+	}
+
+	/* Initialize the xfer completion. */
+	init_completion(&cfspi->comp);
+
+	/* Create debugfs entries. */
+	dev_debugfs_add(cfspi);
+
+	/* Set up the ifc. */
+	cfspi->ifc.ss_cb = cfspi_ss_cb;
+	cfspi->ifc.xfer_done_cb = cfspi_xfer_done_cb;
+	cfspi->ifc.priv = cfspi;
+
+	/* Add CAIF SPI device to list. */
+	spin_lock(&cfspi_list_lock);
+	list_add_tail(&cfspi->list, &cfspi_list);
+	spin_unlock(&cfspi_list_lock);
+
+	/* Schedule the work queue. */
+	queue_work(cfspi->wq, &cfspi->work);
+
+	/* Register network device. */
+	res = register_netdev(ndev);
+	if (res) {
+		printk(KERN_ERR "CFSPI: Reg. error: %d.\n", res);
+		goto err_net_reg;
+	}
+	return res;
+
+ err_net_reg:
+	dev_debugfs_rem(cfspi);
+	set_bit(SPI_TERMINATE, &cfspi->state);
+	wake_up_interruptible(&cfspi->wait);
+	destroy_workqueue(cfspi->wq);
+ err_create_wq:
+	dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ err_dma_alloc_rx:
+	dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
+ err_dma_alloc_tx:
+	free_netdev(ndev);
+
+	return res;
+}
+
+int cfspi_spi_remove(struct platform_device *pdev)
+{
+	struct list_head *list_node;
+	struct list_head *n;
+	struct cfspi *cfspi = NULL;
+	struct cfspi_dev *dev;
+
+	dev = (struct cfspi_dev *)pdev->dev.platform_data;
+	spin_lock(&cfspi_list_lock);
+	list_for_each_safe(list_node, n, &cfspi_list) {
+		cfspi = list_entry(list_node, struct cfspi, list);
+		/* Find the corresponding device. */
+		if (cfspi->dev == dev) {
+			/* Remove from list. */
+			list_del(list_node);
+			/* Free DMA buffers. */
+			dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+			dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
+			set_bit(SPI_TERMINATE, &cfspi->state);
+			wake_up_interruptible(&cfspi->wait);
+			destroy_workqueue(cfspi->wq);
+			/* Destroy debugfs directory and files. */
+			dev_debugfs_rem(cfspi);
+			unregister_netdev(cfspi->ndev);
+			spin_unlock(&cfspi_list_lock);
+			return 0;
+		}
+	}
+	spin_unlock(&cfspi_list_lock);
+	return -ENODEV;
+}
+
+static void __exit cfspi_exit_module(void)
+{
+	struct list_head *list_node;
+	struct list_head *n;
+	struct cfspi *cfspi = NULL;
+
+	list_for_each_safe(list_node, n, &cfspi_list) {
+		cfspi = list_entry(list_node, struct cfspi, list);
+		platform_device_unregister(cfspi->pdev);
+	}
+
+	/* Destroy sysfs files. */
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_up_head_align);
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_up_tail_align);
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_down_head_align);
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_down_tail_align);
+	driver_remove_file(&cfspi_spi_driver.driver, &driver_attr_frame_align);
+	/* Unregister platform driver. */
+	platform_driver_unregister(&cfspi_spi_driver);
+	/* Destroy debugfs root directory. */
+	driver_debugfs_remove();
+}
+
+static int __init cfspi_init_module(void)
+{
+	int result;
+
+	/* Initialize spin lock. */
+	spin_lock_init(&cfspi_list_lock);
+
+	/* Register platform driver. */
+	result = platform_driver_register(&cfspi_spi_driver);
+	if (result) {
+		printk(KERN_ERR "Could not register platform SPI driver.\n");
+		goto err_dev_register;
+	}
+
+	/* Create sysfs files. */
+	result =
+	    driver_create_file(&cfspi_spi_driver.driver,
+			       &driver_attr_up_head_align);
+	if (result) {
+		printk(KERN_ERR "Sysfs creation failed 1.\n");
+		goto err_create_up_head_align;
+	}
+
+	result =
+	    driver_create_file(&cfspi_spi_driver.driver,
+			       &driver_attr_up_tail_align);
+	if (result) {
+		printk(KERN_ERR "Sysfs creation failed 2.\n");
+		goto err_create_up_tail_align;
+	}
+
+	result =
+	    driver_create_file(&cfspi_spi_driver.driver,
+			       &driver_attr_down_head_align);
+	if (result) {
+		printk(KERN_ERR "Sysfs creation failed 3.\n");
+		goto err_create_down_head_align;
+	}
+
+	result =
+	    driver_create_file(&cfspi_spi_driver.driver,
+			       &driver_attr_down_tail_align);
+	if (result) {
+		printk(KERN_ERR "Sysfs creation failed 4.\n");
+		goto err_create_down_tail_align;
+	}
+
+	result =
+	    driver_create_file(&cfspi_spi_driver.driver,
+			       &driver_attr_frame_align);
+	if (result) {
+		printk(KERN_ERR "Sysfs creation failed 5.\n");
+		goto err_create_frame_align;
+	}
+	driver_debugfs_create();
+	return result;
+
+ err_create_frame_align:
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_down_tail_align);
+ err_create_down_tail_align:
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_down_head_align);
+ err_create_down_head_align:
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_up_tail_align);
+ err_create_up_tail_align:
+	driver_remove_file(&cfspi_spi_driver.driver,
+			   &driver_attr_up_head_align);
+ err_create_up_head_align:
+ err_dev_register:
+	return result;
+}
+
+module_init(cfspi_init_module);
+module_exit(cfspi_exit_module);
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
new file mode 100644
index 0000000..077ccf8
--- /dev/null
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author:  Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <net/caif/caif_spi.h>
+
+#ifndef CONFIG_CAIF_SPI_SYNC
+#define SPI_DATA_POS SPI_CMD_SZ
+static inline int forward_to_spi_cmd(struct cfspi *cfspi)
+{
+	return cfspi->rx_cpck_len;
+}
+#else
+#define SPI_DATA_POS 0
+static inline int forward_to_spi_cmd(struct cfspi *cfspi)
+{
+	return 0;
+}
+#endif
+
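+/* SPI alignment settings, also exposed as driver sysfs attributes. */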
+int spi_frm_align = 2;
+int spi_up_head_align = 1;
+int spi_up_tail_align;
+int spi_down_head_align = 3;
+int spi_down_tail_align = 1;
+
+#ifdef CONFIG_DEBUG_FS
+static inline void debugfs_store_prev(struct cfspi *cfspi)
+{
+	/* Store previous command for debugging reasons. */
+	cfspi->pcmd = cfspi->cmd;
+	/* Store previous transfer. */
+	cfspi->tx_ppck_len = cfspi->tx_cpck_len;
+	cfspi->rx_ppck_len = cfspi->rx_cpck_len;
+}
+#else
+static inline void debugfs_store_prev(struct cfspi *cfspi)
+{
+}
+#endif
+
+void cfspi_xfer(struct work_struct *work)
+{
+	struct cfspi *cfspi;
+	u8 *ptr = NULL;
+	unsigned long flags;
+	int ret;
+	cfspi = container_of(work, struct cfspi, work);
+
+	/* Initialize state. */
+	cfspi->cmd = SPI_CMD_EOT;
+
+	for (;;) {
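+		/*
+		 * Each iteration handles one SPI burst: copy the frame
+		 * committed in the previous burst, prepend the indication
+		 * header, run the DMA transfer, deliver any received CAIF
+		 * frames, and finally read the master's next command and
+		 * length.
+		 */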
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING);
+
+		/* Wait for master talk or transmit event. */
+		wait_event_interruptible(cfspi->wait,
+				 test_bit(SPI_XFER, &cfspi->state) ||
+				 test_bit(SPI_TERMINATE, &cfspi->state));
+
+		if (test_bit(SPI_TERMINATE, &cfspi->state))
+			return;
+
+#if CFSPI_DBG_PREFILL
+		/* Prefill buffers for easier debugging. */
+		memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN);
+		memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN);
+#endif	/* CFSPI_DBG_PREFILL */
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE);
+
+		/* Check whether we have a committed frame. */
+		if (cfspi->tx_cpck_len) {
+			int len;
+
+			cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT);
+
+			/* Copy committed SPI frames after the SPI indication. */
+			ptr = (u8 *) cfspi->xfer.va_tx;
+			ptr += SPI_IND_SZ;
+			len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len);
+			WARN_ON(len != cfspi->tx_cpck_len);
+		}
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT);
+
+		/* Get length of next frame to commit. */
+		cfspi->tx_npck_len = cfspi_xmitlen(cfspi);
+
+		WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN);
+
+		/*
+		 * Add indication and length at the beginning of the frame,
+		 * using little endian.
+		 */
+		ptr = (u8 *) cfspi->xfer.va_tx;
+		*ptr++ = SPI_CMD_IND;
+		*ptr++ = (SPI_CMD_IND  & 0xFF00) >> 8;
+		*ptr++ = cfspi->tx_npck_len & 0x00FF;
+		*ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8;
+
+		/* Calculate length of DMAs. */
+		cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ;
+		cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ;
+
+		/* Add SPI TX frame alignment padding, if necessary. */
+		if (cfspi->tx_cpck_len &&
+			(cfspi->xfer.tx_dma_len % spi_frm_align)) {
+
+			cfspi->xfer.tx_dma_len += spi_frm_align -
+			    (cfspi->xfer.tx_dma_len % spi_frm_align);
+		}
+
+		/* Add SPI RX frame alignment padding, if necessary. */
+		if (cfspi->rx_cpck_len &&
+			(cfspi->xfer.rx_dma_len % spi_frm_align)) {
+
+			cfspi->xfer.rx_dma_len += spi_frm_align -
+			    (cfspi->xfer.rx_dma_len % spi_frm_align);
+		}
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER);
+
+		/* Start transfer. */
+		ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev);
+		WARN_ON(ret);
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE);
+
+		/*
+		 * TODO: We might be able to make an assumption if this is the
+		 * first loop. Make sure that minimum toggle time is respected.
+		 */
+		udelay(MIN_TRANSITION_TIME_USEC);
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE);
+
+		/* Signal that we are ready to receive data. */
+		cfspi->dev->sig_xfer(true, cfspi->dev);
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE);
+
+		/* Wait for transfer completion. */
+		wait_for_completion(&cfspi->comp);
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE);
+
+		if (cfspi->cmd == SPI_CMD_EOT) {
+			/*
+			 * Clear the master talk bit. A transfer is always at
+			 * least two bursts.
+			 */
+			clear_bit(SPI_SS_ON, &cfspi->state);
+		}
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE);
+
+		/* Make sure that the minimum toggle time is respected. */
+		if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len,
+					cfspi->dev->clk_mhz) <
+			MIN_TRANSITION_TIME_USEC) {
+
+			udelay(MIN_TRANSITION_TIME_USEC -
+				SPI_XFER_TIME_USEC
+				(cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz));
+		}
+
+		cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE);
+
+		/* De-assert transfer signal. */
+		cfspi->dev->sig_xfer(false, cfspi->dev);
+
+		/* Check whether we received a CAIF packet. */
+		if (cfspi->rx_cpck_len) {
+			int len;
+
+			cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT);
+
+			/* Parse SPI frame. */
+			ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS));
+
+			len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len);
+			WARN_ON(len != cfspi->rx_cpck_len);
+		}
+
+		/* Check the next SPI command and length. */
+		ptr = (u8 *) cfspi->xfer.va_rx;
+
+		ptr += forward_to_spi_cmd(cfspi);
+
+		cfspi->cmd = *ptr++;
+		cfspi->cmd |= ((*ptr++) << 8) & 0xFF00;
+		cfspi->rx_npck_len = *ptr++;
+		cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00;
+
+		WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN);
+		WARN_ON(cfspi->cmd > SPI_CMD_EOT);
+
+		debugfs_store_prev(cfspi);
+
+		/* Check whether the master issued an EOT command. */
+		if (cfspi->cmd == SPI_CMD_EOT) {
+			/* Reset state. */
+			cfspi->tx_cpck_len = 0;
+			cfspi->rx_cpck_len = 0;
+		} else {
+			/* Update state. */
+			cfspi->tx_cpck_len = cfspi->tx_npck_len;
+			cfspi->rx_cpck_len = cfspi->rx_npck_len;
+		}
+
+		/*
+		 * Check whether we need to clear the xfer bit.
+		 * The spin lock synchronizes against packet insertion;
+		 * there is no atomic test-and-clear across different bits.
+		 */
+		spin_lock_irqsave(&cfspi->lock, flags);
+		if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi)
+			&& !test_bit(SPI_SS_ON, &cfspi->state))
+			clear_bit(SPI_XFER, &cfspi->state);
+
+		spin_unlock_irqrestore(&cfspi->lock, flags);
+	}
+}
+
+struct platform_driver cfspi_spi_driver = {
+	.probe = cfspi_spi_probe,
+	.remove = cfspi_spi_remove,
+	.driver = {
+		   .name = "cfspi_sspi",
+		   .owner = THIS_MODULE,
+		   },
+};
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 4ff9664..b43e9f5 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -227,7 +227,7 @@
 		u16 time;			/* + 0x7c     0x3e */
 	} tx;
 	_MSCAN_RESERVED_(32, 2);		/* + 0x7e          */
-} __attribute__ ((packed));
+} __packed;
 
 #undef _MSCAN_RESERVED_
 #define MSCAN_REGION 	sizeof(struct mscan)
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 1fc0871..e75f1a8 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -197,7 +197,7 @@
 };
 
 /* Main message type used between library and application */
-struct __attribute__ ((packed)) ems_cpc_msg {
+struct __packed ems_cpc_msg {
 	u8 type;	/* type of message */
 	u8 length;	/* length of data within union 'msg' */
 	u8 msgid;	/* confirmation handle */
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 036b2df..092f31a 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -286,7 +286,6 @@
 	unsigned int            clock_mc3;
 	unsigned int            clock_mc4;
 	unsigned int            espi_nports;
-	unsigned int            clock_cspi;
 	unsigned int            clock_elmer0;
 	unsigned char           mdio_mdien;
 	unsigned char           mdio_mdiinv;
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 53bde15..599d178 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -185,9 +185,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_CHELSIO_T1_COUGAR
-#include "cspi.h"
-#endif
 #ifdef CONFIG_CHELSIO_T1_1G
 #include "fpga_defs.h"
 
@@ -280,7 +277,7 @@
 	t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
 }
 
-#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR)
+#if defined(CONFIG_CHELSIO_T1_1G)
 /*
  * Elmer MI1 MDIO read/write operations.
  */
@@ -317,7 +314,7 @@
 	return 0;
 }
 
-#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR)
+#if defined(CONFIG_CHELSIO_T1_1G)
 static const struct mdio_ops mi1_mdio_ops = {
 	.init = mi1_mdio_init,
 	.read = mi1_mdio_read,
@@ -752,31 +749,6 @@
 					 mod_detect ? "removed" : "inserted");
 		}
 		break;
-#ifdef CONFIG_CHELSIO_T1_COUGAR
-	case CHBT_BOARD_COUGAR:
-		if (adapter->params.nports == 1) {
-			if (cause & ELMER0_GP_BIT1) {         /* Vitesse MAC */
-				struct cmac *mac = adapter->port[0].mac;
-				mac->ops->interrupt_handler(mac);
-			}
-			if (cause & ELMER0_GP_BIT5) {     /* XPAK MOD_DETECT */
-			}
-		} else {
-			int i, port_bit;
-
-			for_each_port(adapter, i) {
-				port_bit = i ? i + 1 : 0;
-				if (!(cause & (1 << port_bit)))
-					continue;
-
-				phy = adapter->port[i].phy;
-				phy_cause = phy->ops->interrupt_handler(phy);
-				if (phy_cause & cphy_cause_link_change)
-					t1_link_changed(adapter, i);
-			}
-		}
-		break;
-#endif
 	}
 	t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
 	return 0;
@@ -955,7 +927,6 @@
 	case CHBT_BOARD_N110:
 	case CHBT_BOARD_N210:
 	case CHBT_BOARD_CHT210:
-	case CHBT_BOARD_COUGAR:
 		t1_tpi_par(adapter, 0xf);
 		t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
 		break;
@@ -1004,10 +975,6 @@
 		       adapter->regs + A_MC5_CONFIG);
 	}
 
-#ifdef CONFIG_CHELSIO_T1_COUGAR
-	if (adapter->cspi && t1_cspi_init(adapter->cspi))
-		goto out_err;
-#endif
 	if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
 					  bi->espi_nports))
 		goto out_err;
@@ -1061,10 +1028,6 @@
 		t1_tp_destroy(adapter->tp);
 	if (adapter->espi)
 		t1_espi_destroy(adapter->espi);
-#ifdef CONFIG_CHELSIO_T1_COUGAR
-	if (adapter->cspi)
-		t1_cspi_destroy(adapter->cspi);
-#endif
 }
 
 static void __devinit init_link_config(struct link_config *lc,
@@ -1084,14 +1047,6 @@
 	}
 }
 
-#ifdef CONFIG_CHELSIO_T1_COUGAR
-	if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) {
-		pr_err("%s: CSPI initialization failed\n",
-		       adapter->name);
-		goto error;
-	}
-#endif
-
 /*
  * Allocate and initialize the data structures that hold the SW state of
  * the Terminator HW modules.
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 8047126..5ecf0bc 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -257,7 +257,7 @@
 {
 	u32 i;
 
-	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
+	for (i = 0; i < cp->max_cid_space; i++) {
 		if (cp->ctx_tbl[i].cid == cid) {
 			*l5_cid = i;
 			return 0;
@@ -804,7 +804,7 @@
 	cnic_free_dma(dev, &cp->conn_buf_info);
 	cnic_free_dma(dev, &cp->kwq_info);
 	cnic_free_dma(dev, &cp->kwq_16_data_info);
-	cnic_free_dma(dev, &cp->kcq_info);
+	cnic_free_dma(dev, &cp->kcq1.dma);
 	kfree(cp->iscsi_tbl);
 	cp->iscsi_tbl = NULL;
 	kfree(cp->ctx_tbl);
@@ -863,6 +863,37 @@
 	return 0;
 }
 
+static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
+{
+	int err, i, is_bnx2 = 0;
+	struct kcqe **kcq;
+
+	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
+		is_bnx2 = 1;
+
+	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
+	if (err)
+		return err;
+
+	kcq = (struct kcqe **) info->dma.pg_arr;
+	info->kcq = kcq;
+
+	if (is_bnx2)
+		return 0;
+
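+	/*
+	 * bnx2x only: chain the KCQ pages into a ring by pointing the last
+	 * entry of each page at the DMA address of the next page.
+	 */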
+	for (i = 0; i < KCQ_PAGE_CNT; i++) {
+		struct bnx2x_bd_chain_next *next =
+			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
+		int j = i + 1;
+
+		if (j >= KCQ_PAGE_CNT)
+			j = 0;
+		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
+		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
+	}
+	return 0;
+}
+
 static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -954,10 +985,9 @@
 		goto error;
 	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
 
-	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
+	ret = cnic_alloc_kcq(dev, &cp->kcq1);
 	if (ret)
 		goto error;
-	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
 
 	ret = cnic_alloc_context(dev);
 	if (ret)
@@ -981,17 +1011,10 @@
 static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
-	struct cnic_eth_dev *ethdev = cp->ethdev;
 	int ctx_blk_size = cp->ethdev->ctx_blk_size;
-	int total_mem, blks, i, cid_space;
+	int total_mem, blks, i;
 
-	if (BNX2X_ISCSI_START_CID < ethdev->starting_cid)
-		return -EINVAL;
-
-	cid_space = MAX_ISCSI_TBL_SZ +
-		    (BNX2X_ISCSI_START_CID - ethdev->starting_cid);
-
-	total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space;
+	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
 	blks = total_mem / ctx_blk_size;
 	if (total_mem % ctx_blk_size)
 		blks++;
@@ -1035,16 +1058,27 @@
 static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 start_cid = ethdev->starting_cid;
 	int i, j, n, ret, pages;
 	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
 
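+	/*
+	 * If the adapter's starting CID is below the iSCSI base CID, grow
+	 * the CID space so the context table also covers the range between
+	 * the two.
+	 */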
+	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
+	cp->iscsi_start_cid = start_cid;
+	if (start_cid < BNX2X_ISCSI_START_CID) {
+		u32 delta = BNX2X_ISCSI_START_CID - start_cid;
+
+		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
+		cp->max_cid_space += delta;
+	}
+
 	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
 				GFP_KERNEL);
 	if (!cp->iscsi_tbl)
 		goto error;
 
 	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
-				  MAX_CNIC_L5_CONTEXT, GFP_KERNEL);
+				cp->max_cid_space, GFP_KERNEL);
 	if (!cp->ctx_tbl)
 		goto error;
 
@@ -1053,7 +1087,7 @@
 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
 	}
 
-	pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) /
+	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
 		PAGE_SIZE;
 
 	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
@@ -1061,7 +1095,7 @@
 		return -ENOMEM;
 
 	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
-	for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) {
+	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
 		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
 
 		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
@@ -1072,22 +1106,9 @@
 			j++;
 	}
 
-	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0);
+	ret = cnic_alloc_kcq(dev, &cp->kcq1);
 	if (ret)
 		goto error;
-	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
-
-	for (i = 0; i < KCQ_PAGE_CNT; i++) {
-		struct bnx2x_bd_chain_next *next =
-			(struct bnx2x_bd_chain_next *)
-			&cp->kcq[i][MAX_KCQE_CNT];
-		int j = i + 1;
-
-		if (j >= KCQ_PAGE_CNT)
-			j = 0;
-		next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32;
-		next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff;
-	}
 
 	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
 			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
@@ -2120,18 +2141,20 @@
 	return idx;
 }
 
-static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
+static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
 {
 	struct cnic_local *cp = dev->cnic_priv;
-	u16 i, ri, last;
+	u16 i, ri, hw_prod, last;
 	struct kcqe *kcqe;
 	int kcqe_cnt = 0, last_cnt = 0;
 
-	i = ri = last = *sw_prod;
+	i = ri = last = info->sw_prod_idx;
 	ri &= MAX_KCQ_IDX;
+	hw_prod = *info->hw_prod_idx_ptr;
+	hw_prod = cp->hw_idx(hw_prod);
 
 	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
-		kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
+		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
 		cp->completed_kcq[kcqe_cnt++] = kcqe;
 		i = cp->next_idx(i);
 		ri = i & MAX_KCQ_IDX;
@@ -2141,7 +2164,7 @@
 		}
 	}
 
-	*sw_prod = last;
+	info->sw_prod_idx = last;
 	return last_cnt;
 }
 
@@ -2184,6 +2207,9 @@
 	u16 tx_cons = *cp->tx_cons_ptr;
 	int comp = 0;
 
+	if (!test_bit(CNIC_F_CNIC_UP, &cp->dev->flags))
+		return;
+
 	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
 		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
 			comp = cnic_l2_completion(cp);
@@ -2197,103 +2223,79 @@
 		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
 }
 
-static int cnic_service_bnx2(void *data, void *status_blk)
+static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
 {
-	struct cnic_dev *dev = data;
-	struct status_block *sblk = status_blk;
 	struct cnic_local *cp = dev->cnic_priv;
-	u32 status_idx = sblk->status_idx;
-	u16 hw_prod, sw_prod;
+	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
 	int kcqe_cnt;
 
-	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
-		return status_idx;
-
 	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
 
-	hw_prod = sblk->status_completion_producer_index;
-	sw_prod = cp->kcq_prod_idx;
-	while (sw_prod != hw_prod) {
-		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
-		if (kcqe_cnt == 0)
-			goto done;
+	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
 
 		service_kcqes(dev, kcqe_cnt);
 
 		/* Tell compiler that status_blk fields can change. */
 		barrier();
-		if (status_idx != sblk->status_idx) {
-			status_idx = sblk->status_idx;
+		if (status_idx != *cp->kcq1.status_idx_ptr) {
+			status_idx = (u16) *cp->kcq1.status_idx_ptr;
 			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
-			hw_prod = sblk->status_completion_producer_index;
 		} else
 			break;
 	}
 
-done:
-	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
-
-	cp->kcq_prod_idx = sw_prod;
+	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
 
 	cnic_chk_pkt_rings(cp);
+
 	return status_idx;
 }
 
+static int cnic_service_bnx2(void *data, void *status_blk)
+{
+	struct cnic_dev *dev = data;
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 status_idx = *cp->kcq1.status_idx_ptr;
+
+	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		return status_idx;
+
+	return cnic_service_bnx2_queues(dev);
+}
+
 static void cnic_service_bnx2_msix(unsigned long data)
 {
 	struct cnic_dev *dev = (struct cnic_dev *) data;
 	struct cnic_local *cp = dev->cnic_priv;
-	struct status_block_msix *status_blk = cp->status_blk.bnx2;
-	u32 status_idx = status_blk->status_idx;
-	u16 hw_prod, sw_prod;
-	int kcqe_cnt;
 
-	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+	cp->last_status_idx = cnic_service_bnx2_queues(dev);
 
-	hw_prod = status_blk->status_completion_producer_index;
-	sw_prod = cp->kcq_prod_idx;
-	while (sw_prod != hw_prod) {
-		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
-		if (kcqe_cnt == 0)
-			goto done;
-
-		service_kcqes(dev, kcqe_cnt);
-
-		/* Tell compiler that status_blk fields can change. */
-		barrier();
-		if (status_idx != status_blk->status_idx) {
-			status_idx = status_blk->status_idx;
-			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
-			hw_prod = status_blk->status_completion_producer_index;
-		} else
-			break;
-	}
-
-done:
-	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
-	cp->kcq_prod_idx = sw_prod;
-
-	cnic_chk_pkt_rings(cp);
-
-	cp->last_status_idx = status_idx;
 	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
 		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
 }
 
+static void cnic_doirq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
+
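+	/*
+	 * Prefetch the status block and the next KCQ entry before deferring
+	 * the event processing to the tasklet.
+	 */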
+	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+		prefetch(cp->status_blk.gen);
+		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+
+		tasklet_schedule(&cp->cnic_irq_task);
+	}
+}
+
 static irqreturn_t cnic_irq(int irq, void *dev_instance)
 {
 	struct cnic_dev *dev = dev_instance;
 	struct cnic_local *cp = dev->cnic_priv;
-	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
 
 	if (cp->ack_int)
 		cp->ack_int(dev);
 
-	prefetch(cp->status_blk.gen);
-	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
-
-	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
-		tasklet_schedule(&cp->cnic_irq_task);
+	cnic_doirq(dev);
 
 	return IRQ_HANDLED;
 }
@@ -2324,60 +2326,50 @@
 			   IGU_INT_DISABLE, 0);
 }
 
-static void cnic_service_bnx2x_bh(unsigned long data)
+static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
 {
-	struct cnic_dev *dev = (struct cnic_dev *) data;
-	struct cnic_local *cp = dev->cnic_priv;
-	u16 hw_prod, sw_prod;
-	struct cstorm_status_block_c *sblk =
-		&cp->status_blk.bnx2x->c_status_block;
-	u32 status_idx = sblk->status_block_index;
+	u32 last_status = *info->status_idx_ptr;
 	int kcqe_cnt;
 
-	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
-		return;
-
-	hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
-	hw_prod = cp->hw_idx(hw_prod);
-	sw_prod = cp->kcq_prod_idx;
-	while (sw_prod != hw_prod) {
-		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
-		if (kcqe_cnt == 0)
-			goto done;
+	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
 
 		service_kcqes(dev, kcqe_cnt);
 
 		/* Tell compiler that sblk fields can change. */
 		barrier();
-		if (status_idx == sblk->status_block_index)
+		if (last_status == *info->status_idx_ptr)
 			break;
 
-		status_idx = sblk->status_block_index;
-		hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
-		hw_prod = cp->hw_idx(hw_prod);
+		last_status = *info->status_idx_ptr;
 	}
+	return last_status;
+}
 
-done:
-	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX);
+static void cnic_service_bnx2x_bh(unsigned long data)
+{
+	struct cnic_dev *dev = (struct cnic_dev *) data;
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 status_idx;
+
+	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		return;
+
+	status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+
+	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
 	cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
 			   status_idx, IGU_INT_ENABLE, 1);
-
-	cp->kcq_prod_idx = sw_prod;
 }
 
 static int cnic_service_bnx2x(void *data, void *status_blk)
 {
 	struct cnic_dev *dev = data;
 	struct cnic_local *cp = dev->cnic_priv;
-	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
 
-	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
-		prefetch(cp->status_blk.bnx2x);
-		prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		cnic_doirq(dev);
 
-		tasklet_schedule(&cp->cnic_irq_task);
-		cnic_chk_pkt_rings(cp);
-	}
+	cnic_chk_pkt_rings(cp);
 
 	return 0;
 }
@@ -2824,7 +2816,7 @@
 
 	err = ip_route_output_key(&init_net, &rt, &fl);
 	if (!err)
-		*dst = &rt->u.dst;
+		*dst = &rt->dst;
 	return err;
 #else
 	return -ENETUNREACH;
@@ -2996,7 +2988,7 @@
 static int cnic_cm_abort(struct cnic_sock *csk)
 {
 	struct cnic_local *cp = csk->dev->cnic_priv;
-	u32 opcode;
+	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
 
 	if (!cnic_in_use(csk))
 		return -EINVAL;
@@ -3008,12 +3000,9 @@
 	 * connect was not successful.
 	 */
 
-	csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
-	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
-		opcode = csk->state;
-	else
-		opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
 	cp->close_conn(csk, opcode);
+	if (csk->state != opcode)
+		return -EALREADY;
 
 	return 0;
 }
@@ -3026,6 +3015,8 @@
 	if (cnic_close_prep(csk)) {
 		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
 		return cnic_cm_close_req(csk);
+	} else {
+		return -EALREADY;
 	}
 	return 0;
 }
@@ -3141,12 +3132,6 @@
 		break;
 
 	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
-		if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
-			cnic_cm_upcall(cp, csk, opcode);
-			break;
-		} else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
-			csk->state = opcode;
-		/* fall through */
 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
 	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
@@ -3202,19 +3187,22 @@
 
 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
 {
-	if ((opcode == csk->state) ||
-	    (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
-	     csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
-		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
-			return 1;
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		/* Unsolicited RESET_COMP or RESET_RECEIVED */
+		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
+		csk->state = opcode;
 	}
-	/* 57710+ only  workaround to handle unsolicited RESET_COMP
-	 * which will be treated like a RESET RCVD notification
-	 * which triggers the clean up procedure
+
+	/* 1. If event opcode matches the expected event in csk->state
+	 * 2. If the expected event is CLOSE_COMP, we accept any event
+	 * 3. If the expected event is 0, meaning the connection was
+	 *    never established, we accept the opcode from cm_abort.
 	 */
-	else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
+	if (opcode == csk->state || csk->state == 0 ||
+	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
 		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
-			csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
+			if (csk->state == 0)
+				csk->state = opcode;
 			return 1;
 		}
 	}
@@ -3226,8 +3214,14 @@
 	struct cnic_dev *dev = csk->dev;
 	struct cnic_local *cp = dev->cnic_priv;
 
+	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
+		cnic_cm_upcall(cp, csk, opcode);
+		return;
+	}
+
 	clear_bit(SK_F_CONNECT_START, &csk->flags);
 	cnic_close_conn(csk);
+	csk->state = opcode;
 	cnic_cm_upcall(cp, csk, opcode);
 }
 
@@ -3257,8 +3251,12 @@
 	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
-		if (cnic_ready_to_close(csk, opcode))
-			cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
+		if (cnic_ready_to_close(csk, opcode)) {
+			if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
+			else
+				close_complete = 1;
+		}
 		break;
 	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
 		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
@@ -3694,7 +3692,7 @@
 	struct cnic_local *cp = dev->cnic_priv;
 	struct cnic_eth_dev *ethdev = cp->ethdev;
 	struct status_block *sblk = cp->status_blk.gen;
-	u32 val;
+	u32 val, kcq_cid_addr, kwq_cid_addr;
 	int err;
 
 	cnic_set_bnx2_mac(dev);
@@ -3719,7 +3717,7 @@
 	cnic_init_context(dev, KWQ_CID);
 	cnic_init_context(dev, KCQ_CID);
 
-	cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
 	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
 
 	cp->max_kwq_idx = MAX_KWQ_IDX;
@@ -3735,50 +3733,59 @@
 	/* Initialize the kernel work queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
 	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
-	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
 
 	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
-	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
 
 	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
-	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
 
 	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
-	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
 
 	val = (u32) cp->kwq_info.pgtbl_map;
-	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
 
-	cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
-	cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
 
-	cp->kcq_prod_idx = 0;
+	cp->kcq1.sw_prod_idx = 0;
+	cp->kcq1.hw_prod_idx_ptr =
+		(u16 *) &sblk->status_completion_producer_index;
+
+	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
 
 	/* Initialize the kernel complete queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
 	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
-	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
 
 	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
-	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
 
 	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
-	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
 
-	val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
-	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
 
-	val = (u32) cp->kcq_info.pgtbl_map;
-	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+	val = (u32) cp->kcq1.dma.pgtbl_map;
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
 
 	cp->int_num = 0;
 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *msblk = cp->status_blk.bnx2;
 		u32 sb_id = cp->status_blk_num;
 		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
 
+		cp->kcq1.hw_prod_idx_ptr =
+			(u16 *) &msblk->status_completion_producer_index;
+		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
+		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
 		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
-		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
-		cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
 	}
 
 	/* Enable Commnad Scheduler notification when we write to the
@@ -4123,33 +4130,39 @@
 	u8 sb_id = cp->status_blk_num;
 
 	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
-			       BNX2X_ISCSI_START_CID);
+			       cp->iscsi_start_cid);
 
 	if (ret)
 		return -ENOMEM;
 
-	cp->kcq_io_addr = BAR_CSTRORM_INTMEM +
+	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
 			  CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
-	cp->kcq_prod_idx = 0;
+	cp->kcq1.sw_prod_idx = 0;
+
+	cp->kcq1.hw_prod_idx_ptr =
+		&cp->status_blk.bnx2x->c_status_block.index_values[
+			HC_INDEX_C_ISCSI_EQ_CONS];
+	cp->kcq1.status_idx_ptr =
+		&cp->status_blk.bnx2x->c_status_block.status_block_index;
 
 	cnic_get_bnx2x_iscsi_info(dev);
 
 	/* Only 1 EQ */
-	CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX);
+	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
 		CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
-		cp->kcq_info.pg_map_arr[1] & 0xffffffff);
+		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
-		(u64) cp->kcq_info.pg_map_arr[1] >> 32);
+		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
 		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
-		cp->kcq_info.pg_map_arr[0] & 0xffffffff);
+		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
 		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
-		(u64) cp->kcq_info.pg_map_arr[0] >> 32);
+		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
@@ -4377,7 +4390,7 @@
 		  0);
 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
 		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0);
-	CNIC_WR16(dev, cp->kcq_io_addr, 0);
+	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
 	cnic_free_resc(dev);
 }
 
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 08b1235..275c361 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -169,6 +169,16 @@
 	} proto;
 };
 
+struct kcq_info {
+	struct cnic_dma	dma;
+	struct kcqe	**kcq;
+
+	u16		*hw_prod_idx_ptr;
+	u16		sw_prod_idx;
+	u16		*status_idx_ptr;
+	u32		io_addr;
+};
+
 struct cnic_local {
 
 	spinlock_t cnic_ulp_lock;
@@ -202,9 +212,6 @@
 	u16		rx_cons;
 	u16		tx_cons;
 
-	u32 kwq_cid_addr;
-	u32 kcq_cid_addr;
-
 	struct cnic_dma		kwq_info;
 	struct kwqe		**kwq;
 
@@ -218,11 +225,7 @@
 	u16		*kwq_con_idx_ptr;
 	u16		kwq_con_idx;
 
-	struct cnic_dma	kcq_info;
-	struct kcqe	**kcq;
-
-	u16		kcq_prod_idx;
-	u32		kcq_io_addr;
+	struct kcq_info	kcq1;
 
 	union {
 		void				*gen;
@@ -248,8 +251,10 @@
 	struct cnic_iscsi	*iscsi_tbl;
 	struct cnic_context	*ctx_tbl;
 	struct cnic_id_tbl	cid_tbl;
-	int			max_iscsi_conn;
 	atomic_t		iscsi_conn;
+	u32			iscsi_start_cid;
+
+	u32			max_cid_space;
 
 	/* per connection parameters */
 	int			num_iscsi_tasks;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 0c55177..344c842 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION	"2.1.2"
-#define CNIC_MODULE_RELDATE	"May 26, 2010"
+#define CNIC_MODULE_VERSION	"2.1.3"
+#define CNIC_MODULE_RELDATE	"June 24, 2010"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 23786ee..38de1a4 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -964,7 +964,7 @@
 	struct sk_buff *skb;
 
 	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
-	if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) {
+	if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
 		if (netif_msg_drv(priv))
 			printk(KERN_ERR "%s: failed to request registers\n",
 			       dev->name);
@@ -972,7 +972,7 @@
 		goto fail_reserve;
 	}
 
-	priv->regs = ioremap(mem->start, mem->end - mem->start);
+	priv->regs = ioremap(mem->start, resource_size(mem));
 	if (!priv->regs) {
 		if (netif_msg_drv(priv))
 			printk(KERN_ERR "%s: failed to remap registers\n",
@@ -1049,7 +1049,7 @@
 	iounmap(priv->regs);
 
 fail_remap:
-	release_mem_region(mem->start, mem->end - mem->start);
+	release_mem_region(mem->start, resource_size(mem));
 
 fail_reserve:
 	return res;
@@ -1077,7 +1077,7 @@
 	free_irq(dev->irq, dev);
 	iounmap(priv->regs);
 	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
-	release_mem_region(mem->start, mem->end - mem->start);
+	release_mem_region(mem->start, resource_size(mem));
 	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
 	for (i = 0; i < priv->ring_size; i++) {
 		if (priv->rx_head[i].skb) {
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 2ccb9f1..e3a7dca 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -218,7 +218,6 @@
 
 /* Information that need to be kept for each board. */
 struct net_local {
-	struct net_device_stats stats;
 	int chip_type;		/* one of: CS8900, CS8920, CS8920M */
 	char chip_revision;	/* revision letter of the chip ('A'...) */
 	int send_cmd;		/* the proper send command: TX_NOW, TX_AFTER_381, or TX_AFTER_ALL */
@@ -257,7 +256,7 @@
 static int get_eeprom_data(struct net_device *dev, int off, int len, int *buffer);
 static int get_eeprom_cksum(int off, int len, int *buffer);
 static int set_mac_address(struct net_device *dev, void *addr);
-static void count_rx_errors(int status, struct net_local *lp);
+static void count_rx_errors(int status, struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void net_poll_controller(struct net_device *dev);
 #endif
@@ -983,7 +982,7 @@
 			dev->name, (unsigned long)bp, status, length);
 	}
 	if ((status & RX_OK) == 0) {
-		count_rx_errors(status, lp);
+		count_rx_errors(status, dev);
 		goto skip_this_frame;
 	}
 
@@ -992,7 +991,7 @@
 	if (skb == NULL) {
 		if (net_debug)	/* I don't think we want to do this to a stressed system */
 			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
-		lp->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 
 		/* AKPM: advance bp to the next frame */
 skip_this_frame:
@@ -1022,8 +1021,8 @@
 	}
         skb->protocol=eth_type_trans(skb,dev);
 	netif_rx(skb);
-	lp->stats.rx_packets++;
-	lp->stats.rx_bytes += length;
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += length;
 }
 
 #endif	/* ALLOW_DMA */
@@ -1552,7 +1551,7 @@
 	/* Write the contents of the packet */
 	writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
 	spin_unlock_irqrestore(&lp->lock, flags);
-	lp->stats.tx_bytes += skb->len;
+	dev->stats.tx_bytes += skb->len;
 	dev_kfree_skb (skb);
 
 	/*
@@ -1598,18 +1597,23 @@
 			net_rx(dev);
 			break;
 		case ISQ_TRANSMITTER_EVENT:
-			lp->stats.tx_packets++;
+			dev->stats.tx_packets++;
 			netif_wake_queue(dev);	/* Inform upper layers. */
 			if ((status & (	TX_OK |
 					TX_LOST_CRS |
 					TX_SQE_ERROR |
 					TX_LATE_COL |
 					TX_16_COL)) != TX_OK) {
-				if ((status & TX_OK) == 0) lp->stats.tx_errors++;
-				if (status & TX_LOST_CRS) lp->stats.tx_carrier_errors++;
-				if (status & TX_SQE_ERROR) lp->stats.tx_heartbeat_errors++;
-				if (status & TX_LATE_COL) lp->stats.tx_window_errors++;
-				if (status & TX_16_COL) lp->stats.tx_aborted_errors++;
+				if ((status & TX_OK) == 0)
+					dev->stats.tx_errors++;
+				if (status & TX_LOST_CRS)
+					dev->stats.tx_carrier_errors++;
+				if (status & TX_SQE_ERROR)
+					dev->stats.tx_heartbeat_errors++;
+				if (status & TX_LATE_COL)
+					dev->stats.tx_window_errors++;
+				if (status & TX_16_COL)
+					dev->stats.tx_aborted_errors++;
 			}
 			break;
 		case ISQ_BUFFER_EVENT:
@@ -1651,10 +1655,10 @@
 #endif
 			break;
 		case ISQ_RX_MISS_EVENT:
-			lp->stats.rx_missed_errors += (status >>6);
+			dev->stats.rx_missed_errors += (status >> 6);
 			break;
 		case ISQ_TX_COL_EVENT:
-			lp->stats.collisions += (status >>6);
+			dev->stats.collisions += (status >> 6);
 			break;
 		}
 	}
@@ -1662,22 +1666,24 @@
 }
 
 static void
-count_rx_errors(int status, struct net_local *lp)
+count_rx_errors(int status, struct net_device *dev)
 {
-	lp->stats.rx_errors++;
-	if (status & RX_RUNT) lp->stats.rx_length_errors++;
-	if (status & RX_EXTRA_DATA) lp->stats.rx_length_errors++;
-	if (status & RX_CRC_ERROR) if (!(status & (RX_EXTRA_DATA|RX_RUNT)))
+	dev->stats.rx_errors++;
+	if (status & RX_RUNT)
+		dev->stats.rx_length_errors++;
+	if (status & RX_EXTRA_DATA)
+		dev->stats.rx_length_errors++;
+	if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA|RX_RUNT)))
 		/* per str 172 */
-		lp->stats.rx_crc_errors++;
-	if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++;
+		dev->stats.rx_crc_errors++;
+	if (status & RX_DRIBBLE)
+		dev->stats.rx_frame_errors++;
 }
 
 /* We have a good packet(s), get it/them out of the buffers. */
 static void
 net_rx(struct net_device *dev)
 {
-	struct net_local *lp = netdev_priv(dev);
 	struct sk_buff *skb;
 	int status, length;
 
@@ -1686,7 +1692,7 @@
 	length = readword(ioaddr, RX_FRAME_PORT);
 
 	if ((status & RX_OK) == 0) {
-		count_rx_errors(status, lp);
+		count_rx_errors(status, dev);
 		return;
 	}
 
@@ -1696,7 +1702,7 @@
 #if 0		/* Again, this seems a cruel thing to do */
 		printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
 #endif
-		lp->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 		return;
 	}
 	skb_reserve(skb, 2);	/* longword align L3 header */
@@ -1713,8 +1719,8 @@
 
         skb->protocol=eth_type_trans(skb,dev);
 	netif_rx(skb);
-	lp->stats.rx_packets++;
-	lp->stats.rx_bytes += length;
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += length;
 }
 
 #if ALLOW_DMA
@@ -1765,11 +1771,11 @@
 
 	spin_lock_irqsave(&lp->lock, flags);
 	/* Update the statistics from the device registers. */
-	lp->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
-	lp->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
+	dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
+	dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
 	spin_unlock_irqrestore(&lp->lock, flags);
 
-	return &lp->stats;
+	return &dev->stats;
 }
 
 static void set_multicast_list(struct net_device *dev)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index e3f1b85..066fd5b 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2311,15 +2311,9 @@
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
 		/* Check t.len sanity ? */
-		fw_data = kmalloc(t.len, GFP_KERNEL);
-		if (!fw_data)
-			return -ENOMEM;
-
-		if (copy_from_user
-			(fw_data, useraddr + sizeof(t), t.len)) {
-			kfree(fw_data);
-			return -EFAULT;
-		}
+		fw_data = memdup_user(useraddr + sizeof(t), t.len);
+		if (IS_ERR(fw_data))
+			return PTR_ERR(fw_data);
 
 		ret = t3_load_fw(adapter, fw_data, t.len);
 		kfree(fw_data);
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 9d0bd9d..8bda06e 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -35,10 +35,10 @@
 #define DRV_DESC "Chelsio T3 Network Driver"
 #define DRV_NAME "cxgb3"
 /* Driver version */
-#define DRV_VERSION "1.1.3-ko"
+#define DRV_VERSION "1.1.4-ko"
 
 /* Firmware version */
 #define FW_VERSION_MAJOR 7
-#define FW_VERSION_MINOR 4
+#define FW_VERSION_MINOR 10
 #define FW_VERSION_MICRO 0
 #endif				/* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index dd1770e..62804bb 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -219,6 +219,10 @@
 	struct vpd_params vpd;
 	struct pci_params pci;
 
+	unsigned int sf_size;             /* serial flash size in bytes */
+	unsigned int sf_nsec;             /* # of flash sectors */
+	unsigned int sf_fw_start;         /* start of FW image in flash */
+
 	unsigned int fw_vers;
 	unsigned int tp_vers;
 	u8 api_vers[7];
@@ -305,7 +309,6 @@
 	FULL_INIT_DONE     = (1 << 0),
 	USING_MSI          = (1 << 1),
 	USING_MSIX         = (1 << 2),
-	QUEUES_BOUND       = (1 << 3),
 	FW_OK              = (1 << 4),
 };
 
@@ -646,6 +649,7 @@
 void t4_intr_clear(struct adapter *adapter);
 int t4_slow_intr_handler(struct adapter *adapter);
 
+int t4_wait_dev_ready(struct adapter *adap);
 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
 		  struct link_config *lc);
 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 58045b0..55a720e 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -77,6 +77,76 @@
  */
 #define MAX_SGE_TIMERVAL 200U
 
+#ifdef CONFIG_PCI_IOV
+/*
+ * Virtual Function provisioning constants.  We need two extra Ingress Queues
+ * with Interrupt capability to serve as the VF's Firmware Event Queue and
+ * Forwarded Interrupt Queue (when using MSI mode); neither will have Free
+ * Lists associated with them.  For each Ethernet/Control Egress Queue and
+ * for each Free List, we need an Egress Context.
+ */
+enum {
+	VFRES_NPORTS = 1,		/* # of "ports" per VF */
+	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */
+
+	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
+	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
+	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
+	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
+	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
+	VFRES_TC = 0,			/* PCI-E traffic class */
+	VFRES_NEXACTF = 16,		/* # of exact MPS filters */
+
+	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
+	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
+};
+
+/*
+ * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
+ * static and likely not to be useful in the long run.  We really need to
+ * implement some form of persistent configuration which the firmware
+ * controls.
+ */
+static unsigned int pfvfres_pmask(struct adapter *adapter,
+				  unsigned int pf, unsigned int vf)
+{
+	unsigned int portn, portvec;
+
+	/*
+	 * Give the PF access to all of the ports.
+	 */
+	if (vf == 0)
+		return FW_PFVF_CMD_PMASK_MASK;
+
+	/*
+	 * For VFs, we'll assign them access to the ports based purely on the
+	 * PF.  We assign active ports in order, wrapping around if there are
+	 * fewer active ports than PFs: e.g. active port[pf % nports].
+	 * Unfortunately the adapter's port_info structs haven't been
+	 * initialized yet so we have to compute this.
+	 */
+	if (adapter->params.nports == 0)
+		return 0;
+
+	portn = pf % adapter->params.nports;
+	portvec = adapter->params.portvec;
+	for (;;) {
+		/*
+		 * Isolate the lowest set bit in the port vector.  If we're at
+		 * the port number that we want, return that as the pmask;
+		 * otherwise mask that bit out of the port vector and
+		 * decrement our port number ...
+		 */
+		unsigned int pmask = portvec ^ (portvec & (portvec-1));
+		if (portn == 0)
+			return pmask;
+		portn--;
+		portvec &= ~pmask;
+	}
+	/*NOTREACHED*/
+}
+#endif
+
 enum {
 	MEMWIN0_APERTURE = 65536,
 	MEMWIN0_BASE     = 0x30000,
@@ -216,7 +286,7 @@
 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
 {
 	static const char *mod_str[] = {
-		NULL, "LR", "SR", "ER", "passive DA", "active DA"
+		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
 	};
 
 	const struct net_device *dev = adap->port[port_id];
@@ -224,7 +294,7 @@
 
 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
 		netdev_info(dev, "port module unplugged\n");
-	else
+	else if (pi->mod_type < ARRAY_SIZE(mod_str))
 		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
 }
 
@@ -1234,7 +1304,8 @@
 {
 	unsigned int v = 0;
 
-	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) {
+	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+	    type == FW_PORT_TYPE_BT_XAUI) {
 		v |= SUPPORTED_TP;
 		if (caps & FW_PORT_CAP_SPEED_100M)
 			v |= SUPPORTED_100baseT_Full;
@@ -1250,7 +1321,10 @@
 			v |= SUPPORTED_10000baseKX4_Full;
 	} else if (type == FW_PORT_TYPE_KR)
 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
-	else if (type == FW_PORT_TYPE_FIBER)
+	else if (type == FW_PORT_TYPE_BP_AP)
+		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC;
+	else if (type == FW_PORT_TYPE_FIBER_XFI ||
+		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
 		v |= SUPPORTED_FIBRE;
 
 	if (caps & FW_PORT_CAP_ANEG)
@@ -1276,13 +1350,19 @@
 	const struct port_info *p = netdev_priv(dev);
 
 	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+	    p->port_type == FW_PORT_TYPE_BT_XFI ||
 	    p->port_type == FW_PORT_TYPE_BT_XAUI)
 		cmd->port = PORT_TP;
-	else if (p->port_type == FW_PORT_TYPE_FIBER)
+	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
 		cmd->port = PORT_FIBRE;
-	else if (p->port_type == FW_PORT_TYPE_TWINAX)
-		cmd->port = PORT_DA;
-	else
+	else if (p->port_type == FW_PORT_TYPE_SFP) {
+		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+			cmd->port = PORT_DA;
+		else
+			cmd->port = PORT_FIBRE;
+	} else
 		cmd->port = PORT_OTHER;
 
 	if (p->mdio_addr >= 0) {
@@ -1719,14 +1799,7 @@
 
 static int set_flags(struct net_device *dev, u32 flags)
 {
-	if (flags & ~ETH_FLAG_RXHASH)
-		return -EOPNOTSUPP;
-
-	if (flags & ETH_FLAG_RXHASH)
-		dev->features |= NETIF_F_RXHASH;
-	else
-		dev->features &= ~NETIF_F_RXHASH;
-	return 0;
+	return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
 }
 
 static struct ethtool_ops cxgb_ethtool_ops = {
@@ -2483,6 +2556,7 @@
 	t4_intr_disable(adapter);
 	cancel_work_sync(&adapter->tid_release_task);
 	adapter->tid_release_task_busy = false;
+	adapter->tid_release_head = NULL;
 
 	if (adapter->flags & USING_MSIX) {
 		free_msix_queue_irqs(adapter);
@@ -2511,9 +2585,10 @@
 	}
 
 	dev->real_num_tx_queues = pi->nqsets;
-	link_start(dev);
-	netif_tx_start_all_queues(dev);
-	return 0;
+	err = link_start(dev);
+	if (!err)
+		netif_tx_start_all_queues(dev);
+	return err;
 }
 
 static int cxgb_close(struct net_device *dev)
@@ -2526,12 +2601,12 @@
 	return t4_enable_vi(adapter, 0, pi->viid, false, false);
 }
 
-static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev)
 {
 	struct port_stats stats;
 	struct port_info *p = netdev_priv(dev);
 	struct adapter *adapter = p->adapter;
-	struct net_device_stats *ns = &dev->stats;
+	struct rtnl_link_stats64 *ns = &dev->stats64;
 
 	spin_lock(&adapter->stats_lock);
 	t4_get_port_stats(adapter, p->tx_chan, &stats);
@@ -2674,7 +2749,7 @@
 	.ndo_open             = cxgb_open,
 	.ndo_stop             = cxgb_close,
 	.ndo_start_xmit       = t4_eth_xmit,
-	.ndo_get_stats        = cxgb_get_stats,
+	.ndo_get_stats64      = cxgb_get_stats,
 	.ndo_set_rx_mode      = cxgb_set_rxmode,
 	.ndo_set_mac_address  = cxgb_set_mac_addr,
 	.ndo_validate_addr    = eth_validate_addr,
@@ -2709,6 +2784,65 @@
 		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
 }
 
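+/*
+ * Negotiate device capabilities with the firmware, configure global RSS and
+ * PF resource limits, and apply basic SGE/TP settings.
+ */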
+static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
+{
+	u32 v;
+	int ret;
+
+	/* get device capabilities */
+	memset(c, 0, sizeof(*c));
+	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+			       FW_CMD_REQUEST | FW_CMD_READ);
+	c->retval_len16 = htonl(FW_LEN16(*c));
+	ret = t4_wr_mbox(adap, 0, c, sizeof(*c), c);
+	if (ret < 0)
+		return ret;
+
+	/* select capabilities we'll be using */
+	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+		if (!vf_acls)
+			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+		else
+			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+	} else if (vf_acls) {
+		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
+		return -EINVAL;
+	}
+	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+			       FW_CMD_REQUEST | FW_CMD_WRITE);
+	ret = t4_wr_mbox(adap, 0, c, sizeof(*c), NULL);
+	if (ret < 0)
+		return ret;
+
+	ret = t4_config_glbl_rss(adap, 0,
+				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
+	if (ret < 0)
+		return ret;
+
+	ret = t4_cfg_pfvf(adap, 0, 0, 0, MAX_EGRQ, 64, MAX_INGQ, 0, 0, 4,
+			  0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+	if (ret < 0)
+		return ret;
+
+	t4_sge_init(adap);
+
+	/* get basic stuff going */
+	ret = t4_early_init(adap, 0);
+	if (ret < 0)
+		return ret;
+
+	/* tweak some settings */
+	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
+	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
+	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
+	v = t4_read_reg(adap, TP_PIO_DATA);
+	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+	setup_memwin(adap);
+	return 0;
+}
+
 /*
  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
  */
@@ -2746,43 +2880,6 @@
 	if (ret < 0)
 		goto bye;
 
-	/* get device capabilities */
-	memset(&c, 0, sizeof(c));
-	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
-			      FW_CMD_REQUEST | FW_CMD_READ);
-	c.retval_len16 = htonl(FW_LEN16(c));
-	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
-	if (ret < 0)
-		goto bye;
-
-	/* select capabilities we'll be using */
-	if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
-		if (!vf_acls)
-			c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
-		else
-			c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
-	} else if (vf_acls) {
-		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
-		goto bye;
-	}
-	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
-			      FW_CMD_REQUEST | FW_CMD_WRITE);
-	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
-	if (ret < 0)
-		goto bye;
-
-	ret = t4_config_glbl_rss(adap, 0,
-				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
-				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
-				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
-	if (ret < 0)
-		goto bye;
-
-	ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
-			  FW_CMD_CAP_PF, FW_CMD_CAP_PF);
-	if (ret < 0)
-		goto bye;
-
 	for (v = 0; v < SGE_NTIMERS - 1; v++)
 		adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
 	adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
@@ -2790,17 +2887,20 @@
 	for (v = 1; v < SGE_NCOUNTERS; v++)
 		adap->sge.counter_val[v] = min(intr_cnt[v - 1],
 					       THRESHOLD_3_MASK);
-	t4_sge_init(adap);
-
-	/* get basic stuff going */
-	ret = t4_early_init(adap, 0);
-	if (ret < 0)
-		goto bye;
-
 #define FW_PARAM_DEV(param) \
 	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
 	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
 
+	params[0] = FW_PARAM_DEV(CCLK);
+	ret = t4_query_params(adap, 0, 0, 0, 1, params, val);
+	if (ret < 0)
+		goto bye;
+	adap->params.vpd.cclk = val[0];
+
+	ret = adap_init1(adap, &c);
+	if (ret < 0)
+		goto bye;
+
 #define FW_PARAM_PFVF(param) \
 	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
 	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
@@ -2853,6 +2953,18 @@
 		adap->vres.rq.size = val[3] - val[2] + 1;
 		adap->vres.pbl.start = val[4];
 		adap->vres.pbl.size = val[5] - val[4] + 1;
+
+		params[0] = FW_PARAM_PFVF(SQRQ_START);
+		params[1] = FW_PARAM_PFVF(SQRQ_END);
+		params[2] = FW_PARAM_PFVF(CQ_START);
+		params[3] = FW_PARAM_PFVF(CQ_END);
+		ret = t4_query_params(adap, 0, 0, 0, 4, params, val);
+		if (ret < 0)
+			goto bye;
+		adap->vres.qp.start = val[0];
+		adap->vres.qp.size = val[1] - val[0] + 1;
+		adap->vres.cq.start = val[2];
+		adap->vres.cq.size = val[3] - val[2] + 1;
 	}
 	if (c.iscsicaps) {
 		params[0] = FW_PARAM_PFVF(ISCSI_START);
@@ -2877,13 +2989,41 @@
 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
 		     adap->params.b_wnd);
 
-	/* tweak some settings */
-	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
-	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
-	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
-	v = t4_read_reg(adap, TP_PIO_DATA);
-	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
-	setup_memwin(adap);
+#ifdef CONFIG_PCI_IOV
+	/*
+	 * Provision resource limits for Virtual Functions.  We currently
+	 * grant them all the same static resource limits except for the Port
+	 * Access Rights Mask which we're assigning based on the PF.  All of
+	 * the static provisioning stuff for both the PF and VF really needs
+	 * to be managed in a persistent manner for each device which the
+	 * firmware controls.
+	 */
+	{
+		int pf, vf;
+
+		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
+			if (num_vf[pf] <= 0)
+				continue;
+
+			/* VF numbering starts at 1! */
+			for (vf = 1; vf <= num_vf[pf]; vf++) {
+				ret = t4_cfg_pfvf(adap, 0, pf, vf,
+						  VFRES_NEQ, VFRES_NETHCTRL,
+						  VFRES_NIQFLINT, VFRES_NIQ,
+						  VFRES_TC, VFRES_NVI,
+						  FW_PFVF_CMD_CMASK_MASK,
+						  pfvfres_pmask(adap, pf, vf),
+						  VFRES_NEXACTF,
+						  VFRES_R_CAPS, VFRES_WX_CAPS);
+				if (ret < 0)
+					dev_warn(adap->pdev_dev, "failed to "
+						 "provision pf/vf=%d/%d; "
+						 "err=%d\n", pf, vf, ret);
+			}
+		}
+	}
+#endif
+
 	return 0;
 
 	/*
@@ -2896,6 +3036,108 @@
 	return ret;
 }
 
+/* EEH callbacks */
+
+static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
+					 pci_channel_state_t state)
+{
+	int i;
+	struct adapter *adap = pci_get_drvdata(pdev);
+
+	if (!adap)
+		goto out;
+
+	rtnl_lock();
+	adap->flags &= ~FW_OK;
+	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+
+		netif_device_detach(dev);
+		netif_carrier_off(dev);
+	}
+	if (adap->flags & FULL_INIT_DONE)
+		cxgb_down(adap);
+	rtnl_unlock();
+	pci_disable_device(pdev);
+out:	return state == pci_channel_io_perm_failure ?
+		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
+{
+	int i, ret;
+	struct fw_caps_config_cmd c;
+	struct adapter *adap = pci_get_drvdata(pdev);
+
+	if (!adap) {
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+		return PCI_ERS_RESULT_RECOVERED;
+	}
+
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	if (t4_wait_dev_ready(adap) < 0)
+		return PCI_ERS_RESULT_DISCONNECT;
+	if (t4_fw_hello(adap, 0, 0, MASTER_MUST, NULL))
+		return PCI_ERS_RESULT_DISCONNECT;
+	adap->flags |= FW_OK;
+	if (adap_init1(adap, &c))
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	for_each_port(adap, i) {
+		struct port_info *p = adap2pinfo(adap, i);
+
+		ret = t4_alloc_vi(adap, 0, p->tx_chan, 0, 0, 1, NULL, NULL);
+		if (ret < 0)
+			return PCI_ERS_RESULT_DISCONNECT;
+		p->viid = ret;
+		p->xact_addr_filt = -1;
+	}
+
+	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+		     adap->params.b_wnd);
+	if (cxgb_up(adap))
+		return PCI_ERS_RESULT_DISCONNECT;
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void eeh_resume(struct pci_dev *pdev)
+{
+	int i;
+	struct adapter *adap = pci_get_drvdata(pdev);
+
+	if (!adap)
+		return;
+
+	rtnl_lock();
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+
+		if (netif_running(dev)) {
+			link_start(dev);
+			cxgb_set_rxmode(dev);
+		}
+		netif_device_attach(dev);
+	}
+	rtnl_unlock();
+}
+
+static struct pci_error_handlers cxgb4_eeh = {
+	.error_detected = eeh_err_detected,
+	.slot_reset     = eeh_slot_reset,
+	.resume         = eeh_resume,
+};
+
 static inline bool is_10g_port(const struct link_config *lc)
 {
 	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
@@ -3079,7 +3321,8 @@
 static void __devinit print_port_info(struct adapter *adap)
 {
 	static const char *base[] = {
-		"R", "KX4", "T", "KX", "T", "KR", "CX4"
+		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
+		"KX", "KR", "KR SFP+", "KR FEC"
 	};
 
 	int i;
@@ -3143,8 +3386,10 @@
 
 	/* We control everything through PF 0 */
 	func = PCI_FUNC(pdev->devfn);
-	if (func > 0)
+	if (func > 0) {
+		pci_save_state(pdev);        /* to restore SR-IOV later */
 		goto sriov;
+	}
 
 	err = pci_enable_device(pdev);
 	if (err) {
@@ -3385,6 +3630,7 @@
 	.id_table = cxgb4_pci_tbl,
 	.probe    = init_one,
 	.remove   = __devexit_p(remove_one),
+	.err_handler = &cxgb4_eeh,
 };
 
 static int __init cxgb4_init_module(void)
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 5b98546..0dc0866 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -185,6 +185,8 @@
 	struct cxgb4_range stag;
 	struct cxgb4_range rq;
 	struct cxgb4_range pbl;
+	struct cxgb4_range qp;
+	struct cxgb4_range cq;
 };
 
 /*
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
index 9f96724..5b990d2 100644
--- a/drivers/net/cxgb4/l2t.c
+++ b/drivers/net/cxgb4/l2t.c
@@ -310,6 +310,13 @@
 			neigh_release(e->neigh);
 			e->neigh = NULL;
 		}
+		while (e->arpq_head) {
+			struct sk_buff *skb = e->arpq_head;
+
+			e->arpq_head = skb->next;
+			kfree_skb(skb);
+		}
+		e->arpq_tail = NULL;
 	}
 	spin_unlock_bh(&e->lock);
 
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index d1f8f22..4388f72 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -931,7 +931,7 @@
 
 	ssi = skb_shinfo(skb);
 	if (ssi->gso_size) {
-		struct cpl_tx_pkt_lso *lso = (void *)wr;
+		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
 		int l3hdr_len = skb_network_header_len(skb);
 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
@@ -1718,7 +1718,7 @@
 					free_rx_bufs(q->adap, &rxq->fl, 1);
 					q->offset = 0;
 				}
-				len &= RSPD_LEN;
+				len = RSPD_LEN(len);
 			}
 			si.tot_len = len;
 
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index da272a9..3e63d14 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -221,6 +221,13 @@
 	if ((size & 15) || size > MBOX_LEN)
 		return -EINVAL;
 
+	/*
+	 * If the device is off-line, as in EEH, commands will time out.
+	 * Fail them early so we don't waste time waiting.
+	 */
+	if (adap->pdev->error_state != pci_channel_io_normal)
+		return -EIO;
+
 	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
 	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
 		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
@@ -449,12 +456,10 @@
 	SF_RD_STATUS    = 5,          /* read status register */
 	SF_WR_ENABLE    = 6,          /* enable writes */
 	SF_RD_DATA_FAST = 0xb,        /* read flash */
+	SF_RD_ID        = 0x9f,       /* read ID */
 	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
 
-	FW_START_SEC = 8,             /* first flash sector for FW */
-	FW_END_SEC = 15,              /* last flash sector for FW */
-	FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
-	FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
+	FW_MAX_SIZE = 512 * 1024,
 };
 
 /**
@@ -558,7 +563,7 @@
 {
 	int ret;
 
-	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
 		return -EINVAL;
 
 	addr = swab32(addr) | SF_RD_DATA_FAST;
@@ -596,7 +601,7 @@
 	u32 buf[64];
 	unsigned int i, c, left, val, offset = addr & 0xff;
 
-	if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE)
+	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
 		return -EINVAL;
 
 	val = swab32(addr) | SF_PROG_PAGE;
@@ -614,7 +619,7 @@
 		if (ret)
 			goto unlock;
 	}
-	ret = flash_wait_op(adapter, 5, 1);
+	ret = flash_wait_op(adapter, 8, 1);
 	if (ret)
 		goto unlock;
 
@@ -647,9 +652,8 @@
  */
 static int get_fw_version(struct adapter *adapter, u32 *vers)
 {
-	return t4_read_flash(adapter,
-			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
-			     vers, 0);
+	return t4_read_flash(adapter, adapter->params.sf_fw_start +
+			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
 }
 
 /**
@@ -661,8 +665,8 @@
  */
 static int get_tp_version(struct adapter *adapter, u32 *vers)
 {
-	return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
-							      tp_microcode_ver),
+	return t4_read_flash(adapter, adapter->params.sf_fw_start +
+			     offsetof(struct fw_hdr, tp_microcode_ver),
 			     1, vers, 0);
 }
 
@@ -684,9 +688,9 @@
 	if (!ret)
 		ret = get_tp_version(adapter, &adapter->params.tp_vers);
 	if (!ret)
-		ret = t4_read_flash(adapter,
-			FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
-			2, api_vers, 1);
+		ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
+				    offsetof(struct fw_hdr, intfver_nic),
+				    2, api_vers, 1);
 	if (ret)
 		return ret;
 
@@ -726,7 +730,7 @@
 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
 		    (ret = sf1_write(adapter, 4, 0, 1,
 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
-		    (ret = flash_wait_op(adapter, 5, 500)) != 0) {
+		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
 			dev_err(adapter->pdev_dev,
 				"erase of flash sector %d failed, error %d\n",
 				start, ret);
@@ -754,6 +758,9 @@
 	u8 first_page[SF_PAGE_SIZE];
 	const u32 *p = (const u32 *)fw_data;
 	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
+	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+	unsigned int fw_img_start = adap->params.sf_fw_start;
+	unsigned int fw_start_sec = fw_img_start / sf_sec_size;
 
 	if (!size) {
 		dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -784,8 +791,8 @@
 		return -EINVAL;
 	}
 
-	i = DIV_ROUND_UP(size, SF_SEC_SIZE);        /* # of sectors spanned */
-	ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
+	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
+	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
 	if (ret)
 		goto out;
 
@@ -796,11 +803,11 @@
 	 */
 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
 	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
-	ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
+	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
 	if (ret)
 		goto out;
 
-	addr = FW_IMG_START;
+	addr = fw_img_start;
 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
 		addr += SF_PAGE_SIZE;
 		fw_data += SF_PAGE_SIZE;
@@ -810,7 +817,7 @@
 	}
 
 	ret = t4_write_flash(adap,
-			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
+			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
 			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
 out:
 	if (ret)
@@ -1128,6 +1135,7 @@
 static void ulprx_intr_handler(struct adapter *adapter)
 {
 	static struct intr_info ulprx_intr_info[] = {
+		{ 0x1800000, "ULPRX context error", -1, 1 },
 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
 		{ 0 }
 	};
@@ -2510,7 +2518,7 @@
 	c.retval_len16 = htonl(FW_LEN16(c));
 	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
 			       FW_PFVF_CMD_NIQ(rxq));
-	c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
+	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
 			       FW_PFVF_CMD_PMASK(pmask) |
 			       FW_PFVF_CMD_NEQ(txq));
 	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
@@ -2572,7 +2580,7 @@
 	}
 	if (rss_size)
 		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
-	return ntohs(c.viid_pkd);
+	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
 }
 
 /**
@@ -2595,7 +2603,7 @@
 			    FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
 			    FW_VI_CMD_VFN(vf));
 	c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
-	c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
+	c.type_viid = htons(FW_VI_CMD_VIID(viid));
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 }
 
@@ -3045,7 +3053,7 @@
 	}
 }
 
-static int __devinit wait_dev_ready(struct adapter *adap)
+int t4_wait_dev_ready(struct adapter *adap)
 {
 	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
 		return 0;
@@ -3053,6 +3061,33 @@
 	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
 }
 
+static int __devinit get_flash_params(struct adapter *adap)
+{
+	int ret;
+	u32 info;
+
+	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
+	if (!ret)
+		ret = sf1_read(adap, 3, 0, 1, &info);
+	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
+	if (ret)
+		return ret;
+
+	if ((info & 0xff) != 0x20)             /* not a Numonyx flash */
+		return -EINVAL;
+	info >>= 16;                           /* log2 of size */
+	if (info >= 0x14 && info < 0x18)
+		adap->params.sf_nsec = 1 << (info - 16);
+	else if (info == 0x18)
+		adap->params.sf_nsec = 64;
+	else
+		return -EINVAL;
+	adap->params.sf_size = 1 << info;
+	adap->params.sf_fw_start =
+		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
+	return 0;
+}
+
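
A minimal userspace sketch (not driver code) of the ID decoding that the new
get_flash_params() above performs, assuming the layout it checks for: the
manufacturer byte 0x20 in the low byte and log2 of the part size in bits
23:16.  The sample ID word and the main() harness are made up for
illustration.

#include <stdio.h>
#include <stdint.h>

/*
 * Decode a 24-bit JEDEC ID word as read with the SF_RD_ID (0x9f) command.
 * On success, fill in the part size in bytes and the sector count and
 * return 0; return -1 for parts the decode does not recognize.
 */
static int decode_flash_id(uint32_t info, unsigned int *size, unsigned int *nsec)
{
	if ((info & 0xff) != 0x20)         /* manufacturer byte must be 0x20 */
		return -1;
	info >>= 16;                       /* density byte: log2 of the size */
	if (info >= 0x14 && info < 0x18)
		*nsec = 1 << (info - 16);  /* 1MB..8MB parts, 64KB sectors */
	else if (info == 0x18)
		*nsec = 64;                /* 16MB part */
	else
		return -1;
	*size = 1 << info;
	return 0;
}

int main(void)
{
	unsigned int size, nsec;
	uint32_t sample_id = 0x00170020;   /* hypothetical ID of an 8MB part */

	if (decode_flash_id(sample_id, &size, &nsec) == 0)
		printf("flash: %u bytes in %u sectors of %u bytes\n",
		       size, nsec, size / nsec);
	return 0;
}
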
 /**
  *	t4_prep_adapter - prepare SW and HW for operation
  *	@adapter: the adapter
@@ -3066,13 +3101,19 @@
 {
 	int ret;
 
-	ret = wait_dev_ready(adapter);
+	ret = t4_wait_dev_ready(adapter);
 	if (ret < 0)
 		return ret;
 
 	get_pci_mode(adapter, &adapter->params.pci);
 	adapter->params.rev = t4_read_reg(adapter, PL_REV);
 
+	ret = get_flash_params(adapter);
+	if (ret < 0) {
+		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
+		return ret;
+	}
+
 	ret = get_vpd_params(adapter, &adapter->params.vpd);
 	if (ret < 0)
 		return ret;
@@ -3122,12 +3163,13 @@
 		p->rss_size = rss_size;
 		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
 		memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
+		adap->port[i]->dev_id = j;
 
 		ret = ntohl(c.u.info.lstatus_to_modtype);
 		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
 			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
 		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
-		p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);
+		p->mod_type = FW_PORT_MOD_TYPE_NA;
 
 		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
 		j++;
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
index 0256232..e875d09 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -57,8 +57,6 @@
 
 enum {
 	SF_PAGE_SIZE = 256,           /* serial flash page size */
-	SF_SEC_SIZE = 64 * 1024,      /* serial flash sector size */
-	SF_SIZE = SF_SEC_SIZE * 16,   /* serial flash size */
 };
 
 enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
@@ -69,6 +67,45 @@
 	SGE_MAX_WR_LEN = 512,     /* max WR size in bytes */
 	SGE_NTIMERS = 6,          /* # of interrupt holdoff timer values */
 	SGE_NCOUNTERS = 4,        /* # of interrupt packet counter values */
+
+	SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
+	SGE_TIMER_UPD_CIDX = 7,   /* update cidx only */
+
+	SGE_EQ_IDXSIZE = 64,      /* egress queue pidx/cidx unit size */
+
+	SGE_INTRDST_PCI = 0,      /* interrupt destination is PCI-E */
+	SGE_INTRDST_IQ = 1,       /*   destination is an ingress queue */
+
+	SGE_UPDATEDEL_NONE = 0,   /* ingress queue pidx update delivery */
+	SGE_UPDATEDEL_INTR = 1,   /*   interrupt */
+	SGE_UPDATEDEL_STPG = 2,   /*   status page */
+	SGE_UPDATEDEL_BOTH = 3,   /*   interrupt and status page */
+
+	SGE_HOSTFCMODE_NONE = 0,  /* egress queue cidx updates */
+	SGE_HOSTFCMODE_IQ = 1,    /*   sent to ingress queue */
+	SGE_HOSTFCMODE_STPG = 2,  /*   sent to status page */
+	SGE_HOSTFCMODE_BOTH = 3,  /*   ingress queue and status page */
+
+	SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */
+	SGE_FETCHBURSTMIN_32B = 1,
+	SGE_FETCHBURSTMIN_64B = 2,
+	SGE_FETCHBURSTMIN_128B = 3,
+
+	SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */
+	SGE_FETCHBURSTMAX_128B = 1,
+	SGE_FETCHBURSTMAX_256B = 2,
+	SGE_FETCHBURSTMAX_512B = 3,
+
+	SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */
+	SGE_CIDXFLUSHTHRESH_2 = 1,
+	SGE_CIDXFLUSHTHRESH_4 = 2,
+	SGE_CIDXFLUSHTHRESH_8 = 3,
+	SGE_CIDXFLUSHTHRESH_16 = 4,
+	SGE_CIDXFLUSHTHRESH_32 = 5,
+	SGE_CIDXFLUSHTHRESH_64 = 6,
+	SGE_CIDXFLUSHTHRESH_128 = 7,
+
+	SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
 };
 
 struct sge_qstat {                /* data written to SGE queue status entries */
@@ -90,11 +127,13 @@
 };
 
 #define RSPD_NEWBUF 0x80000000U
-#define RSPD_LEN    0x7fffffffU
+#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
+#define RSPD_QID(x) RSPD_LEN(x)
 
 #define RSPD_GEN(x)  ((x) >> 7)
 #define RSPD_TYPE(x) (((x) >> 4) & 3)
 
 #define QINTR_CNT_EN       0x1
 #define QINTR_TIMER_IDX(x) ((x) << 1)
+#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
 #endif /* __T4_HW_H */
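
The t4_hw.h hunk above turns RSPD_LEN from a plain mask into an extractor
macro and adds RSPD_QID as an alias.  A small standalone check of how the
fields are pulled out of the response-descriptor words, using the same shifts
and masks as the header; the two sample words are made up.

#include <stdio.h>
#include <stdint.h>

/* Field encodings copied from t4_hw.h */
#define RSPD_NEWBUF  0x80000000U
#define RSPD_LEN(x)  (((x) >> 0) & 0x7fffffffU)
#define RSPD_GEN(x)  ((x) >> 7)
#define RSPD_TYPE(x) (((x) >> 4) & 3)

int main(void)
{
	uint32_t pldbuflen_qid = 0x80000600; /* sample: new buffer, 0x600-byte payload */
	uint8_t  type_gen = 0x81;            /* sample: generation 1, type 0 (FLBUF) */

	printf("new buffer: %s\n", (pldbuflen_qid & RSPD_NEWBUF) ? "yes" : "no");
	printf("length    : %u bytes\n", (unsigned)RSPD_LEN(pldbuflen_qid));
	printf("generation: %u\n", (unsigned)RSPD_GEN(type_gen));
	printf("type      : %u\n", (unsigned)RSPD_TYPE(type_gen));
	return 0;
}
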
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index 7a981b8..623932b 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -443,8 +443,7 @@
 
 #define cpl_tx_pkt_xt cpl_tx_pkt
 
-struct cpl_tx_pkt_lso {
-	WR_HDR;
+struct cpl_tx_pkt_lso_core {
 	__be32 lso_ctrl;
 #define LSO_TCPHDR_LEN(x) ((x) << 0)
 #define LSO_IPHDR_LEN(x)  ((x) << 4)
@@ -460,6 +459,12 @@
 	/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
 };
 
+struct cpl_tx_pkt_lso {
+	WR_HDR;
+	struct cpl_tx_pkt_lso_core c;
+	/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
 struct cpl_iscsi_hdr {
 	union opcode_tid ot;
 	__be16 pdu_len_ddp;
@@ -623,6 +628,11 @@
 	__be64 data[4];
 };
 
+/* cpl_fw6_msg.type values */
+enum {
+	FW6_TYPE_CMD_RPL = 0,
+};
+
 enum {
 	ULP_TX_MEM_READ = 2,
 	ULP_TX_MEM_WRITE = 3,
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h
index 5ed5648..bf21c14 100644
--- a/drivers/net/cxgb4/t4_regs.h
+++ b/drivers/net/cxgb4/t4_regs.h
@@ -93,12 +93,15 @@
 #define  PKTSHIFT_MASK          0x00001c00U
 #define  PKTSHIFT_SHIFT         10
 #define  PKTSHIFT(x)            ((x) << PKTSHIFT_SHIFT)
+#define  PKTSHIFT_GET(x)	(((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
 #define  INGPCIEBOUNDARY_MASK   0x00000380U
 #define  INGPCIEBOUNDARY_SHIFT  7
 #define  INGPCIEBOUNDARY(x)     ((x) << INGPCIEBOUNDARY_SHIFT)
 #define  INGPADBOUNDARY_MASK    0x00000070U
 #define  INGPADBOUNDARY_SHIFT   4
 #define  INGPADBOUNDARY(x)      ((x) << INGPADBOUNDARY_SHIFT)
+#define  INGPADBOUNDARY_GET(x)	(((x) & INGPADBOUNDARY_MASK) \
+				 >> INGPADBOUNDARY_SHIFT)
 #define  EGRPCIEBOUNDARY_MASK   0x0000000eU
 #define  EGRPCIEBOUNDARY_SHIFT  1
 #define  EGRPCIEBOUNDARY(x)     ((x) << EGRPCIEBOUNDARY_SHIFT)
@@ -326,6 +329,9 @@
 
 #define EDC_1_BASE_ADDR 0x7980
 
+#define CIM_BOOT_CFG 0x7b00
+#define  BOOTADDR_MASK 0xffffff00U
+
 #define CIM_PF_MAILBOX_DATA 0x240
 #define CIM_PF_MAILBOX_CTRL 0x280
 #define  MBMSGVALID     0x00000008U
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 63991d6..ca45df8 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -71,6 +71,7 @@
 #define FW_WR_ATOMIC(x)	 ((x) << 23)
 #define FW_WR_FLUSH(x)   ((x) << 22)
 #define FW_WR_COMPL(x)   ((x) << 21)
+#define FW_WR_IMMDLEN_MASK 0xff
 #define FW_WR_IMMDLEN(x) ((x) << 0)
 
 #define FW_WR_EQUIQ	(1U << 31)
@@ -447,7 +448,9 @@
 	FW_PARAMS_PARAM_DEV_INTVER_RI	= 0x07,
 	FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08,
 	FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09,
-	FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A
+	FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
+	FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
+	FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
 };
 
 /*
@@ -475,7 +478,13 @@
 	FW_PARAMS_PARAM_PFVF_PBL_END	= 0x12,
 	FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
 	FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
+	FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15,
+	FW_PARAMS_PARAM_PFVF_SQRQ_END	= 0x16,
+	FW_PARAMS_PARAM_PFVF_CQ_START	= 0x17,
+	FW_PARAMS_PARAM_PFVF_CQ_END	= 0x18,
 	FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
+	FW_PARAMS_PARAM_PFVF_VIID       = 0x24,
+	FW_PARAMS_PARAM_PFVF_CPMASK     = 0x25,
 };
 
 /*
@@ -512,7 +521,7 @@
 	__be32 op_to_vfn;
 	__be32 retval_len16;
 	__be32 niqflint_niq;
-	__be32 cmask_to_neq;
+	__be32 type_to_neq;
 	__be32 tc_to_nexactf;
 	__be32 r_caps_to_nethctrl;
 	__be16 nricq;
@@ -529,11 +538,16 @@
 #define FW_PFVF_CMD_NIQ(x) ((x) << 0)
 #define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff)
 
+#define FW_PFVF_CMD_TYPE (1 << 31)
+#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1)
+
 #define FW_PFVF_CMD_CMASK(x) ((x) << 24)
-#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf)
+#define FW_PFVF_CMD_CMASK_MASK 0xf
+#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK)
 
 #define FW_PFVF_CMD_PMASK(x) ((x) << 20)
-#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf)
+#define FW_PFVF_CMD_PMASK_MASK 0xf
+#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK)
 
 #define FW_PFVF_CMD_NEQ(x) ((x) << 0)
 #define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff)
@@ -686,6 +700,7 @@
 #define FW_EQ_ETH_CMD_EQID(x) ((x) << 0)
 #define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
 #define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0)
+#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
 
 #define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26)
 #define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25)
@@ -804,16 +819,16 @@
 struct fw_vi_cmd {
 	__be32 op_to_vfn;
 	__be32 alloc_to_len16;
-	__be16 viid_pkd;
+	__be16 type_viid;
 	u8 mac[6];
 	u8 portid_pkd;
 	u8 nmac;
 	u8 nmac0[6];
 	__be16 rsssize_pkd;
 	u8 nmac1[6];
-	__be16 r7;
+	__be16 idsiiq_pkd;
 	u8 nmac2[6];
-	__be16 r8;
+	__be16 idseiq_pkd;
 	u8 nmac3[6];
 	__be64 r9;
 	__be64 r10;
@@ -824,13 +839,16 @@
 #define FW_VI_CMD_ALLOC (1U << 31)
 #define FW_VI_CMD_FREE (1U << 30)
 #define FW_VI_CMD_VIID(x) ((x) << 0)
+#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff)
 #define FW_VI_CMD_PORTID(x) ((x) << 4)
+#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf)
 #define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
 
 /* Special VI_MAC command index ids */
 #define FW_VI_MAC_ADD_MAC		0x3FF
 #define FW_VI_MAC_ADD_PERSIST_MAC	0x3FE
 #define FW_VI_MAC_MAC_BASED_FREE	0x3FD
+#define FW_CLS_TCAM_NUM_ENTRIES		336
 
 enum fw_vi_mac_smac {
 	FW_VI_MAC_MPS_TCAM_ENTRY,
@@ -881,6 +899,7 @@
 };
 
 #define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0)
+#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff
 #define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16)
 #define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3
 #define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14)
@@ -1136,6 +1155,11 @@
 			__be32 lstatus_to_modtype;
 			__be16 pcap;
 			__be16 acap;
+			__be16 mtu;
+			__u8   cbllen;
+			__u8   r9;
+			__be32 r10;
+			__be64 r11;
 		} info;
 		struct fw_port_ppp {
 			__be32 pppen_to_ncsich;
@@ -1161,6 +1185,7 @@
 #define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
 
 #define FW_PORT_CMD_ACTION(x) ((x) << 16)
+#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff)
 
 #define FW_PORT_CMD_CTLBF(x) ((x) << 10)
 #define FW_PORT_CMD_OVLAN3(x) ((x) << 7)
@@ -1196,14 +1221,17 @@
 #define FW_PORT_CMD_NCSICH(x) ((x) << 4)
 
 enum fw_port_type {
-	FW_PORT_TYPE_FIBER,
-	FW_PORT_TYPE_KX4,
+	FW_PORT_TYPE_FIBER_XFI,
+	FW_PORT_TYPE_FIBER_XAUI,
 	FW_PORT_TYPE_BT_SGMII,
-	FW_PORT_TYPE_KX,
+	FW_PORT_TYPE_BT_XFI,
 	FW_PORT_TYPE_BT_XAUI,
-	FW_PORT_TYPE_KR,
+	FW_PORT_TYPE_KX4,
 	FW_PORT_TYPE_CX4,
-	FW_PORT_TYPE_TWINAX,
+	FW_PORT_TYPE_KX,
+	FW_PORT_TYPE_KR,
+	FW_PORT_TYPE_SFP,
+	FW_PORT_TYPE_BP_AP,
 
 	FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
 };
@@ -1213,6 +1241,9 @@
 	FW_PORT_MOD_TYPE_LR,
 	FW_PORT_MOD_TYPE_SR,
 	FW_PORT_MOD_TYPE_ER,
+	FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
+	FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
+	FW_PORT_MOD_TYPE_LRM,
 
 	FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
 };
@@ -1469,6 +1500,7 @@
 };
 
 #define FW_RSS_GLB_CONFIG_CMD_MODE(x)	((x) << 28)
+#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf)
 
 #define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL	0
 #define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL	1
@@ -1485,13 +1517,14 @@
 		} manual;
 		struct fw_rss_vi_config_basicvirtual {
 			__be32 r6;
-			__be32 defaultq_to_ip4udpen;
+			__be32 defaultq_to_udpen;
 #define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x)  ((x) << 16)
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff)
 #define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4)
 #define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN  (1U << 3)
 #define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2)
 #define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN  (1U << 1)
-#define FW_RSS_VI_CONFIG_CMD_IP4UDPEN     (1U << 0)
+#define FW_RSS_VI_CONFIG_CMD_UDPEN        (1U << 0)
 			__be64 r9;
 			__be64 r10;
 		} basicvirtual;
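
The fw_vi_cmd change above renames viid_pkd to type_viid and adds
FW_VI_CMD_VIID_GET(), which is why t4_alloc_vi() now masks the returned word
instead of using ntohs() directly.  A hedged standalone illustration of the
round trip; the exact position of the extra type bit is assumed here purely
for the demo.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Encodings copied from t4fw_api.h */
#define FW_VI_CMD_VIID(x)     ((x) << 0)
#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff)

int main(void)
{
	uint16_t viid = 0x234;            /* hypothetical virtual interface ID */
	/* assume, for illustration only, that the type flag lives in bit 15 */
	uint16_t wire = htons(FW_VI_CMD_VIID(viid) | 0x8000);

	/* ntohs() alone would hand back the type bit as part of the VIID;
	 * the GET macro strips everything above the 12-bit VIID field. */
	printf("raw word %#x -> viid %#x\n",
	       (unsigned)ntohs(wire), (unsigned)FW_VI_CMD_VIID_GET(ntohs(wire)));
	return 0;
}
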
diff --git a/drivers/net/cxgb4vf/Makefile b/drivers/net/cxgb4vf/Makefile
new file mode 100644
index 0000000..d72ee26
--- /dev/null
+++ b/drivers/net/cxgb4vf/Makefile
@@ -0,0 +1,7 @@
+#
+# Chelsio T4 SR-IOV Virtual Function Driver
+#
+
+obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o
+
+cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o sge.o
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
new file mode 100644
index 0000000..8ea0196
--- /dev/null
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -0,0 +1,540 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file should not be included directly.  Include t4vf_common.h instead.
+ */
+
+#ifndef __CXGB4VF_ADAPTER_H__
+#define __CXGB4VF_ADAPTER_H__
+
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "../cxgb4/t4_hw.h"
+
+/*
+ * Constants of the implementation.
+ */
+enum {
+	MAX_NPORTS	= 1,		/* max # of "ports" */
+	MAX_PORT_QSETS	= 8,		/* max # of Queue Sets / "port" */
+	MAX_ETH_QSETS	= MAX_NPORTS*MAX_PORT_QSETS,
+
+	/*
+	 * MSI-X interrupt index usage.
+	 */
+	MSIX_FW		= 0,		/* MSI-X index for firmware Q */
+	MSIX_NIQFLINT	= 1,		/* MSI-X index base for Ingress Qs */
+	MSIX_EXTRAS	= 1,
+	MSIX_ENTRIES	= MAX_ETH_QSETS + MSIX_EXTRAS,
+
+	/*
+	 * The maximum number of Ingress and Egress Queues is determined by
+	 * the maximum number of "Queue Sets" which we support plus any
+	 * ancillary queues.  Each "Queue Set" requires one Ingress Queue
+	 * for RX Packet Ingress Event notifications and two Egress Queues for
+	 * a Free List and an Ethernet TX list.
+	 */
+	INGQ_EXTRAS	= 2,		/* firmware event queue and */
+					/*   forwarded interrupts */
+	MAX_INGQ	= MAX_ETH_QSETS+INGQ_EXTRAS,
+	MAX_EGRQ	= MAX_ETH_QSETS*2,
+};
+
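
A quick standalone check of the arithmetic behind the constants above,
mirroring the values in this header: one port with eight Queue Sets needs
nine MSI-X vectors (one per Queue Set plus the firmware event queue), ten
ingress queues and sixteen egress queues.

#include <stdio.h>

enum {
	MAX_NPORTS     = 1,                          /* one VF "port" */
	MAX_PORT_QSETS = 8,                          /* Queue Sets per port */
	MAX_ETH_QSETS  = MAX_NPORTS * MAX_PORT_QSETS,
	MSIX_EXTRAS    = 1,                          /* firmware event queue */
	MSIX_ENTRIES   = MAX_ETH_QSETS + MSIX_EXTRAS,
	INGQ_EXTRAS    = 2,                          /* FW events + forwarded intrs */
	MAX_INGQ       = MAX_ETH_QSETS + INGQ_EXTRAS,
	MAX_EGRQ       = MAX_ETH_QSETS * 2,          /* free list + TX queue each */
};

int main(void)
{
	printf("MSI-X vectors : %d\n", MSIX_ENTRIES); /* 9 */
	printf("ingress queues: %d\n", MAX_INGQ);     /* 10 */
	printf("egress queues : %d\n", MAX_EGRQ);     /* 16 */
	return 0;
}
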
+/*
+ * Forward structure definition references.
+ */
+struct adapter;
+struct sge_eth_rxq;
+struct sge_rspq;
+
+/*
+ * Per-"port" information.  This is really per-Virtual Interface information
+ * but the use of the "port" nomenclature makes it easier to go back and forth
+ * between the PF and VF drivers ...
+ */
+struct port_info {
+	struct adapter *adapter;	/* our adapter */
+	struct vlan_group *vlan_grp;	/* our VLAN group */
+	u16 viid;			/* virtual interface ID */
+	s16 xact_addr_filt;		/* index of our MAC address filter */
+	u16 rss_size;			/* size of VI's RSS table slice */
+	u8 pidx;			/* index into adapter port[] */
+	u8 port_id;			/* physical port ID */
+	u8 rx_offload;			/* CSO, etc. */
+	u8 nqsets;			/* # of "Queue Sets" */
+	u8 first_qset;			/* index of first "Queue Set" */
+	struct link_config link_cfg;	/* physical port configuration */
+};
+
+/* port_info.rx_offload flags */
+enum {
+	RX_CSO = 1 << 0,
+};
+
+/*
+ * Scatter Gather Engine resources for the "adapter".  Our ingress and egress
+ * queues are organized into "Queue Sets" with one ingress and one egress
+ * queue per Queue Set.  These Queue Sets are apportionable between the "ports"
+ * (Virtual Interfaces).  One extra ingress queue is used to receive
+ * asynchronous messages from the firmware.  Note that the "Queue IDs" that we
+ * use here are really "Relative Queue IDs" which are returned as part of the
+ * firmware command to allocate queues.  These queue IDs are relative to the
+ * absolute Queue ID base of the section of the Queue ID space allocated to
+ * the PF/VF.
+ */
+
+/*
+ * SGE free-list queue state.
+ */
+struct rx_sw_desc;
+struct sge_fl {
+	unsigned int avail;		/* # of available RX buffers */
+	unsigned int pend_cred;		/* new buffers since last FL DB ring */
+	unsigned int cidx;		/* consumer index */
+	unsigned int pidx;		/* producer index */
+	unsigned long alloc_failed;	/* # of buffer allocation failures */
+	unsigned long large_alloc_failed; /* # of large buffer allocation failures */
+	unsigned long starving;		/* # of times FL was found starving */
+
+	/*
+	 * Write-once/infrequently fields.
+	 * -------------------------------
+	 */
+
+	unsigned int cntxt_id;		/* SGE relative QID for the free list */
+	unsigned int abs_id;		/* SGE absolute QID for the free list */
+	unsigned int size;		/* capacity of free list */
+	struct rx_sw_desc *sdesc;	/* address of SW RX descriptor ring */
+	__be64 *desc;			/* address of HW RX descriptor ring */
+	dma_addr_t addr;		/* PCI bus address of hardware ring */
+};
+
+/*
+ * An ingress packet gather list.
+ */
+struct pkt_gl {
+	skb_frag_t frags[MAX_SKB_FRAGS];
+	void *va;			/* virtual address of first byte */
+	unsigned int nfrags;		/* # of fragments */
+	unsigned int tot_len;		/* total length of fragments */
+};
+
+typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
+			      const struct pkt_gl *);
+
+/*
+ * State for an SGE Response Queue.
+ */
+struct sge_rspq {
+	struct napi_struct napi;	/* NAPI scheduling control */
+	const __be64 *cur_desc;		/* current descriptor in queue */
+	unsigned int cidx;		/* consumer index */
+	u8 gen;				/* current generation bit */
+	u8 next_intr_params;		/* holdoff params for next interrupt */
+	int offset;			/* offset into current FL buffer */
+
+	unsigned int unhandled_irqs;	/* bogus interrupts */
+
+	/*
+	 * Write-once/infrequently fields.
+	 * -------------------------------
+	 */
+
+	u8 intr_params;			/* interrupt holdoff parameters */
+	u8 pktcnt_idx;			/* interrupt packet threshold */
+	u8 idx;				/* queue index within its group */
+	u16 cntxt_id;			/* SGE rel QID for the response Q */
+	u16 abs_id;			/* SGE abs QID for the response Q */
+	__be64 *desc;			/* address of hardware response ring */
+	dma_addr_t phys_addr;		/* PCI bus address of ring */
+	unsigned int iqe_len;		/* entry size */
+	unsigned int size;		/* capacity of response Q */
+	struct adapter *adapter;	/* our adapter */
+	struct net_device *netdev;	/* associated net device */
+	rspq_handler_t handler;		/* the handler for this response Q */
+};
+
+/*
+ * Ethernet queue statistics
+ */
+struct sge_eth_stats {
+	unsigned long pkts;		/* # of ethernet packets */
+	unsigned long lro_pkts;		/* # of LRO super packets */
+	unsigned long lro_merged;	/* # of wire packets merged by LRO */
+	unsigned long rx_cso;		/* # of Rx checksum offloads */
+	unsigned long vlan_ex;		/* # of Rx VLAN extractions */
+	unsigned long rx_drops;		/* # of packets dropped due to no mem */
+};
+
+/*
+ * State for an Ethernet Receive Queue.
+ */
+struct sge_eth_rxq {
+	struct sge_rspq rspq;		/* Response Queue */
+	struct sge_fl fl;		/* Free List */
+	struct sge_eth_stats stats;	/* receive statistics */
+};
+
+/*
+ * SGE Transmit Queue state.  This contains all of the resources associated
+ * with the hardware status of a TX Queue which is a circular ring of hardware
+ * TX Descriptors.  For convenience, it also contains a pointer to a parallel
+ * "Software Descriptor" array but we don't know anything about it here other
+ * than its type name.
+ */
+struct tx_desc {
+	/*
+	 * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
+	 * hardware: Sizes, Producer and Consumer indices, etc.
+	 */
+	__be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
+};
+struct tx_sw_desc;
+struct sge_txq {
+	unsigned int in_use;		/* # of in-use TX descriptors */
+	unsigned int size;		/* # of descriptors */
+	unsigned int cidx;		/* SW consumer index */
+	unsigned int pidx;		/* producer index */
+	unsigned long stops;		/* # of times queue has been stopped */
+	unsigned long restarts;		/* # of queue restarts */
+
+	/*
+	 * Write-once/infrequently fields.
+	 * -------------------------------
+	 */
+
+	unsigned int cntxt_id;		/* SGE relative QID for the TX Q */
+	unsigned int abs_id;		/* SGE absolute QID for the TX Q */
+	struct tx_desc *desc;		/* address of HW TX descriptor ring */
+	struct tx_sw_desc *sdesc;	/* address of SW TX descriptor ring */
+	struct sge_qstat *stat;		/* queue status entry */
+	dma_addr_t phys_addr;		/* PCI bus address of hardware ring */
+};
+
+/*
+ * State for an Ethernet Transmit Queue.
+ */
+struct sge_eth_txq {
+	struct sge_txq q;		/* SGE TX Queue */
+	struct netdev_queue *txq;	/* associated netdev TX queue */
+	unsigned long tso;		/* # of TSO requests */
+	unsigned long tx_cso;		/* # of TX checksum offloads */
+	unsigned long vlan_ins;		/* # of TX VLAN insertions */
+	unsigned long mapping_err;	/* # of I/O MMU packet mapping errors */
+};
+
+/*
+ * The complete set of Scatter/Gather Engine resources.
+ */
+struct sge {
+	/*
+	 * Our "Queue Sets" ...
+	 */
+	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
+
+	/*
+	 * Extra ingress queues for asynchronous firmware events and
+	 * forwarded interrupts (when in MSI mode).
+	 */
+	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
+
+	struct sge_rspq intrq ____cacheline_aligned_in_smp;
+	spinlock_t intrq_lock;
+
+	/*
+	 * State for managing "starving Free Lists" -- Free Lists which have
+	 * fallen below a certain threshold of buffers available to the
+	 * hardware and attempts to refill them up to that threshold have
+	 * failed.  We have a regular "slow tick" timer process which will
+	 * make periodic attempts to refill these starving Free Lists ...
+	 */
+	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
+	struct timer_list rx_timer;
+
+	/*
+	 * State for cleaning up completed TX descriptors.
+	 */
+	struct timer_list tx_timer;
+
+	/*
+	 * Write-once/infrequently fields.
+	 * -------------------------------
+	 */
+
+	u16 max_ethqsets;		/* # of available Ethernet queue sets */
+	u16 ethqsets;			/* # of active Ethernet queue sets */
+	u16 ethtxq_rover;		/* Tx queue to clean up next */
+	u16 timer_val[SGE_NTIMERS];	/* interrupt holdoff timer array */
+	u8 counter_val[SGE_NCOUNTERS];	/* interrupt RX threshold array */
+
+	/*
+	 * Reverse maps from Absolute Queue IDs to associated queue pointers.
+	 * The absolute Queue IDs are in a compact range which start at a
+	 * [potentially large] Base Queue ID.  We perform the reverse map by
+	 * first converting the Absolute Queue ID into a Relative Queue ID by
+	 * subtracting off the Base Queue ID and then use a Relative Queue ID
+	 * indexed table to get the pointer to the corresponding software
+	 * queue structure.
+	 */
+	unsigned int egr_base;
+	unsigned int ingr_base;
+	void *egr_map[MAX_EGRQ];
+	struct sge_rspq *ingr_map[MAX_INGQ];
+};
+
+/*
+ * Utility macros to convert Absolute- to Relative-Queue indices and Egress-
+ * and Ingress-Queues.  The EQ_MAP() and IQ_MAP() macros which provide
+ * pointers to Ingress- and Egress-Queues can be used as both L- and R-values
+ */
+#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
+#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))
+
+#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
+#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])
+
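
A minimal sketch of the reverse-mapping scheme described above: an absolute
queue ID is converted to a relative index by subtracting the base handed to
this function, and the index selects an entry in a small pointer table.  The
table size, base and queue IDs below are made up for illustration.

#include <stdio.h>

#define DEMO_MAX_EGRQ 16                  /* table size, illustration only */

struct demo_txq {
	unsigned int abs_id;              /* SGE absolute queue ID */
};

static struct demo_txq *egr_map[DEMO_MAX_EGRQ];
static unsigned int egr_base;

/* Absolute -> relative lookup, the same idea as EQ_IDX()/EQ_MAP() above;
 * the unsigned subtraction also catches IDs below the base. */
static struct demo_txq *lookup_egr(unsigned int abs_id)
{
	unsigned int idx = abs_id - egr_base;

	return idx < DEMO_MAX_EGRQ ? egr_map[idx] : NULL;
}

int main(void)
{
	static struct demo_txq txq = { .abs_id = 1027 };

	egr_base = 1024;                       /* hypothetical base queue ID */
	egr_map[txq.abs_id - egr_base] = &txq; /* install the reverse mapping */

	printf("abs 1027 -> %s\n", lookup_egr(1027) == &txq ? "our queue" : "miss");
	printf("abs 2000 -> %s\n", lookup_egr(2000) ? "hit" : "miss");
	return 0;
}
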
+/*
+ * Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
+ */
+#define for_each_ethrxq(sge, iter) \
+	for (iter = 0; iter < (sge)->ethqsets; iter++)
+
+/*
+ * Per-"adapter" (Virtual Function) information.
+ */
+struct adapter {
+	/* PCI resources */
+	void __iomem *regs;
+	struct pci_dev *pdev;
+	struct device *pdev_dev;
+
+	/* "adapter" resources */
+	unsigned long registered_device_map;
+	unsigned long open_device_map;
+	unsigned long flags;
+	struct adapter_params params;
+
+	/* queue and interrupt resources */
+	struct {
+		unsigned short vec;
+		char desc[22];
+	} msix_info[MSIX_ENTRIES];
+	struct sge sge;
+
+	/* Linux network device resources */
+	struct net_device *port[MAX_NPORTS];
+	const char *name;
+	unsigned int msg_enable;
+
+	/* debugfs resources */
+	struct dentry *debugfs_root;
+
+	/* various locks */
+	spinlock_t stats_lock;
+};
+
+enum { /* adapter flags */
+	FULL_INIT_DONE     = (1UL << 0),
+	USING_MSI          = (1UL << 1),
+	USING_MSIX         = (1UL << 2),
+	QUEUES_BOUND       = (1UL << 3),
+};
+
+/*
+ * The following register read/write routine definitions are required by
+ * the common code.
+ */
+
+/**
+ * t4_read_reg - read a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 32-bit value of the given HW register.
+ */
+static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+	return readl(adapter->regs + reg_addr);
+}
+
+/**
+ * t4_write_reg - write a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+	writel(val, adapter->regs + reg_addr);
+}
+
+#ifndef readq
+/*
+ * Fallback 64-bit MMIO accessors for platforms which don't provide native
+ * readq()/writeq(); note that the two 32-bit halves are not accessed
+ * atomically.
+ */
+static inline u64 readq(const volatile void __iomem *addr)
+{
+	return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val >> 32, addr + 4);
+}
+#endif
+
+/**
+ * t4_read_reg64 - read a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 64-bit value of the given HW register.
+ */
+static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
+{
+	return readq(adapter->regs + reg_addr);
+}
+
+/**
+ * t4_write_reg64 - write a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 64-bit value into the given HW register.
+ */
+static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
+				  u64 val)
+{
+	writeq(val, adapter->regs + reg_addr);
+}
+
+/**
+ * port_name - return the string name of a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Return the string name of the selected port.
+ */
+static inline const char *port_name(struct adapter *adapter, int pidx)
+{
+	return adapter->port[pidx]->name;
+}
+
+/**
+ * t4_os_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW.  Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
+				     u8 hw_addr[])
+{
+	memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
+	memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN);
+}
+
+/**
+ * netdev2pinfo - return the port_info structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct port_info associated with a net_device
+ */
+static inline struct port_info *netdev2pinfo(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Return the port_info structure for the port with the given index.
+ */
+static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
+{
+	return netdev_priv(adapter->port[pidx]);
+}
+
+/**
+ * netdev2adap - return the adapter structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct adapter associated with a net_device
+ */
+static inline struct adapter *netdev2adap(const struct net_device *dev)
+{
+	return netdev2pinfo(dev)->adapter;
+}
+
+/*
+ * OS "Callback" function declarations.  These are functions that the OS code
+ * is "contracted" to provide for the common code.
+ */
+void t4vf_os_link_changed(struct adapter *, int, int);
+
+/*
+ * SGE function prototype declarations.
+ */
+int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
+		       struct net_device *, int,
+		       struct sge_fl *, rspq_handler_t);
+int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
+			   struct net_device *, struct netdev_queue *,
+			   unsigned int);
+void t4vf_free_sge_resources(struct adapter *);
+
+int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
+int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
+		       const struct pkt_gl *);
+
+irq_handler_t t4vf_intr_handler(struct adapter *);
+irqreturn_t t4vf_sge_intr_msix(int, void *);
+
+int t4vf_sge_init(struct adapter *);
+void t4vf_sge_start(struct adapter *);
+void t4vf_sge_stop(struct adapter *);
+
+#endif /* __CXGB4VF_ADAPTER_H__ */
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
new file mode 100644
index 0000000..bd73ff5
--- /dev/null
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -0,0 +1,2906 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
+#include <linux/ethtool.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_msg.h"
+
+/*
+ * Generic information about the driver.
+ */
+#define DRV_VERSION "1.0.0"
+#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
+
+/*
+ * Module Parameters.
+ * ==================
+ */
+
+/*
+ * Default ethtool "message level" for adapters.
+ */
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable,
+		 "default adapter ethtool message level bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X then MSI.  This parameter determines which of these schemes the
+ * driver may consider as follows:
+ *
+ *     msi = 2: choose from among MSI-X and MSI
+ *     msi = 1: only consider MSI interrupts
+ *
+ * Note that unlike the Physical Function driver, this Virtual Function driver
+ * does _not_ support legacy INTx interrupts (this limitation is mandated by
+ * the PCI-E SR-IOV standard).
+ */
+#define MSI_MSIX	2
+#define MSI_MSI		1
+#define MSI_DEFAULT	MSI_MSIX
+
+static int msi = MSI_DEFAULT;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
+
+/*
+ * Fundamental constants.
+ * ======================
+ */
+
+enum {
+	MAX_TXQ_ENTRIES		= 16384,
+	MAX_RSPQ_ENTRIES	= 16384,
+	MAX_RX_BUFFERS		= 16384,
+
+	MIN_TXQ_ENTRIES		= 32,
+	MIN_RSPQ_ENTRIES	= 128,
+	MIN_FL_ENTRIES		= 16,
+
+	/*
+	 * For purposes of manipulating the Free List size we need to
+	 * recognize that Free Lists are actually Egress Queues (the host
+	 * produces free buffers which the hardware consumes), that Egress
+	 * Queue indices are all in units of Egress Context Units, and that
+	 * free list entries are 64-bit PCI DMA addresses.  And since Producer
+	 * Index == Consumer Index means an EMPTY list, we always leave at
+	 * least one Egress Unit's worth of Free List entries unused.  See
+	 * sge.c for more details ...
+	 */
+	EQ_UNIT = SGE_EQ_IDXSIZE,
+	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+	MIN_FL_RESID = FL_PER_EQ_UNIT,
+};
+
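
A worked version of the sizing rule in the comment above, under the same
assumptions: with 64-byte egress-queue index units and 8-byte free-list
entries, one unit's worth of entries, eight, is never handed to the hardware
so that producer index == consumer index can still mean an empty list.

#include <stdio.h>
#include <stdint.h>

#define EQ_UNIT        64                            /* SGE_EQ_IDXSIZE */
#define FL_PER_EQ_UNIT (EQ_UNIT / sizeof(uint64_t))  /* 8 DMA addresses */

/* Usable capacity of a free list with 'size' entries: one egress unit's
 * worth of entries stays unused (this mirrors MIN_FL_RESID above). */
static size_t fl_capacity(size_t size)
{
	return size - FL_PER_EQ_UNIT;
}

int main(void)
{
	size_t size = 1024;                 /* hypothetical free-list ring size */

	printf("ring of %zu entries -> %zu usable buffers\n",
	       size, fl_capacity(size));
	return 0;
}
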
+/*
+ * Global driver state.
+ * ====================
+ */
+
+static struct dentry *cxgb4vf_debugfs_root;
+
+/*
+ * OS "Callback" functions.
+ * ========================
+ */
+
+/*
+ * The link status has changed on the indicated "port" (Virtual Interface).
+ */
+void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
+{
+	struct net_device *dev = adapter->port[pidx];
+
+	/*
+	 * If the port is disabled or the current recorded "link up"
+	 * status matches the new status, just return.
+	 */
+	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
+		return;
+
+	/*
+	 * Tell the OS that the link status has changed and print a short
+	 * informative message on the console about the event.
+	 */
+	if (link_ok) {
+		const char *s;
+		const char *fc;
+		const struct port_info *pi = netdev_priv(dev);
+
+		netif_carrier_on(dev);
+
+		switch (pi->link_cfg.speed) {
+		case SPEED_10000:
+			s = "10Gbps";
+			break;
+
+		case SPEED_1000:
+			s = "1000Mbps";
+			break;
+
+		case SPEED_100:
+			s = "100Mbps";
+			break;
+
+		default:
+			s = "unknown";
+			break;
+		}
+
+		switch (pi->link_cfg.fc) {
+		case PAUSE_RX:
+			fc = "RX";
+			break;
+
+		case PAUSE_TX:
+			fc = "TX";
+			break;
+
+		case PAUSE_RX|PAUSE_TX:
+			fc = "RX/TX";
+			break;
+
+		default:
+			fc = "no";
+			break;
+		}
+
+		printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n",
+		       dev->name, s, fc);
+	} else {
+		netif_carrier_off(dev);
+		printk(KERN_INFO "%s: link down\n", dev->name);
+	}
+}
+
+/*
+ * Net device operations.
+ * ======================
+ */
+
+/*
+ * Record our new VLAN Group and enable/disable hardware VLAN Tag extraction
+ * based on whether the specified VLAN Group pointer is NULL or not.
+ */
+static void cxgb4vf_vlan_rx_register(struct net_device *dev,
+				     struct vlan_group *grp)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	pi->vlan_grp = grp;
+	t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, grp != NULL, 0);
+}
+
+/*
+ * Perform the MAC and PHY actions needed to enable a "port" (Virtual
+ * Interface).
+ */
+static int link_start(struct net_device *dev)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	/*
+	 * We do not set address filters and promiscuity here; the stack does
+	 * that step explicitly.
+	 */
+	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, -1,
+			      true);
+	if (ret == 0) {
+		ret = t4vf_change_mac(pi->adapter, pi->viid,
+				      pi->xact_addr_filt, dev->dev_addr, true);
+		if (ret >= 0) {
+			pi->xact_addr_filt = ret;
+			ret = 0;
+		}
+	}
+
+	/*
+	 * We don't need to actually "start the link" itself since the
+	 * firmware will do that for us when the first Virtual Interface
+	 * is enabled on a port.
+	 */
+	if (ret == 0)
+		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
+	return ret;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adapter)
+{
+	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
+	int pidx;
+
+	/*
+	 * Firmware events.
+	 */
+	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
+		 "%s-FWeventq", adapter->name);
+	adapter->msix_info[MSIX_FW].desc[namelen] = 0;
+
+	/*
+	 * Ethernet queues.
+	 */
+	for_each_port(adapter, pidx) {
+		struct net_device *dev = adapter->port[pidx];
+		const struct port_info *pi = netdev_priv(dev);
+		int qs, msi;
+
+		for (qs = 0, msi = MSIX_NIQFLINT;
+		     qs < pi->nqsets;
+		     qs++, msi++) {
+			snprintf(adapter->msix_info[msi].desc, namelen,
+				 "%s-%d", dev->name, qs);
+			adapter->msix_info[msi].desc[namelen] = 0;
+		}
+	}
+}
+
+/*
+ * Request all of our MSI-X resources.
+ */
+static int request_msix_queue_irqs(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int rxq, msi, err;
+
+	/*
+	 * Firmware events.
+	 */
+	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
+			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
+	if (err)
+		return err;
+
+	/*
+	 * Ethernet queues.
+	 */
+	msi = MSIX_NIQFLINT;
+	for_each_ethrxq(s, rxq) {
+		err = request_irq(adapter->msix_info[msi].vec,
+				  t4vf_sge_intr_msix, 0,
+				  adapter->msix_info[msi].desc,
+				  &s->ethrxq[rxq].rspq);
+		if (err)
+			goto err_free_irqs;
+		msi++;
+	}
+	return 0;
+
+err_free_irqs:
+	while (--rxq >= 0)
+		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
+	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
+	return err;
+}
+
+/*
+ * Free our MSI-X resources.
+ */
+static void free_msix_queue_irqs(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int rxq, msi;
+
+	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
+	msi = MSIX_NIQFLINT;
+	for_each_ethrxq(s, rxq)
+		free_irq(adapter->msix_info[msi++].vec,
+			 &s->ethrxq[rxq].rspq);
+}
+
+/*
+ * Turn on NAPI and start up interrupts on a response queue.
+ */
+static void qenable(struct sge_rspq *rspq)
+{
+	napi_enable(&rspq->napi);
+
+	/*
+	 * 0-increment the Going To Sleep register to start the timer and
+	 * enable interrupts.
+	 */
+	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+		     CIDXINC(0) |
+		     SEINTARM(rspq->intr_params) |
+		     INGRESSQID(rspq->cntxt_id));
+}
+
+/*
+ * Enable NAPI scheduling and interrupt generation for all Receive Queues.
+ */
+static void enable_rx(struct adapter *adapter)
+{
+	int rxq;
+	struct sge *s = &adapter->sge;
+
+	for_each_ethrxq(s, rxq)
+		qenable(&s->ethrxq[rxq].rspq);
+	qenable(&s->fw_evtq);
+
+	/*
+	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
+	 * its Going To Sleep register here to get it started.
+	 */
+	if (adapter->flags & USING_MSI)
+		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+			     CIDXINC(0) |
+			     SEINTARM(s->intrq.intr_params) |
+			     INGRESSQID(s->intrq.cntxt_id));
+
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled.
+ */
+static void quiesce_rx(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int rxq;
+
+	for_each_ethrxq(s, rxq)
+		napi_disable(&s->ethrxq[rxq].rspq.napi);
+	napi_disable(&s->fw_evtq.napi);
+}
+
+/*
+ * Response queue handler for the firmware event queue.
+ */
+static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
+			  const struct pkt_gl *gl)
+{
+	/*
+	 * Extract response opcode and get pointer to CPL message body.
+	 */
+	struct adapter *adapter = rspq->adapter;
+	u8 opcode = ((const struct rss_header *)rsp)->opcode;
+	void *cpl = (void *)(rsp + 1);
+
+	switch (opcode) {
+	case CPL_FW6_MSG: {
+		/*
+		 * We've received an asynchronous message from the firmware.
+		 */
+		const struct cpl_fw6_msg *fw_msg = cpl;
+		if (fw_msg->type == FW6_TYPE_CMD_RPL)
+			t4vf_handle_fw_rpl(adapter, fw_msg->data);
+		break;
+	}
+
+	case CPL_SGE_EGR_UPDATE: {
+		/*
+		 * We've received an Egress Queue status update message.
+		 * We get these, as the SGE is currently configured, when
+		 * the firmware passes certain points in processing our
+		 * TX Ethernet Queue.  We use these updates to determine
+		 * when we may need to restart a TX Ethernet Queue which
+		 * was stopped for lack of free slots ...
+		 */
+		const struct cpl_sge_egr_update *p = (void *)cpl;
+		unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
+		struct sge *s = &adapter->sge;
+		struct sge_txq *tq;
+		struct sge_eth_txq *txq;
+		unsigned int eq_idx;
+		int hw_cidx, reclaimable, in_use;
+
+		/*
+		 * Perform sanity checks on the Queue ID to make sure it
+		 * really refers to one of our TX Ethernet Egress Queues,
+		 * that the queue is active, and that its ID matches.  None
+		 * of these error conditions should ever happen, so we may
+		 * want to make them fatal and/or conditional under DEBUG.
+		 */
+		eq_idx = EQ_IDX(s, qid);
+		if (unlikely(eq_idx >= MAX_EGRQ)) {
+			dev_err(adapter->pdev_dev,
+				"Egress Update QID %d out of range\n", qid);
+			break;
+		}
+		tq = s->egr_map[eq_idx];
+		if (unlikely(tq == NULL)) {
+			dev_err(adapter->pdev_dev,
+				"Egress Update QID %d TXQ=NULL\n", qid);
+			break;
+		}
+		txq = container_of(tq, struct sge_eth_txq, q);
+		if (unlikely(tq->abs_id != qid)) {
+			dev_err(adapter->pdev_dev,
+				"Egress Update QID %d refers to TXQ %d\n",
+				qid, tq->abs_id);
+			break;
+		}
+
+		/*
+		 * Skip TX Queues which aren't stopped.
+		 */
+		if (likely(!netif_tx_queue_stopped(txq->txq)))
+			break;
+
+		/*
+		 * Skip stopped TX Queues which have more than half of their
+		 * DMA rings occupied with unacknowledged writes.
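+		 * (The hardware's view of the consumer index comes from the
+		 * queue's status page, txq->q.stat->cidx.)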
+		 */
+		hw_cidx = be16_to_cpu(txq->q.stat->cidx);
+		reclaimable = hw_cidx - txq->q.cidx;
+		if (reclaimable < 0)
+			reclaimable += txq->q.size;
+		in_use = txq->q.in_use - reclaimable;
+		if (in_use >= txq->q.size/2)
+			break;
+
+		/*
+		 * Restart a stopped TX Queue which has less than half of its
+		 * TX ring in use ...
+		 */
+		txq->q.restarts++;
+		netif_tx_wake_queue(txq->txq);
+		break;
+	}
+
+	default:
+		dev_err(adapter->pdev_dev,
+			"unexpected CPL %#x on FW event queue\n", opcode);
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
+ * to use and initialize them.  We support multiple "Queue Sets" per port if
+ * we have MSI-X, otherwise just one queue set per port.
+ */
+static int setup_sge_queues(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int err, pidx, msix;
+
+	/*
+	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
+	 * state.
+	 */
+	bitmap_zero(s->starving_fl, MAX_EGRQ);
+
+	/*
+	 * If we're using MSI interrupt mode we need to set up a "forwarded
+	 * interrupt" queue which we'll set up with our MSI vector.  The rest
+	 * of the ingress queues will be set up to forward their interrupts to
+	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
+	 * the intrq's queue ID as the interrupt forwarding queue for the
+	 * subsequent calls ...
+	 */
+	if (adapter->flags & USING_MSI) {
+		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
+					 adapter->port[0], 0, NULL, NULL);
+		if (err)
+			goto err_free_queues;
+	}
+
+	/*
+	 * Allocate our ingress queue for asynchronous firmware messages.
+	 */
+	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
+				 MSIX_FW, NULL, fwevtq_handler);
+	if (err)
+		goto err_free_queues;
+
+	/*
+	 * Allocate each "port"'s initial Queue Sets.  These can be changed
+	 * later on ... up to the point where any interface on the adapter is
+	 * brought up at which point lots of things get nailed down
+	 * permanently ...
+	 */
+	msix = MSIX_NIQFLINT;
+	for_each_port(adapter, pidx) {
+		struct net_device *dev = adapter->port[pidx];
+		struct port_info *pi = netdev_priv(dev);
+		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
+		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
+		int nqsets = (adapter->flags & USING_MSIX) ? pi->nqsets : 1;
+		int qs;
+
+		for (qs = 0; qs < nqsets; qs++, rxq++, txq++) {
+			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
+						 dev, msix++,
+						 &rxq->fl, t4vf_ethrx_handler);
+			if (err)
+				goto err_free_queues;
+
+			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
+					     netdev_get_tx_queue(dev, qs),
+					     s->fw_evtq.cntxt_id);
+			if (err)
+				goto err_free_queues;
+
+			rxq->rspq.idx = qs;
+			memset(&rxq->stats, 0, sizeof(rxq->stats));
+		}
+	}
+
+	/*
+	 * Create the reverse mappings for the queues.
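+	 * egr_base/ingr_base record the offset between a queue's Absolute
+	 * Queue ID and its hardware Context ID so that the EQ_MAP()/IQ_MAP()
+	 * tables can be indexed by Absolute Queue ID.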
+	 */
+	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
+	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
+	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
+	for_each_port(adapter, pidx) {
+		struct net_device *dev = adapter->port[pidx];
+		struct port_info *pi = netdev_priv(dev);
+		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
+		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
+		int nqsets = (adapter->flags & USING_MSIX) ? pi->nqsets : 1;
+		int qs;
+
+		for (qs = 0; qs < nqsets; qs++, rxq++, txq++) {
+			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
+			EQ_MAP(s, txq->q.abs_id) = &txq->q;
+
+			/*
+			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
+			 * for Free Lists but since all of the Egress Queues
+			 * (including Free Lists) have Relative Queue IDs
+			 * which are computed as Absolute - Base Queue ID, we
+			 * can synthesize the Absolute Queue IDs for the Free
+			 * Lists.  This is useful for debugging purposes when
+			 * we want to dump Queue Contexts via the PF Driver.
+			 */
+			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
+			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
+		}
+	}
+	return 0;
+
+err_free_queues:
+	t4vf_free_sge_resources(adapter);
+	return err;
+}
+
+/*
+ * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
+ * queues.  We configure the RSS CPU lookup table to distribute to the number
+ * of HW receive queues, and the response queue lookup table to narrow that
+ * down to the response queues actually configured for each "port" (Virtual
+ * Interface).  We always configure the RSS mapping for all ports since the
+ * mapping table has plenty of entries.
+ */
+static int setup_rss(struct adapter *adapter)
+{
+	int pidx;
+
+	for_each_port(adapter, pidx) {
+		struct port_info *pi = adap2pinfo(adapter, pidx);
+		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
+		u16 rss[MAX_PORT_QSETS];
+		int qs, err;
+
+		for (qs = 0; qs < pi->nqsets; qs++)
+			rss[qs] = rxq[qs].rspq.abs_id;
+
+		err = t4vf_config_rss_range(adapter, pi->viid,
+					    0, pi->rss_size, rss, pi->nqsets);
+		if (err)
+			return err;
+
+		/*
+		 * Perform Global RSS Mode-specific initialization.
+		 */
+		switch (adapter->params.rss.mode) {
+		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
+			/*
+			 * If Tunnel All Lookup isn't specified in the global
+			 * RSS Configuration, then we need to specify a
+			 * default Ingress Queue for any ingress packets which
+			 * aren't hashed.  We'll use our first ingress queue
+			 * ...
+			 */
+			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
+				union rss_vi_config config;
+				err = t4vf_read_rss_vi_config(adapter,
+							      pi->viid,
+							      &config);
+				if (err)
+					return err;
+				config.basicvirtual.defaultq =
+					rxq[0].rspq.abs_id;
+				err = t4vf_write_rss_vi_config(adapter,
+							       pi->viid,
+							       &config);
+				if (err)
+					return err;
+			}
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring the adapter up.  Called whenever we go from no "ports" open to having
+ * one open.  This function performs the actions necessary to make an adapter
+ * operational, such as completing the initialization of HW modules, and
+ * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
+ * this is called "cxgb_up" in the PF Driver.)
+ */
+static int adapter_up(struct adapter *adapter)
+{
+	int err;
+
+	/*
+	 * If this is the first time we've been called, perform basic
+	 * adapter setup.  Once we've done this, many of our adapter
+	 * parameters can no longer be changed ...
+	 */
+	if ((adapter->flags & FULL_INIT_DONE) == 0) {
+		err = setup_sge_queues(adapter);
+		if (err)
+			return err;
+		err = setup_rss(adapter);
+		if (err) {
+			t4vf_free_sge_resources(adapter);
+			return err;
+		}
+
+		if (adapter->flags & USING_MSIX)
+			name_msix_vecs(adapter);
+		adapter->flags |= FULL_INIT_DONE;
+	}
+
+	/*
+	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
+	 */
+	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+	if (adapter->flags & USING_MSIX)
+		err = request_msix_queue_irqs(adapter);
+	else
+		err = request_irq(adapter->pdev->irq,
+				  t4vf_intr_handler(adapter), 0,
+				  adapter->name, adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
+			err);
+		return err;
+	}
+
+	/*
+	 * Enable NAPI ingress processing and return success.
+	 */
+	enable_rx(adapter);
+	t4vf_sge_start(adapter);
+	return 0;
+}
+
+/*
+ * Bring the adapter down.  Called whenever the last "port" (Virtual
+ * Interface) is closed.  (Note that this routine is called "cxgb_down" in the PF
+ * Driver.)
+ */
+static void adapter_down(struct adapter *adapter)
+{
+	/*
+	 * Free interrupt resources.
+	 */
+	if (adapter->flags & USING_MSIX)
+		free_msix_queue_irqs(adapter);
+	else
+		free_irq(adapter->pdev->irq, adapter);
+
+	/*
+	 * Wait for NAPI handlers to finish.
+	 */
+	quiesce_rx(adapter);
+}
+
+/*
+ * Start up a net device.
+ */
+static int cxgb4vf_open(struct net_device *dev)
+{
+	int err;
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	/*
+	 * If this is the first interface that we're opening on the "adapter",
+	 * bring the "adapter" up now.
+	 */
+	if (adapter->open_device_map == 0) {
+		err = adapter_up(adapter);
+		if (err)
+			return err;
+	}
+
+	/*
+	 * Note that this interface is up and start everything up ...
+	 */
+	dev->real_num_tx_queues = pi->nqsets;
+	set_bit(pi->port_id, &adapter->open_device_map);
+	link_start(dev);
+	netif_tx_start_all_queues(dev);
+	return 0;
+}
+
+/*
+ * Shut down a net device.  This routine is called "cxgb_close" in the PF
+ * Driver ...
+ */
+static int cxgb4vf_stop(struct net_device *dev)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	netif_tx_stop_all_queues(dev);
+	netif_carrier_off(dev);
+	ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+	pi->link_cfg.link_ok = 0;
+
+	clear_bit(pi->port_id, &adapter->open_device_map);
+	if (adapter->open_device_map == 0)
+		adapter_down(adapter);
+	return 0;
+}
+
+/*
+ * Translate our basic statistics into the standard "ifconfig" statistics.
+ */
+static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
+{
+	struct t4vf_port_stats stats;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adapter = pi->adapter;
+	struct net_device_stats *ns = &dev->stats;
+	int err;
+
+	spin_lock(&adapter->stats_lock);
+	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
+	spin_unlock(&adapter->stats_lock);
+
+	memset(ns, 0, sizeof(*ns));
+	if (err)
+		return ns;
+
+	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
+			stats.tx_ucast_bytes + stats.tx_offload_bytes);
+	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
+			  stats.tx_ucast_frames + stats.tx_offload_frames);
+	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
+			stats.rx_ucast_bytes);
+	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
+			  stats.rx_ucast_frames);
+	ns->multicast = stats.rx_mcast_frames;
+	ns->tx_errors = stats.tx_drop_frames;
+	ns->rx_errors = stats.rx_err_frames;
+
+	return ns;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
+ * array of address pointers and return the number collected.
+ */
+static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
+					       const u8 **addr,
+					       unsigned int maxaddrs)
+{
+	unsigned int naddr = 0;
+	const struct netdev_hw_addr *ha;
+
+	for_each_dev_addr(dev, ha) {
+		addr[naddr++] = ha->addr;
+		if (naddr >= maxaddrs)
+			break;
+	}
+	return naddr;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
+ * array of address pointers and return the number collected.
+ */
+static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
+					       const u8 **addr,
+					       unsigned int maxaddrs)
+{
+	unsigned int naddr = 0;
+	const struct netdev_hw_addr *ha;
+
+	netdev_for_each_mc_addr(ha, dev) {
+		addr[naddr++] = ha->addr;
+		if (naddr >= maxaddrs)
+			break;
+	}
+	return naddr;
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+	u64 mhash = 0;
+	u64 uhash = 0;
+	bool free = true;
+	u16 filt_idx[7];
+	const u8 *addr[7];
+	int ret, naddr = 0;
+	const struct port_info *pi = netdev_priv(dev);
+
+	/* first do the secondary unicast addresses */
+	naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
+	if (naddr > 0) {
+		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+					  naddr, addr, filt_idx, &uhash, sleep);
+		if (ret < 0)
+			return ret;
+
+		free = false;
+	}
+
+	/* next set up the multicast addresses */
+	naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
+	if (naddr > 0) {
+		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+					  naddr, addr, filt_idx, &mhash, sleep);
+		if (ret < 0)
+			return ret;
+	}
+
+	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
+				  uhash | mhash, sleep);
+}
+
+/*
+ * Set RX properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	ret = set_addr_filters(dev, sleep_ok);
+	if (ret == 0)
+		ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
+				      (dev->flags & IFF_PROMISC) != 0,
+				      (dev->flags & IFF_ALLMULTI) != 0,
+				      1, -1, sleep_ok);
+	return ret;
+}
+
+/*
+ * Set the current receive modes on the device.
+ */
+static void cxgb4vf_set_rxmode(struct net_device *dev)
+{
+	/* unfortunately we can't return errors to the stack */
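+	/* ->ndo_set_rx_mode runs with the address list lock held: no sleeping */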
+	set_rxmode(dev, -1, false);
+}
+
+/*
+ * Find the entry in the interrupt holdoff timer value array which comes
+ * closest to the specified interrupt holdoff value.
+ */
+static int closest_timer(const struct sge *s, int us)
+{
+	int i, timer_idx = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+		int delta = us - s->timer_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			timer_idx = i;
+		}
+	}
+	return timer_idx;
+}
+
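+/*
+ * Find the entry in the interrupt holdoff packet count array which comes
+ * closest to the specified packet count threshold.
+ */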
+static int closest_thres(const struct sge *s, int thres)
+{
+	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+		delta = thres - s->counter_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			pktcnt_idx = i;
+		}
+	}
+	return pktcnt_idx;
+}
+
+/*
+ * Return a queue's interrupt hold-off time in us.  0 means no timer.
+ */
+static unsigned int qtimer_val(const struct adapter *adapter,
+			       const struct sge_rspq *rspq)
+{
+	unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
+
+	return timer_idx < SGE_NTIMERS
+		? adapter->sge.timer_val[timer_idx]
+		: 0;
+}
+
+/**
+ *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
+ *	@adapter: the adapter
+ *	@rspq: the RX response queue
+ *	@us: the hold-off time in us, or 0 to disable timer
+ *	@cnt: the hold-off packet count, or 0 to disable counter
+ *
+ *	Sets an RX response queue's interrupt hold-off time and packet count.
+ *	At least one of the two needs to be enabled for the queue to generate
+ *	interrupts.
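+ *	For example, us = 5 and cnt = 8 select the configured holdoff timer
+ *	closest to 5us and the packet count threshold closest to 8 packets.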
+ */
+static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
+			       unsigned int us, unsigned int cnt)
+{
+	unsigned int timer_idx;
+
+	/*
+	 * If both the interrupt holdoff timer and count are specified as
+	 * zero, default to a holdoff count of 1 ...
+	 */
+	if ((us | cnt) == 0)
+		cnt = 1;
+
+	/*
+	 * If an interrupt holdoff count has been specified, then find the
+	 * closest configured holdoff count and use that.  If the response
+	 * queue has already been created, then update its queue context
+	 * parameters ...
+	 */
+	if (cnt) {
+		int err;
+		u32 v, pktcnt_idx;
+
+		pktcnt_idx = closest_thres(&adapter->sge, cnt);
+		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
+			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+			    FW_PARAMS_PARAM_X(
+					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+			    FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
+			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
+			if (err)
+				return err;
+		}
+		rspq->pktcnt_idx = pktcnt_idx;
+	}
+
+	/*
+	 * Compute the closest holdoff timer index from the supplied holdoff
+	 * timer value.
+	 */
+	timer_idx = (us == 0
+		     ? SGE_TIMER_RSTRT_CNTR
+		     : closest_timer(&adapter->sge, us));
+
+	/*
+	 * Update the response queue's interrupt coalescing parameters and
+	 * return success.
+	 */
+	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
+			     (cnt > 0 ? QINTR_CNT_EN : 0));
+	return 0;
+}
+
+/*
+ * Return a version number to identify the type of adapter.  The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ */
+static inline unsigned int mk_adap_vers(const struct adapter *adapter)
+{
+	/*
+	 * Chip version 4, revision 0x3f (cxgb4vf).
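+	 * That is, 4 | (0x3f << 10) == 0xfc04.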
+	 */
+	return 4 | (0x3f << 10);
+}
+
+/*
+ * Execute the specified ioctl command.
+ */
+static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	    /*
+	     * The VF Driver doesn't have access to any of the other
+	     * common Ethernet device ioctl()'s (like reading/writing
+	     * PHY registers, etc.)
+	     */
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * Change the device's MTU.
+ */
+static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* accommodate SACK */
+	if (new_mtu < 81)
+		return -EINVAL;
+
+	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
+			      -1, -1, -1, -1, true);
+	if (!ret)
+		dev->mtu = new_mtu;
+	return ret;
+}
+
+/*
+ * Change the device's MAC address.
+ */
+static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
+{
+	int ret;
+	struct sockaddr *addr = _addr;
+	struct port_info *pi = netdev_priv(dev);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
+	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
+			      addr->sa_data, true);
+	if (ret < 0)
+		return ret;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	pi->xact_addr_filt = ret;
+	return 0;
+}
+
+/*
+ * Return a TX Queue on which to send the specified skb.
+ */
+static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	/*
+	 * XXX For now just use the default hash but we probably want to
+	 * XXX look at other possibilities ...
+	 */
+	return skb_tx_hash(dev, skb);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Poll all of our receive queues.  This is called outside of normal interrupt
+ * context.
+ */
+static void cxgb4vf_poll_controller(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	if (adapter->flags & USING_MSIX) {
+		struct sge_eth_rxq *rxq;
+		int nqsets;
+
+		rxq = &adapter->sge.ethrxq[pi->first_qset];
+		for (nqsets = pi->nqsets; nqsets; nqsets--) {
+			t4vf_sge_intr_msix(0, &rxq->rspq);
+			rxq++;
+		}
+	} else
+		t4vf_intr_handler(adapter)(0, adapter);
+}
+#endif
+
+/*
+ * Ethtool operations.
+ * ===================
+ *
+ * Note that we don't support any ethtool operations which change the physical
+ * state of the port to which we're linked.
+ */
+
+/*
+ * Return current port link settings.
+ */
+static int cxgb4vf_get_settings(struct net_device *dev,
+				struct ethtool_cmd *cmd)
+{
+	const struct port_info *pi = netdev_priv(dev);
+
+	cmd->supported = pi->link_cfg.supported;
+	cmd->advertising = pi->link_cfg.advertising;
+	cmd->speed = netif_carrier_ok(dev) ? pi->link_cfg.speed : -1;
+	cmd->duplex = DUPLEX_FULL;
+
+	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+	cmd->phy_address = pi->port_id;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = pi->link_cfg.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+/*
+ * Return our driver information.
+ */
+static void cxgb4vf_get_drvinfo(struct net_device *dev,
+				struct ethtool_drvinfo *drvinfo)
+{
+	struct adapter *adapter = netdev2adap(dev);
+
+	strcpy(drvinfo->driver, KBUILD_MODNAME);
+	strcpy(drvinfo->version, DRV_VERSION);
+	strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
+		 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
+		 FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev),
+		 FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev),
+		 FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev),
+		 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev),
+		 FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev),
+		 FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev),
+		 FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev));
+}
+
+/*
+ * Return current adapter message level.
+ */
+static u32 cxgb4vf_get_msglevel(struct net_device *dev)
+{
+	return netdev2adap(dev)->msg_enable;
+}
+
+/*
+ * Set current adapter message level.
+ */
+static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
+{
+	netdev2adap(dev)->msg_enable = msglevel;
+}
+
+/*
+ * Return the device's current Queue Set ring size parameters along with the
+ * allowed maximum values.  Since ethtool doesn't understand the concept of
+ * multi-queue devices, we just return the current values associated with the
+ * first Queue Set.
+ */
+static void cxgb4vf_get_ringparam(struct net_device *dev,
+				  struct ethtool_ringparam *rp)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	const struct sge *s = &pi->adapter->sge;
+
+	rp->rx_max_pending = MAX_RX_BUFFERS;
+	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+	rp->rx_jumbo_max_pending = 0;
+	rp->tx_max_pending = MAX_TXQ_ENTRIES;
+
+	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
+	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+	rp->rx_jumbo_pending = 0;
+	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
+}
+
+/*
+ * Set the Queue Set ring size parameters for the device.  Again, since
+ * ethtool doesn't allow for the concept of multiple queues per device, we'll
+ * apply these new values across all of the Queue Sets associated with the
+ * device -- after vetting them of course!
+ */
+static int cxgb4vf_set_ringparam(struct net_device *dev,
+				 struct ethtool_ringparam *rp)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	struct sge *s = &adapter->sge;
+	int qs;
+
+	if (rp->rx_pending > MAX_RX_BUFFERS ||
+	    rp->rx_jumbo_pending ||
+	    rp->tx_pending > MAX_TXQ_ENTRIES ||
+	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+	    rp->rx_pending < MIN_FL_ENTRIES ||
+	    rp->tx_pending < MIN_TXQ_ENTRIES)
+		return -EINVAL;
+
+	if (adapter->flags & FULL_INIT_DONE)
+		return -EBUSY;
+
+	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
+		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
+		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
+		s->ethtxq[qs].q.size = rp->tx_pending;
+	}
+	return 0;
+}
+
+/*
+ * Return the interrupt holdoff timer and count for the first Queue Set on the
+ * device.  Our extension ioctl() (the cxgbtool interface) allows the
+ * interrupt holdoff timer to be read on all of the device's Queue Sets.
+ */
+static int cxgb4vf_get_coalesce(struct net_device *dev,
+				struct ethtool_coalesce *coalesce)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	const struct adapter *adapter = pi->adapter;
+	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
+
+	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
+	coalesce->rx_max_coalesced_frames =
+		((rspq->intr_params & QINTR_CNT_EN)
+		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
+		 : 0);
+	return 0;
+}
+
+/*
+ * Set the RX interrupt holdoff timer and count for the first Queue Set on the
+ * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
+ * the interrupt holdoff timer on any of the device's Queue Sets.
+ */
+static int cxgb4vf_set_coalesce(struct net_device *dev,
+				struct ethtool_coalesce *coalesce)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	return set_rxq_intr_params(adapter,
+				   &adapter->sge.ethrxq[pi->first_qset].rspq,
+				   coalesce->rx_coalesce_usecs,
+				   coalesce->rx_max_coalesced_frames);
+}
+
+/*
+ * Report current port link pause parameter settings.
+ */
+static void cxgb4vf_get_pauseparam(struct net_device *dev,
+				   struct ethtool_pauseparam *pauseparam)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
+	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
+}
+
+/*
+ * Return whether RX Checksum Offloading is currently enabled for the device.
+ */
+static u32 cxgb4vf_get_rx_csum(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	return (pi->rx_offload & RX_CSO) != 0;
+}
+
+/*
+ * Turn RX Checksum Offloading on or off for the device.
+ */
+static int cxgb4vf_set_rx_csum(struct net_device *dev, u32 csum)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	if (csum)
+		pi->rx_offload |= RX_CSO;
+	else
+		pi->rx_offload &= ~RX_CSO;
+	return 0;
+}
+
+/*
+ * Identify the port by blinking the port's LED.
+ */
+static int cxgb4vf_phys_id(struct net_device *dev, u32 id)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	return t4vf_identify_port(pi->adapter, pi->viid, 5);
+}
+
+/*
+ * Port stats maintained per queue of the port.
+ */
+struct queue_port_stats {
+	u64 tso;
+	u64 tx_csum;
+	u64 rx_csum;
+	u64 vlan_ex;
+	u64 vlan_ins;
+};
+
+/*
+ * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
+ * these need to match the order of statistics returned by
+ * t4vf_get_port_stats().
+ */
+static const char stats_strings[][ETH_GSTRING_LEN] = {
+	/*
+	 * These must match the layout of the t4vf_port_stats structure.
+	 */
+	"TxBroadcastBytes  ",
+	"TxBroadcastFrames ",
+	"TxMulticastBytes  ",
+	"TxMulticastFrames ",
+	"TxUnicastBytes    ",
+	"TxUnicastFrames   ",
+	"TxDroppedFrames   ",
+	"TxOffloadBytes    ",
+	"TxOffloadFrames   ",
+	"RxBroadcastBytes  ",
+	"RxBroadcastFrames ",
+	"RxMulticastBytes  ",
+	"RxMulticastFrames ",
+	"RxUnicastBytes    ",
+	"RxUnicastFrames   ",
+	"RxErrorFrames     ",
+
+	/*
+	 * These are accumulated per-queue statistics and must match the
+	 * order of the fields in the queue_port_stats structure.
+	 */
+	"TSO               ",
+	"TxCsumOffload     ",
+	"RxCsumGood        ",
+	"VLANextractions   ",
+	"VLANinsertions    ",
+};
+
+/*
+ * Return the number of statistics in the specified statistics set.
+ */
+static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(stats_strings);
+	default:
+		return -EOPNOTSUPP;
+	}
+	/*NOTREACHED*/
+}
+
+/*
+ * Return the strings for the specified statistics set.
+ */
+static void cxgb4vf_get_strings(struct net_device *dev,
+				u32 sset,
+				u8 *data)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		memcpy(data, stats_strings, sizeof(stats_strings));
+		break;
+	}
+}
+
+/*
+ * Small utility routine to accumulate queue statistics across the queues of
+ * a "port".
+ */
+static void collect_sge_port_stats(const struct adapter *adapter,
+				   const struct port_info *pi,
+				   struct queue_port_stats *stats)
+{
+	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
+	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
+	int qs;
+
+	memset(stats, 0, sizeof(*stats));
+	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
+		stats->tso += txq->tso;
+		stats->tx_csum += txq->tx_cso;
+		stats->rx_csum += rxq->stats.rx_cso;
+		stats->vlan_ex += rxq->stats.vlan_ex;
+		stats->vlan_ins += txq->vlan_ins;
+	}
+}
+
+/*
+ * Return the ETH_SS_STATS statistics set.
+ */
+static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
+				      struct ethtool_stats *stats,
+				      u64 *data)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adapter = pi->adapter;
+	int err = t4vf_get_port_stats(adapter, pi->pidx,
+				      (struct t4vf_port_stats *)data);
+	if (err)
+		memset(data, 0, sizeof(struct t4vf_port_stats));
+
+	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
+	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+}
+
+/*
+ * Return the size of our register map.
+ */
+static int cxgb4vf_get_regs_len(struct net_device *dev)
+{
+	return T4VF_REGMAP_SIZE;
+}
+
+/*
+ * Dump a block of registers, start to end inclusive, into a buffer.
+ */
+static void reg_block_dump(struct adapter *adapter, void *regbuf,
+			   unsigned int start, unsigned int end)
+{
+	u32 *bp = regbuf + start - T4VF_REGMAP_START;
+
+	for ( ; start <= end; start += sizeof(u32)) {
+		/*
+		 * Avoid reading the Mailbox Control register since that
+		 * can trigger a Mailbox Ownership Arbitration cycle and
+		 * interfere with communication with the firmware.
+		 */
+		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
+			*bp++ = 0xffff;
+		else
+			*bp++ = t4_read_reg(adapter, start);
+	}
+}
+
+/*
+ * Copy our entire register map into the provided buffer.
+ */
+static void cxgb4vf_get_regs(struct net_device *dev,
+			     struct ethtool_regs *regs,
+			     void *regbuf)
+{
+	struct adapter *adapter = netdev2adap(dev);
+
+	regs->version = mk_adap_vers(adapter);
+
+	/*
+	 * Fill in register buffer with our register map.
+	 */
+	memset(regbuf, 0, T4VF_REGMAP_SIZE);
+
+	reg_block_dump(adapter, regbuf,
+		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
+		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
+	reg_block_dump(adapter, regbuf,
+		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
+		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
+	reg_block_dump(adapter, regbuf,
+		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
+		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST);
+	reg_block_dump(adapter, regbuf,
+		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
+		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
+
+	reg_block_dump(adapter, regbuf,
+		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
+		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
+}
+
+/*
+ * Report current Wake On LAN settings.
+ */
+static void cxgb4vf_get_wol(struct net_device *dev,
+			    struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/*
+ * Set TCP Segmentation Offloading feature capabilities.
+ */
+static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
+{
+	if (tso)
+		dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+	else
+		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+	return 0;
+}
+
+static struct ethtool_ops cxgb4vf_ethtool_ops = {
+	.get_settings		= cxgb4vf_get_settings,
+	.get_drvinfo		= cxgb4vf_get_drvinfo,
+	.get_msglevel		= cxgb4vf_get_msglevel,
+	.set_msglevel		= cxgb4vf_set_msglevel,
+	.get_ringparam		= cxgb4vf_get_ringparam,
+	.set_ringparam		= cxgb4vf_set_ringparam,
+	.get_coalesce		= cxgb4vf_get_coalesce,
+	.set_coalesce		= cxgb4vf_set_coalesce,
+	.get_pauseparam		= cxgb4vf_get_pauseparam,
+	.get_rx_csum		= cxgb4vf_get_rx_csum,
+	.set_rx_csum		= cxgb4vf_set_rx_csum,
+	.set_tx_csum		= ethtool_op_set_tx_ipv6_csum,
+	.set_sg			= ethtool_op_set_sg,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= cxgb4vf_get_strings,
+	.phys_id		= cxgb4vf_phys_id,
+	.get_sset_count		= cxgb4vf_get_sset_count,
+	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
+	.get_regs_len		= cxgb4vf_get_regs_len,
+	.get_regs		= cxgb4vf_get_regs,
+	.get_wol		= cxgb4vf_get_wol,
+	.set_tso		= cxgb4vf_set_tso,
+};
+
+/*
+ * /sys/kernel/debug/cxgb4vf support code and data.
+ * ================================================
+ */
+
+/*
+ * Show SGE Queue Set information.  We display QPL Queue Sets per line.
+ */
+#define QPL	4
+
+static int sge_qinfo_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
+	int qs, r = (uintptr_t)v - 1;
+
+	if (r)
+		seq_putc(seq, '\n');
+
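+	/*
+	 * The S3/S/T/R macros below each emit one row of output: a label
+	 * followed by one field per Queue Set in the current group of QPL.
+	 */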
+	#define S3(fmt_spec, s, v) \
+		do {\
+			seq_printf(seq, "%-12s", s); \
+			for (qs = 0; qs < n; ++qs) \
+				seq_printf(seq, " %16" fmt_spec, v); \
+			seq_putc(seq, '\n'); \
+		} while (0)
+	#define S(s, v)		S3("s", s, v)
+	#define T(s, v)		S3("u", s, txq[qs].v)
+	#define R(s, v)		S3("u", s, rxq[qs].v)
+
+	if (r < eth_entries) {
+		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
+		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
+		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
+
+		S("QType:", "Ethernet");
+		S("Interface:",
+		  (rxq[qs].rspq.netdev
+		   ? rxq[qs].rspq.netdev->name
+		   : "N/A"));
+		S3("d", "Port:",
+		   (rxq[qs].rspq.netdev
+		    ? ((struct port_info *)
+		       netdev_priv(rxq[qs].rspq.netdev))->port_id
+		    : -1));
+		T("TxQ ID:", q.abs_id);
+		T("TxQ size:", q.size);
+		T("TxQ inuse:", q.in_use);
+		T("TxQ PIdx:", q.pidx);
+		T("TxQ CIdx:", q.cidx);
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
+		S3("u", "Intr pktcnt:",
+		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
+		R("RspQ CIdx:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		R("FL ID:", fl.abs_id);
+		R("FL size:", fl.size - MIN_FL_RESID);
+		R("FL avail:", fl.avail);
+		R("FL PIdx:", fl.pidx);
+		R("FL CIdx:", fl.cidx);
+		return 0;
+	}
+
+	r -= eth_entries;
+	if (r == 0) {
+		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
+
+		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
+		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
+		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+			   qtimer_val(adapter, evtq));
+		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+			   adapter->sge.counter_val[evtq->pktcnt_idx]);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
+	} else if (r == 1) {
+		const struct sge_rspq *intrq = &adapter->sge.intrq;
+
+		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
+		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
+		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+			   qtimer_val(adapter, intrq));
+		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+			   adapter->sge.counter_val[intrq->pktcnt_idx]);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
+	}
+
+	#undef R
+	#undef T
+	#undef S
+	#undef S3
+
+	return 0;
+}
+
+/*
+ * Return the number of "entries" in our "file".  We group the multi-Queue
+ * sections with QPL Queue Sets per "entry".  The sections of the output are:
+ *
+ *     Ethernet RX/TX Queue Sets
+ *     Firmware Event Queue
+ *     Forwarded Interrupt Queue (if in MSI mode)
+ */
+static int sge_queue_entries(const struct adapter *adapter)
+{
+	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
+		((adapter->flags & USING_MSI) != 0);
+}
+
+static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
+{
+	int entries = sge_queue_entries(seq->private);
+
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_queue_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int entries = sge_queue_entries(seq->private);
+
+	++*pos;
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qinfo_seq_ops = {
+	.start = sge_queue_start,
+	.next  = sge_queue_next,
+	.stop  = sge_queue_stop,
+	.show  = sge_qinfo_show
+};
+
+static int sge_qinfo_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &sge_qinfo_seq_ops);
+
+	if (!res) {
+		struct seq_file *seq = file->private_data;
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations sge_qinfo_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = sge_qinfo_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * Show SGE Queue Set statistics.  We display QPL Queue Sets per line.
+ */
+#define QPL	4
+
+static int sge_qstats_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
+	int qs, r = (uintptr_t)v - 1;
+
+	if (r)
+		seq_putc(seq, '\n');
+
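+	/*
+	 * As in sge_qinfo_show(), the S3/T3/R3 macros below each emit one
+	 * row: a label followed by one statistic per Queue Set in the group.
+	 */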
+	#define S3(fmt, s, v) \
+		do { \
+			seq_printf(seq, "%-16s", s); \
+			for (qs = 0; qs < n; ++qs) \
+				seq_printf(seq, " %8" fmt, v); \
+			seq_putc(seq, '\n'); \
+		} while (0)
+	#define S(s, v)		S3("s", s, v)
+
+	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
+	#define T(s, v)		T3("lu", s, v)
+
+	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
+	#define R(s, v)		R3("lu", s, v)
+
+	if (r < eth_entries) {
+		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
+		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
+		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
+
+		S("QType:", "Ethernet");
+		S("Interface:",
+		  (rxq[qs].rspq.netdev
+		   ? rxq[qs].rspq.netdev->name
+		   : "N/A"));
+		R3("u", "RspQNullInts", rspq.unhandled_irqs);
+		R("RxPackets:", stats.pkts);
+		R("RxCSO:", stats.rx_cso);
+		R("VLANxtract:", stats.vlan_ex);
+		R("LROmerged:", stats.lro_merged);
+		R("LROpackets:", stats.lro_pkts);
+		R("RxDrops:", stats.rx_drops);
+		T("TSO:", tso);
+		T("TxCSO:", tx_cso);
+		T("VLANins:", vlan_ins);
+		T("TxQFull:", q.stops);
+		T("TxQRestarts:", q.restarts);
+		T("TxMapErr:", mapping_err);
+		R("FLAllocErr:", fl.alloc_failed);
+		R("FLLrgAlcErr:", fl.large_alloc_failed);
+		R("FLStarving:", fl.starving);
+		return 0;
+	}
+
+	r -= eth_entries;
+	if (r == 0) {
+		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
+
+		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
+		/* no real response queue statistics available to display */
+		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
+		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
+	} else if (r == 1) {
+		const struct sge_rspq *intrq = &adapter->sge.intrq;
+
+		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
+		/* no real response queue statistics available to display */
+		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
+		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
+	}
+
+	#undef R
+	#undef T
+	#undef S
+	#undef R3
+	#undef T3
+	#undef S3
+
+	return 0;
+}
+
+/*
+ * Return the number of "entries" in our "file".  We group the multi-Queue
+ * sections with QPL Queue Sets per "entry".  The sections of the output are:
+ *
+ *     Ethernet RX/TX Queue Sets
+ *     Firmware Event Queue
+ *     Forwarded Interrupt Queue (if in MSI mode)
+ */
+static int sge_qstats_entries(const struct adapter *adapter)
+{
+	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
+		((adapter->flags & USING_MSI) != 0);
+}
+
+static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
+{
+	int entries = sge_qstats_entries(seq->private);
+
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_qstats_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int entries = sge_qstats_entries(seq->private);
+
+	(*pos)++;
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qstats_seq_ops = {
+	.start = sge_qstats_start,
+	.next  = sge_qstats_next,
+	.stop  = sge_qstats_stop,
+	.show  = sge_qstats_show
+};
+
+static int sge_qstats_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &sge_qstats_seq_ops);
+
+	if (res == 0) {
+		struct seq_file *seq = file->private_data;
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations sge_qstats_proc_fops = {
+	.owner   = THIS_MODULE,
+	.open    = sge_qstats_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * Show PCI-E SR-IOV Virtual Function Resource Limits.
+ */
+static int resources_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	struct vf_resources *vfres = &adapter->params.vfres;
+
+	#define S(desc, fmt, var) \
+		seq_printf(seq, "%-60s " fmt "\n", \
+			   desc " (" #var "):", vfres->var)
+
+	S("Virtual Interfaces", "%d", nvi);
+	S("Egress Queues", "%d", neq);
+	S("Ethernet Control", "%d", nethctrl);
+	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
+	S("Ingress Queues", "%d", niq);
+	S("Traffic Class", "%d", tc);
+	S("Port Access Rights Mask", "%#x", pmask);
+	S("MAC Address Filters", "%d", nexactf);
+	S("Firmware Command Read Capabilities", "%#x", r_caps);
+	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
+
+	#undef S
+
+	return 0;
+}
+
+static int resources_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, resources_show, inode->i_private);
+}
+
+static const struct file_operations resources_proc_fops = {
+	.owner   = THIS_MODULE,
+	.open    = resources_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * Show Virtual Interfaces.
+ */
+static int interfaces_show(struct seq_file *seq, void *v)
+{
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Interface  Port   VIID\n");
+	} else {
+		struct adapter *adapter = seq->private;
+		int pidx = (uintptr_t)v - 2;
+		struct net_device *dev = adapter->port[pidx];
+		struct port_info *pi = netdev_priv(dev);
+
+		seq_printf(seq, "%9s  %4d  %#5x\n",
+			   dev->name, pi->port_id, pi->viid);
+	}
+	return 0;
+}
+
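+/*
+ * seq_file iterator support.  Position 0 emits the header line via
+ * SEQ_START_TOKEN; positions 1..nports map to ports 0..nports-1.  The
+ * iterator cookie is the position plus one, hence the "- 2" above.
+ */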
+static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
+{
+	return pos <= adapter->params.nports
+		? (void *)(uintptr_t)(pos + 1)
+		: NULL;
+}
+
+static void *interfaces_start(struct seq_file *seq, loff_t *pos)
+{
+	return *pos
+		? interfaces_get_idx(seq->private, *pos)
+		: SEQ_START_TOKEN;
+}
+
+static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return interfaces_get_idx(seq->private, *pos);
+}
+
+static void interfaces_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations interfaces_seq_ops = {
+	.start = interfaces_start,
+	.next  = interfaces_next,
+	.stop  = interfaces_stop,
+	.show  = interfaces_show
+};
+
+static int interfaces_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &interfaces_seq_ops);
+
+	if (res == 0) {
+		struct seq_file *seq = file->private_data;
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations interfaces_proc_fops = {
+	.owner   = THIS_MODULE,
+	.open    = interfaces_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * /sys/kernel/debug/cxgb4vf/ files list.
+ */
+struct cxgb4vf_debugfs_entry {
+	const char *name;		/* name of debugfs node */
+	mode_t mode;			/* file system mode */
+	const struct file_operations *fops;
+};
+
+static struct cxgb4vf_debugfs_entry debugfs_files[] = {
+	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
+	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
+	{ "resources",  S_IRUGO, &resources_proc_fops },
+	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
+};
+
+/*
+ * Module and device initialization and cleanup code.
+ * ==================================================
+ */
+
+/*
+ * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
+ * directory (debugfs_root) has already been set up.
+ */
+static int __devinit setup_debugfs(struct adapter *adapter)
+{
+	int i;
+
+	BUG_ON(adapter->debugfs_root == NULL);
+
+	/*
+	 * Debugfs support is best effort.
+	 */
+	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+		(void)debugfs_create_file(debugfs_files[i].name,
+				  debugfs_files[i].mode,
+				  adapter->debugfs_root,
+				  (void *)adapter,
+				  debugfs_files[i].fops);
+
+	return 0;
+}
+
+/*
+ * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
+ * it to our caller to tear down the directory (debugfs_root).
+ */
+static void __devexit cleanup_debugfs(struct adapter *adapter)
+{
+	BUG_ON(adapter->debugfs_root == NULL);
+
+	/*
+	 * Unlike our sister routine cleanup_proc(), we don't need to remove
+	 * individual entries because a call will be made to
+	 * debugfs_remove_recursive().  We just need to clean up any ancillary
+	 * persistent state.
+	 */
+	/* nothing to do */
+}
+
+/*
+ * Perform early "adapter" initialization.  This is where we discover what
+ * adapter parameters we're going to be using and initialize basic adapter
+ * hardware support.
+ */
+static int adap_init0(struct adapter *adapter)
+{
+	struct vf_resources *vfres = &adapter->params.vfres;
+	struct sge_params *sge_params = &adapter->params.sge;
+	struct sge *s = &adapter->sge;
+	unsigned int ethqsets;
+	int err;
+
+	/*
+	 * Wait for the device to become ready before proceeding ...
+	 */
+	err = t4vf_wait_dev_ready(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "device didn't become ready:"
+			" err=%d\n", err);
+		return err;
+	}
+
+	/*
+	 * Grab basic operational parameters.  These will predominantly have
+	 * been set up by the Physical Function Driver or will be hard coded
+	 * into the adapter.  We just have to live with them ...  Note that
+	 * we _must_ get our VPD parameters before our SGE parameters because
+	 * we need to know the adapter's core clock from the VPD in order to
+	 * properly decode the SGE Timer Values.
+	 */
+	err = t4vf_get_dev_params(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" device parameters: err=%d\n", err);
+		return err;
+	}
+	err = t4vf_get_vpd_params(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" VPD parameters: err=%d\n", err);
+		return err;
+	}
+	err = t4vf_get_sge_params(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" SGE parameters: err=%d\n", err);
+		return err;
+	}
+	err = t4vf_get_rss_glb_config(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" RSS parameters: err=%d\n", err);
+		return err;
+	}
+	if (adapter->params.rss.mode !=
+	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
+			" mode %d\n", adapter->params.rss.mode);
+		return -EINVAL;
+	}
+	err = t4vf_sge_init(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
+			" err=%d\n", err);
+		return err;
+	}
+
+	/*
+	 * Retrieve our RX interrupt holdoff timer values and counter
+	 * threshold values from the SGE parameters.
+	 */
+	s->timer_val[0] = core_ticks_to_us(adapter,
+		TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
+	s->timer_val[1] = core_ticks_to_us(adapter,
+		TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
+	s->timer_val[2] = core_ticks_to_us(adapter,
+		TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
+	s->timer_val[3] = core_ticks_to_us(adapter,
+		TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
+	s->timer_val[4] = core_ticks_to_us(adapter,
+		TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
+	s->timer_val[5] = core_ticks_to_us(adapter,
+		TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
+
+	s->counter_val[0] =
+		THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[1] =
+		THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[2] =
+		THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[3] =
+		THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
+
+	/*
+	 * Grab our Virtual Interface resource allocation, extract the
+	 * features that we're interested in and do a bit of sanity testing on
+	 * what we discover.
+	 */
+	err = t4vf_get_vfres(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to get virtual interface"
+			" resources: err=%d\n", err);
+		return err;
+	}
+
+	/*
+	 * The number of "ports" which we support is equal to the number of
+	 * Virtual Interfaces with which we've been provisioned.
+	 */
+	adapter->params.nports = vfres->nvi;
+	if (adapter->params.nports > MAX_NPORTS) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
+			 " virtual interfaces\n", MAX_NPORTS,
+			 adapter->params.nports);
+		adapter->params.nports = MAX_NPORTS;
+	}
+
+	/*
+	 * We need to reserve a number of the ingress queues with Free List
+	 * and Interrupt capabilities for special interrupt purposes (like
+	 * asynchronous firmware messages, or forwarded interrupts if we're
+	 * using MSI).  The rest of the FL/Intr-capable ingress queues will be
+	 * matched up one-for-one with Ethernet/Control egress queues in order
+	 * to form "Queue Sets" which will be apportioned between the "ports".
+	 * For each Queue Set, we'll need the ability to allocate two Egress
+	 * Contexts -- one for the Ingress Queue Free List and one for the TX
+	 * Ethernet Queue.
+	 */
+	ethqsets = vfres->niqflint - INGQ_EXTRAS;
+	if (vfres->nethctrl != ethqsets) {
+		dev_warn(adapter->pdev_dev, "unequal number of [available]"
+			 " ingress/egress queues (%d/%d); using minimum for"
+			 " number of Queue Sets\n", ethqsets, vfres->nethctrl);
+		ethqsets = min(vfres->nethctrl, ethqsets);
+	}
+	if (vfres->neq < ethqsets*2) {
+		dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
+			 " to support Queue Sets (%d); reducing allowed Queue"
+			 " Sets\n", vfres->neq, ethqsets);
+		ethqsets = vfres->neq/2;
+	}
+	if (ethqsets > MAX_ETH_QSETS) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
+			 " Sets\n", MAX_ETH_QSETS, ethqsets);
+		ethqsets = MAX_ETH_QSETS;
+	}
+	if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
+		dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
+			 " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
+	}
+	adapter->sge.max_ethqsets = ethqsets;
+
+	/*
+	 * Check for various parameter sanity issues.  Most checks simply
+	 * result in us using fewer resources than our provisioning but we
+	 * do need at least one "port" with which to work ...
+	 */
+	if (adapter->sge.max_ethqsets < adapter->params.nports) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d available"
+			 " virtual interfaces (too few Queue Sets)\n",
+			 adapter->sge.max_ethqsets, adapter->params.nports);
+		adapter->params.nports = adapter->sge.max_ethqsets;
+	}
+	if (adapter->params.nports == 0) {
+		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
+			"usable!\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
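+/*
+ * Initialize a response queue's interrupt holdoff parameters, entry size,
+ * and capacity before the queue itself is allocated.
+ */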
+static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
+			     u8 pkt_cnt_idx, unsigned int size,
+			     unsigned int iqe_size)
+{
+	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
+			     (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
+	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
+			    ? pkt_cnt_idx
+			    : 0);
+	rspq->iqe_len = iqe_size;
+	rspq->size = size;
+}
+
+/*
+ * Perform default configuration of DMA queues depending on the number and
+ * type of ports we found and the number of available CPUs.  Most settings can
+ * be modified by the admin via ethtool and cxgbtool prior to the adapter
+ * being brought up for the first time.
+ */
+static void __devinit cfg_queues(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int q10g, n10g, qidx, pidx, qs;
+
+	/*
+	 * We should not be called till we know how many Queue Sets we can
+	 * support.  In particular, this means that we need to know what kind
+	 * of interrupts we'll be using ...
+	 */
+	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+
+	/*
+	 * Count the number of 10GbE Virtual Interfaces that we have.
+	 */
+	n10g = 0;
+	for_each_port(adapter, pidx)
+		n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+
+	/*
+	 * We default to 1 queue per non-10G port and up to # of cores queues
+	 * per 10G port.
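+	 * For example, with two 10G ports, no 1G ports, eight available
+	 * Queue Sets and at least four online CPUs, each 10G port gets four
+	 * Queue Sets.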
+	 */
+	if (n10g == 0)
+		q10g = 0;
+	else {
+		int n1g = (adapter->params.nports - n10g);
+		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
+		if (q10g > num_online_cpus())
+			q10g = num_online_cpus();
+	}
+
+	/*
+	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
+	 * The layout will be established in setup_sge_queues() when the
+	 * adapter is brought up for the first time.
+	 */
+	qidx = 0;
+	for_each_port(adapter, pidx) {
+		struct port_info *pi = adap2pinfo(adapter, pidx);
+
+		pi->first_qset = qidx;
+		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+		qidx += pi->nqsets;
+	}
+	s->ethqsets = qidx;
+
+	/*
+	 * Set up default Queue Set parameters ...  Start off with the
+	 * shortest interrupt holdoff timer.
+	 */
+	for (qs = 0; qs < s->max_ethqsets; qs++) {
+		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
+		struct sge_eth_txq *txq = &s->ethtxq[qs];
+
+		init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES);
+		rxq->fl.size = 72;
+		txq->q.size = 1024;
+	}
+
+	/*
+	 * The firmware event queue is used for link state changes and
+	 * notifications of TX DMA completions.
+	 */
+	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512,
+		  L1_CACHE_BYTES);
+
+	/*
+	 * The forwarded interrupt queue is used when we're in MSI interrupt
+	 * mode.  In this mode all interrupts associated with RX queues will
+	 * be forwarded to a single queue which we'll associate with our MSI
+	 * interrupt vector.  The messages dropped in the forwarded interrupt
+	 * queue will indicate which ingress queue needs servicing ...  This
+	 * queue needs to be large enough to accommodate all of the ingress
+	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
+	 * from equalling the CIDX if every ingress queue has an outstanding
+	 * interrupt).  The queue doesn't need to be any larger because no
+	 * ingress queue will ever have more than one outstanding interrupt at
+	 * any time ...
+	 */
+	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
+		  L1_CACHE_BYTES);
+}
+
+/*
+ * Reduce the number of Ethernet queues across all ports to at most n.
+ * n provides at least one queue per port.
+ */
+static void __devinit reduce_ethqs(struct adapter *adapter, int n)
+{
+	int i;
+	struct port_info *pi;
+
+	/*
+	 * While we have too many active Ethernet Queue Sets, iterate across the
+	 * "ports" and reduce their individual Queue Set allocations.
+	 */
+	BUG_ON(n < adapter->params.nports);
+	while (n < adapter->sge.ethqsets)
+		for_each_port(adapter, i) {
+			pi = adap2pinfo(adapter, i);
+			if (pi->nqsets > 1) {
+				pi->nqsets--;
+				adapter->sge.ethqsets--;
+				if (adapter->sge.ethqsets <= n)
+					break;
+			}
+		}
+
+	/*
+	 * Reassign the starting Queue Sets for each of the "ports" ...
+	 */
+	n = 0;
+	for_each_port(adapter, i) {
+		pi = adap2pinfo(adapter, i);
+		pi->first_qset = n;
+		n += pi->nqsets;
+	}
+}
+
+/*
+ * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
+ * we get a separate MSI-X vector for every "Queue Set" plus any extras we
+ * need.  Minimally we need one for every Virtual Interface plus those needed
+ * for our "extras".  Note that this process may lower the maximum number of
+ * allowed Queue Sets ...
+ */
+static int __devinit enable_msix(struct adapter *adapter)
+{
+	int i, err, want, need;
+	struct msix_entry entries[MSIX_ENTRIES];
+	struct sge *s = &adapter->sge;
+
+	for (i = 0; i < MSIX_ENTRIES; ++i)
+		entries[i].entry = i;
+
+	/*
+	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
+	 * plus those needed for our "extras" (for example, the firmware
+	 * message queue).  We _need_ at least one "Queue Set" per Virtual
+	 * Interface plus those needed for our "extras".  So now we get to see
+	 * if the song is right ...
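+	 * For example, with two ports and eight Queue Sets total we would
+	 * want 8 + MSIX_EXTRAS vectors but can run with as few as
+	 * 2 + MSIX_EXTRAS.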
+	 */
+	want = s->max_ethqsets + MSIX_EXTRAS;
+	need = adapter->params.nports + MSIX_EXTRAS;
+	while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
+		want = err;
+
+	if (err == 0) {
+		int nqsets = want - MSIX_EXTRAS;
+		if (nqsets < s->max_ethqsets) {
+			dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
+				 " for %d Queue Sets\n", nqsets);
+			s->max_ethqsets = nqsets;
+			if (nqsets < s->ethqsets)
+				reduce_ethqs(adapter, nqsets);
+		}
+		for (i = 0; i < want; ++i)
+			adapter->msix_info[i].vec = entries[i].vector;
+	} else if (err > 0) {
+		pci_disable_msix(adapter->pdev);
+		dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
+			 " not using MSI-X\n", err);
+	}
+	return err;
+}
+
+#ifdef HAVE_NET_DEVICE_OPS
+static const struct net_device_ops cxgb4vf_netdev_ops	= {
+	.ndo_open		= cxgb4vf_open,
+	.ndo_stop		= cxgb4vf_stop,
+	.ndo_start_xmit		= t4vf_eth_xmit,
+	.ndo_get_stats		= cxgb4vf_get_stats,
+	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
+	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
+	.ndo_select_queue	= cxgb4vf_select_queue,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
+	.ndo_change_mtu		= cxgb4vf_change_mtu,
+	.ndo_vlan_rx_register	= cxgb4vf_vlan_rx_register,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= cxgb4vf_poll_controller,
+#endif
+};
+#endif
+
+/*
+ * "Probe" a device: initialize a device and construct all kernel and driver
+ * state needed to manage the device.  This routine is called "init_one" in
+ * the PF Driver ...
+ */
+static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
+				       const struct pci_device_id *ent)
+{
+	static int version_printed;
+
+	int pci_using_dac;
+	int err, pidx;
+	unsigned int pmask;
+	struct adapter *adapter;
+	struct port_info *pi;
+	struct net_device *netdev;
+
+	/*
+	 * Vet our module parameters.
+	 */
+	if (msi != MSI_MSIX && msi != MSI_MSI) {
+		dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
+			" (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
+			MSI_MSI);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	/*
+	 * Print our driver banner the first time we're called to initialize a
+	 * device.
+	 */
+	if (version_printed == 0) {
+		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+		version_printed = 1;
+	}
+
+	/*
+	 * Reserve PCI resources for the device.  If we can't get them some
+	 * other driver may have already claimed the device ...
+	 */
+	err = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (err) {
+		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+		return err;
+	}
+
+	/*
+	 * Initialize generic PCI device state.
+	 */
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
+		goto err_release_regions;
+	}
+
+	/*
+	 * Set up our DMA mask: try for 64-bit address masking first and
+	 * fall back to 32-bit if we can't get 64 bits ...
+	 */
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err == 0) {
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
+				" coherent allocations\n");
+			goto err_disable_device;
+		}
+		pci_using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err != 0) {
+			dev_err(&pdev->dev, "no usable DMA configuration\n");
+			goto err_disable_device;
+		}
+		pci_using_dac = 0;
+	}
+
+	/*
+	 * Enable bus mastering for the device ...
+	 */
+	pci_set_master(pdev);
+
+	/*
+	 * Allocate our adapter data structure and attach it to the device.
+	 */
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	if (!adapter) {
+		err = -ENOMEM;
+		goto err_disable_device;
+	}
+	pci_set_drvdata(pdev, adapter);
+	adapter->pdev = pdev;
+	adapter->pdev_dev = &pdev->dev;
+
+	/*
+	 * Initialize SMP data synchronization resources.
+	 */
+	spin_lock_init(&adapter->stats_lock);
+
+	/*
+	 * Map our I/O registers in BAR0.
+	 */
+	adapter->regs = pci_ioremap_bar(pdev, 0);
+	if (!adapter->regs) {
+		dev_err(&pdev->dev, "cannot map device registers\n");
+		err = -ENOMEM;
+		goto err_free_adapter;
+	}
+
+	/*
+	 * Initialize adapter level features.
+	 */
+	adapter->name = pci_name(pdev);
+	adapter->msg_enable = dflt_msg_enable;
+	err = adap_init0(adapter);
+	if (err)
+		goto err_unmap_bar;
+
+	/*
+	 * Allocate our "adapter ports" and stitch everything together.
+	 */
+	pmask = adapter->params.vfres.pmask;
+	for_each_port(adapter, pidx) {
+		int port_id, viid;
+
+		/*
+		 * We simplistically allocate our virtual interfaces
+		 * sequentially across the port numbers to which we have
+		 * access rights.  This should be configurable in some manner
+		 * ...
+		 */
+		if (pmask == 0)
+			break;
+		port_id = ffs(pmask) - 1;
+		pmask &= ~(1 << port_id);
+		viid = t4vf_alloc_vi(adapter, port_id);
+		if (viid < 0) {
+			dev_err(&pdev->dev, "cannot allocate VI for port %d:"
+				" err=%d\n", port_id, viid);
+			err = viid;
+			goto err_free_dev;
+		}
+
+		/*
+		 * Allocate our network device and stitch things together.
+		 */
+		netdev = alloc_etherdev_mq(sizeof(struct port_info),
+					   MAX_PORT_QSETS);
+		if (netdev == NULL) {
+			dev_err(&pdev->dev, "cannot allocate netdev for"
+				" port %d\n", port_id);
+			t4vf_free_vi(adapter, viid);
+			err = -ENOMEM;
+			goto err_free_dev;
+		}
+		adapter->port[pidx] = netdev;
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+		pi = netdev_priv(netdev);
+		pi->adapter = adapter;
+		pi->pidx = pidx;
+		pi->port_id = port_id;
+		pi->viid = viid;
+
+		/*
+		 * Initialize the starting state of our "port" and register
+		 * it.
+		 */
+		pi->xact_addr_filt = -1;
+		pi->rx_offload = RX_CSO;
+		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+		netdev->irq = pdev->irq;
+
+		netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+				    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+				    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+				    NETIF_F_GRO);
+		if (pci_using_dac)
+			netdev->features |= NETIF_F_HIGHDMA;
+		netdev->vlan_features =
+			(netdev->features &
+			 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX));
+
+#ifdef HAVE_NET_DEVICE_OPS
+		netdev->netdev_ops = &cxgb4vf_netdev_ops;
+#else
+		netdev->vlan_rx_register = cxgb4vf_vlan_rx_register;
+		netdev->open = cxgb4vf_open;
+		netdev->stop = cxgb4vf_stop;
+		netdev->hard_start_xmit = t4vf_eth_xmit;
+		netdev->get_stats = cxgb4vf_get_stats;
+		netdev->set_rx_mode = cxgb4vf_set_rxmode;
+		netdev->do_ioctl = cxgb4vf_do_ioctl;
+		netdev->change_mtu = cxgb4vf_change_mtu;
+		netdev->set_mac_address = cxgb4vf_set_mac_addr;
+		netdev->select_queue = cxgb4vf_select_queue;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+		netdev->poll_controller = cxgb4vf_poll_controller;
+#endif
+#endif
+		SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
+
+		/*
+		 * Initialize the hardware/software state for the port.
+		 */
+		err = t4vf_port_init(adapter, pidx);
+		if (err) {
+			dev_err(&pdev->dev, "cannot initialize port %d\n",
+				pidx);
+			goto err_free_dev;
+		}
+	}
+
+	/*
+	 * The "card" is now ready to go.  If any errors occur during device
+	 * registration we do not fail the whole "card" but rather proceed
+	 * only with the ports we manage to register successfully.  However we
+	 * must register at least one net device.
+	 */
+	for_each_port(adapter, pidx) {
+		netdev = adapter->port[pidx];
+		if (netdev == NULL)
+			continue;
+
+		err = register_netdev(netdev);
+		if (err) {
+			dev_warn(&pdev->dev, "cannot register net device %s,"
+				 " skipping\n", netdev->name);
+			continue;
+		}
+
+		set_bit(pidx, &adapter->registered_device_map);
+	}
+	if (adapter->registered_device_map == 0) {
+		dev_err(&pdev->dev, "could not register any net devices\n");
+		goto err_free_dev;
+	}
+
+	/*
+	 * Set up our debugfs entries.
+	 */
+	if (cxgb4vf_debugfs_root) {
+		adapter->debugfs_root =
+			debugfs_create_dir(pci_name(pdev),
+					   cxgb4vf_debugfs_root);
+		if (adapter->debugfs_root == NULL)
+			dev_warn(&pdev->dev, "could not create debugfs"
+				 " directory");
+		else
+			setup_debugfs(adapter);
+	}
+
+	/*
+	 * See what interrupts we'll be using.  If we've been configured to
+	 * use MSI-X interrupts, try to enable them but fall back to using
+	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
+	 * get MSI interrupts we bail with the error.
+	 */
+	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
+		adapter->flags |= USING_MSIX;
+	else {
+		err = pci_enable_msi(pdev);
+		if (err) {
+			dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
+				" err=%d\n",
+				msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
+			goto err_free_debugfs;
+		}
+		adapter->flags |= USING_MSI;
+	}
+
+	/*
+	 * Now that we know how many "ports" we have and what their types are,
+	 * and how many Queue Sets we can support, we can configure our queue
+	 * resources.
+	 */
+	cfg_queues(adapter);
+
+	/*
+	 * Print a short notice on the existence and configuration of the new
+	 * VF network device ...
+	 */
+	for_each_port(adapter, pidx) {
+		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
+			 adapter->port[pidx]->name,
+			 (adapter->flags & USING_MSIX) ? "MSI-X" :
+			 (adapter->flags & USING_MSI)  ? "MSI" : "");
+	}
+
+	/*
+	 * Return success!
+	 */
+	return 0;
+
+	/*
+	 * Error recovery and exit code.  Unwind state that's been created
+	 * so far and return the error.
+	 */
+
+err_free_debugfs:
+	if (adapter->debugfs_root) {
+		cleanup_debugfs(adapter);
+		debugfs_remove_recursive(adapter->debugfs_root);
+	}
+
+err_free_dev:
+	for_each_port(adapter, pidx) {
+		netdev = adapter->port[pidx];
+		if (netdev == NULL)
+			continue;
+		pi = netdev_priv(netdev);
+		t4vf_free_vi(adapter, pi->viid);
+		if (test_bit(pidx, &adapter->registered_device_map))
+			unregister_netdev(netdev);
+		free_netdev(netdev);
+	}
+
+err_unmap_bar:
+	iounmap(adapter->regs);
+
+err_free_adapter:
+	kfree(adapter);
+	pci_set_drvdata(pdev, NULL);
+
+err_disable_device:
+	pci_disable_device(pdev);
+	pci_clear_master(pdev);
+
+err_release_regions:
+	pci_release_regions(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+err_out:
+	return err;
+}
+
+/*
+ * "Remove" a device: tear down all kernel and driver state created in the
+ * "probe" routine and quiesce the device (disable interrupts, etc.).  (Note
+ * that this is called "remove_one" in the PF Driver.)
+ */
+static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
+{
+	struct adapter *adapter = pci_get_drvdata(pdev);
+
+	/*
+	 * Tear down driver state associated with device.
+	 */
+	if (adapter) {
+		int pidx;
+
+		/*
+		 * Stop all of our activity.  Unregister network port,
+		 * disable interrupts, etc.
+		 */
+		for_each_port(adapter, pidx)
+			if (test_bit(pidx, &adapter->registered_device_map))
+				unregister_netdev(adapter->port[pidx]);
+		t4vf_sge_stop(adapter);
+		if (adapter->flags & USING_MSIX) {
+			pci_disable_msix(adapter->pdev);
+			adapter->flags &= ~USING_MSIX;
+		} else if (adapter->flags & USING_MSI) {
+			pci_disable_msi(adapter->pdev);
+			adapter->flags &= ~USING_MSI;
+		}
+
+		/*
+		 * Tear down our debugfs entries.
+		 */
+		if (adapter->debugfs_root) {
+			cleanup_debugfs(adapter);
+			debugfs_remove_recursive(adapter->debugfs_root);
+		}
+
+		/*
+		 * Free all of the various resources which we've acquired ...
+		 */
+		t4vf_free_sge_resources(adapter);
+		for_each_port(adapter, pidx) {
+			struct net_device *netdev = adapter->port[pidx];
+			struct port_info *pi;
+
+			if (netdev == NULL)
+				continue;
+
+			pi = netdev_priv(netdev);
+			t4vf_free_vi(adapter, pi->viid);
+			free_netdev(netdev);
+		}
+		iounmap(adapter->regs);
+		kfree(adapter);
+		pci_set_drvdata(pdev, NULL);
+	}
+
+	/*
+	 * Disable the device and release its PCI resources.
+	 */
+	pci_disable_device(pdev);
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+}
+
+/*
+ * PCI Device registration data structures.
+ */
+#define CH_DEVICE(devid, idx) \
+	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
+
+static struct pci_device_id cxgb4vf_pci_tbl[] = {
+	CH_DEVICE(0xb000, 0),	/* PE10K FPGA */
+	CH_DEVICE(0x4800, 0),	/* T440-dbg */
+	CH_DEVICE(0x4801, 0),	/* T420-cr */
+	CH_DEVICE(0x4802, 0),	/* T422-cr */
+	{ 0, }
+};
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
+
+static struct pci_driver cxgb4vf_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= cxgb4vf_pci_tbl,
+	.probe		= cxgb4vf_pci_probe,
+	.remove		= __devexit_p(cxgb4vf_pci_remove),
+};
+
+/*
+ * Initialize global driver state.
+ */
+static int __init cxgb4vf_module_init(void)
+{
+	int ret;
+
+	/* Debugfs support is optional, just warn if this fails */
+	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (!cxgb4vf_debugfs_root)
+		printk(KERN_WARNING KBUILD_MODNAME ": could not create"
+		       " debugfs entry, continuing\n");
+
+	ret = pci_register_driver(&cxgb4vf_driver);
+	if (ret < 0)
+		debugfs_remove(cxgb4vf_debugfs_root);
+	return ret;
+}
+
+/*
+ * Tear down global driver state.
+ */
+static void __exit cxgb4vf_module_exit(void)
+{
+	pci_unregister_driver(&cxgb4vf_driver);
+	debugfs_remove(cxgb4vf_debugfs_root);
+}
+
+module_init(cxgb4vf_module_init);
+module_exit(cxgb4vf_module_exit);
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
new file mode 100644
index 0000000..3a7c02f
--- /dev/null
+++ b/drivers/net/cxgb4vf/sge.c
@@ -0,0 +1,2449 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/dma-mapping.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4fw_api.h"
+#include "../cxgb4/t4_msg.h"
+
+/*
+ * Decoded Adapter Parameters.
+ */
+static u32 FL_PG_ORDER;		/* large page allocation size */
+static u32 STAT_LEN;		/* length of status page at ring end */
+static u32 PKTSHIFT;		/* padding between CPL and packet data */
+static u32 FL_ALIGN;		/* response queue message alignment */
+
+/*
+ * Constants ...
+ */
+enum {
+	/*
+	 * Egress Queue sizes, producer and consumer indices are all in units
+	 * of Egress Context Units bytes.  Note that as far as the hardware is
+	 * concerned, the free list is an Egress Queue (the host produces free
+	 * buffers which the hardware consumes) and free list entries are
+	 * 64-bit PCI DMA addresses.
+	 */
+	EQ_UNIT = SGE_EQ_IDXSIZE,
+	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+
+	/*
+	 * Max number of TX descriptors we clean up at a time.  Should be
+	 * modest as freeing skbs isn't cheap and it happens while holding
+	 * locks.  We just need to free packets faster than they arrive, we
+	 * eventually catch up and keep the amortized cost reasonable.
+	 */
+	MAX_TX_RECLAIM = 16,
+
+	/*
+	 * Max number of Rx buffers we replenish at a time.  Again keep this
+	 * modest, allocating buffers isn't cheap either.
+	 */
+	MAX_RX_REFILL = 16,
+
+	/*
+	 * Period of the Rx queue check timer.  This timer is infrequent as it
+	 * has something to do only when the system experiences severe memory
+	 * shortage.
+	 */
+	RX_QCHECK_PERIOD = (HZ / 2),
+
+	/*
+	 * Period of the TX queue check timer and the maximum number of TX
+	 * descriptors to be reclaimed by the TX timer.
+	 */
+	TX_QCHECK_PERIOD = (HZ / 2),
+	MAX_TIMER_TX_RECLAIM = 100,
+
+	/*
+	 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
+	 * timer will attempt to refill it.
+	 */
+	FL_STARVE_THRES = 4,
+
+	/*
+	 * Suspend an Ethernet TX queue with fewer available descriptors than
+	 * this.  We always want to have room for a maximum sized packet:
+	 * inline immediate data + MAX_SKB_FRAGS. This is the same as
+	 * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
+	 * (see that function and its helpers for a description of the
+	 * calculation).
+	 */
+	ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
+	ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
+				   ((ETHTXQ_MAX_FRAGS-1) & 1) +
+				   2),
+	ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+			  sizeof(struct cpl_tx_pkt_lso_core) +
+			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
+	ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
+
+	ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
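+
+	/*
+	 * As a worked example, assuming MAX_SKB_FRAGS is 18 (as with 4KB
+	 * pages), ETHTXQ_MAX_FRAGS is 19 and ETHTXQ_MAX_SGL_LEN works out to
+	 * (3 * 18)/2 + (18 & 1) + 2 = 27 + 0 + 2 = 29 flits for the SGL part
+	 * of a maximally fragmented packet.
+	 */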
+
+	/*
+	 * Max TX descriptor space we allow for an Ethernet packet to be
+	 * inlined into a WR.  This is limited by the maximum value which
+	 * we can specify for immediate data in the firmware Ethernet TX
+	 * Work Request.
+	 */
+	MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK,
+
+	/*
+	 * Max size of a WR sent through a control TX queue.
+	 */
+	MAX_CTRL_WR_LEN = 256,
+
+	/*
+	 * Maximum amount of data which we'll ever need to inline into a
+	 * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
+	 */
+	MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
+			  ? MAX_IMM_TX_PKT_LEN
+			  : MAX_CTRL_WR_LEN),
+
+	/*
+	 * For incoming packets less than RX_COPY_THRES, we copy the data into
+	 * an skb rather than referencing the data.  We allocate enough
+	 * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
+	 * of the data (header).
+	 */
+	RX_COPY_THRES = 256,
+	RX_PULL_LEN = 128,
+};
+
+/*
+ * Can't define this in the above enum because PKTSHIFT isn't a constant in
+ * the VF Driver ...
+ */
+#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+
+/*
+ * Software state per TX descriptor.
+ */
+struct tx_sw_desc {
+	struct sk_buff *skb;		/* socket buffer of TX data source */
+	struct ulptx_sgl *sgl;		/* scatter/gather list in TX Queue */
+};
+
+/*
+ * Software state per RX Free List descriptor.  We keep track of the allocated
+ * FL page, its size, and its PCI DMA address (if the page is mapped).  The FL
+ * page size and its PCI DMA mapped state are stored in the low bits of the
+ * PCI DMA address as per below.
+ */
+struct rx_sw_desc {
+	struct page *page;		/* Free List page buffer */
+	dma_addr_t dma_addr;		/* PCI DMA address (if mapped) */
+					/*   and flags (see below) */
+};
+
+/*
+ * The low bits of rx_sw_desc.dma_addr have special meaning.  Note that the
+ * SGE also uses the low 4 bits to determine the size of the buffer.  It uses
+ * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
+ * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
+ * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
+ * to the SGE.  Thus, our software state of "is the buffer mapped for DMA" is
+ * maintained in an inverse sense so the hardware never sees that bit high.
+ */
+enum {
+	RX_LARGE_BUF    = 1 << 0,	/* buffer is SGE_FL_BUFFER_SIZE[1] */
+	RX_UNMAPPED_BUF = 1 << 1,	/* buffer is not mapped */
+};
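+
+/*
+ * For example, a large-buffer page DMA mapped at (hypothetical) bus address
+ * 0x12340000 is recorded as 0x12340000 | RX_LARGE_BUF; get_buf_addr() below
+ * strips the flag bits back off and is_buf_mapped() reports true because
+ * RX_UNMAPPED_BUF is clear.
+ */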
+
+/**
+ *	get_buf_addr - return DMA buffer address of software descriptor
+ *	@sdesc: pointer to the software buffer descriptor
+ *
+ *	Return the DMA buffer address of a software descriptor (stripping out
+ *	our low-order flag bits).
+ */
+static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
+{
+	return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+}
+
+/**
+ *	is_buf_mapped - is buffer mapped for DMA?
+ *	@sdesc: pointer to the software buffer descriptor
+ *
+ *	Determine whether the buffer associated with a software descriptor is
+ *	mapped for DMA or not.
+ */
+static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
+{
+	return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
+}
+
+/**
+ *	need_skb_unmap - does the platform need unmapping of sk_buffs?
+ *
+ *	Returns true if the platform needs sk_buff unmapping.  Because the
+ *	result is a compile-time constant, the compiler can optimize away the
+ *	unmapping code when this returns false.
+ */
+static inline int need_skb_unmap(void)
+{
+	/*
+	 * This structure is used to tell if the platform needs buffer
+	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
+	 */
+	struct dummy {
+		DECLARE_PCI_UNMAP_ADDR(addr);
+	};
+
+	return sizeof(struct dummy) != 0;
+}
+
+/**
+ *	txq_avail - return the number of available slots in a TX queue
+ *	@tq: the TX queue
+ *
+ *	Returns the number of available descriptors in a TX queue.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *tq)
+{
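+	/*
+	 * One descriptor is held back so that the Producer Index can never
+	 * catch up with the Consumer Index, which the hardware would read as
+	 * an empty queue (see fl_cap() below for the Free List analogue).
+	 */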
+	return tq->size - 1 - tq->in_use;
+}
+
+/**
+ *	fl_cap - return the capacity of a Free List
+ *	@fl: the Free List
+ *
+ *	Returns the capacity of a Free List.  The capacity is less than the
+ *	size because an Egress Queue Index Unit worth of descriptors needs to
+ *	be left unpopulated, otherwise the Producer and Consumer indices PIDX
+ *	and CIDX will match and the hardware will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+	return fl->size - FL_PER_EQ_UNIT;
+}
+
+/**
+ *	fl_starving - return whether a Free List is starving.
+ *	@fl: the Free List
+ *
+ *	Tests the specified Free List to see whether the number of buffers
+ *	available to the hardware has fallen below our "starvation"
+ *	threshold.
+ */
+static inline bool fl_starving(const struct sge_fl *fl)
+{
+	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+}
+
+/**
+ *	map_skb -  map an skb for DMA to the device
+ *	@dev: the egress net device
+ *	@skb: the packet to map
+ *	@addr: a pointer to the base of the DMA mapping array
+ *
+ *	Map an skb for DMA to the device and return an array of DMA addresses.
+ */
+static int map_skb(struct device *dev, const struct sk_buff *skb,
+		   dma_addr_t *addr)
+{
+	const skb_frag_t *fp, *end;
+	const struct skb_shared_info *si;
+
+	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, *addr))
+		goto out_err;
+
+	si = skb_shinfo(skb);
+	end = &si->frags[si->nr_frags];
+	for (fp = si->frags; fp < end; fp++) {
+		*++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, *addr))
+			goto unwind;
+	}
+	return 0;
+
+unwind:
+	while (fp-- > si->frags)
+		dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
+	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+
+out_err:
+	return -ENOMEM;
+}
+
+static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
+		      const struct ulptx_sgl *sgl, const struct sge_txq *tq)
+{
+	const struct ulptx_sge_pair *p;
+	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+
+	if (likely(skb_headlen(skb)))
+		dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
+				 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
+	else {
+		dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
+			       be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
+		nfrags--;
+	}
+
+	/*
+	 * the complexity below is because of the possibility of a wrap-around
+	 * in the middle of an SGL
+	 */
+	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
+		if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
+unmap:
+			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
+				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+			p++;
+		} else if ((u8 *)p == (u8 *)tq->stat) {
+			p = (const struct ulptx_sge_pair *)tq->desc;
+			goto unmap;
+		} else if ((u8 *)p + 8 == (u8 *)tq->stat) {
+			const __be64 *addr = (const __be64 *)tq->desc;
+
+			dma_unmap_page(dev, be64_to_cpu(addr[0]),
+				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(addr[1]),
+				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+			p = (const struct ulptx_sge_pair *)&addr[2];
+		} else {
+			const __be64 *addr = (const __be64 *)tq->desc;
+
+			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(addr[0]),
+				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+			p = (const struct ulptx_sge_pair *)&addr[1];
+		}
+	}
+	if (nfrags) {
+		__be64 addr;
+
+		if ((u8 *)p == (u8 *)tq->stat)
+			p = (const struct ulptx_sge_pair *)tq->desc;
+		addr = ((u8 *)p + 16 <= (u8 *)tq->stat
+			? p->addr[0]
+			: *(const __be64 *)tq->desc);
+		dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
+			       DMA_TO_DEVICE);
+	}
+}
+
+/**
+ *	free_tx_desc - reclaims TX descriptors and their buffers
+ *	@adapter: the adapter
+ *	@tq: the TX queue to reclaim descriptors from
+ *	@n: the number of descriptors to reclaim
+ *	@unmap: whether the buffers should be unmapped for DMA
+ *
+ *	Reclaims TX descriptors from an SGE TX queue and frees the associated
+ *	TX buffers.  Called with the TX queue lock held.
+ */
+static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
+			 unsigned int n, bool unmap)
+{
+	struct tx_sw_desc *sdesc;
+	unsigned int cidx = tq->cidx;
+	struct device *dev = adapter->pdev_dev;
+
+	const int need_unmap = need_skb_unmap() && unmap;
+
+	sdesc = &tq->sdesc[cidx];
+	while (n--) {
+		/*
+		 * If we kept a reference to the original TX skb, we need to
+		 * unmap it from PCI DMA space (if required) and free it.
+		 */
+		if (sdesc->skb) {
+			if (need_unmap)
+				unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
+			kfree_skb(sdesc->skb);
+			sdesc->skb = NULL;
+		}
+
+		sdesc++;
+		if (++cidx == tq->size) {
+			cidx = 0;
+			sdesc = tq->sdesc;
+		}
+	}
+	tq->cidx = cidx;
+}
+
+/*
+ * Return the number of reclaimable descriptors in a TX queue.
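+ * For example (hypothetical values): with a 256-entry queue, a hardware
+ * CIDX of 3 and a software CIDX of 250 gives 3 - 250 = -247, which wraps
+ * to -247 + 256 = 9 reclaimable descriptors.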
+ */
+static inline int reclaimable(const struct sge_txq *tq)
+{
+	int hw_cidx = be16_to_cpu(tq->stat->cidx);
+	int reclaimable = hw_cidx - tq->cidx;
+	if (reclaimable < 0)
+		reclaimable += tq->size;
+	return reclaimable;
+}
+
+/**
+ *	reclaim_completed_tx - reclaims completed TX descriptors
+ *	@adapter: the adapter
+ *	@tq: the TX queue to reclaim completed descriptors from
+ *	@unmap: whether the buffers should be unmapped for DMA
+ *
+ *	Reclaims TX descriptors that the SGE has indicated it has processed,
+ *	and frees the associated buffers if possible.  Called with the TX
+ *	queue locked.
+ */
+static inline void reclaim_completed_tx(struct adapter *adapter,
+					struct sge_txq *tq,
+					bool unmap)
+{
+	int avail = reclaimable(tq);
+
+	if (avail) {
+		/*
+		 * Limit the amount of clean up work we do at a time to keep
+		 * the TX lock hold time O(1).
+		 */
+		if (avail > MAX_TX_RECLAIM)
+			avail = MAX_TX_RECLAIM;
+
+		free_tx_desc(adapter, tq, avail, unmap);
+		tq->in_use -= avail;
+	}
+}
+
+/**
+ *	get_buf_size - return the size of an RX Free List buffer.
+ *	@sdesc: pointer to the software buffer descriptor
+ */
+static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+{
+	return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+		? (PAGE_SIZE << FL_PG_ORDER)
+		: PAGE_SIZE;
+}
+
+/**
+ *	free_rx_bufs - free RX buffers on an SGE Free List
+ *	@adapter: the adapter
+ *	@fl: the SGE Free List to free buffers from
+ *	@n: how many buffers to free
+ *
+ *	Release the next @n buffers on an SGE Free List RX queue.   The
+ *	buffers must be made inaccessible to hardware before calling this
+ *	function.
+ */
+static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
+{
+	while (n--) {
+		struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
+
+		if (is_buf_mapped(sdesc))
+			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
+				       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+		put_page(sdesc->page);
+		sdesc->page = NULL;
+		if (++fl->cidx == fl->size)
+			fl->cidx = 0;
+		fl->avail--;
+	}
+}
+
+/**
+ *	unmap_rx_buf - unmap the current RX buffer on an SGE Free List
+ *	@adapter: the adapter
+ *	@fl: the SGE Free List
+ *
+ *	Unmap the current buffer on an SGE Free List RX queue.   The
+ *	buffer must be made inaccessible to HW before calling this function.
+ *
+ *	This is similar to @free_rx_bufs above but does not free the buffer.
+ *	Do note that the FL still loses any further access to the buffer.
+ *	This is used predominantly to "transfer ownership" of an FL buffer
+ *	to another entity (typically an skb's fragment list).
+ */
+static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
+{
+	struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
+
+	if (is_buf_mapped(sdesc))
+		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
+			       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+	sdesc->page = NULL;
+	if (++fl->cidx == fl->size)
+		fl->cidx = 0;
+	fl->avail--;
+}
+
+/**
+ *	ring_fl_db - ring doorbell on free list
+ *	@adapter: the adapter
+ *	@fl: the Free List whose doorbell should be rung ...
+ *
+ *	Tell the Scatter Gather Engine that there are new free list entries
+ *	available.
+ */
+static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
+{
+	/*
+	 * The SGE keeps track of its Producer and Consumer Indices in terms
+	 * of Egress Queue Units so we can only tell it about integral numbers
+	 * of multiples of Free List Entries per Egress Queue Units ...
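+	 * For example, assuming EQ_UNIT is 64 bytes (so FL_PER_EQ_UNIT is 8),
+	 * 19 pending credits would ring the doorbell with PIDX = 2 (16 Free
+	 * List entries posted) and leave 3 credits pending for a later ring.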
+	 */
+	if (fl->pend_cred >= FL_PER_EQ_UNIT) {
+		wmb();
+		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+			     DBPRIO |
+			     QID(fl->cntxt_id) |
+			     PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
+		fl->pend_cred %= FL_PER_EQ_UNIT;
+	}
+}
+
+/**
+ *	set_rx_sw_desc - initialize software RX buffer descriptor
+ *	@sdesc: pointer to the software RX buffer descriptor
+ *	@page: pointer to the page data structure backing the RX buffer
+ *	@dma_addr: PCI DMA address (possibly with low-bit flags)
+ */
+static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
+				  dma_addr_t dma_addr)
+{
+	sdesc->page = page;
+	sdesc->dma_addr = dma_addr;
+}
+
+/*
+ * Support for poisoning RX buffers ...
+ */
+#define POISON_BUF_VAL -1
+
+static inline void poison_buf(struct page *page, size_t sz)
+{
+#if POISON_BUF_VAL >= 0
+	memset(page_address(page), POISON_BUF_VAL, sz);
+#endif
+}
+
+/**
+ *	refill_fl - refill an SGE RX buffer ring
+ *	@adapter: the adapter
+ *	@fl: the Free List ring to refill
+ *	@n: the number of new buffers to allocate
+ *	@gfp: the gfp flags for the allocations
+ *
+ *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ *	allocated with the supplied gfp flags.  The caller must ensure that
+ *	@n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
+ *	EGRESS QUEUE UNITS_ indicates an empty Free List!  Returns the number
+ *	of buffers allocated.  If afterwards the queue is found critically low,
+ *	mark it as starving in the bitmap of starving FLs.
+ */
+static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
+			      int n, gfp_t gfp)
+{
+	struct page *page;
+	dma_addr_t dma_addr;
+	unsigned int cred = fl->avail;
+	__be64 *d = &fl->desc[fl->pidx];
+	struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
+
+	/*
+	 * Sanity: ensure that the result of adding n Free List buffers
+	 * won't result in wrapping the SGE's Producer Index around to
+	 * its Consumer Index, thereby indicating an empty Free List ...
+	 */
+	BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
+
+	/*
+	 * If we support large pages, prefer large buffers and fail over to
+	 * small pages if we can't allocate large pages to satisfy the refill.
+	 * If we don't support large pages, drop directly into the small page
+	 * allocation code.
+	 */
+	if (FL_PG_ORDER == 0)
+		goto alloc_small_pages;
+
+	while (n) {
+		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+				   FL_PG_ORDER);
+		if (unlikely(!page)) {
+			/*
+			 * We've failed in our attempt to allocate a "large
+			 * page".  Fail over to the "small page" allocation
+			 * below.
+			 */
+			fl->large_alloc_failed++;
+			break;
+		}
+		poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+
+		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
+					PAGE_SIZE << FL_PG_ORDER,
+					PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
+			/*
+			 * We've run out of DMA mapping space.  Free up the
+			 * buffer and return with what we've managed to put
+			 * into the free list.  We don't want to fail over to
+			 * the small page allocation below in this case
+			 * because DMA mapping resources are typically
+			 * critical resources once they become scarce.
+			 */
+			__free_pages(page, FL_PG_ORDER);
+			goto out;
+		}
+		dma_addr |= RX_LARGE_BUF;
+		*d++ = cpu_to_be64(dma_addr);
+
+		set_rx_sw_desc(sdesc, page, dma_addr);
+		sdesc++;
+
+		fl->avail++;
+		if (++fl->pidx == fl->size) {
+			fl->pidx = 0;
+			sdesc = fl->sdesc;
+			d = fl->desc;
+		}
+		n--;
+	}
+
+alloc_small_pages:
+	while (n--) {
+		page = __netdev_alloc_page(adapter->port[0],
+					   gfp | __GFP_NOWARN);
+		if (unlikely(!page)) {
+			fl->alloc_failed++;
+			break;
+		}
+		poison_buf(page, PAGE_SIZE);
+
+		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
+				       PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
+			netdev_free_page(adapter->port[0], page);
+			break;
+		}
+		*d++ = cpu_to_be64(dma_addr);
+
+		set_rx_sw_desc(sdesc, page, dma_addr);
+		sdesc++;
+
+		fl->avail++;
+		if (++fl->pidx == fl->size) {
+			fl->pidx = 0;
+			sdesc = fl->sdesc;
+			d = fl->desc;
+		}
+	}
+
+out:
+	/*
+	 * Update our accounting state to incorporate the new Free List
+	 * buffers, tell the hardware about them and return the number of
+	 * buffers which we were able to allocate.
+	 */
+	cred = fl->avail - cred;
+	fl->pend_cred += cred;
+	ring_fl_db(adapter, fl);
+
+	if (unlikely(fl_starving(fl))) {
+		smp_wmb();
+		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
+	}
+
+	return cred;
+}
+
+/*
+ * Refill a Free List to its capacity or the Maximum Refill Increment,
+ * whichever is smaller ...
+ */
+static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
+{
+	refill_fl(adapter, fl,
+		  min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
+		  GFP_ATOMIC);
+}
+
+/**
+ *	alloc_ring - allocate resources for an SGE descriptor ring
+ *	@dev: the PCI device's core device
+ *	@nelem: the number of descriptors
+ *	@hwsize: the size of each hardware descriptor
+ *	@swsize: the size of each software descriptor
+ *	@busaddrp: the physical PCI bus address of the allocated ring
+ *	@swringp: return address pointer for software ring
+ *	@stat_size: extra space in hardware ring for status information
+ *
+ *	Allocates resources for an SGE descriptor ring, such as TX queues,
+ *	free buffer lists, response queues, etc.  Each SGE ring requires
+ *	space for its hardware descriptors plus, optionally, space for software
+ *	state associated with each hardware entry (the metadata).  The function
+ *	returns three values: the virtual address for the hardware ring (the
+ *	return value of the function), the PCI bus address of the hardware
+ *	ring (in *busaddrp), and the address of the software ring (in swringp).
+ *	Both the hardware and software rings are returned zeroed out.
+ */
+static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
+			size_t swsize, dma_addr_t *busaddrp, void *swringp,
+			size_t stat_size)
+{
+	/*
+	 * Allocate the hardware ring and PCI DMA bus address space for said.
+	 */
+	size_t hwlen = nelem * hwsize + stat_size;
+	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
+
+	if (!hwring)
+		return NULL;
+
+	/*
+	 * If the caller wants a software ring, allocate it and return a
+	 * pointer to it in *swringp.
+	 */
+	BUG_ON((swsize != 0) != (swringp != NULL));
+	if (swsize) {
+		void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
+
+		if (!swring) {
+			dma_free_coherent(dev, hwlen, hwring, *busaddrp);
+			return NULL;
+		}
+		*(void **)swringp = swring;
+	}
+
+	/*
+	 * Zero out the hardware ring and return its address as our function
+	 * value.
+	 */
+	memset(hwring, 0, hwlen);
+	return hwring;
+}
+
+/**
+ *	sgl_len - calculates the size of an SGL of the given capacity
+ *	@n: the number of SGL entries
+ *
+ *	Calculates the number of flits (8-byte units) needed for a Direct
+ *	Scatter/Gather List that can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+	/*
+	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
+	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
+	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
+	 * repeated sequences of { Length[i], Length[i+1], Address[i],
+	 * Address[i+1] } (this ensures that all addresses are on 64-bit
+	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
+	 * Address[N+1] is omitted.
+	 *
+	 * The following calculation incorporates all of the above.  It's
+	 * somewhat hard to follow but, briefly: the "+2" accounts for the
+	 * first two flits which include the DSGL header, Length0 and
+	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
+	 * flits for every pair of the remaining n-1 entries); and finally the
+	 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd ...
+	 */
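+	/*
+	 * Example: n = 4 entries needs 2 (header + Length0/Address0) +
+	 * (3 * 3)/2 + (3 & 1) = 2 + 4 + 1 = 7 flits.
+	 */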
+	n--;
+	return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/**
+ *	flits_to_desc - returns the num of TX descriptors for the given flits
+ *	@flits: the number of flits
+ *
+ *	Returns the number of TX descriptors needed for the supplied number
+ *	of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int flits)
+{
+	BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
+	return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
+}
+
+/**
+ *	is_eth_imm - can an Ethernet packet be sent as immediate data?
+ *	@skb: the packet
+ *
+ *	Returns whether an Ethernet packet is small enough to fit completely as
+ *	immediate data.
+ */
+static inline int is_eth_imm(const struct sk_buff *skb)
+{
+	/*
+	 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
+	 * which does not accommodate immediate data.  We could dike out all
+	 * of the support code for immediate data but that would tie our hands
+	 * too much if we ever want to enhance the firmware.  It would also
+	 * create more differences between the PF and VF Drivers.
+	 */
+	return false;
+}
+
+/**
+ *	calc_tx_flits - calculate the number of flits for a packet TX WR
+ *	@skb: the packet
+ *
+ *	Returns the number of flits needed for a TX Work Request for the
+ *	given Ethernet packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+{
+	unsigned int flits;
+
+	/*
+	 * If the skb is small enough, we can pump it out as a work request
+	 * with only immediate data.  In that case we just have to have the
+	 * TX Packet header plus the skb data in the Work Request.
+	 */
+	if (is_eth_imm(skb))
+		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
+				    sizeof(__be64));
+
+	/*
+	 * Otherwise, we're going to have to construct a Scatter gather list
+	 * of the skb body and fragments.  We also include the flits necessary
+	 * for the TX Packet Work Request and CPL.  We always have a firmware
+	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+	 * message or, if we're doing a Large Send Offload, an LSO CPL message
+	 * with an embedded TX Packet Write CPL message.
+	 */
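+	/*
+	 * For instance, a non-TSO packet with two page fragments needs
+	 * sgl_len(3) = 5 SGL flits plus the constant WR + CPL header flits
+	 * added below.
+	 */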
+	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+	if (skb_shinfo(skb)->gso_size)
+		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+			  sizeof(struct cpl_tx_pkt_lso_core) +
+			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+	else
+		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+	return flits;
+}
+
+/**
+ *	write_sgl - populate a Scatter/Gather List for a packet
+ *	@skb: the packet
+ *	@tq: the TX queue we are writing into
+ *	@sgl: starting location for writing the SGL
+ *	@end: points right after the end of the SGL
+ *	@start: start offset into skb main-body data to include in the SGL
+ *	@addr: the list of DMA bus addresses for the SGL elements
+ *
+ *	Generates a Scatter/Gather List for the buffers that make up a packet.
+ *	The caller must provide adequate space for the SGL that will be written.
+ *	The SGL includes all of the packet's page fragments and the data in its
+ *	main body except for the first @start bytes.  @sgl must be 16-byte
+ *	aligned and within a TX descriptor with available space.  @end points
+ *	right after the end of the SGL but does not account for any potential
+ *	wrap around, i.e., @end > @tq->stat.
+ */
+static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
+		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+		      const dma_addr_t *addr)
+{
+	unsigned int i, len;
+	struct ulptx_sge_pair *to;
+	const struct skb_shared_info *si = skb_shinfo(skb);
+	unsigned int nfrags = si->nr_frags;
+	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
+
+	len = skb_headlen(skb) - start;
+	if (likely(len)) {
+		sgl->len0 = htonl(len);
+		sgl->addr0 = cpu_to_be64(addr[0] + start);
+		nfrags++;
+	} else {
+		sgl->len0 = htonl(si->frags[0].size);
+		sgl->addr0 = cpu_to_be64(addr[1]);
+	}
+
+	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
+			      ULPTX_NSGE(nfrags));
+	if (likely(--nfrags == 0))
+		return;
+	/*
+	 * Most of the complexity below deals with the possibility we hit the
+	 * end of the queue in the middle of writing the SGL.  For this case
+	 * only we create the SGL in a temporary buffer and then copy it.
+	 */
+	to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
+
+	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
+		to->len[0] = cpu_to_be32(si->frags[i].size);
+		to->len[1] = cpu_to_be32(si->frags[++i].size);
+		to->addr[0] = cpu_to_be64(addr[i]);
+		to->addr[1] = cpu_to_be64(addr[++i]);
+	}
+	if (nfrags) {
+		to->len[0] = cpu_to_be32(si->frags[i].size);
+		to->len[1] = cpu_to_be32(0);
+		to->addr[0] = cpu_to_be64(addr[i + 1]);
+	}
+	if (unlikely((u8 *)end > (u8 *)tq->stat)) {
+		unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
+
+		if (likely(part0))
+			memcpy(sgl->sge, buf, part0);
+		part1 = (u8 *)end - (u8 *)tq->stat;
+		memcpy(tq->desc, (u8 *)buf + part0, part1);
+		end = (void *)tq->desc + part1;
+	}
+	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
+		*(u64 *)end = 0;
+}
+
+/**
+ *	ring_tx_db - ring a TX queue's doorbell
+ *	@adapter: the adapter
+ *	@tq: the TX queue
+ *	@n: number of new descriptors to give to HW
+ *
+ *	Ring the doorbell for a TX queue.
+ */
+static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
+			      int n)
+{
+	/*
+	 * Warn if we write doorbells with the wrong priority and write
+	 * descriptors before telling HW.
+	 */
+	WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO);
+	wmb();
+	t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+		     QID(tq->cntxt_id) | PIDX(n));
+}
+
+/**
+ *	inline_tx_skb - inline a packet's data into TX descriptors
+ *	@skb: the packet
+ *	@tq: the TX queue where the packet will be inlined
+ *	@pos: starting position in the TX queue to inline the packet
+ *
+ *	Inline a packet's contents directly into TX descriptors, starting at
+ *	the given position within the TX DMA ring.
+ *	Most of the complexity of this operation is dealing with wrap arounds
+ *	in the middle of the packet we want to inline.
+ */
+static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
+			  void *pos)
+{
+	u64 *p;
+	int left = (void *)tq->stat - pos;
+
+	if (likely(skb->len <= left)) {
+		if (likely(!skb->data_len))
+			skb_copy_from_linear_data(skb, pos, skb->len);
+		else
+			skb_copy_bits(skb, 0, pos, skb->len);
+		pos += skb->len;
+	} else {
+		skb_copy_bits(skb, 0, pos, left);
+		skb_copy_bits(skb, left, tq->desc, skb->len - left);
+		pos = (void *)tq->desc + (skb->len - left);
+	}
+
+	/* 0-pad to multiple of 16 */
+	p = PTR_ALIGN(pos, 8);
+	if ((uintptr_t)p & 8)
+		*p = 0;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(const struct sk_buff *skb)
+{
+	int csum_type;
+	const struct iphdr *iph = ip_hdr(skb);
+
+	if (iph->version == 4) {
+		if (iph->protocol == IPPROTO_TCP)
+			csum_type = TX_CSUM_TCPIP;
+		else if (iph->protocol == IPPROTO_UDP)
+			csum_type = TX_CSUM_UDPIP;
+		else {
+nocsum:
+			/*
+			 * unknown protocol, disable HW csum
+			 * and hope a bad packet is detected
+			 */
+			return TXPKT_L4CSUM_DIS;
+		}
+	} else {
+		/*
+		 * this doesn't work with extension headers
+		 */
+		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
+
+		if (ip6h->nexthdr == IPPROTO_TCP)
+			csum_type = TX_CSUM_TCPIP6;
+		else if (ip6h->nexthdr == IPPROTO_UDP)
+			csum_type = TX_CSUM_UDPIP6;
+		else
+			goto nocsum;
+	}
+
+	if (likely(csum_type >= TX_CSUM_TCPIP))
+		return TXPKT_CSUM_TYPE(csum_type) |
+			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
+			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
+	else {
+		int start = skb_transport_offset(skb);
+
+		return TXPKT_CSUM_TYPE(csum_type) |
+			TXPKT_CSUM_START(start) |
+			TXPKT_CSUM_LOC(start + skb->csum_offset);
+	}
+}
+
+/*
+ * Stop an Ethernet TX queue and record that state change.
+ */
+static void txq_stop(struct sge_eth_txq *txq)
+{
+	netif_tx_stop_queue(txq->txq);
+	txq->q.stops++;
+}
+
+/*
+ * Advance our software state for a TX queue by adding n in use descriptors.
+ */
+static inline void txq_advance(struct sge_txq *tq, unsigned int n)
+{
+	tq->in_use += n;
+	tq->pidx += n;
+	if (tq->pidx >= tq->size)
+		tq->pidx -= tq->size;
+}
+
+/**
+ *	t4vf_eth_xmit - add a packet to an Ethernet TX queue
+ *	@skb: the packet
+ *	@dev: the egress net device
+ *
+ *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
+ */
+int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	u64 cntrl, *end;
+	int qidx, credits;
+	unsigned int flits, ndesc;
+	struct adapter *adapter;
+	struct sge_eth_txq *txq;
+	const struct port_info *pi;
+	struct fw_eth_tx_pkt_vm_wr *wr;
+	struct cpl_tx_pkt_core *cpl;
+	const struct skb_shared_info *ssi;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+	const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
+					sizeof(wr->ethmacsrc) +
+					sizeof(wr->ethtype) +
+					sizeof(wr->vlantci));
+
+	/*
+	 * The chip minimum packet length is 10 octets but the firmware
+	 * command that we are using requires that we copy the Ethernet header
+	 * (including the VLAN tag) into the header so we reject anything
+	 * smaller than that ...
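+	 * (assuming the usual 6/6/2/2-byte field sizes, that copy works out
+	 * to 16 bytes).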
+	 */
+	if (unlikely(skb->len < fw_hdr_copy_len))
+		goto out_free;
+
+	/*
+	 * Figure out which TX Queue we're going to use.
+	 */
+	pi = netdev_priv(dev);
+	adapter = pi->adapter;
+	qidx = skb_get_queue_mapping(skb);
+	BUG_ON(qidx >= pi->nqsets);
+	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+
+	/*
+	 * Take this opportunity to reclaim any TX Descriptors whose DMA
+	 * transfers have completed.
+	 */
+	reclaim_completed_tx(adapter, &txq->q, true);
+
+	/*
+	 * Calculate the number of flits and TX Descriptors we're going to
+	 * need along with how many TX Descriptors will be left over after
+	 * we inject our Work Request.
+	 */
+	flits = calc_tx_flits(skb);
+	ndesc = flits_to_desc(flits);
+	credits = txq_avail(&txq->q) - ndesc;
+
+	if (unlikely(credits < 0)) {
+		/*
+		 * Not enough room for this packet's Work Request.  Stop the
+		 * TX Queue and return a "busy" condition.  The queue will get
+		 * started later on when the firmware informs us that space
+		 * has opened up.
+		 */
+		txq_stop(txq);
+		dev_err(adapter->pdev_dev,
+			"%s: TX ring %u full while queue awake!\n",
+			dev->name, qidx);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (!is_eth_imm(skb) &&
+	    unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+		/*
+		 * We need to map the skb into PCI DMA space (because it can't
+		 * be in-lined directly into the Work Request) and the mapping
+		 * operation failed.  Record the error and drop the packet.
+		 */
+		txq->mapping_err++;
+		goto out_free;
+	}
+
+	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+		/*
+		 * After we're done injecting the Work Request for this
+		 * packet, we'll be below our "stop threshold" so stop the TX
+		 * Queue now.  The queue will get started later on when the
+		 * firmware informs us that space has opened up.
+		 */
+		txq_stop(txq);
+	}
+
+	/*
+	 * Start filling in our Work Request.  Note that we do _not_ handle
+	 * the WR Header wrapping around the TX Descriptor Ring.  If our
+	 * maximum header size ever exceeds one TX Descriptor, we'll need to
+	 * do something else here.
+	 */
+	BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
+	wr = (void *)&txq->q.desc[txq->q.pidx];
+	wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flits, 2)));
+	wr->r3[0] = cpu_to_be64(0);
+	wr->r3[1] = cpu_to_be64(0);
+	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+	end = (u64 *)wr + flits;
+
+	/*
+	 * If this is a Large Send Offload packet we'll put in an LSO CPL
+	 * message with an encapsulated TX Packet CPL message.  Otherwise we
+	 * just use a TX Packet CPL message.
+	 */
+	ssi = skb_shinfo(skb);
+	if (ssi->gso_size) {
+		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+		int l3hdr_len = skb_network_header_len(skb);
+		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+		wr->op_immdlen =
+			cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
+				    FW_WR_IMMDLEN(sizeof(*lso) +
+						  sizeof(*cpl)));
+		/*
+		 * Fill in the LSO CPL message.
+		 */
+		lso->lso_ctrl =
+			cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
+				    LSO_FIRST_SLICE |
+				    LSO_LAST_SLICE |
+				    LSO_IPV6(v6) |
+				    LSO_ETHHDR_LEN(eth_xtra_len/4) |
+				    LSO_IPHDR_LEN(l3hdr_len/4) |
+				    LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+		lso->ipid_ofst = cpu_to_be16(0);
+		lso->mss = cpu_to_be16(ssi->gso_size);
+		lso->seqno_offset = cpu_to_be32(0);
+		lso->len = cpu_to_be32(skb->len);
+
+		/*
+		 * Set up TX Packet CPL pointer, control word and perform
+		 * accounting.
+		 */
+		cpl = (void *)(lso + 1);
+		cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+			 TXPKT_IPHDR_LEN(l3hdr_len) |
+			 TXPKT_ETHHDR_LEN(eth_xtra_len));
+		txq->tso++;
+		txq->tx_cso += ssi->gso_segs;
+	} else {
+		int len;
+
+		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+		wr->op_immdlen =
+			cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
+				    FW_WR_IMMDLEN(len));
+
+		/*
+		 * Set up TX Packet CPL pointer, control word and perform
+		 * accounting.
+		 */
+		cpl = (void *)(wr + 1);
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+			txq->tx_cso++;
+		} else
+			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+	}
+
+	/*
+	 * If there's a VLAN tag present, add that to the list of things to
+	 * do in this Work Request.
+	 */
+	if (vlan_tx_tag_present(skb)) {
+		txq->vlan_ins++;
+		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+	}
+
+	/*
+	 * Fill in the TX Packet CPL message header.
+	 */
+	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
+				 TXPKT_INTF(pi->port_id) |
+				 TXPKT_PF(0));
+	cpl->pack = cpu_to_be16(0);
+	cpl->len = cpu_to_be16(skb->len);
+	cpl->ctrl1 = cpu_to_be64(cntrl);
+
+#ifdef T4_TRACE
+	T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
+		  "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
+		  ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
+#endif
+
+	/*
+	 * Fill in the body of the TX Packet CPL message with either in-lined
+	 * data or a Scatter/Gather List.
+	 */
+	if (is_eth_imm(skb)) {
+		/*
+		 * In-line the packet's data and free the skb since we don't
+		 * need it any longer.
+		 */
+		inline_tx_skb(skb, &txq->q, cpl + 1);
+		dev_kfree_skb(skb);
+	} else {
+		/*
+		 * Write the skb's Scatter/Gather list into the TX Packet CPL
+		 * message and retain a pointer to the skb so we can free it
+		 * later when its DMA completes.  (We store the skb pointer
+		 * in the Software Descriptor corresponding to the last TX
+		 * Descriptor used by the Work Request.)
+		 *
+		 * The retained skb will be freed when the corresponding TX
+		 * Descriptors are reclaimed after their DMAs complete.
+		 * However, this could take quite a while since, in general,
+		 * the hardware is set up to be lazy about sending DMA
+		 * completion notifications to us and we mostly perform TX
+		 * reclaims in the transmit routine.
+		 *
+		 * This is good for performance but means that we rely on new
+		 * TX packets arriving to run the destructors of completed
+		 * packets, which open up space in their sockets' send queues.
+		 * Sometimes we do not get such new packets, causing TX to
+		 * stall.  A single UDP transmitter is a good example of this
+		 * situation.  We have a clean up timer that periodically
+		 * reclaims completed packets but it doesn't run often enough
+		 * (nor do we want it to) to prevent lengthy stalls.  A
+		 * solution to this problem is to run the destructor early,
+		 * after the packet is queued but before it's DMAd.  A con is
+		 * that we lie to socket memory accounting, but the amount of
+		 * extra memory is reasonable (limited by the number of TX
+		 * descriptors), the packets do actually get freed quickly by
+		 * new packets almost always, and for protocols like TCP that
+		 * wait for ACKs to really free up the data, the extra memory
+		 * is even less.  On the positive side we run the destructors
+		 * on the sending CPU rather than on a potentially different
+		 * completing CPU, usually a good thing.
+		 *
+		 * Run the destructor before telling the DMA engine about the
+		 * packet to make sure it doesn't complete and get freed
+		 * prematurely.
+		 */
+		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
+		struct sge_txq *tq = &txq->q;
+		int last_desc;
+
+		/*
+		 * If the Work Request header was an exact multiple of our TX
+		 * Descriptor length, then it's possible that the starting SGL
+		 * pointer lines up exactly with the end of our TX Descriptor
+		 * ring.  If that's the case, wrap around to the beginning
+		 * here ...
+		 */
+		if (unlikely((void *)sgl == (void *)tq->stat)) {
+			sgl = (void *)tq->desc;
+			end = (void *)((void *)tq->desc +
+				       ((void *)end - (void *)tq->stat));
+		}
+
+		write_sgl(skb, tq, sgl, end, 0, addr);
+		skb_orphan(skb);
+
+		last_desc = tq->pidx + ndesc - 1;
+		if (last_desc >= tq->size)
+			last_desc -= tq->size;
+		tq->sdesc[last_desc].skb = skb;
+		tq->sdesc[last_desc].sgl = sgl;
+	}
+
+	/*
+	 * Advance our internal TX Queue state, tell the hardware about
+	 * the new TX descriptors and return success.
+	 */
+	txq_advance(&txq->q, ndesc);
+	dev->trans_start = jiffies;
+	ring_tx_db(adapter, &txq->q, ndesc);
+	return NETDEV_TX_OK;
+
+out_free:
+	/*
+	 * An error of some sort happened.  Free the TX skb and tell the
+	 * OS that we've "dealt" with the packet ...
+	 */
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+/**
+ *	t4vf_pktgl_free - free a packet gather list
+ *	@gl: the gather list
+ *
+ *	Releases the pages of a packet gather list.  We do not own the last
+ *	page on the list and do not free it.
+ */
+void t4vf_pktgl_free(const struct pkt_gl *gl)
+{
+	int frag;
+
+	frag = gl->nfrags - 1;
+	while (frag--)
+		put_page(gl->frags[frag].page);
+}
+
+/**
+ *	copy_frags - copy fragments from gather list into skb_shared_info
+ *	@si: destination skb shared info structure
+ *	@gl: source internal packet gather list
+ *	@offset: packet start offset in first page
+ *
+ *	Copy an internal packet gather list into a Linux skb_shared_info
+ *	structure.
+ */
+static inline void copy_frags(struct skb_shared_info *si,
+			      const struct pkt_gl *gl,
+			      unsigned int offset)
+{
+	unsigned int n;
+
+	/* usually there's just one frag */
+	si->frags[0].page = gl->frags[0].page;
+	si->frags[0].page_offset = gl->frags[0].page_offset + offset;
+	si->frags[0].size = gl->frags[0].size - offset;
+	si->nr_frags = gl->nfrags;
+
+	n = gl->nfrags - 1;
+	if (n)
+		memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
+
+	/* get a reference to the last page, we don't own it */
+	get_page(gl->frags[n].page);
+}
+
+/**
+ *	do_gro - perform Generic Receive Offload ingress packet processing
+ *	@rxq: ingress RX Ethernet Queue
+ *	@gl: gather list for ingress packet
+ *	@pkt: CPL header for last packet fragment
+ *
+ *	Perform Generic Receive Offload (GRO) ingress packet processing.
+ *	We use the standard Linux GRO interfaces for this.
+ */
+static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+		   const struct cpl_rx_pkt *pkt)
+{
+	int ret;
+	struct sk_buff *skb;
+
+	skb = napi_get_frags(&rxq->rspq.napi);
+	if (unlikely(!skb)) {
+		t4vf_pktgl_free(gl);
+		rxq->stats.rx_drops++;
+		return;
+	}
+
+	copy_frags(skb_shinfo(skb), gl, PKTSHIFT);
+	skb->len = gl->tot_len - PKTSHIFT;
+	skb->data_len = skb->len;
+	skb->truesize += skb->data_len;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb_record_rx_queue(skb, rxq->rspq.idx);
+
+	if (unlikely(pkt->vlan_ex)) {
+		struct port_info *pi = netdev_priv(rxq->rspq.netdev);
+		struct vlan_group *grp = pi->vlan_grp;
+
+		rxq->stats.vlan_ex++;
+		if (likely(grp)) {
+			ret = vlan_gro_frags(&rxq->rspq.napi, grp,
+					     be16_to_cpu(pkt->vlan));
+			goto stats;
+		}
+	}
+	ret = napi_gro_frags(&rxq->rspq.napi);
+
+stats:
+	if (ret == GRO_HELD)
+		rxq->stats.lro_pkts++;
+	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
+		rxq->stats.lro_merged++;
+	rxq->stats.pkts++;
+	rxq->stats.rx_cso++;
+}
+
+/**
+ *	t4vf_ethrx_handler - process an ingress ethernet packet
+ *	@rspq: the response queue that received the packet
+ *	@rsp: the response queue descriptor holding the RX_PKT message
+ *	@gl: the gather list of packet fragments
+ *
+ *	Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
+		       const struct pkt_gl *gl)
+{
+	struct sk_buff *skb;
+	struct port_info *pi;
+	struct skb_shared_info *ssi;
+	const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
+	bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+	unsigned int len = be16_to_cpu(pkt->len);
+	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+
+	/*
+	 * If this is a good TCP packet and we have Generic Receive Offload
+	 * enabled, handle the packet in the GRO path.
+	 */
+	if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
+	    (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
+	    !pkt->ip_frag) {
+		do_gro(rxq, gl, pkt);
+		return 0;
+	}
+
+	/*
+	 * If the ingress packet is small enough, allocate an skb large enough
+	 * for all of the data and copy it inline.  Otherwise, allocate an skb
+	 * with enough room to pull in the header and reference the rest of
+	 * the data via the skb fragment list.
+	 */
+	if (len <= RX_COPY_THRES) {
+		/* small packets have only one fragment */
+		skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
+		if (!skb)
+			goto nomem;
+		__skb_put(skb, gl->frags[0].size);
+		skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
+	} else {
+		skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
+		if (!skb)
+			goto nomem;
+		__skb_put(skb, RX_PKT_PULL_LEN);
+		skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
+
+		ssi = skb_shinfo(skb);
+		ssi->frags[0].page = gl->frags[0].page;
+		ssi->frags[0].page_offset = (gl->frags[0].page_offset +
+					     RX_PKT_PULL_LEN);
+		ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
+		if (gl->nfrags > 1)
+			memcpy(&ssi->frags[1], &gl->frags[1],
+			       (gl->nfrags-1) * sizeof(skb_frag_t));
+		ssi->nr_frags = gl->nfrags;
+		skb->len = len + PKTSHIFT;
+		skb->data_len = skb->len - RX_PKT_PULL_LEN;
+		skb->truesize += skb->data_len;
+
+		/* Get a reference for the last page, we don't own it */
+		get_page(gl->frags[gl->nfrags - 1].page);
+	}
+
+	__skb_pull(skb, PKTSHIFT);
+	skb->protocol = eth_type_trans(skb, rspq->netdev);
+	skb_record_rx_queue(skb, rspq->idx);
+	skb->dev->last_rx = jiffies;                  /* XXX removed 2.6.29 */
+	pi = netdev_priv(skb->dev);
+	rxq->stats.pkts++;
+
+	if (csum_ok && (pi->rx_offload & RX_CSO) && !pkt->err_vec &&
+	    (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+		if (!pkt->ip_frag)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else {
+			__sum16 c = (__force __sum16)pkt->csum;
+			skb->csum = csum_unfold(c);
+			skb->ip_summed = CHECKSUM_COMPLETE;
+		}
+		rxq->stats.rx_cso++;
+	} else
+		skb->ip_summed = CHECKSUM_NONE;
+
+	if (unlikely(pkt->vlan_ex)) {
+		struct vlan_group *grp = pi->vlan_grp;
+
+		rxq->stats.vlan_ex++;
+		if (likely(grp))
+			vlan_hwaccel_receive_skb(skb, grp,
+						 be16_to_cpu(pkt->vlan));
+		else
+			dev_kfree_skb_any(skb);
+	} else
+		netif_receive_skb(skb);
+
+	return 0;
+
+nomem:
+	t4vf_pktgl_free(gl);
+	rxq->stats.rx_drops++;
+	return 0;
+}
+
+/**
+ *	is_new_response - check if a response is newly written
+ *	@rc: the response control descriptor
+ *	@rspq: the response queue
+ *
+ *	Returns true if a response descriptor contains a yet unprocessed
+ *	response.
+ */
+static inline bool is_new_response(const struct rsp_ctrl *rc,
+				   const struct sge_rspq *rspq)
+{
+	return RSPD_GEN(rc->type_gen) == rspq->gen;
+}
+
+/**
+ *	restore_rx_bufs - put back a packet's RX buffers
+ *	@gl: the packet gather list
+ *	@fl: the SGE Free List
+ *	@frags: how many fragments in @gl
+ *
+ *	Called when we find out that the current packet, @gl, can't be
+ *	processed right away for some reason.  This is a very rare event and
+ *	there's no effort to make this suspension/resumption process
+ *	particularly efficient.
+ *
+ *	We implement the suspension by putting all of the RX buffers associated
+ *	with the current packet back on the original Free List.  The buffers
+ *	have already been unmapped and are left unmapped; we mark them as
+ *	unmapped in order to prevent further unmapping attempts.  (Effectively
+ *	this function undoes the series of unmap_rx_buf() calls which were done
+ *	to create the current packet's gather list.)  This leaves us ready to
+ *	restart processing of the packet the next time we start processing the
+ *	RX Queue ...
+ */
+static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
+			    int frags)
+{
+	struct rx_sw_desc *sdesc;
+
+	while (frags--) {
+		if (fl->cidx == 0)
+			fl->cidx = fl->size - 1;
+		else
+			fl->cidx--;
+		sdesc = &fl->sdesc[fl->cidx];
+		sdesc->page = gl->frags[frags].page;
+		sdesc->dma_addr |= RX_UNMAPPED_BUF;
+		fl->avail++;
+	}
+}
+
+/**
+ *	rspq_next - advance to the next entry in a response queue
+ *	@rspq: the queue
+ *
+ *	Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *rspq)
+{
+	rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
+	if (unlikely(++rspq->cidx == rspq->size)) {
+		rspq->cidx = 0;
+		rspq->gen ^= 1;
+		rspq->cur_desc = rspq->desc;
+	}
+}
+
+/**
+ *	process_responses - process responses from an SGE response queue
+ *	@rspq: the ingress response queue to process
+ *	@budget: how many responses can be processed in this round
+ *
+ *	Process responses from a Scatter Gather Engine response queue up to
+ *	the supplied budget.  Responses include received packets as well as
+ *	control messages from firmware or hardware.
+ *
+ *	Additionally choose the interrupt holdoff time for the next interrupt
+ *	on this queue.  If the system is under memory shortage use a fairly
+ *	long delay to help recovery.
+ */
+int process_responses(struct sge_rspq *rspq, int budget)
+{
+	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+	int budget_left = budget;
+
+	while (likely(budget_left)) {
+		int ret, rsp_type;
+		const struct rsp_ctrl *rc;
+
+		rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
+		if (!is_new_response(rc, rspq))
+			break;
+
+		/*
+		 * Figure out what kind of response we've received from the
+		 * SGE.
+		 */
+		rmb();
+		rsp_type = RSPD_TYPE(rc->type_gen);
+		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+			skb_frag_t *fp;
+			struct pkt_gl gl;
+			const struct rx_sw_desc *sdesc;
+			u32 bufsz, frag;
+			u32 len = be32_to_cpu(rc->pldbuflen_qid);
+
+			/*
+			 * If we get a "new buffer" message from the SGE we
+			 * need to move on to the next Free List buffer.
+			 */
+			if (len & RSPD_NEWBUF) {
+				/*
+				 * We get one "new buffer" message when we
+				 * first start up a queue so we need to ignore
+				 * it when our offset into the buffer is 0.
+				 */
+				if (likely(rspq->offset > 0)) {
+					free_rx_bufs(rspq->adapter, &rxq->fl,
+						     1);
+					rspq->offset = 0;
+				}
+				len = RSPD_LEN(len);
+			}
+
+			/*
+			 * Gather packet fragments.
+			 */
+			for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
+				BUG_ON(frag >= MAX_SKB_FRAGS);
+				BUG_ON(rxq->fl.avail == 0);
+				sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
+				bufsz = get_buf_size(sdesc);
+				fp->page = sdesc->page;
+				fp->page_offset = rspq->offset;
+				fp->size = min(bufsz, len);
+				len -= fp->size;
+				if (!len)
+					break;
+				unmap_rx_buf(rspq->adapter, &rxq->fl);
+			}
+			gl.nfrags = frag+1;
+
+			/*
+			 * Last buffer remains mapped so explicitly make it
+			 * coherent for CPU access and start preloading first
+			 * cache line ...
+			 */
+			dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
+						get_buf_addr(sdesc),
+						fp->size, DMA_FROM_DEVICE);
+			gl.va = (page_address(gl.frags[0].page) +
+				 gl.frags[0].page_offset);
+			prefetch(gl.va);
+
+			/*
+			 * Hand the new ingress packet to the handler for
+			 * this Response Queue.
+			 */
+			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
+			if (likely(ret == 0))
+				rspq->offset += ALIGN(fp->size, FL_ALIGN);
+			else
+				restore_rx_bufs(&gl, &rxq->fl, frag);
+		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
+			ret = rspq->handler(rspq, rspq->cur_desc, NULL);
+		} else {
+			WARN_ON(rsp_type > RSP_TYPE_CPL);
+			ret = 0;
+		}
+
+		if (unlikely(ret)) {
+			/*
+			 * Couldn't process descriptor, back off for recovery.
+			 * We use the SGE's last timer which has the longest
+			 * interrupt coalescing value ...
+			 */
+			const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
+			rspq->next_intr_params =
+				QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
+			break;
+		}
+
+		rspq_next(rspq);
+		budget_left--;
+	}
+
+	/*
+	 * If this is a Response Queue with an associated Free List and
+	 * at least two Egress Queue units available in the Free List
+	 * for new buffer pointers, refill the Free List.
+	 */
+	if (rspq->offset >= 0 &&
+	    rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
+		__refill_fl(rspq->adapter, &rxq->fl);
+	return budget - budget_left;
+}
+
+/**
+ *	napi_rx_handler - the NAPI handler for RX processing
+ *	@napi: the napi instance
+ *	@budget: how many packets we can process in this round
+ *
+ *	Handler for new data events when using NAPI.  This does not need any
+ *	locking or protection from interrupts as data interrupts are off at
+ *	this point and other adapter interrupts do not interfere (the latter
+ *	is not a concern at all with MSI-X as non-data interrupts then have
+ *	a separate handler).
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+	unsigned int intr_params;
+	struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
+	int work_done = process_responses(rspq, budget);
+
+	if (likely(work_done < budget)) {
+		napi_complete(napi);
+		intr_params = rspq->next_intr_params;
+		rspq->next_intr_params = rspq->intr_params;
+	} else
+		intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
+
+	t4_write_reg(rspq->adapter,
+		     T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+		     CIDXINC(work_done) |
+		     INGRESSQID((u32)rspq->cntxt_id) |
+		     SEINTARM(intr_params));
+	return work_done;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the NAPI case
+ * (i.e., response queue serviced by NAPI polling).
+ */
+irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
+{
+	struct sge_rspq *rspq = cookie;
+
+	napi_schedule(&rspq->napi);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Process the indirect interrupt entries in the interrupt queue and kick off
+ * NAPI for each queue that has generated an entry.
+ */
+static unsigned int process_intrq(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	struct sge_rspq *intrq = &s->intrq;
+	unsigned int work_done;
+
+	spin_lock(&adapter->sge.intrq_lock);
+	for (work_done = 0; ; work_done++) {
+		const struct rsp_ctrl *rc;
+		unsigned int qid, iq_idx;
+		struct sge_rspq *rspq;
+
+		/*
+		 * Grab the next response from the interrupt queue and bail
+		 * out if it's not a new response.
+		 */
+		rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
+		if (!is_new_response(rc, intrq))
+			break;
+
+		/*
+		 * If the response isn't a forwarded interrupt message, issue an
+		 * error and go on to the next response message.  This should
+		 * never happen ...
+		 */
+		rmb();
+		if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
+			dev_err(adapter->pdev_dev,
+				"Unexpected INTRQ response type %d\n",
+				RSPD_TYPE(rc->type_gen));
+			continue;
+		}
+
+		/*
+		 * Extract the Queue ID from the interrupt message and perform
+		 * sanity checking to make sure it really refers to one of our
+		 * Ingress Queues which is active and matches the queue's ID.
+		 * None of these error conditions should ever happen, so we may
+		 * want to make them fatal and/or conditionalize them under
+		 * DEBUG.
+		 */
+		qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
+		iq_idx = IQ_IDX(s, qid);
+		if (unlikely(iq_idx >= MAX_INGQ)) {
+			dev_err(adapter->pdev_dev,
+				"Ingress QID %d out of range\n", qid);
+			continue;
+		}
+		rspq = s->ingr_map[iq_idx];
+		if (unlikely(rspq == NULL)) {
+			dev_err(adapter->pdev_dev,
+				"Ingress QID %d RSPQ=NULL\n", qid);
+			continue;
+		}
+		if (unlikely(rspq->abs_id != qid)) {
+			dev_err(adapter->pdev_dev,
+				"Ingress QID %d refers to RSPQ %d\n",
+				qid, rspq->abs_id);
+			continue;
+		}
+
+		/*
+		 * Schedule NAPI processing on the indicated Response Queue
+		 * and move on to the next entry in the Forwarded Interrupt
+		 * Queue.
+		 */
+		napi_schedule(&rspq->napi);
+		rspq_next(intrq);
+	}
+
+	t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+		     CIDXINC(work_done) |
+		     INGRESSQID(intrq->cntxt_id) |
+		     SEINTARM(intrq->intr_params));
+
+	spin_unlock(&adapter->sge.intrq_lock);
+
+	return work_done;
+}
+
+/*
+ * The MSI interrupt handler handles data events from SGE response queues as
+ * well as error and other async events as they all use the same MSI vector.
+ */
+irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+{
+	struct adapter *adapter = cookie;
+
+	process_intrq(adapter);
+	return IRQ_HANDLED;
+}
+
+/**
+ *	t4vf_intr_handler - select the top-level interrupt handler
+ *	@adapter: the adapter
+ *
+ *	Selects the top-level interrupt handler based on the type of interrupts
+ *	(MSI-X or MSI).
+ */
+irq_handler_t t4vf_intr_handler(struct adapter *adapter)
+{
+	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+	if (adapter->flags & USING_MSIX)
+		return t4vf_sge_intr_msix;
+	else
+		return t4vf_intr_msi;
+}
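+
+/*
+ * For illustration, a caller in MSI mode might hook the selected handler up
+ * with something like the following sketch (the "cxgb4vf" name string and the
+ * exact IRQ source shown here are illustrative, not part of this driver):
+ *
+ *	err = request_irq(adapter->pdev->irq, t4vf_intr_handler(adapter),
+ *			  0, "cxgb4vf", adapter);
+ *	if (err)
+ *		dev_err(adapter->pdev_dev, "request_irq failed: %d\n", err);
+ *
+ * passing the adapter itself as the interrupt cookie, which is what
+ * t4vf_intr_msi() above expects.
+ */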
+
+/**
+ *	sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
+ *	@data: the adapter
+ *
+ *	Runs periodically from a timer to perform maintenance of SGE RX queues.
+ *
+ *	Replenishes RX queues that have run out due to memory shortage.
+ *	Normally new RX buffers are added when existing ones are consumed, but
+ *	when out of memory a queue can become empty.  We schedule NAPI to do
+ *	the actual refill.
+ */
+static void sge_rx_timer_cb(unsigned long data)
+{
+	struct adapter *adapter = (struct adapter *)data;
+	struct sge *s = &adapter->sge;
+	unsigned int i;
+
+	/*
+	 * Scan the "Starving Free Lists" flag array looking for any Free
+	 * Lists in need of more free buffers.  If we find one and it's not
+	 * being actively polled, then bump its "starving" counter and attempt
+	 * to refill it.  If we're successful in adding enough buffers to push
+	 * the Free List over the starving threshold, then we can clear its
+	 * "starving" status.
+	 */
+	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
+		unsigned long m;
+
+		for (m = s->starving_fl[i]; m; m &= m - 1) {
+			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
+			struct sge_fl *fl = s->egr_map[id];
+
+			clear_bit(id, s->starving_fl);
+			smp_mb__after_clear_bit();
+
+			/*
+			 * Since we are accessing fl without a lock there's a
+			 * small probability of a false positive where we
+			 * schedule napi but the FL is no longer starving.
+			 * No biggie.
+			 */
+			if (fl_starving(fl)) {
+				struct sge_eth_rxq *rxq;
+
+				rxq = container_of(fl, struct sge_eth_rxq, fl);
+				if (napi_reschedule(&rxq->rspq.napi))
+					fl->starving++;
+				else
+					set_bit(id, s->starving_fl);
+			}
+		}
+	}
+
+	/*
+	 * Reschedule the next scan for starving Free Lists ...
+	 */
+	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
+}
+
+/**
+ *	sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
+ *	@data: the adapter
+ *
+ *	Runs periodically from a timer to perform maintenance of SGE TX queues.
+ *
+ *	Reclaims completed TX packets for the Ethernet queues.  Normally,
+ *	completed packets are cleaned up as new TX packets are submitted; this
+ *	timer handles the case where no new packets are being submitted.  This
+ *	is essential for pktgen, at least.
+ */
+static void sge_tx_timer_cb(unsigned long data)
+{
+	struct adapter *adapter = (struct adapter *)data;
+	struct sge *s = &adapter->sge;
+	unsigned int i, budget;
+
+	budget = MAX_TIMER_TX_RECLAIM;
+	i = s->ethtxq_rover;
+	do {
+		struct sge_eth_txq *txq = &s->ethtxq[i];
+
+		if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
+			int avail = reclaimable(&txq->q);
+
+			if (avail > budget)
+				avail = budget;
+
+			free_tx_desc(adapter, &txq->q, avail, true);
+			txq->q.in_use -= avail;
+			__netif_tx_unlock(txq->txq);
+
+			budget -= avail;
+			if (!budget)
+				break;
+		}
+
+		i++;
+		if (i >= s->ethqsets)
+			i = 0;
+	} while (i != s->ethtxq_rover);
+	s->ethtxq_rover = i;
+
+	/*
+	 * If we found too many reclaimable packets schedule a timer in the
+	 * near future to continue where we left off.  Otherwise the next timer
+	 * will be at its normal interval.
+	 */
+	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+}
+
+/**
+ *	t4vf_sge_alloc_rxq - allocate an SGE RX Queue
+ *	@adapter: the adapter
+ *	@rspq: pointer to the new rxq's Response Queue to be filled in
+ *	@iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
+ *	@dev: the network device associated with the new rspq
+ *	@intr_dest: MSI-X vector index (overridden in MSI mode)
+ *	@fl: pointer to the new rxq's Free List to be filled in
+ *	@hnd: the interrupt handler to invoke for the rspq
+ */
+int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
+		       bool iqasynch, struct net_device *dev,
+		       int intr_dest,
+		       struct sge_fl *fl, rspq_handler_t hnd)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct fw_iq_cmd cmd, rpl;
+	int ret, iqandst, flsz = 0;
+
+	/*
+	 * If we're using MSI interrupts and we're not initializing the
+	 * Forwarded Interrupt Queue itself, then set up this queue for
+	 * indirect interrupts to the Forwarded Interrupt Queue.  Obviously
+	 * the Forwarded Interrupt Queue must be set up before any other
+	 * ingress queue ...
+	 */
+	if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
+		iqandst = SGE_INTRDST_IQ;
+		intr_dest = adapter->sge.intrq.abs_id;
+	} else
+		iqandst = SGE_INTRDST_PCI;
+
+	/*
+	 * Allocate the hardware ring for the Response Queue.  The size needs
+	 * to be a multiple of 16 which includes the mandatory status entry
+	 * (regardless of whether the Status Page capabilities are enabled or
+	 * not).
+	 */
+	rspq->size = roundup(rspq->size, 16);
+	rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
+				0, &rspq->phys_addr, NULL, 0);
+	if (!rspq->desc)
+		return -ENOMEM;
+
+	/*
+	 * Fill in the Ingress Queue Command.  Note: Ideally this code would
+	 * be in t4vf_hw.c but there are so many parameters and dependencies
+	 * on our Linux SGE state that we would end up having to pass tons of
+	 * parameters.  We'll have to think about how this might be migrated
+	 * into OS-independent common code ...
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_WRITE |
+				    FW_CMD_EXEC);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
+					 FW_IQ_CMD_IQSTART(1) |
+					 FW_LEN16(cmd));
+	cmd.type_to_iqandstindex =
+		cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+			    FW_IQ_CMD_IQASYNCH(iqasynch) |
+			    FW_IQ_CMD_VIID(pi->viid) |
+			    FW_IQ_CMD_IQANDST(iqandst) |
+			    FW_IQ_CMD_IQANUS(1) |
+			    FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) |
+			    FW_IQ_CMD_IQANDSTINDEX(intr_dest));
+	cmd.iqdroprss_to_iqesize =
+		cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) |
+			    FW_IQ_CMD_IQGTSMODE |
+			    FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) |
+			    FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4));
+	cmd.iqsize = cpu_to_be16(rspq->size);
+	cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
+
+	if (fl) {
+		/*
+		 * Allocate the ring for the hardware free list (with space
+		 * for its status page) along with the associated software
+		 * descriptor ring.  The free list size needs to be a multiple
+		 * of the Egress Queue Unit.
+		 */
+		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
+		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
+				      sizeof(__be64), sizeof(struct rx_sw_desc),
+				      &fl->addr, &fl->sdesc, STAT_LEN);
+		if (!fl->desc) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		/*
+		 * Calculate the size of the hardware free list ring plus
+		 * status page (which the SGE will place at the end of the
+		 * free list ring) in Egress Queue Units.
+		 */
+		flsz = (fl->size / FL_PER_EQ_UNIT +
+			STAT_LEN / EQ_UNIT);
+
+		/*
+		 * Fill in all the relevant firmware Ingress Queue Command
+		 * fields for the free list.
+		 */
+		cmd.iqns_to_fl0congen =
+			cpu_to_be32(
+				FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
+				FW_IQ_CMD_FL0PACKEN |
+				FW_IQ_CMD_FL0PADEN);
+		cmd.fl0dcaen_to_fl0cidxfthresh =
+			cpu_to_be16(
+				FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
+				FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B));
+		cmd.fl0size = cpu_to_be16(flsz);
+		cmd.fl0addr = cpu_to_be64(fl->addr);
+	}
+
+	/*
+	 * Issue the firmware Ingress Queue Command and extract the results if
+	 * it completes successfully.
+	 */
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret)
+		goto err;
+
+	netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
+	rspq->cur_desc = rspq->desc;
+	rspq->cidx = 0;
+	rspq->gen = 1;
+	rspq->next_intr_params = rspq->intr_params;
+	rspq->cntxt_id = be16_to_cpu(rpl.iqid);
+	rspq->abs_id = be16_to_cpu(rpl.physiqid);
+	rspq->size--;			/* subtract status entry */
+	rspq->adapter = adapter;
+	rspq->netdev = dev;
+	rspq->handler = hnd;
+
+	/* set offset to -1 to distinguish ingress queues without FL */
+	rspq->offset = fl ? 0 : -1;
+
+	if (fl) {
+		fl->cntxt_id = be16_to_cpu(rpl.fl0id);
+		fl->avail = 0;
+		fl->pend_cred = 0;
+		fl->pidx = 0;
+		fl->cidx = 0;
+		fl->alloc_failed = 0;
+		fl->large_alloc_failed = 0;
+		fl->starving = 0;
+		refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
+	}
+
+	return 0;
+
+err:
+	/*
+	 * An error occurred.  Clean up our partial allocation state and
+	 * return the error.
+	 */
+	if (rspq->desc) {
+		dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
+				  rspq->desc, rspq->phys_addr);
+		rspq->desc = NULL;
+	}
+	if (fl && fl->desc) {
+		kfree(fl->sdesc);
+		fl->sdesc = NULL;
+		dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
+				  fl->desc, fl->addr);
+		fl->desc = NULL;
+	}
+	return ret;
+}
+
+/**
+ *	t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
+ *	@adapter: the adapter
+ *	@txq: pointer to the new txq to be filled in
+ *	@dev: the network device associated with the new txq
+ *	@devq: the network TX queue associated with the new txq
+ *	@iqid: the relative ingress queue ID to which events relating to
+ *		the new txq should be directed
+ */
+int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
+			   struct net_device *dev, struct netdev_queue *devq,
+			   unsigned int iqid)
+{
+	int ret, nentries;
+	struct fw_eq_eth_cmd cmd, rpl;
+	struct port_info *pi = netdev_priv(dev);
+
+	/*
+	 * Calculate the size of the hardware TX Queue (including the
+	 * status page on the end) in units of TX Descriptors.
+	 */
+	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+	/*
+	 * Allocate the hardware ring for the TX ring (with space for its
+	 * status page) along with the associated software descriptor ring.
+	 */
+	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
+				 sizeof(struct tx_desc),
+				 sizeof(struct tx_sw_desc),
+				 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+	if (!txq->q.desc)
+		return -ENOMEM;
+
+	/*
+	 * Fill in the Egress Queue Command.  Note: As with the direct use of
+	 * the firmware Ingress Queue Command above in our RXQ allocation
+	 * routine, ideally, this code would be in t4vf_hw.c.  Again, we'll
+	 * have to see if there's some reasonable way to parameterize it
+	 * into the common code ...
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_WRITE |
+				    FW_CMD_EXEC);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
+					 FW_EQ_ETH_CMD_EQSTART |
+					 FW_LEN16(cmd));
+	cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
+	cmd.fetchszm_to_iqid =
+		cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
+			    FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
+			    FW_EQ_ETH_CMD_IQID(iqid));
+	cmd.dcaen_to_eqsize =
+		cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) |
+			    FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) |
+			    FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) |
+			    FW_EQ_ETH_CMD_EQSIZE(nentries));
+	cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+	/*
+	 * Issue the firmware Egress Queue Command and extract the results if
+	 * it completes successfully.
+	 */
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret) {
+		/*
+		 * The firmware Egress Queue Command failed for some reason.
+		 * Free up our partial allocation state and return the error.
+		 */
+		kfree(txq->q.sdesc);
+		txq->q.sdesc = NULL;
+		dma_free_coherent(adapter->pdev_dev,
+				  nentries * sizeof(struct tx_desc),
+				  txq->q.desc, txq->q.phys_addr);
+		txq->q.desc = NULL;
+		return ret;
+	}
+
+	txq->q.in_use = 0;
+	txq->q.cidx = 0;
+	txq->q.pidx = 0;
+	txq->q.stat = (void *)&txq->q.desc[txq->q.size];
+	txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd));
+	txq->q.abs_id =
+		FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd));
+	txq->txq = devq;
+	txq->tso = 0;
+	txq->tx_cso = 0;
+	txq->vlan_ins = 0;
+	txq->q.stops = 0;
+	txq->q.restarts = 0;
+	txq->mapping_err = 0;
+	return 0;
+}
+
+/*
+ * Free the DMA map resources associated with a TX queue.
+ */
+static void free_txq(struct adapter *adapter, struct sge_txq *tq)
+{
+	dma_free_coherent(adapter->pdev_dev,
+			  tq->size * sizeof(*tq->desc) + STAT_LEN,
+			  tq->desc, tq->phys_addr);
+	tq->cntxt_id = 0;
+	tq->sdesc = NULL;
+	tq->desc = NULL;
+}
+
+/*
+ * Free the resources associated with a response queue (possibly including a
+ * free list).
+ */
+static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
+			 struct sge_fl *fl)
+{
+	unsigned int flid = fl ? fl->cntxt_id : 0xffff;
+
+	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
+		     rspq->cntxt_id, flid, 0xffff);
+	dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
+			  rspq->desc, rspq->phys_addr);
+	netif_napi_del(&rspq->napi);
+	rspq->netdev = NULL;
+	rspq->cntxt_id = 0;
+	rspq->abs_id = 0;
+	rspq->desc = NULL;
+
+	if (fl) {
+		free_rx_bufs(adapter, fl, fl->avail);
+		dma_free_coherent(adapter->pdev_dev,
+				  fl->size * sizeof(*fl->desc) + STAT_LEN,
+				  fl->desc, fl->addr);
+		kfree(fl->sdesc);
+		fl->sdesc = NULL;
+		fl->cntxt_id = 0;
+		fl->desc = NULL;
+	}
+}
+
+/**
+ *	t4vf_free_sge_resources - free SGE resources
+ *	@adapter: the adapter
+ *
+ *	Frees resources used by the SGE queue sets.
+ */
+void t4vf_free_sge_resources(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	struct sge_eth_rxq *rxq = s->ethrxq;
+	struct sge_eth_txq *txq = s->ethtxq;
+	struct sge_rspq *evtq = &s->fw_evtq;
+	struct sge_rspq *intrq = &s->intrq;
+	int qs;
+
+	for (qs = 0; qs < adapter->sge.ethqsets; qs++) {
+		if (rxq->rspq.desc)
+			free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
+		if (txq->q.desc) {
+			t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
+			free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
+			kfree(txq->q.sdesc);
+			free_txq(adapter, &txq->q);
+		}
+	}
+	if (evtq->desc)
+		free_rspq_fl(adapter, evtq, NULL);
+	if (intrq->desc)
+		free_rspq_fl(adapter, intrq, NULL);
+}
+
+/**
+ *	t4vf_sge_start - enable SGE operation
+ *	@adapter: the adapter
+ *
+ *	Start tasklets and timers associated with the DMA engine.
+ */
+void t4vf_sge_start(struct adapter *adapter)
+{
+	adapter->sge.ethtxq_rover = 0;
+	mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+	mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ *	t4vf_sge_stop - disable SGE operation
+ *	@adapter: the adapter
+ *
+ *	Stop tasklets and timers associated with the DMA engine.  Note that
+ *	this is effective only if measures have been taken to disable any HW
+ *	events that may restart them.
+ */
+void t4vf_sge_stop(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+
+	if (s->rx_timer.function)
+		del_timer_sync(&s->rx_timer);
+	if (s->tx_timer.function)
+		del_timer_sync(&s->tx_timer);
+}
+
+/**
+ *	t4vf_sge_init - initialize SGE
+ *	@adapter: the adapter
+ *
+ *	Performs SGE initialization needed every time after a chip reset.
+ *	We do not initialize any of the queue sets here; instead the top-level
+ *	driver must request those individually.  We also do not enable DMA
+ *	here; that should be done after the queues have been set up.
+ */
+int t4vf_sge_init(struct adapter *adapter)
+{
+	struct sge_params *sge_params = &adapter->params.sge;
+	u32 fl0 = sge_params->sge_fl_buffer_size[0];
+	u32 fl1 = sge_params->sge_fl_buffer_size[1];
+	struct sge *s = &adapter->sge;
+
+	/*
+	 * Start by vetting the basic SGE parameters which have been set up by
+	 * the Physical Function Driver.  Ideally we should be able to deal
+	 * with _any_ configuration.  Practice is different ...
+	 */
+	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+			fl0, fl1);
+		return -EINVAL;
+	}
+	if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
+		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Now translate the adapter parameters into our internal forms.
+	 */
+	if (fl1)
+		FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
+	STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
+	PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
+	FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+			 SGE_INGPADBOUNDARY_SHIFT);
+
+	/*
+	 * Set up tasklet timers.
+	 */
+	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
+	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
+
+	/*
+	 * Initialize Forwarded Interrupt Queue lock.
+	 */
+	spin_lock_init(&s->intrq_lock);
+
+	return 0;
+}
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
new file mode 100644
index 0000000..5c7bde7
--- /dev/null
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -0,0 +1,273 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4VF_COMMON_H__
+#define __T4VF_COMMON_H__
+
+#include "../cxgb4/t4fw_api.h"
+
+/*
+ * The "len16" field of a Firmware Command Structure ...
+ */
+#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
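+
+/*
+ * For example, the Ingress Queue allocation code in sge.c builds its command
+ * header as follows, with FW_LEN16() supplying the command length in 16-byte
+ * units:
+ *
+ *	struct fw_iq_cmd cmd;
+ *
+ *	memset(&cmd, 0, sizeof(cmd));
+ *	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
+ *					 FW_IQ_CMD_IQSTART(1) |
+ *					 FW_LEN16(cmd));
+ */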
+
+/*
+ * Per-VF statistics.
+ */
+struct t4vf_port_stats {
+	/*
+	 * TX statistics.
+	 */
+	u64 tx_bcast_bytes;		/* broadcast */
+	u64 tx_bcast_frames;
+	u64 tx_mcast_bytes;		/* multicast */
+	u64 tx_mcast_frames;
+	u64 tx_ucast_bytes;		/* unicast */
+	u64 tx_ucast_frames;
+	u64 tx_drop_frames;		/* TX dropped frames */
+	u64 tx_offload_bytes;		/* offload */
+	u64 tx_offload_frames;
+
+	/*
+	 * RX statistics.
+	 */
+	u64 rx_bcast_bytes;		/* broadcast */
+	u64 rx_bcast_frames;
+	u64 rx_mcast_bytes;		/* multicast */
+	u64 rx_mcast_frames;
+	u64 rx_ucast_bytes;
+	u64 rx_ucast_frames;		/* unicast */
+
+	u64 rx_err_frames;		/* RX error frames */
+};
+
+/*
+ * Per-"port" (Virtual Interface) link configuration ...
+ */
+struct link_config {
+	unsigned int   supported;        /* link capabilities */
+	unsigned int   advertising;      /* advertised capabilities */
+	unsigned short requested_speed;  /* speed user has requested */
+	unsigned short speed;            /* actual link speed */
+	unsigned char  requested_fc;     /* flow control user has requested */
+	unsigned char  fc;               /* actual link flow control */
+	unsigned char  autoneg;          /* autonegotiating? */
+	unsigned char  link_ok;          /* link up? */
+};
+
+enum {
+	PAUSE_RX      = 1 << 0,
+	PAUSE_TX      = 1 << 1,
+	PAUSE_AUTONEG = 1 << 2
+};
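+
+/*
+ * The requested/actual flow control fields in struct link_config above are
+ * bitwise ORs of these values, e.g. the default set up by init_link_config()
+ * in t4vf_hw.c:
+ *
+ *	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ *	if (lc->supported & SUPPORTED_Autoneg)
+ *		lc->requested_fc |= PAUSE_AUTONEG;
+ */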
+
+/*
+ * General device parameters ...
+ */
+struct dev_params {
+	u32 fwrev;			/* firmware version */
+	u32 tprev;			/* TP Microcode Version */
+};
+
+/*
+ * Scatter Gather Engine parameters.  These are almost all determined by the
+ * Physical Function Driver.  We just need to grab them to see within which
+ * environment we're playing ...
+ */
+struct sge_params {
+	u32 sge_control;		/* padding, boundaries, lengths, etc. */
+	u32 sge_host_page_size;		/* RDMA page sizes */
+	u32 sge_queues_per_page;	/* RDMA queues/page */
+	u32 sge_user_mode_limits;	/* limits for BAR2 user mode accesses */
+	u32 sge_fl_buffer_size[16];	/* free list buffer sizes */
+	u32 sge_ingress_rx_threshold;	/* RX counter interrupt threshold[4] */
+	u32 sge_timer_value_0_and_1;	/* interrupt coalescing timer values */
+	u32 sge_timer_value_2_and_3;
+	u32 sge_timer_value_4_and_5;
+};
+
+/*
+ * Vital Product Data parameters.
+ */
+struct vpd_params {
+	u32 cclk;			/* Core Clock (KHz) */
+};
+
+/*
+ * Global Receive Side Scaling (RSS) parameters in host-native format.
+ */
+struct rss_params {
+	unsigned int mode;		/* RSS mode */
+	union {
+	    struct {
+		int synmapen:1;		/* SYN Map Enable */
+		int syn4tupenipv6:1;	/* enable hashing 4-tuple IPv6 SYNs */
+		int syn2tupenipv6:1;	/* enable hashing 2-tuple IPv6 SYNs */
+		int syn4tupenipv4:1;	/* enable hashing 4-tuple IPv4 SYNs */
+		int syn2tupenipv4:1;	/* enable hashing 2-tuple IPv4 SYNs */
+		int ofdmapen:1;		/* Offload Map Enable */
+		int tnlmapen:1;		/* Tunnel Map Enable */
+		int tnlalllookup:1;	/* Tunnel All Lookup */
+		int hashtoeplitz:1;	/* use Toeplitz hash */
+	    } basicvirtual;
+	} u;
+};
+
+/*
+ * Virtual Interface RSS Configuration in host-native format.
+ */
+union rss_vi_config {
+    struct {
+	u16 defaultq;			/* Ingress Queue ID for !tnlalllookup */
+	int ip6fourtupen:1;		/* hash 4-tuple IPv6 ingress packets */
+	int ip6twotupen:1;		/* hash 2-tuple IPv6 ingress packets */
+	int ip4fourtupen:1;		/* hash 4-tuple IPv4 ingress packets */
+	int ip4twotupen:1;		/* hash 2-tuple IPv4 ingress packets */
+	int udpen;			/* hash 4-tuple UDP ingress packets */
+    } basicvirtual;
+};
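+
+/*
+ * Updates are typically read-modify-write operations through the accessors
+ * declared below; a sketch (error handling abbreviated, "viid" standing for
+ * the Virtual Interface in question) which turns on 4-tuple hashing of IPv4
+ * ingress packets:
+ *
+ *	union rss_vi_config config;
+ *
+ *	ret = t4vf_read_rss_vi_config(adapter, viid, &config);
+ *	if (ret == 0) {
+ *		config.basicvirtual.ip4fourtupen = 1;
+ *		ret = t4vf_write_rss_vi_config(adapter, viid, &config);
+ *	}
+ */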
+
+/*
+ * Maximum resources provisioned for a PCI VF.
+ */
+struct vf_resources {
+	unsigned int nvi;		/* N virtual interfaces */
+	unsigned int neq;		/* N egress Qs */
+	unsigned int nethctrl;		/* N egress ETH or CTRL Qs */
+	unsigned int niqflint;		/* N ingress Qs/w free list(s) & intr */
+	unsigned int niq;		/* N ingress Qs */
+	unsigned int tc;		/* PCI-E traffic class */
+	unsigned int pmask;		/* port access rights mask */
+	unsigned int nexactf;		/* N exact MPS filters */
+	unsigned int r_caps;		/* read capabilities */
+	unsigned int wx_caps;		/* write/execute capabilities */
+};
+
+/*
+ * Per-"adapter" (Virtual Function) parameters.
+ */
+struct adapter_params {
+	struct dev_params dev;		/* general device parameters */
+	struct sge_params sge;		/* Scatter Gather Engine */
+	struct vpd_params vpd;		/* Vital Product Data */
+	struct rss_params rss;		/* Receive Side Scaling */
+	struct vf_resources vfres;	/* Virtual Function Resource limits */
+	u8 nports;			/* # of Ethernet "ports" */
+};
+
+#include "adapter.h"
+
+#ifndef PCI_VENDOR_ID_CHELSIO
+# define PCI_VENDOR_ID_CHELSIO 0x1425
+#endif
+
+#define for_each_port(adapter, iter) \
+	for (iter = 0; iter < (adapter)->params.nports; iter++)
+
+static inline bool is_10g_port(const struct link_config *lc)
+{
+	return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
+}
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
+{
+	return adapter->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adapter,
+					    unsigned int us)
+{
+	return (us * adapter->params.vpd.cclk) / 1000;
+}
+
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+					    unsigned int ticks)
+{
+	return (ticks * 1000) / adapter->params.vpd.cclk;
+}
+
+int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);
+
+static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
+			       int size, void *rpl)
+{
+	return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
+}
+
+static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
+				  int size, void *rpl)
+{
+	return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
+}
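+
+/*
+ * Most of the accessors declared below follow the same pattern: zero a
+ * firmware command structure, fill in the opcode, the REQUEST flag and a
+ * READ/WRITE flag plus the length in 16-byte units, then pass it to
+ * t4vf_wr_mbox() together with an optional reply buffer.  A sketch based on
+ * the VI Read issued by t4vf_port_init() in t4vf_hw.c:
+ *
+ *	struct fw_vi_cmd vi_cmd, vi_rpl;
+ *
+ *	memset(&vi_cmd, 0, sizeof(vi_cmd));
+ *	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+ *				       FW_CMD_REQUEST |
+ *				       FW_CMD_READ);
+ *	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
+ *	ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
+ */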
+
+int __devinit t4vf_wait_dev_ready(struct adapter *);
+int __devinit t4vf_port_init(struct adapter *, int);
+
+int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
+int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
+
+int t4vf_get_sge_params(struct adapter *);
+int t4vf_get_vpd_params(struct adapter *);
+int t4vf_get_dev_params(struct adapter *);
+int t4vf_get_rss_glb_config(struct adapter *);
+int t4vf_get_vfres(struct adapter *);
+
+int t4vf_read_rss_vi_config(struct adapter *, unsigned int,
+			    union rss_vi_config *);
+int t4vf_write_rss_vi_config(struct adapter *, unsigned int,
+			     union rss_vi_config *);
+int t4vf_config_rss_range(struct adapter *, unsigned int, int, int,
+			  const u16 *, int);
+
+int t4vf_alloc_vi(struct adapter *, int);
+int t4vf_free_vi(struct adapter *, int);
+int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool);
+int t4vf_identify_port(struct adapter *, unsigned int, unsigned int);
+
+int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int,
+		    bool);
+int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int,
+			const u8 **, u16 *, u64 *, bool);
+int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool);
+int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool);
+int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *);
+
+int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int,
+		 unsigned int);
+int t4vf_eth_eq_free(struct adapter *, unsigned int);
+
+int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
+
+#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/cxgb4vf/t4vf_defs.h b/drivers/net/cxgb4vf/t4vf_defs.h
new file mode 100644
index 0000000..c7b127d
--- /dev/null
+++ b/drivers/net/cxgb4vf/t4vf_defs.h
@@ -0,0 +1,121 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4VF_DEFS_H__
+#define __T4VF_DEFS_H__
+
+#include "../cxgb4/t4_regs.h"
+
+/*
+ * The VF Register Map.
+ *
+ * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local
+ * bus module (PL) and CPU Interface Module (CIM) components are mapped via
+ * the Slice to Module Map Table (see below) in the Physical Function Register
+ * Map.  The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base
+ * and Offset registers in the PF Register Map.  The MBDATA base address is
+ * quite constrained as it determines the Mailbox Data addresses for both PFs
+ * and VFs, and therefore must fit in both the VF and PF Register Maps without
+ * overlapping other registers.
+ */
+#define T4VF_SGE_BASE_ADDR	0x0000
+#define T4VF_MPS_BASE_ADDR	0x0100
+#define T4VF_PL_BASE_ADDR	0x0200
+#define T4VF_MBDATA_BASE_ADDR	0x0240
+#define T4VF_CIM_BASE_ADDR	0x0300
+
+#define T4VF_REGMAP_START	0x0000
+#define T4VF_REGMAP_SIZE	0x0400
+
+/*
+ * There's no hardware limitation which requires that the addresses of the
+ * Mailbox Data in the fixed CIM PF map and the programmable VF map must
+ * match.  However, it's a useful convention ...
+ */
+#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
+#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
+#endif
+
+/*
+ * Virtual Function "Slice to Module Map Table" definitions.
+ *
+ * This table allows us to map subsets of the various module register sets
+ * into the T4VF Register Map.  Each table entry identifies the index of the
+ * module whose registers are being mapped, the offset within the module's
+ * register set that the mapping should start at, the limit of the mapping,
+ * and the offset within the T4VF Register Map to which the module's registers
+ * are being mapped.  All addresses and quantities are in terms of 32-bit
+ * words.  The "limit" value is also in terms of 32-bit words and is equal to
+ * the last address mapped in the T4VF Register Map (i.e. it's a "<="
+ * relation rather than a "<").
+ */
+#define T4VF_MOD_MAP(module, index, first, last) \
+	T4VF_MOD_MAP_##module##_INDEX  = (index), \
+	T4VF_MOD_MAP_##module##_FIRST  = (first), \
+	T4VF_MOD_MAP_##module##_LAST   = (last), \
+	T4VF_MOD_MAP_##module##_OFFSET = ((first)/4), \
+	T4VF_MOD_MAP_##module##_BASE = \
+		(T4VF_##module##_BASE_ADDR/4 + (first)/4), \
+	T4VF_MOD_MAP_##module##_LIMIT = \
+		(T4VF_##module##_BASE_ADDR/4 + (last)/4),
+
+#define SGE_VF_KDOORBELL 0x0
+#define SGE_VF_GTS 0x4
+#define MPS_VF_CTL 0x0
+#define MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
+#define PL_VF_WHOAMI 0x0
+#define CIM_VF_EXT_MAILBOX_CTRL 0x0
+#define CIM_VF_EXT_MAILBOX_STATUS 0x4
+
+enum {
+    T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS)
+    T4VF_MOD_MAP(MPS, 0, MPS_VF_CTL, MPS_VF_STAT_RX_VF_ERR_FRAMES_H)
+    T4VF_MOD_MAP(PL,  3, PL_VF_WHOAMI, PL_VF_WHOAMI)
+    T4VF_MOD_MAP(CIM, 1, CIM_VF_EXT_MAILBOX_CTRL, CIM_VF_EXT_MAILBOX_STATUS)
+};
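+
+/*
+ * Purely as an illustration of the macro above, the SGE entry expands to:
+ *
+ *	T4VF_MOD_MAP_SGE_INDEX  = 2
+ *	T4VF_MOD_MAP_SGE_FIRST  = SGE_VF_KDOORBELL			(0x0)
+ *	T4VF_MOD_MAP_SGE_LAST   = SGE_VF_GTS				(0x4)
+ *	T4VF_MOD_MAP_SGE_OFFSET = SGE_VF_KDOORBELL/4			(0)
+ *	T4VF_MOD_MAP_SGE_BASE   = T4VF_SGE_BASE_ADDR/4 + SGE_VF_KDOORBELL/4 (0)
+ *	T4VF_MOD_MAP_SGE_LIMIT  = T4VF_SGE_BASE_ADDR/4 + SGE_VF_GTS/4	(1)
+ */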
+
+/*
+ * There isn't a Slice to Module Map Table entry for the Mailbox Data
+ * registers, but it's convenient to use similar names as above.  There are 8
+ * little-endian 64-bit Mailbox Data registers.  Note that the "instances"
+ * value below is in terms of 32-bit words which matches the "word" addressing
+ * space we use above for the Slice to Module Map Space.
+ */
+#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES 16
+
+#define T4VF_MBDATA_FIRST	0
+#define T4VF_MBDATA_LAST	((NUM_CIM_VF_MAILBOX_DATA_INSTANCES-1)*4)
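+
+/*
+ * i.e. T4VF_MBDATA_LAST = (16 - 1) * 4 = 0x3c, the offset of the last 32-bit
+ * word of the 64-byte (8 x 64-bit register) Mailbox Data region which starts
+ * at T4VF_MBDATA_BASE_ADDR.
+ */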
+
+#endif /* __T4VF_DEFS_H__ */
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
new file mode 100644
index 0000000..1ef2528
--- /dev/null
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -0,0 +1,1333 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/version.h>
+#include <linux/pci.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4fw_api.h"
+
+/*
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's).  Return an error if it doesn't
+ * become ready ...
+ */
+int __devinit t4vf_wait_dev_ready(struct adapter *adapter)
+{
+	const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
+	const u32 notready1 = 0xffffffff;
+	const u32 notready2 = 0xeeeeeeee;
+	u32 val;
+
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+	msleep(500);
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+	else
+		return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order
+ * (since the firmware data structures are specified in a big-endian layout).
+ */
+static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
+			 u32 mbox_data)
+{
+	for ( ; size; size -= 8, mbox_data += 8)
+		*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
+}
+
+/*
+ * Dump contents of mailbox with a leading tag.
+ */
+static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
+{
+	dev_err(adapter->pdev_dev,
+		"mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
+		(unsigned long long)t4_read_reg64(adapter, mbox_data +  0),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data +  8),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
+}
+
+/**
+ *	t4vf_wr_mbox_core - send a command to FW through the mailbox
+ *	@adapter: the adapter
+ *	@cmd: the command to write
+ *	@size: command length in bytes
+ *	@rpl: where to optionally store the reply
+ *	@sleep_ok: if true we may sleep while awaiting command completion
+ *
+ *	Sends the given command to FW through the mailbox and waits for the
+ *	FW to execute the command.  If @rpl is not %NULL it is used to store
+ *	the FW's reply to the command.  The command and its optional reply
+ *	are of the same length.  FW can take up to 500 ms to respond.
+ *	@sleep_ok determines whether we may sleep while awaiting the response.
+ *	If sleeping is allowed we use progressive backoff; otherwise we spin.
+ *
+ *	The return value is 0 on success or a negative errno on failure.  A
+ *	failure can happen either because we are not able to execute the
+ *	command or FW executes it but signals an error.  In the latter case
+ *	the return value is the error code indicated by FW (negated).
+ */
+int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
+		      void *rpl, bool sleep_ok)
+{
+	static int delay[] = {
+		1, 1, 3, 5, 10, 10, 20, 50, 100
+	};
+
+	u32 v;
+	int i, ms, delay_idx;
+	const __be64 *p;
+	u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
+	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
+
+	/*
+	 * Commands must be multiples of 16 bytes in length and may not be
+	 * larger than the size of the Mailbox Data register array.
+	 */
+	if ((size % 16) != 0 ||
+	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
+		return -EINVAL;
+
+	/*
+	 * Loop trying to get ownership of the mailbox.  Return an error
+	 * if we can't gain ownership.
+	 */
+	v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
+		v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+	if (v != MBOX_OWNER_DRV)
+		return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
+
+	/*
+	 * Write the command array into the Mailbox Data register array and
+	 * transfer ownership of the mailbox to the firmware.
+	 */
+	for (i = 0, p = cmd; i < size; i += 8)
+		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+	t4_write_reg(adapter, mbox_ctl,
+		     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+	t4_read_reg(adapter, mbox_ctl);          /* flush write */
+
+	/*
+	 * Spin waiting for firmware to acknowledge processing our command.
+	 */
+	delay_idx = 0;
+	ms = delay[0];
+
+	for (i = 0; i < 500; i += ms) {
+		if (sleep_ok) {
+			ms = delay[delay_idx];	/* last element may repeat */
+			if (delay_idx < ARRAY_SIZE(delay) - 1)
+				delay_idx++;
+			msleep(ms);
+		} else
+			mdelay(ms);
+
+		/*
+		 * If we're the owner, see if this is the reply we wanted.
+		 */
+		v = t4_read_reg(adapter, mbox_ctl);
+		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+			/*
+			 * If the Message Valid bit isn't on, revoke ownership
+			 * of the mailbox and continue waiting for our reply.
+			 */
+			if ((v & MBMSGVALID) == 0) {
+				t4_write_reg(adapter, mbox_ctl,
+					     MBOWNER(MBOX_OWNER_NONE));
+				continue;
+			}
+
+			/*
+			 * We now have our reply.  Extract the command return
+			 * value, copy the reply back to our caller's buffer
+			 * (if specified) and revoke ownership of the mailbox.
+			 * We return the (negated) firmware command return
+			 * code (this depends on FW_SUCCESS == 0).
+			 */
+
+			/* return value in low-order little-endian word */
+			v = t4_read_reg(adapter, mbox_data);
+			if (FW_CMD_RETVAL_GET(v))
+				dump_mbox(adapter, "FW Error", mbox_data);
+
+			if (rpl) {
+				/* request bit in high-order BE word */
+				WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+					 & FW_CMD_REQUEST) == 0);
+				get_mbox_rpl(adapter, rpl, size, mbox_data);
+				WARN_ON((be32_to_cpu(*(u32 *)rpl)
+					 & FW_CMD_REQUEST) != 0);
+			}
+			t4_write_reg(adapter, mbox_ctl,
+				     MBOWNER(MBOX_OWNER_NONE));
+			return -FW_CMD_RETVAL_GET(v);
+		}
+	}
+
+	/*
+	 * We timed out.  Return the error ...
+	 */
+	dump_mbox(adapter, "FW Timeout", mbox_data);
+	return -ETIMEDOUT;
+}
+
+/**
+ *	hash_mac_addr - return the hash value of a MAC address
+ *	@addr: the 48-bit Ethernet MAC address
+ *
+ *	Hashes a MAC address according to the hash function used by hardware
+ *	inexact (hash) address matching.
+ */
+static int hash_mac_addr(const u8 *addr)
+{
+	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
+	a ^= b;
+	a ^= (a >> 12);
+	a ^= (a >> 6);
+	return a & 0x3f;
+}
+
+/**
+ *	init_link_config - initialize a link's SW state
+ *	@lc: structure holding the link state
+ *	@caps: link capabilities
+ *
+ *	Initializes the SW state maintained for each link, including the link's
+ *	capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void __devinit init_link_config(struct link_config *lc,
+				       unsigned int caps)
+{
+	lc->supported = caps;
+	lc->requested_speed = 0;
+	lc->speed = 0;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & SUPPORTED_Autoneg) {
+		lc->advertising = lc->supported;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+/**
+ *	t4vf_port_init - initialize port hardware/software state
+ *	@adapter: the adapter
+ *	@pidx: the adapter port index
+ */
+int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
+{
+	struct port_info *pi = adap2pinfo(adapter, pidx);
+	struct fw_vi_cmd vi_cmd, vi_rpl;
+	struct fw_port_cmd port_cmd, port_rpl;
+	int v;
+	u32 word;
+
+	/*
+	 * Execute a VI Read command to get our Virtual Interface information
+	 * like MAC address, etc.
+	 */
+	memset(&vi_cmd, 0, sizeof(vi_cmd));
+	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+				       FW_CMD_REQUEST |
+				       FW_CMD_READ);
+	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
+	vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid));
+	v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
+	if (v)
+		return v;
+
+	BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd));
+	pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd));
+	t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
+
+	/*
+	 * If we don't have read access to our port information, we're done
+	 * now.  Otherwise, execute a PORT Read command to get it ...
+	 */
+	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
+		return 0;
+
+	memset(&port_cmd, 0, sizeof(port_cmd));
+	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) |
+					    FW_CMD_REQUEST |
+					    FW_CMD_READ |
+					    FW_PORT_CMD_PORTID(pi->port_id));
+	port_cmd.action_to_len16 =
+		cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+			    FW_LEN16(port_cmd));
+	v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
+	if (v)
+		return v;
+
+	v = 0;
+	word = be16_to_cpu(port_rpl.u.info.pcap);
+	if (word & FW_PORT_CAP_SPEED_100M)
+		v |= SUPPORTED_100baseT_Full;
+	if (word & FW_PORT_CAP_SPEED_1G)
+		v |= SUPPORTED_1000baseT_Full;
+	if (word & FW_PORT_CAP_SPEED_10G)
+		v |= SUPPORTED_10000baseT_Full;
+	if (word & FW_PORT_CAP_ANEG)
+		v |= SUPPORTED_Autoneg;
+	init_link_config(&pi->link_cfg, v);
+
+	return 0;
+}
+
+/**
+ *	t4vf_query_params - query FW or device parameters
+ *	@adapter: the adapter
+ *	@nparams: the number of parameters
+ *	@params: the parameter names
+ *	@vals: the parameter values
+ *
+ *	Reads the values of firmware or device parameters.  Up to 7 parameters
+ *	can be queried at once.
+ */
+int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+		      const u32 *params, u32 *vals)
+{
+	int i, ret;
+	struct fw_params_cmd cmd, rpl;
+	struct fw_params_param *p;
+	size_t len16;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_READ);
+	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+				      param[nparams].mnem), 16);
+	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+		p->mnem = htonl(*params++);
+
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret == 0)
+		for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+			*vals++ = be32_to_cpu(p->val);
+	return ret;
+}
+
+/**
+ *	t4vf_set_params - sets FW or device parameters
+ *	@adapter: the adapter
+ *	@nparams: the number of parameters
+ *	@params: the parameter names
+ *	@vals: the parameter values
+ *
+ *	Sets the values of firmware or device parameters.  Up to 7 parameters
+ *	can be specified at once.
+ */
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+		    const u32 *params, const u32 *vals)
+{
+	int i;
+	struct fw_params_cmd cmd;
+	struct fw_params_param *p;
+	size_t len16;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_WRITE);
+	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+				      param[nparams]), 16);
+	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
+		p->mnem = cpu_to_be32(*params++);
+		p->val = cpu_to_be32(*vals++);
+	}
+
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
+ *	@adapter: the adapter
+ *
+ *	Retrieves various core SGE parameters in the form of hardware SGE
+ *	register values.  The caller is responsible for decoding these as
+ *	needed.  The SGE parameters are stored in @adapter->params.sge.
+ */
+int t4vf_get_sge_params(struct adapter *adapter)
+{
+	struct sge_params *sge_params = &adapter->params.sge;
+	u32 params[7], vals[7];
+	int v;
+
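+	/*
+	 * Read the raw SGE register values via the firmware parameter query
+	 * interface.  A single query can return at most seven values, hence
+	 * the separate request for the Rx threshold register below.
+	 */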
+	params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_CONTROL));
+	params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE));
+	params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0));
+	params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1));
+	params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1));
+	params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3));
+	params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5));
+	v = t4vf_query_params(adapter, 7, params, vals);
+	if (v)
+		return v;
+	sge_params->sge_control = vals[0];
+	sge_params->sge_host_page_size = vals[1];
+	sge_params->sge_fl_buffer_size[0] = vals[2];
+	sge_params->sge_fl_buffer_size[1] = vals[3];
+	sge_params->sge_timer_value_0_and_1 = vals[4];
+	sge_params->sge_timer_value_2_and_3 = vals[5];
+	sge_params->sge_timer_value_4_and_5 = vals[6];
+
+	params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
+	v = t4vf_query_params(adapter, 1, params, vals);
+	if (v)
+		return v;
+	sge_params->sge_ingress_rx_threshold = vals[0];
+
+	return 0;
+}
+
+/**
+ *	t4vf_get_vpd_params - retrieve device VPD parameters
+ *	@adapter: the adapter
+ *
+ *	Retrieves various device Vital Product Data parameters.  The parameters
+ *	are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+	struct vpd_params *vpd_params = &adapter->params.vpd;
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+	v = t4vf_query_params(adapter, 1, params, vals);
+	if (v)
+		return v;
+	vpd_params->cclk = vals[0];
+
+	return 0;
+}
+
+/**
+ *	t4vf_get_dev_params - retrieve device parameters
+ *	@adapter: the adapter
+ *
+ *	Retrieves various device parameters.  The parameters are stored in
+ *	@adapter->params.dev.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+	struct dev_params *dev_params = &adapter->params.dev;
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
+	params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+	v = t4vf_query_params(adapter, 2, params, vals);
+	if (v)
+		return v;
+	dev_params->fwrev = vals[0];
+	dev_params->tprev = vals[1];
+
+	return 0;
+}
+
+/**
+ *	t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
+ *	@adapter: the adapter
+ *
+ *	Retrieves global RSS mode and parameters with which we have to live
+ *	and stores them in the @adapter's RSS parameters.
+ */
+int t4vf_get_rss_glb_config(struct adapter *adapter)
+{
+	struct rss_params *rss = &adapter->params.rss;
+	struct fw_rss_glb_config_cmd cmd, rpl;
+	int v;
+
+	/*
+	 * Execute an RSS Global Configuration read command to retrieve
+	 * our RSS configuration.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+				      FW_CMD_REQUEST |
+				      FW_CMD_READ);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v)
+		return v;
+
+	/*
+	 * Translate the big-endian RSS Global Configuration into our
+	 * cpu-endian format based on the RSS mode.  We also do first level
+	 * filtering at this point to weed out modes which don't support
+	 * VF Drivers ...
+	 */
+	rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET(
+			be32_to_cpu(rpl.u.manual.mode_pkd));
+	switch (rss->mode) {
+	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+		u32 word = be32_to_cpu(
+				rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
+
+		rss->u.basicvirtual.synmapen =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+		rss->u.basicvirtual.syn4tupenipv6 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+		rss->u.basicvirtual.syn2tupenipv6 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+		rss->u.basicvirtual.syn4tupenipv4 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+		rss->u.basicvirtual.syn2tupenipv4 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+
+		rss->u.basicvirtual.ofdmapen =
+			((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+
+		rss->u.basicvirtual.tnlmapen =
+			((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+		rss->u.basicvirtual.tnlalllookup =
+			((word  & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+
+		rss->u.basicvirtual.hashtoeplitz =
+			((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+
+		/* we need at least Tunnel Map Enable to be set */
+		if (!rss->u.basicvirtual.tnlmapen)
+			return -EINVAL;
+		break;
+	}
+
+	default:
+		/* all unknown/unsupported RSS modes result in an error */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ *	t4vf_get_vfres - retrieve VF resource limits
+ *	@adapter: the adapter
+ *
+ *	Retrieves configured resource limits and capabilities for a virtual
+ *	function.  The results are stored in @adapter->vfres.
+ */
+int t4vf_get_vfres(struct adapter *adapter)
+{
+	struct vf_resources *vfres = &adapter->params.vfres;
+	struct fw_pfvf_cmd cmd, rpl;
+	int v;
+	u32 word;
+
+	/*
+	 * Execute PFVF Read command to get VF resource limits; bail out early
+	 * with error on command failure.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_READ);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v)
+		return v;
+
+	/*
+	 * Extract VF resource limits and return success.
+	 */
+	word = be32_to_cpu(rpl.niqflint_niq);
+	vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word);
+	vfres->niq = FW_PFVF_CMD_NIQ_GET(word);
+
+	word = be32_to_cpu(rpl.type_to_neq);
+	vfres->neq = FW_PFVF_CMD_NEQ_GET(word);
+	vfres->pmask = FW_PFVF_CMD_PMASK_GET(word);
+
+	word = be32_to_cpu(rpl.tc_to_nexactf);
+	vfres->tc = FW_PFVF_CMD_TC_GET(word);
+	vfres->nvi = FW_PFVF_CMD_NVI_GET(word);
+	vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word);
+
+	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+	vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word);
+	vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word);
+	vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word);
+
+	return 0;
+}
+
+/**
+ *	t4vf_read_rss_vi_config - read a VI's RSS configuration
+ *	@adapter: the adapter
+ *	@viid: Virtual Interface ID
+ *	@config: pointer to host-native VI RSS Configuration buffer
+ *
+ *	Reads the Virtual Interface's RSS configuration information and
+ *	translates it into CPU-native format.
+ */
+int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
+			    union rss_vi_config *config)
+{
+	struct fw_rss_vi_config_cmd cmd, rpl;
+	int v;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_READ |
+				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v)
+		return v;
+
+	switch (adapter->params.rss.mode) {
+	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+		u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
+
+		config->basicvirtual.ip6fourtupen =
+			((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0);
+		config->basicvirtual.ip6twotupen =
+			((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0);
+		config->basicvirtual.ip4fourtupen =
+			((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0);
+		config->basicvirtual.ip4twotupen =
+			((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0);
+		config->basicvirtual.udpen =
+			((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0);
+		config->basicvirtual.defaultq =
+			FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word);
+		break;
+	}
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ *	t4vf_write_rss_vi_config - write a VI's RSS configuration
+ *	@adapter: the adapter
+ *	@viid: Virtual Interface ID
+ *	@config: pointer to host-native VI RSS Configuration buffer
+ *
+ *	Writes the Virtual Interface's RSS configuration information
+ *	(translating it into firmware-native format before writing).
+ */
+int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
+			     union rss_vi_config *config)
+{
+	struct fw_rss_vi_config_cmd cmd, rpl;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_WRITE |
+				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	switch (adapter->params.rss.mode) {
+	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+		u32 word = 0;
+
+		if (config->basicvirtual.ip6fourtupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+		if (config->basicvirtual.ip6twotupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+		if (config->basicvirtual.ip4fourtupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+		if (config->basicvirtual.ip4twotupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+		if (config->basicvirtual.udpen)
+			word |= FW_RSS_VI_CONFIG_CMD_UDPEN;
+		word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ(
+				config->basicvirtual.defaultq);
+		cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
+		break;
+	}
+
+	default:
+		return -EINVAL;
+	}
+
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+}
+
+/**
+ *	t4vf_config_rss_range - configure a portion of the RSS mapping table
+ *	@adapter: the adapter
+ *	@viid: Virtual Interface of RSS Table Slice
+ *	@start: starting entry in the table to write
+ *	@n: how many table entries to write
+ *	@rspq: values for the "Response Queue" (Ingress Queue) lookup table
+ *	@nrspq: number of values in @rspq
+ *
+ *	Programs the selected part of the VI's RSS mapping table with the
+ *	provided values.  If @nrspq < @n the supplied values are used repeatedly
+ *	until the full table range is populated.
+ *
+ *	The caller must ensure the values in @rspq are in the range 0..1023.
+ */
+int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
+			  int start, int n, const u16 *rspq, int nrspq)
+{
+	const u16 *rsp = rspq;
+	const u16 *rsp_end = rspq+nrspq;
+	struct fw_rss_ind_tbl_cmd cmd;
+
+	/*
+	 * Initialize firmware command template to write the RSS table.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_WRITE |
+				     FW_RSS_IND_TBL_CMD_VIID(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+	/*
+	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
+	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
+	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
+	 * reserved.
+	 */
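+	/*
+	 * Illustration: three queue IDs q0, q1, q2 are folded into one word
+	 * as IQ0(q0) | IQ1(q1) | IQ2(q2); unused slots in the final word of
+	 * a command are simply left zero.
+	 */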
+	while (n > 0) {
+		__be32 *qp = &cmd.iq0_to_iq2;
+		int nq = min(n, 32);
+		int ret;
+
+		/*
+		 * Set up the firmware RSS command header to send the next
+		 * "nq" Ingress Queue IDs to the firmware.
+		 */
+		cmd.niqid = cpu_to_be16(nq);
+		cmd.startidx = cpu_to_be16(start);
+
+		/*
+		 * Advance the start index and remaining count by the "nq"
+		 * entries handled in this pass.
+		 */
+		start += nq;
+		n -= nq;
+
+		/*
+		 * While there are still Ingress Queue IDs to stuff into the
+		 * current firmware RSS command, retrieve them from the
+		 * Ingress Queue ID array and insert them into the command.
+		 */
+		while (nq > 0) {
+			/*
+			 * Grab up to the next 3 Ingress Queue IDs (wrapping
+			 * around the Ingress Queue ID array if necessary) and
+			 * insert them into the firmware RSS command at the
+			 * current 3-tuple position within the command.
+			 */
+			u16 qbuf[3];
+			u16 *qbp = qbuf;
+			int nqbuf = min(3, nq);
+
+			nq -= nqbuf;
+			qbuf[0] = qbuf[1] = qbuf[2] = 0;
+			while (nqbuf) {
+				nqbuf--;
+				*qbp++ = *rsp++;
+				if (rsp >= rsp_end)
+					rsp = rspq;
+			}
+			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
+					    FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
+					    FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
+		}
+
+		/*
+		 * Send this portion of the RSS table update to the firmware;
+		 * bail out on any errors.
+		 */
+		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/**
+ *	t4vf_alloc_vi - allocate a virtual interface on a port
+ *	@adapter: the adapter
+ *	@port_id: physical port associated with the VI
+ *
+ *	Allocate a new Virtual Interface and bind it to the indicated
+ *	physical port.  Return the new Virtual Interface Identifier on
+ *	success, or a [negative] error number on failure.
+ */
+int t4vf_alloc_vi(struct adapter *adapter, int port_id)
+{
+	struct fw_vi_cmd cmd, rpl;
+	int v;
+
+	/*
+	 * Execute a VI command to allocate Virtual Interface and return its
+	 * VIID.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_WRITE |
+				    FW_CMD_EXEC);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+					 FW_VI_CMD_ALLOC);
+	cmd.portid_pkd = FW_VI_CMD_PORTID(port_id);
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v)
+		return v;
+
+	return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid));
+}
+
+/**
+ *	t4vf_free_vi -- free a virtual interface
+ *	@adapter: the adapter
+ *	@viid: the virtual interface identifier
+ *
+ *	Free a previously allocated Virtual Interface.  Return an error on
+ *	failure.
+ */
+int t4vf_free_vi(struct adapter *adapter, int viid)
+{
+	struct fw_vi_cmd cmd;
+
+	/*
+	 * Execute a VI command to free the Virtual Interface.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_EXEC);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+					 FW_VI_CMD_FREE);
+	cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_enable_vi - enable/disable a virtual interface
+ *	@adapter: the adapter
+ *	@viid: the Virtual Interface ID
+ *	@rx_en: 1=enable Rx, 0=disable Rx
+ *	@tx_en: 1=enable Tx, 0=disable Tx
+ *
+ *	Enables/disables a virtual interface.
+ */
+int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
+		   bool rx_en, bool tx_en)
+{
+	struct fw_vi_enable_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_EXEC |
+				     FW_VI_ENABLE_CMD_VIID(viid));
+	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) |
+				       FW_VI_ENABLE_CMD_EEN(tx_en) |
+				       FW_LEN16(cmd));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_identify_port - identify a VI's port by blinking its LED
+ *	@adapter: the adapter
+ *	@viid: the Virtual Interface ID
+ *	@nblinks: how many times to blink LED at 2.5 Hz
+ *
+ *	Identifies a VI's port by blinking its LED.
+ */
+int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
+		       unsigned int nblinks)
+{
+	struct fw_vi_enable_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_EXEC |
+				     FW_VI_ENABLE_CMD_VIID(viid));
+	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED |
+				       FW_LEN16(cmd));
+	cmd.blinkdur = cpu_to_be16(nblinks);
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_set_rxmode - set Rx properties of a virtual interface
+ *	@adapter: the adapter
+ *	@viid: the VI id
+ *	@mtu: the new MTU or -1 for no change
+ *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ *	@vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
+ *		-1 no change
+ *
+ *	Sets Rx properties of a virtual interface.
+ */
+int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
+		    int mtu, int promisc, int all_multi, int bcast, int vlanex,
+		    bool sleep_ok)
+{
+	struct fw_vi_rxmode_cmd cmd;
+
+	/* convert "no change" (-1) values into the FW's all-ones encoding */
+	if (mtu < 0)
+		mtu = FW_VI_RXMODE_CMD_MTU_MASK;
+	if (promisc < 0)
+		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
+	if (all_multi < 0)
+		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
+	if (bcast < 0)
+		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+	if (vlanex < 0)
+		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_WRITE |
+				     FW_VI_RXMODE_CMD_VIID(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	cmd.mtu_to_vlanexen =
+		cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) |
+			    FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+			    FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+			    FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+			    FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
+}
+
+/**
+ *	t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
+ *	@adapter: the adapter
+ *	@viid: the Virtual Interface Identifier
+ *	@free: if true any existing filters for this VI id are first removed
+ *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
+ *	@addr: the MAC address(es)
+ *	@idx: where to store the index of each allocated filter
+ *	@hash: pointer to hash address filter bitmap
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Allocates an exact-match filter for each of the supplied addresses and
+ *	sets it to the corresponding address.  If @idx is not %NULL it should
+ *	have at least @naddr entries, each of which will be set to the index of
+ *	the filter allocated for the corresponding MAC address.  If a filter
+ *	could not be allocated for an address, its index is set to 0xffff.
+ *	If @hash is not %NULL addresses that fail to allocate an exact filter
+ *	are hashed and update the hash filter bitmap pointed at by @hash.
+ *
+ *	Returns a negative error number or the number of filters allocated.
+ */
+int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
+			unsigned int naddr, const u8 **addr, u16 *idx,
+			u64 *hash, bool sleep_ok)
+{
+	int i, ret;
+	struct fw_vi_mac_cmd cmd, rpl;
+	struct fw_vi_mac_exact *p;
+	size_t len16;
+
+	if (naddr > ARRAY_SIZE(cmd.u.exact))
+		return -EINVAL;
+	len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+				      u.exact[naddr]), 16);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_WRITE |
+				     (free ? FW_CMD_EXEC : 0) |
+				     FW_VI_MAC_CMD_VIID(viid));
+	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+					    FW_CMD_LEN16(len16));
+
+	for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
+		p->valid_to_idx =
+			cpu_to_be16(FW_VI_MAC_CMD_VALID |
+				    FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
+	}
+
+	ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
+	if (ret)
+		return ret;
+
+	for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
+		u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
+
+		if (idx)
+			idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
+				  ? 0xffff
+				  : index);
+		if (index < FW_CLS_TCAM_NUM_ENTRIES)
+			ret++;
+		else if (hash)
+			*hash |= (1ULL << hash_mac_addr(addr[i]));
+	}
+	return ret;
+}
+
+/**
+ *	t4vf_change_mac - modifies the exact-match filter for a MAC address
+ *	@adapter: the adapter
+ *	@viid: the Virtual Interface ID
+ *	@idx: index of existing filter for old value of MAC address, or -1
+ *	@addr: the new MAC address value
+ *	@persist: if idx < 0, the new MAC allocation should be persistent
+ *
+ *	Modifies an exact-match filter and sets it to the new MAC address.
+ *	Note that in general it is not possible to modify the value of a given
+ *	filter so the generic way to modify an address filter is to free the
+ *	one being used by the old address value and allocate a new filter for
+ *	the new address value.  @idx can be -1 if the address is a new
+ *	addition.
+ *
+ *	Returns a negative error number or the index of the filter with the new
+ *	MAC value.
+ */
+int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
+		    int idx, const u8 *addr, bool persist)
+{
+	int ret;
+	struct fw_vi_mac_cmd cmd, rpl;
+	struct fw_vi_mac_exact *p = &cmd.u.exact[0];
+	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+					     u.exact[1]), 16);
+
+	/*
+	 * If this is a new allocation, determine whether it should be
+	 * persistent (across a "freemacs" operation) or not.
+	 */
+	if (idx < 0)
+		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_WRITE |
+				     FW_VI_MAC_CMD_VIID(viid));
+	cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID |
+				      FW_VI_MAC_CMD_IDX(idx));
+	memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret == 0) {
+		p = &rpl.u.exact[0];
+		ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
+		if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
+			ret = -ENOMEM;
+	}
+	return ret;
+}
+
+/**
+ *	t4vf_set_addr_hash - program the MAC inexact-match hash filter
+ *	@adapter: the adapter
+ *	@viid: the Virtual Interface Identifier
+ *	@ucast: whether the hash filter should also match unicast addresses
+ *	@vec: the value to be written to the hash filter
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Sets the 64-bit inexact-match hash filter for a virtual interface.
+ */
+int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
+		       bool ucast, u64 vec, bool sleep_ok)
+{
+	struct fw_vi_mac_cmd cmd;
+	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+					     u.exact[0]), 16);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_WRITE |
+				     FW_VI_ENABLE_CMD_VIID(viid));
+	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN |
+					    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
+					    FW_CMD_LEN16(len16));
+	cmd.u.hash.hashvec = cpu_to_be64(vec);
+	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
+}
+
+/**
+ *	t4vf_get_port_stats - collect "port" statistics
+ *	@adapter: the adapter
+ *	@pidx: the port index
+ *	@s: the stats structure to fill
+ *
+ *	Collect statistics for the "port"'s Virtual Interface.
+ */
+int t4vf_get_port_stats(struct adapter *adapter, int pidx,
+			struct t4vf_port_stats *s)
+{
+	struct port_info *pi = adap2pinfo(adapter, pidx);
+	struct fw_vi_stats_vf fwstats;
+	unsigned int rem = VI_VF_NUM_STATS;
+	__be64 *fwsp = (__be64 *)&fwstats;
+
+	/*
+	 * Grab the Virtual Interface statistics a chunk at a time via mailbox
+	 * commands.  We could use a Work Request and get all of them at once
+	 * but that's an asynchronous interface which is awkward to use.
+	 */
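+	/*
+	 * Each pass below fetches up to six 64-bit statistics, so filling
+	 * fwstats takes ceil(VI_VF_NUM_STATS / 6) mailbox commands.
+	 */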
+	while (rem) {
+		unsigned int ix = VI_VF_NUM_STATS - rem;
+		unsigned int nstats = min(6U, rem);
+		struct fw_vi_stats_cmd cmd, rpl;
+		size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
+			      sizeof(struct fw_vi_stats_ctl));
+		size_t len16 = DIV_ROUND_UP(len, 16);
+		int ret;
+
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) |
+					     FW_VI_STATS_CMD_VIID(pi->viid) |
+					     FW_CMD_REQUEST |
+					     FW_CMD_READ);
+		cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+		cmd.u.ctl.nstats_ix =
+			cpu_to_be16(FW_VI_STATS_CMD_IX(ix) |
+				    FW_VI_STATS_CMD_NSTATS(nstats));
+		ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
+		if (ret)
+			return ret;
+
+		memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
+
+		rem -= nstats;
+		fwsp += nstats;
+	}
+
+	/*
+	 * Translate firmware statistics into host native statistics.
+	 */
+	s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
+	s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
+	s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
+	s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
+	s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
+	s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
+	s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
+	s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
+	s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);
+
+	s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
+	s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
+	s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
+	s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
+	s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
+	s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
+
+	s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);
+
+	return 0;
+}
+
+/**
+ *	t4vf_iq_free - free an ingress queue and its free lists
+ *	@adapter: the adapter
+ *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ *	@iqid: ingress queue ID
+ *	@fl0id: FL0 queue ID or 0xffff if no attached FL0
+ *	@fl1id: FL1 queue ID or 0xffff if no attached FL1
+ *
+ *	Frees an ingress queue and its associated free lists, if any.
+ */
+int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
+		 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
+{
+	struct fw_iq_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_EXEC);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE |
+					 FW_LEN16(cmd));
+	cmd.type_to_iqandstindex =
+		cpu_to_be32(FW_IQ_CMD_TYPE(iqtype));
+
+	cmd.iqid = cpu_to_be16(iqid);
+	cmd.fl0id = cpu_to_be16(fl0id);
+	cmd.fl1id = cpu_to_be16(fl1id);
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_eth_eq_free - free an Ethernet egress queue
+ *	@adapter: the adapter
+ *	@eqid: egress queue ID
+ *
+ *	Frees an Ethernet egress queue.
+ */
+int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
+{
+	struct fw_eq_eth_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_EXEC);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE |
+					 FW_LEN16(cmd));
+	cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_handle_fw_rpl - process a firmware reply message
+ *	@adapter: the adapter
+ *	@rpl: start of the firmware message
+ *
+ *	Processes a firmware message, such as link state change messages.
+ */
+int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
+{
+	struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl;
+	u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
+
+	switch (opcode) {
+	case FW_PORT_CMD: {
+		/*
+		 * Link/module state change message.
+		 */
+		const struct fw_port_cmd *port_cmd = (void *)rpl;
+		u32 word;
+		int action, port_id, link_ok, speed, fc, pidx;
+
+		/*
+		 * Extract various fields from port status change message.
+		 */
+		action = FW_PORT_CMD_ACTION_GET(
+			be32_to_cpu(port_cmd->action_to_len16));
+		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
+			dev_err(adapter->pdev_dev,
+				"Unknown firmware PORT reply action %x\n",
+				action);
+			break;
+		}
+
+		port_id = FW_PORT_CMD_PORTID_GET(
+			be32_to_cpu(port_cmd->op_to_portid));
+
+		word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
+		link_ok = (word & FW_PORT_CMD_LSTATUS) != 0;
+		speed = 0;
+		fc = 0;
+		if (word & FW_PORT_CMD_RXPAUSE)
+			fc |= PAUSE_RX;
+		if (word & FW_PORT_CMD_TXPAUSE)
+			fc |= PAUSE_TX;
+		if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+			speed = SPEED_100;
+		else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+			speed = SPEED_1000;
+		else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+			speed = SPEED_10000;
+
+		/*
+		 * Scan all of our "ports" (Virtual Interfaces) looking for
+		 * those bound to the physical port which has changed.  If
+		 * our recorded state doesn't match the current state,
+		 * signal that change to the OS code.
+		 */
+		for_each_port(adapter, pidx) {
+			struct port_info *pi = adap2pinfo(adapter, pidx);
+			struct link_config *lc;
+
+			if (pi->port_id != port_id)
+				continue;
+
+			lc = &pi->link_cfg;
+			if (link_ok != lc->link_ok || speed != lc->speed ||
+			    fc != lc->fc) {
+				/* something changed */
+				lc->link_ok = link_ok;
+				lc->speed = speed;
+				lc->fc = fc;
+				t4vf_os_link_changed(adapter, pidx, link_ok);
+			}
+		}
+		break;
+	}
+
+	default:
+		dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
+			opcode);
+	}
+	return 0;
+}
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 08e82b1..25e14d2 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -470,7 +470,6 @@
 	u32 isr_count;
 	u8 rmii_en;
 	u8 version;
-	struct net_device_stats net_dev_stats;
 	u32 mac_hash1;
 	u32 mac_hash2;
 	u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
@@ -1180,16 +1179,17 @@
 				void **net_data_tokens,
 				int num_tokens, u32 ch)
 {
+	struct net_device *ndev = priv->ndev;
 	u32 cnt;
 
-	if (unlikely(num_tokens && netif_queue_stopped(priv->ndev)))
-		netif_start_queue(priv->ndev);
+	if (unlikely(num_tokens && netif_queue_stopped(ndev)))
+		netif_start_queue(ndev);
 	for (cnt = 0; cnt < num_tokens; cnt++) {
 		struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt];
 		if (skb == NULL)
 			continue;
-		priv->net_dev_stats.tx_packets++;
-		priv->net_dev_stats.tx_bytes += skb->len;
+		ndev->stats.tx_packets++;
+		ndev->stats.tx_bytes += skb->len;
 		dev_kfree_skb_any(skb);
 	}
 	return 0;
@@ -1476,7 +1476,7 @@
 					" err. Out of TX BD's");
 			netif_stop_queue(priv->ndev);
 		}
-		priv->net_dev_stats.tx_dropped++;
+		ndev->stats.tx_dropped++;
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1501,7 +1501,7 @@
 	if (netif_msg_tx_err(priv))
 		dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
 
-	priv->net_dev_stats.tx_errors++;
+	ndev->stats.tx_errors++;
 	emac_int_disable(priv);
 	emac_stop_txch(priv, EMAC_DEF_TX_CH);
 	emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
@@ -1926,14 +1926,14 @@
 static int emac_net_rx_cb(struct emac_priv *priv,
 			  struct emac_netpktobj *net_pkt_list)
 {
-	struct sk_buff *p_skb;
-	p_skb = (struct sk_buff *)net_pkt_list->pkt_token;
+	struct net_device *ndev = priv->ndev;
+	struct sk_buff *p_skb = net_pkt_list->pkt_token;
 	/* set length of packet */
 	skb_put(p_skb, net_pkt_list->pkt_length);
 	p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
 	netif_receive_skb(p_skb);
-	priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length;
-	priv->net_dev_stats.rx_packets++;
+	ndev->stats.rx_bytes += net_pkt_list->pkt_length;
+	ndev->stats.rx_packets++;
 	return 0;
 }
 
@@ -2570,39 +2570,39 @@
 	else
 		stats_clear_mask = 0;
 
-	priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
+	ndev->stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
 	emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);
 
-	priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) +
+	ndev->stats.collisions += (emac_read(EMAC_TXCOLLISION) +
 					   emac_read(EMAC_TXSINGLECOLL) +
 					   emac_read(EMAC_TXMULTICOLL));
 	emac_write(EMAC_TXCOLLISION, stats_clear_mask);
 	emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
 	emac_write(EMAC_TXMULTICOLL, stats_clear_mask);
 
-	priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
+	ndev->stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
 						emac_read(EMAC_RXJABBER) +
 						emac_read(EMAC_RXUNDERSIZED));
 	emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
 	emac_write(EMAC_RXJABBER, stats_clear_mask);
 	emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);
 
-	priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
+	ndev->stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
 					       emac_read(EMAC_RXMOFOVERRUNS));
 	emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
 	emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);
 
-	priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
+	ndev->stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
 	emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);
 
-	priv->net_dev_stats.tx_carrier_errors +=
+	ndev->stats.tx_carrier_errors +=
 		emac_read(EMAC_TXCARRIERSENSE);
 	emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
 
-	priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
+	ndev->stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
 	emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
 
-	return &priv->net_dev_stats;
+	return &ndev->stats;
 }
 
 static const struct net_device_ops emac_netdev_ops = {
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index abcc838..4fd6b2b 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -961,7 +961,7 @@
 	u8	RxPktReady;
 	u8	RxStatus;
 	__le16	RxLen;
-} __attribute__((__packed__));
+} __packed;
 
 /*
  *  Received a packet and pass to upper layer
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index f654db9..a4a0d2b 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 4dc02c7..307a72f4 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -359,6 +359,7 @@
 #define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
 #define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE       0x00000008
 #define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG           0x00000080
 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT          16
 #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK   0x0FFF0000
@@ -714,6 +715,7 @@
 #define BME1000_E_PHY_ID_R2  0x01410CB1
 #define I82577_E_PHY_ID      0x01540050
 #define I82578_E_PHY_ID      0x004DD040
+#define I82579_E_PHY_ID      0x01540090
 
 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c0b3db4..9ee133f 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -164,6 +164,7 @@
 	board_ich9lan,
 	board_ich10lan,
 	board_pchlan,
+	board_pch2lan,
 };
 
 struct e1000_queue_stats {
@@ -421,6 +422,8 @@
 #define FLAG2_HAS_PHY_WAKEUP              (1 << 1)
 #define FLAG2_IS_DISCARDING               (1 << 2)
 #define FLAG2_DISABLE_ASPM_L1             (1 << 3)
+#define FLAG2_HAS_PHY_STATS               (1 << 4)
+#define FLAG2_HAS_EEE                     (1 << 5)
 
 #define E1000_RX_DESC_PS(R, i)	    \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -458,7 +461,6 @@
 extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
 extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
 extern void e1000e_update_stats(struct e1000_adapter *adapter);
-extern bool e1000e_has_link(struct e1000_adapter *adapter);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
@@ -476,6 +478,7 @@
 extern struct e1000_info e1000_ich9_info;
 extern struct e1000_info e1000_ich10_info;
 extern struct e1000_info e1000_pch_info;
+extern struct e1000_info e1000_pch2_info;
 extern struct e1000_info e1000_es2_info;
 
 extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
@@ -494,6 +497,8 @@
 extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
 extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
 
 extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
 extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 38d79a6..45aebb4 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 2c52121..6355a1b 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -118,7 +118,6 @@
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 status;
 
 	if (hw->phy.media_type == e1000_media_type_copper) {
 
@@ -156,22 +155,29 @@
 		ecmd->transceiver = XCVR_EXTERNAL;
 	}
 
-	status = er32(STATUS);
-	if (status & E1000_STATUS_LU) {
-		if (status & E1000_STATUS_SPEED_1000)
-			ecmd->speed = 1000;
-		else if (status & E1000_STATUS_SPEED_100)
-			ecmd->speed = 100;
-		else
-			ecmd->speed = 10;
+	ecmd->speed = -1;
+	ecmd->duplex = -1;
 
-		if (status & E1000_STATUS_FD)
-			ecmd->duplex = DUPLEX_FULL;
-		else
-			ecmd->duplex = DUPLEX_HALF;
+	if (netif_running(netdev)) {
+		if (netif_carrier_ok(netdev)) {
+			ecmd->speed = adapter->link_speed;
+			ecmd->duplex = adapter->link_duplex - 1;
+		}
 	} else {
-		ecmd->speed = -1;
-		ecmd->duplex = -1;
+		u32 status = er32(STATUS);
+		if (status & E1000_STATUS_LU) {
+			if (status & E1000_STATUS_SPEED_1000)
+				ecmd->speed = 1000;
+			else if (status & E1000_STATUS_SPEED_100)
+				ecmd->speed = 100;
+			else
+				ecmd->speed = 10;
+
+			if (status & E1000_STATUS_FD)
+				ecmd->duplex = DUPLEX_FULL;
+			else
+				ecmd->duplex = DUPLEX_HALF;
+		}
 	}
 
 	ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
@@ -179,7 +185,7 @@
 
 	/* MDI-X => 2; MDI =>1; Invalid =>0 */
 	if ((hw->phy.media_type == e1000_media_type_copper) &&
-	    !hw->mac.get_link_status)
+	    netif_carrier_ok(netdev))
 		ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
 		                                      ETH_TP_MDI;
 	else
@@ -191,19 +197,15 @@
 static u32 e1000_get_link(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_hw *hw = &adapter->hw;
 
 	/*
-	 * If the link is not reported up to netdev, interrupts are disabled,
-	 * and so the physical link state may have changed since we last
-	 * looked. Set get_link_status to make sure that the true link
-	 * state is interrogated, rather than pulling a cached and possibly
-	 * stale link state from the driver.
+	 * Avoid touching hardware registers when possible, otherwise
+	 * link negotiation can get messed up when user-level scripts
+	 * are rapidly polling the driver to see if link is up.
 	 */
-	if (!netif_carrier_ok(netdev))
-		mac->get_link_status = 1;
-
-	return e1000e_has_link(adapter);
+	return netif_running(netdev) ? netif_carrier_ok(netdev) :
+	    !!(er32(STATUS) & E1000_STATUS_LU);
 }
 
 static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
@@ -880,6 +882,7 @@
 	switch (mac->type) {
 	case e1000_ich10lan:
 	case e1000_pchlan:
+	case e1000_pch2lan:
 		mask |= (1 << 18);
 		break;
 	default:
@@ -1263,33 +1266,36 @@
 
 	hw->mac.autoneg = 0;
 
-	/* Workaround: K1 must be disabled for stable 1Gbps operation */
-	if (hw->mac.type == e1000_pchlan)
-		e1000_configure_k1_ich8lan(hw, false);
+	if (hw->phy.type == e1000_phy_ife) {
+		/* force 100, set loopback */
+		e1e_wphy(hw, PHY_CONTROL, 0x6100);
 
-	if (hw->phy.type == e1000_phy_m88) {
+		/* Now set up the MAC to the same speed/duplex as the PHY. */
+		ctrl_reg = er32(CTRL);
+		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+
+		ew32(CTRL, ctrl_reg);
+		udelay(500);
+
+		return 0;
+	}
+
+	/* Specific PHY configuration for loopback */
+	switch (hw->phy.type) {
+	case e1000_phy_m88:
 		/* Auto-MDI/MDIX Off */
 		e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
 		/* reset to update Auto-MDI/MDIX */
 		e1e_wphy(hw, PHY_CONTROL, 0x9140);
 		/* autoneg off */
 		e1e_wphy(hw, PHY_CONTROL, 0x8140);
-	} else if (hw->phy.type == e1000_phy_gg82563)
+		break;
+	case e1000_phy_gg82563:
 		e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
-
-	ctrl_reg = er32(CTRL);
-
-	switch (hw->phy.type) {
-	case e1000_phy_ife:
-		/* force 100, set loopback */
-		e1e_wphy(hw, PHY_CONTROL, 0x6100);
-
-		/* Now set up the MAC to the same speed/duplex as the PHY. */
-		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
-		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
-			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
-			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
-			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
 		break;
 	case e1000_phy_bm:
 		/* Set Default MAC Interface speed to 1GB */
@@ -1312,24 +1318,42 @@
 		/* Set Early Link Enable */
 		e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
 		e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
-		/* fall through */
+		break;
+	case e1000_phy_82577:
+	case e1000_phy_82578:
+		/* Workaround: K1 must be disabled for stable 1Gbps operation */
+		e1000_configure_k1_ich8lan(hw, false);
+		break;
+	case e1000_phy_82579:
+		/* Disable PHY energy detect power down */
+		e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
+		e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+		/* Disable full chip energy detect */
+		e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
+		e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
+		/* Enable loopback on the PHY */
+#define I82577_PHY_LBK_CTRL          19
+		e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
+		break;
 	default:
-		/* force 1000, set loopback */
-		e1e_wphy(hw, PHY_CONTROL, 0x4140);
-		mdelay(250);
-
-		/* Now set up the MAC to the same speed/duplex as the PHY. */
-		ctrl_reg = er32(CTRL);
-		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
-		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
-			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
-			     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
-			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
-
-		if (adapter->flags & FLAG_IS_ICH)
-			ctrl_reg |= E1000_CTRL_SLU;	/* Set Link Up */
+		break;
 	}
 
+	/* force 1000, set loopback */
+	e1e_wphy(hw, PHY_CONTROL, 0x4140);
+	mdelay(250);
+
+	/* Now set up the MAC to the same speed/duplex as the PHY. */
+	ctrl_reg = er32(CTRL);
+	ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+		     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+		     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+
+	if (adapter->flags & FLAG_IS_ICH)
+		ctrl_reg |= E1000_CTRL_SLU;	/* Set Link Up */
+
 	if (hw->phy.media_type == e1000_media_type_copper &&
 	    hw->phy.type == e1000_phy_m88) {
 		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
@@ -1868,6 +1892,7 @@
 
 	if ((hw->phy.type == e1000_phy_ife) ||
 	    (hw->mac.type == e1000_pchlan) ||
+	    (hw->mac.type == e1000_pch2lan) ||
 	    (hw->mac.type == e1000_82583) ||
 	    (hw->mac.type == e1000_82574)) {
 		INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
@@ -2026,7 +2051,6 @@
 	.get_coalesce		= e1000_get_coalesce,
 	.set_coalesce		= e1000_set_coalesce,
 	.get_flags		= ethtool_op_get_flags,
-	.set_flags		= ethtool_op_set_flags,
 };
 
 void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 5d1220d..0cd569a 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -217,7 +217,10 @@
 	E1000_SWSM      = 0x05B50, /* SW Semaphore */
 	E1000_FWSM      = 0x05B54, /* FW Semaphore */
 	E1000_SWSM2     = 0x05B58, /* Driver-only SW semaphore */
-	E1000_CRC_OFFSET = 0x05F50, /* CRC Offset register */
+	E1000_FFLT_DBG  = 0x05F04, /* Debug Register */
+	E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
+#define E1000_PCH_RAICC(_n)	(E1000_PCH_RAICC_BASE + ((_n) * 4))
+#define E1000_CRC_OFFSET	E1000_PCH_RAICC_BASE
 	E1000_HICR      = 0x08F00, /* Host Interface Control */
 };
 
@@ -303,13 +306,14 @@
 #define E1000_KMRNCTRLSTA_OFFSET	0x001F0000
 #define E1000_KMRNCTRLSTA_OFFSET_SHIFT	16
 #define E1000_KMRNCTRLSTA_REN		0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET	0x1    /* Kumeran Control */
 #define E1000_KMRNCTRLSTA_DIAG_OFFSET	0x3    /* Kumeran Diagnostic */
 #define E1000_KMRNCTRLSTA_TIMEOUTS	0x4    /* Kumeran Timeouts */
 #define E1000_KMRNCTRLSTA_INBAND_PARAM	0x9    /* Kumeran InBand Parameters */
 #define E1000_KMRNCTRLSTA_DIAG_NELPBK	0x1000 /* Nearend Loopback mode */
 #define E1000_KMRNCTRLSTA_K1_CONFIG	0x7
 #define E1000_KMRNCTRLSTA_K1_ENABLE	0x140E
-#define E1000_KMRNCTRLSTA_K1_DISABLE	0x1400
+#define E1000_KMRNCTRLSTA_HD_CTRL	0x0002
 
 #define IFE_PHY_EXTENDED_STATUS_CONTROL	0x10
 #define IFE_PHY_SPECIAL_CONTROL		0x11 /* 100BaseTx PHY Special Control */
@@ -387,6 +391,8 @@
 #define E1000_DEV_ID_PCH_M_HV_LC		0x10EB
 #define E1000_DEV_ID_PCH_D_HV_DM		0x10EF
 #define E1000_DEV_ID_PCH_D_HV_DC		0x10F0
+#define E1000_DEV_ID_PCH2_LV_LM			0x1502
+#define E1000_DEV_ID_PCH2_LV_V			0x1503
 
 #define E1000_REVISION_4 4
 
@@ -406,6 +412,7 @@
 	e1000_ich9lan,
 	e1000_ich10lan,
 	e1000_pchlan,
+	e1000_pch2lan,
 };
 
 enum e1000_media_type {
@@ -442,6 +449,7 @@
 	e1000_phy_bm,
 	e1000_phy_82578,
 	e1000_phy_82577,
+	e1000_phy_82579,
 };
 
 enum e1000_bus_width {
@@ -929,6 +937,7 @@
 	bool kmrn_lock_loss_workaround_enabled;
 	struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
 	bool nvm_k1_enabled;
+	bool eee_disable;
 };
 
 struct e1000_hw {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index b2507d9..63930d1 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,8 @@
  * 82577LC Gigabit Network Connection
  * 82578DM Gigabit Network Connection
  * 82578DC Gigabit Network Connection
+ * 82579LM Gigabit Network Connection
+ * 82579V Gigabit Network Connection
  */
 
 #include "e1000.h"
@@ -126,6 +128,13 @@
 #define HV_SMB_ADDR_PEC_EN     0x0200
 #define HV_SMB_ADDR_VALID      0x0080
 
+/* PHY Power Management Control */
+#define HV_PM_CTRL		PHY_REG(770, 17)
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL			PHY_REG(772, 20)
+#define I82579_LPI_CTRL_ENABLE_MASK	0x6000
+
 /* Strapping Option Register - RO */
 #define E1000_STRAP                     0x0000C
 #define E1000_STRAP_SMBUS_ADDRESS_MASK  0x00FE0000
@@ -226,6 +235,8 @@
 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -277,13 +288,13 @@
 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
+	/*
+	 * The MAC-PHY interconnect may still be in SMBus mode
+	 * after Sx->S0.  If the manageability engine (ME) is
+	 * disabled, then toggle the LANPHYPC Value bit to force
+	 * the interconnect to PCIe mode.
+	 */
 	if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
-		/*
-		 * The MAC-PHY interconnect may still be in SMBus mode
-		 * after Sx->S0.  Toggle the LANPHYPC Value bit to force
-		 * the interconnect to PCIe mode, but only if there is no
-		 * firmware present otherwise firmware will have done it.
-		 */
 		ctrl = er32(CTRL);
 		ctrl |=  E1000_CTRL_LANPHYPC_OVERRIDE;
 		ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -324,6 +335,7 @@
 
 	switch (phy->type) {
 	case e1000_phy_82577:
+	case e1000_phy_82579:
 		phy->ops.check_polarity = e1000_check_polarity_82577;
 		phy->ops.force_speed_duplex =
 			e1000_phy_force_speed_duplex_82577;
@@ -515,6 +527,8 @@
 	case e1000_ich8lan:
 	case e1000_ich9lan:
 	case e1000_ich10lan:
+		/* check management mode */
+		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
 		/* ID LED init */
 		mac->ops.id_led_init = e1000e_id_led_init;
 		/* setup LED */
@@ -526,6 +540,9 @@
 		mac->ops.led_off = e1000_led_off_ich8lan;
 		break;
 	case e1000_pchlan:
+	case e1000_pch2lan:
+		/* check management mode */
+		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
 		/* ID LED init */
 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
 		/* setup LED */
@@ -544,10 +561,47 @@
 	if (mac->type == e1000_ich8lan)
 		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 
+	/* Disable PHY configuration by hardware, config by software */
+	if (mac->type == e1000_pch2lan) {
+		u32 extcnf_ctrl = er32(EXTCNF_CTRL);
+
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
+	}
+
 	return 0;
 }
 
 /**
+ *  e1000_set_eee_pchlan - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *
+ *  Enable/disable EEE based on setting in dev_spec structure.  The bits in
+ *  the LPI Control register will remain set only if/when link is up.
+ **/
+static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 phy_reg;
+
+	if (hw->phy.type != e1000_phy_82579)
+		goto out;
+
+	ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+	if (ret_val)
+		goto out;
+
+	if (hw->dev_spec.ich8lan.eee_disable)
+		phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
+	else
+		phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
+
+	ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+out:
+	return ret_val;
+}
+
+/**
  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
  *  @hw: pointer to the HW structure
  *
@@ -604,6 +658,11 @@
 	 */
 	e1000e_check_downshift(hw);
 
+	/* Enable/Disable EEE after link up */
+	ret_val = e1000_set_eee_pchlan(hw);
+	if (ret_val)
+		goto out;
+
 	/*
 	 * If we are forcing speed/duplex, then we simply return since
 	 * we have already determined whether we have link or not.
@@ -647,10 +706,19 @@
 	if (rc)
 		return rc;
 
-	if (hw->mac.type == e1000_pchlan)
-		rc = e1000_init_phy_params_pchlan(hw);
-	else
+	switch (hw->mac.type) {
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+	case e1000_ich10lan:
 		rc = e1000_init_phy_params_ich8lan(hw);
+		break;
+	case e1000_pchlan:
+	case e1000_pch2lan:
+		rc = e1000_init_phy_params_pchlan(hw);
+		break;
+	default:
+		break;
+	}
 	if (rc)
 		return rc;
 
@@ -663,6 +731,10 @@
 	    (adapter->hw.phy.type == e1000_phy_igp_3))
 		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
 
+	/* Disable EEE by default until IEEE802.3az spec is finalized */
+	if (adapter->flags2 & FLAG2_HAS_EEE)
+		adapter->hw.dev_spec.ich8lan.eee_disable = true;
+
 	return 0;
 }
 
@@ -774,7 +846,7 @@
  *  e1000_check_mng_mode_ich8lan - Checks management mode
  *  @hw: pointer to the HW structure
  *
- *  This checks if the adapter has manageability enabled.
+ *  This checks if the adapter has any manageability enabled.
  *  This is a function pointer entry point only called by read/write
  *  routines for the PHY and NVM parts.
  **/
@@ -783,9 +855,26 @@
 	u32 fwsm;
 
 	fwsm = er32(FWSM);
+	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+	       ((fwsm & E1000_FWSM_MODE_MASK) ==
+		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
 
-	return (fwsm & E1000_FWSM_MODE_MASK) ==
-		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+/**
+ *  e1000_check_mng_mode_pchlan - Checks management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has iAMT enabled.
+ *  This is a function pointer entry point only called by read/write
+ *  routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	fwsm = er32(FWSM);
+	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
 }
 
 /**
@@ -820,14 +909,6 @@
 	s32 ret_val = 0;
 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
 
-	if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
-		!(hw->mac.type == e1000_pchlan))
-		return ret_val;
-
-	ret_val = hw->phy.ops.acquire(hw);
-	if (ret_val)
-		return ret_val;
-
 	/*
 	 * Initialize the PHY from the NVM on ICH platforms.  This
 	 * is needed due to an issue where the NVM configuration is
@@ -835,12 +916,27 @@
 	 * Therefore, after each PHY reset, we will load the
 	 * configuration data out of the NVM manually.
 	 */
-	if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
-	    (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
-	    (hw->mac.type == e1000_pchlan))
+	switch (hw->mac.type) {
+	case e1000_ich8lan:
+		if (phy->type != e1000_phy_igp_3)
+			return ret_val;
+
+		if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) {
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+			break;
+		}
+		/* Fall-thru */
+	case e1000_pchlan:
+	case e1000_pch2lan:
 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
-	else
-		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+		break;
+	default:
+		return ret_val;
+	}
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
 
 	data = er32(FEXTNVM);
 	if (!(data & sw_cfg_mask))
@@ -851,8 +947,10 @@
 	 * extended configuration before SW configuration
 	 */
 	data = er32(EXTCNF_CTRL);
-	if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
-		goto out;
+	if (!(hw->mac.type == e1000_pch2lan)) {
+		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+			goto out;
+	}
 
 	cnf_size = er32(EXTCNF_SIZE);
 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
@@ -864,7 +962,8 @@
 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
 
 	if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
-	    (hw->mac.type == e1000_pchlan)) {
+	    ((hw->mac.type == e1000_pchlan) ||
+	     (hw->mac.type == e1000_pch2lan))) {
 		/*
 		 * HW configures the SMBus address and LEDs when the
 		 * OEM and LCD Write Enable bits are set in the NVM.
@@ -1071,16 +1170,18 @@
 	u32 mac_reg;
 	u16 oem_reg;
 
-	if (hw->mac.type != e1000_pchlan)
+	if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
 		return ret_val;
 
 	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
-	mac_reg = er32(EXTCNF_CTRL);
-	if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
-		goto out;
+	if (!(hw->mac.type == e1000_pch2lan)) {
+		mac_reg = er32(EXTCNF_CTRL);
+		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+			goto out;
+	}
 
 	mac_reg = er32(FEXTNVM);
 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
@@ -1221,6 +1322,243 @@
 }
 
 /**
+ *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
+ *  @hw:   pointer to the HW structure
+ **/
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+{
+	u32 mac_reg;
+	u16 i;
+
+	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
+	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+		mac_reg = er32(RAL(i));
+		e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
+		e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
+		mac_reg = er32(RAH(i));
+		e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
+		e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
+	}
+}
+
+static u32 e1000_calc_rx_da_crc(u8 mac[])
+{
+	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
+	u32 i, j, mask, crc;
+
+	crc = 0xffffffff;
+	for (i = 0; i < 6; i++) {
+		crc = crc ^ mac[i];
+		for (j = 8; j > 0; j--) {
+			mask = (crc & 1) * (-1);
+			crc = (crc >> 1) ^ (poly & mask);
+		}
+	}
+	return ~crc;
+}
+
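The branch-free trick in e1000_calc_rx_da_crc() above is the mask: (crc & 1) * (-1) expands the bit about to be shifted out into an all-ones or all-zeros word, so the reflected 802.3 polynomial 0xEDB88320 is XORed in only when that bit is set. A minimal, self-contained sketch of the same CRC-32 over a 6-byte destination address, for reference only (the sample MAC address is made up and this snippet is not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Same bit-reflected CRC-32 as e1000_calc_rx_da_crc(), standalone. */
	static uint32_t crc32_reflected(const uint8_t *buf, size_t len)
	{
		uint32_t crc = 0xffffffff;
		size_t i;
		int j;

		for (i = 0; i < len; i++) {
			crc ^= buf[i];
			for (j = 0; j < 8; j++) {
				/* all-ones when the LSB is set, otherwise zero */
				uint32_t mask = -(crc & 1u);

				crc = (crc >> 1) ^ (0xEDB88320 & mask);
			}
		}
		return ~crc;	/* final inversion, as in the driver */
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };

		printf("DA CRC: 0x%08x\n",
		       (unsigned)crc32_reflected(mac, sizeof(mac)));
		return 0;
	}
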
+/**
+ *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
+ *  with 82579 PHY
+ *  @hw: pointer to the HW structure
+ *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
+ **/
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
+{
+	s32 ret_val = 0;
+	u16 phy_reg, data;
+	u32 mac_reg;
+	u16 i;
+
+	if (hw->mac.type != e1000_pch2lan)
+		goto out;
+
+	/* disable Rx path while enabling/disabling workaround */
+	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+	if (ret_val)
+		goto out;
+
+	if (enable) {
+		/*
+		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
+		 * SHRAL/H) and initial CRC values to the MAC
+		 */
+		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+			u8 mac_addr[ETH_ALEN] = {0};
+			u32 addr_high, addr_low;
+
+			addr_high = er32(RAH(i));
+			if (!(addr_high & E1000_RAH_AV))
+				continue;
+			addr_low = er32(RAL(i));
+			mac_addr[0] = (addr_low & 0xFF);
+			mac_addr[1] = ((addr_low >> 8) & 0xFF);
+			mac_addr[2] = ((addr_low >> 16) & 0xFF);
+			mac_addr[3] = ((addr_low >> 24) & 0xFF);
+			mac_addr[4] = (addr_high & 0xFF);
+			mac_addr[5] = ((addr_high >> 8) & 0xFF);
+
+			ew32(PCH_RAICC(i),
+					e1000_calc_rx_da_crc(mac_addr));
+		}
+
+		/* Write Rx addresses to the PHY */
+		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+		/* Enable jumbo frame workaround in the MAC */
+		mac_reg = er32(FFLT_DBG);
+		mac_reg &= ~(1 << 14);
+		mac_reg |= (7 << 15);
+		ew32(FFLT_DBG, mac_reg);
+
+		mac_reg = er32(RCTL);
+		mac_reg |= E1000_RCTL_SECRC;
+		ew32(RCTL, mac_reg);
+
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						&data);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						data | (1 << 0));
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						&data);
+		if (ret_val)
+			goto out;
+		data &= ~(0xF << 8);
+		data |= (0xB << 8);
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						data);
+		if (ret_val)
+			goto out;
+
+		/* Enable jumbo frame workaround in the PHY */
+		e1e_rphy(hw, PHY_REG(769, 20), &data);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(769, 23), &data);
+		data &= ~(0x7F << 5);
+		data |= (0x37 << 5);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(769, 16), &data);
+		data &= ~(1 << 13);
+		data |= (1 << 12);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(776, 20), &data);
+		data &= ~(0x3FF << 2);
+		data |= (0x1A << 2);
+		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+		if (ret_val)
+			goto out;
+		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, HV_PM_CTRL, &data);
+		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+		if (ret_val)
+			goto out;
+	} else {
+		/* Write MAC register values back to h/w defaults */
+		mac_reg = er32(FFLT_DBG);
+		mac_reg &= ~(0xF << 14);
+		ew32(FFLT_DBG, mac_reg);
+
+		mac_reg = er32(RCTL);
+		mac_reg &= ~E1000_RCTL_SECRC;
+		ew32(RCTL, mac_reg);
+
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						&data);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						data & ~(1 << 0));
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						&data);
+		if (ret_val)
+			goto out;
+		data &= ~(0xF << 8);
+		data |= (0xB << 8);
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						data);
+		if (ret_val)
+			goto out;
+
+		/* Write PHY register values back to h/w defaults */
+		e1e_rphy(hw, PHY_REG(769, 20), &data);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(769, 23), &data);
+		data &= ~(0x7F << 5);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(769, 16), &data);
+		data &= ~(1 << 12);
+		data |= (1 << 13);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(776, 20), &data);
+		data &= ~(0x3FF << 2);
+		data |= (0x8 << 2);
+		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+		if (ret_val)
+			goto out;
+		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, HV_PM_CTRL, &data);
+		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+		if (ret_val)
+			goto out;
+	}
+
+	/* re-enable Rx path after enabling/disabling workaround */
+	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
+ *  done after every PHY reset.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (hw->mac.type != e1000_pch2lan)
+		goto out;
+
+	/* Set MDIO slow mode before any other MDIO access */
+	ret_val = e1000_set_mdio_slow_mode_hv(hw);
+
+out:
+	return ret_val;
+}
+
+/**
  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
  *  @hw: pointer to the HW structure
  *
@@ -1271,12 +1609,17 @@
 		if (ret_val)
 			goto out;
 		break;
+	case e1000_pch2lan:
+		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
+		if (ret_val)
+			goto out;
+		break;
 	default:
 		break;
 	}
 
 	/* Dummy read to clear the phy wakeup bit after lcd reset */
-	if (hw->mac.type == e1000_pchlan)
+	if (hw->mac.type >= e1000_pchlan)
 		e1e_rphy(hw, BM_WUC, &reg);
 
 	/* Configure the LCD with the extended configuration region in NVM */
@@ -2800,6 +3143,7 @@
 
 	ew32(FCTTV, hw->fc.pause_time);
 	if ((hw->phy.type == e1000_phy_82578) ||
+	    (hw->phy.type == e1000_phy_82579) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		ew32(FCRTV_PCH, hw->fc.refresh_time);
 
@@ -2863,6 +3207,7 @@
 			return ret_val;
 		break;
 	case e1000_phy_82577:
+	case e1000_phy_82579:
 		ret_val = e1000_copper_link_setup_82577(hw);
 		if (ret_val)
 			return ret_val;
@@ -3116,21 +3461,12 @@
 {
 	u32 phy_ctrl;
 
-	switch (hw->mac.type) {
-	case e1000_ich8lan:
-	case e1000_ich9lan:
-	case e1000_ich10lan:
-	case e1000_pchlan:
-		phy_ctrl = er32(PHY_CTRL);
-		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
-		            E1000_PHY_CTRL_GBE_DISABLE;
-		ew32(PHY_CTRL, phy_ctrl);
+	phy_ctrl = er32(PHY_CTRL);
+	phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
+	ew32(PHY_CTRL, phy_ctrl);
 
-		if (hw->mac.type == e1000_pchlan)
-			e1000_phy_hw_reset_ich8lan(hw);
-	default:
-		break;
-	}
+	if (hw->mac.type >= e1000_pchlan)
+		e1000_phy_hw_reset_ich8lan(hw);
 }
 
 /**
@@ -3370,6 +3706,7 @@
 
 	/* Clear PHY statistics registers */
 	if ((hw->phy.type == e1000_phy_82578) ||
+	    (hw->phy.type == e1000_phy_82579) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
 		hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
@@ -3390,7 +3727,7 @@
 
 static struct e1000_mac_operations ich8_mac_ops = {
 	.id_led_init		= e1000e_id_led_init,
-	.check_mng_mode		= e1000_check_mng_mode_ich8lan,
+	/* check_mng_mode dependent on mac type */
 	.check_for_link		= e1000_check_for_copper_link_ich8lan,
 	/* cleanup_led dependent on mac type */
 	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
@@ -3497,6 +3834,7 @@
 				  | FLAG_HAS_JUMBO_FRAMES
 				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
 				  | FLAG_APME_IN_WUC,
+	.flags2			= FLAG2_HAS_PHY_STATS,
 	.pba			= 26,
 	.max_hw_frame_size	= 4096,
 	.get_variants		= e1000_get_variants_ich8lan,
@@ -3504,3 +3842,23 @@
 	.phy_ops		= &ich8_phy_ops,
 	.nvm_ops		= &ich8_nvm_ops,
 };
+
+struct e1000_info e1000_pch2_info = {
+	.mac			= e1000_pch2lan,
+	.flags			= FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_RX_CSUM_ENABLED
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_FLASH
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_APME_IN_WUC,
+	.flags2			= FLAG2_HAS_PHY_STATS
+				  | FLAG2_HAS_EEE,
+	.pba			= 18,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a968e3a..df4a279 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 57a7e41..6aa795a 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -52,7 +52,9 @@
 
 #include "e1000.h"
 
-#define DRV_VERSION "1.0.2-k4"
+#define DRV_EXTRAVERSION "-k2"
+
+#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -67,6 +69,7 @@
 	[board_ich9lan]		= &e1000_ich9_info,
 	[board_ich10lan]	= &e1000_ich10_info,
 	[board_pchlan]		= &e1000_pch_info,
+	[board_pch2lan]		= &e1000_pch2_info,
 };
 
 struct e1000_reg_info {
@@ -221,10 +224,10 @@
 	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
 	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
 		0, tx_ring->next_to_use, tx_ring->next_to_clean,
-		(u64)buffer_info->dma,
+		(unsigned long long)buffer_info->dma,
 		buffer_info->length,
 		buffer_info->next_to_watch,
-		(u64)buffer_info->time_stamp);
+		(unsigned long long)buffer_info->time_stamp);
 
 	/* Print TX Rings */
 	if (!netif_msg_tx_done(adapter))
@@ -276,9 +279,11 @@
 			"%04X  %3X %016llX %p",
 		       (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
 			((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
-		       le64_to_cpu(u0->a), le64_to_cpu(u0->b),
-		       (u64)buffer_info->dma, buffer_info->length,
-		       buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
+		       (unsigned long long)le64_to_cpu(u0->a),
+		       (unsigned long long)le64_to_cpu(u0->b),
+		       (unsigned long long)buffer_info->dma,
+		       buffer_info->length, buffer_info->next_to_watch,
+		       (unsigned long long)buffer_info->time_stamp,
 		       buffer_info->skb);
 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
 			printk(KERN_CONT " NTC/U\n");
@@ -353,19 +358,19 @@
 				printk(KERN_INFO "RWB[0x%03X]     %016llX "
 					"%016llX %016llX %016llX "
 					"---------------- %p", i,
-					le64_to_cpu(u1->a),
-					le64_to_cpu(u1->b),
-					le64_to_cpu(u1->c),
-					le64_to_cpu(u1->d),
+					(unsigned long long)le64_to_cpu(u1->a),
+					(unsigned long long)le64_to_cpu(u1->b),
+					(unsigned long long)le64_to_cpu(u1->c),
+					(unsigned long long)le64_to_cpu(u1->d),
 					buffer_info->skb);
 			} else {
 				printk(KERN_INFO "R  [0x%03X]     %016llX "
 					"%016llX %016llX %016llX %016llX %p", i,
-					le64_to_cpu(u1->a),
-					le64_to_cpu(u1->b),
-					le64_to_cpu(u1->c),
-					le64_to_cpu(u1->d),
-					(u64)buffer_info->dma,
+					(unsigned long long)le64_to_cpu(u1->a),
+					(unsigned long long)le64_to_cpu(u1->b),
+					(unsigned long long)le64_to_cpu(u1->c),
+					(unsigned long long)le64_to_cpu(u1->d),
+					(unsigned long long)buffer_info->dma,
 					buffer_info->skb);
 
 				if (netif_msg_pktdata(adapter))
@@ -402,9 +407,11 @@
 			buffer_info = &rx_ring->buffer_info[i];
 			u0 = (struct my_u0 *)rx_desc;
 			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
-				"%016llX %p",
-				i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
-				(u64)buffer_info->dma, buffer_info->skb);
+				"%016llX %p", i,
+				(unsigned long long)le64_to_cpu(u0->a),
+				(unsigned long long)le64_to_cpu(u0->b),
+				(unsigned long long)buffer_info->dma,
+				buffer_info->skb);
 			if (i == rx_ring->next_to_use)
 				printk(KERN_CONT " NTU\n");
 			else if (i == rx_ring->next_to_clean)
@@ -2723,6 +2730,16 @@
 		e1e_wphy(hw, 22, phy_data);
 	}
 
+	/* Workaround Si errata on 82579 - configure jumbo frame flow */
+	if (hw->mac.type == e1000_pch2lan) {
+		s32 ret_val;
+
+		if (rctl & E1000_RCTL_LPE)
+			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+		else
+			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+		if (ret_val)
+			e_dbg("failed to configure jumbo frame workaround\n");
+	}
+
 	/* Setup buffer sizes */
 	rctl &= ~E1000_RCTL_SZ_4096;
 	rctl |= E1000_RCTL_BSEX;
@@ -2759,7 +2776,7 @@
 	 * per packet.
 	 */
 	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
-	if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
+	if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
 	    (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
 		adapter->rx_ps_pages = pages;
 	else
@@ -3118,7 +3135,27 @@
 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
 	 * - the full Rx FIFO size minus one full frame
 	 */
-	if (hw->mac.type == e1000_pchlan) {
+	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
+		fc->pause_time = 0xFFFF;
+	else
+		fc->pause_time = E1000_FC_PAUSE_TIME;
+	fc->send_xon = 1;
+	fc->current_mode = fc->requested_mode;
+
+	switch (hw->mac.type) {
+	default:
+		if ((adapter->flags & FLAG_HAS_ERT) &&
+		    (adapter->netdev->mtu > ETH_DATA_LEN))
+			hwm = min(((pba << 10) * 9 / 10),
+				  ((pba << 10) - (E1000_ERT_2048 << 3)));
+		else
+			hwm = min(((pba << 10) * 9 / 10),
+				  ((pba << 10) - adapter->max_frame_size));
+
+		fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+		fc->low_water = fc->high_water - 8;
+		break;
+	case e1000_pchlan:
 		/*
 		 * Workaround PCH LOM adapter hangs with certain network
 		 * loads.  If hangs persist, try disabling Tx flow control.
@@ -3131,26 +3168,15 @@
 			fc->low_water  = 0x3000;
 		}
 		fc->refresh_time = 0x1000;
-	} else {
-		if ((adapter->flags & FLAG_HAS_ERT) &&
-		    (adapter->netdev->mtu > ETH_DATA_LEN))
-			hwm = min(((pba << 10) * 9 / 10),
-				  ((pba << 10) - (E1000_ERT_2048 << 3)));
-		else
-			hwm = min(((pba << 10) * 9 / 10),
-				  ((pba << 10) - adapter->max_frame_size));
-
-		fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
-		fc->low_water = fc->high_water - 8;
+		break;
+	case e1000_pch2lan:
+		fc->high_water = 0x05C20;
+		fc->low_water = 0x05048;
+		fc->pause_time = 0x0650;
+		fc->refresh_time = 0x0400;
+		break;
 	}
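In the default case of the switch above, the high watermark is the smaller of 90% of the on-chip packet buffer and the buffer size minus either one early-receive threshold (jumbo MTU with ERT support) or one full frame, rounded down to the 8-byte granularity of the FCRTH field; the low watermark sits 8 bytes below it. A hedged worked example (the PBA value and the 2048-byte figure assumed for E1000_ERT_2048 << 3 are illustrative, not taken from the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t pba = 26;		/* hypothetical PBA, in KB */
		uint32_t fifo = pba << 10;	/* 26624 bytes */
		uint32_t ert = 2048;		/* assumed E1000_ERT_2048 << 3 */
		uint32_t hwm, high, low;

		hwm = fifo * 9 / 10;		/* 23961 */
		if (hwm > fifo - ert)		/* min(90% of FIFO, FIFO - ERT) */
			hwm = fifo - ert;

		high = hwm & ~7u;		/* 8-byte granularity -> 23960 */
		low = high - 8;			/* 23952 */
		printf("high_water=%u low_water=%u\n",
		       (unsigned)high, (unsigned)low);
		return 0;
	}
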
 
-	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
-		fc->pause_time = 0xFFFF;
-	else
-		fc->pause_time = E1000_FC_PAUSE_TIME;
-	fc->send_xon = 1;
-	fc->current_mode = fc->requested_mode;
-
 	/* Allow time for pending master requests to run */
 	mac->ops.reset_hw(hw);
 
@@ -3162,8 +3188,6 @@
 		e1000_get_hw_control(adapter);
 
 	ew32(WUC, 0);
-	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
-		e1e_wphy(&adapter->hw, BM_WUC, 0);
 
 	if (mac->ops.init_hw(hw))
 		e_err("Hardware Error\n");
@@ -3419,13 +3443,18 @@
 
 	/* disable SERR in case the MSI write causes a master abort */
 	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
-	pci_write_config_word(adapter->pdev, PCI_COMMAND,
-			      pci_cmd & ~PCI_COMMAND_SERR);
+	if (pci_cmd & PCI_COMMAND_SERR)
+		pci_write_config_word(adapter->pdev, PCI_COMMAND,
+				      pci_cmd & ~PCI_COMMAND_SERR);
 
 	err = e1000_test_msi_interrupt(adapter);
 
-	/* restore previous setting of command word */
-	pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+	/* re-enable SERR */
+	if (pci_cmd & PCI_COMMAND_SERR) {
+		pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+		pci_cmd |= PCI_COMMAND_SERR;
+		pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+	}
 
 	/* success ! */
 	if (!err)
@@ -3672,6 +3701,110 @@
 }
 
 /**
+ * e1000e_update_phy_stats - Update the PHY statistics counters
+ * @adapter: board private structure
+ **/
+static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	s32 ret_val;
+	u16 phy_data;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return;
+
+	hw->phy.addr = 1;
+
+#define HV_PHY_STATS_PAGE	778
+	/*
+	 * A page set is expensive so check if already on desired page.
+	 * If not, set to the page with the PHY status registers.
+	 */
+	ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+					   &phy_data);
+	if (ret_val)
+		goto release;
+	if (phy_data != (HV_PHY_STATS_PAGE << IGP_PAGE_SHIFT)) {
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+						    IGP01E1000_PHY_PAGE_SELECT,
+						    (HV_PHY_STATS_PAGE <<
+						     IGP_PAGE_SHIFT));
+		if (ret_val)
+			goto release;
+	}
+
+	/* Read/clear the upper 16-bit registers and read/accumulate lower */
+
+	/* Single Collision Count */
+	e1000e_read_phy_reg_mdic(hw, HV_SCC_UPPER & MAX_PHY_REG_ADDRESS,
+				 &phy_data);
+	ret_val = e1000e_read_phy_reg_mdic(hw,
+					   HV_SCC_LOWER & MAX_PHY_REG_ADDRESS,
+					   &phy_data);
+	if (!ret_val)
+		adapter->stats.scc += phy_data;
+
+	/* Excessive Collision Count */
+	e1000e_read_phy_reg_mdic(hw, HV_ECOL_UPPER & MAX_PHY_REG_ADDRESS,
+				 &phy_data);
+	ret_val = e1000e_read_phy_reg_mdic(hw,
+					   HV_ECOL_LOWER & MAX_PHY_REG_ADDRESS,
+					   &phy_data);
+	if (!ret_val)
+		adapter->stats.ecol += phy_data;
+
+	/* Multiple Collision Count */
+	e1000e_read_phy_reg_mdic(hw, HV_MCC_UPPER & MAX_PHY_REG_ADDRESS,
+				 &phy_data);
+	ret_val = e1000e_read_phy_reg_mdic(hw,
+					   HV_MCC_LOWER & MAX_PHY_REG_ADDRESS,
+					   &phy_data);
+	if (!ret_val)
+		adapter->stats.mcc += phy_data;
+
+	/* Late Collision Count */
+	e1000e_read_phy_reg_mdic(hw, HV_LATECOL_UPPER & MAX_PHY_REG_ADDRESS,
+				 &phy_data);
+	ret_val = e1000e_read_phy_reg_mdic(hw,
+					   HV_LATECOL_LOWER &
+					   MAX_PHY_REG_ADDRESS,
+					   &phy_data);
+	if (!ret_val)
+		adapter->stats.latecol += phy_data;
+
+	/* Collision Count - also used for adaptive IFS */
+	e1000e_read_phy_reg_mdic(hw, HV_COLC_UPPER & MAX_PHY_REG_ADDRESS,
+				 &phy_data);
+	ret_val = e1000e_read_phy_reg_mdic(hw,
+					   HV_COLC_LOWER & MAX_PHY_REG_ADDRESS,
+					   &phy_data);
+	if (!ret_val)
+		hw->mac.collision_delta = phy_data;
+
+	/* Defer Count */
+	e1000e_read_phy_reg_mdic(hw, HV_DC_UPPER & MAX_PHY_REG_ADDRESS,
+				 &phy_data);
+	ret_val = e1000e_read_phy_reg_mdic(hw,
+					   HV_DC_LOWER & MAX_PHY_REG_ADDRESS,
+					   &phy_data);
+	if (!ret_val)
+		adapter->stats.dc += phy_data;
+
+	/* Transmit with no CRS */
+	e1000e_read_phy_reg_mdic(hw, HV_TNCRS_UPPER & MAX_PHY_REG_ADDRESS,
+				 &phy_data);
+	ret_val = e1000e_read_phy_reg_mdic(hw,
+					   HV_TNCRS_LOWER & MAX_PHY_REG_ADDRESS,
+					   &phy_data);
+	if (!ret_val)
+		adapter->stats.tncrs += phy_data;
+
+release:
+	hw->phy.ops.release(hw);
+}
+
+/**
  * e1000e_update_stats - Update the board statistics counters
  * @adapter: board private structure
  **/
@@ -3680,7 +3813,6 @@
 	struct net_device *netdev = adapter->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
-	u16 phy_data;
 
 	/*
 	 * Prevent stats update while adapter is being reset, or if the pci
@@ -3700,34 +3832,27 @@
 	adapter->stats.roc += er32(ROC);
 
 	adapter->stats.mpc += er32(MPC);
-	if ((hw->phy.type == e1000_phy_82578) ||
-	    (hw->phy.type == e1000_phy_82577)) {
-		e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
-		if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
-			adapter->stats.scc += phy_data;
 
-		e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
-		if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
-			adapter->stats.ecol += phy_data;
+	/* Half-duplex statistics */
+	if (adapter->link_duplex == HALF_DUPLEX) {
+		if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
+			e1000e_update_phy_stats(adapter);
+		} else {
+			adapter->stats.scc += er32(SCC);
+			adapter->stats.ecol += er32(ECOL);
+			adapter->stats.mcc += er32(MCC);
+			adapter->stats.latecol += er32(LATECOL);
+			adapter->stats.dc += er32(DC);
 
-		e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
-		if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
-			adapter->stats.mcc += phy_data;
+			hw->mac.collision_delta = er32(COLC);
 
-		e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
-		if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
-			adapter->stats.latecol += phy_data;
-
-		e1e_rphy(hw, HV_DC_UPPER, &phy_data);
-		if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
-			adapter->stats.dc += phy_data;
-	} else {
-		adapter->stats.scc += er32(SCC);
-		adapter->stats.ecol += er32(ECOL);
-		adapter->stats.mcc += er32(MCC);
-		adapter->stats.latecol += er32(LATECOL);
-		adapter->stats.dc += er32(DC);
+			if ((hw->mac.type != e1000_82574) &&
+			    (hw->mac.type != e1000_82583))
+				adapter->stats.tncrs += er32(TNCRS);
+		}
+		adapter->stats.colc += hw->mac.collision_delta;
 	}
+
 	adapter->stats.xonrxc += er32(XONRXC);
 	adapter->stats.xontxc += er32(XONTXC);
 	adapter->stats.xoffrxc += er32(XOFFRXC);
@@ -3745,28 +3870,9 @@
 
 	hw->mac.tx_packet_delta = er32(TPT);
 	adapter->stats.tpt += hw->mac.tx_packet_delta;
-	if ((hw->phy.type == e1000_phy_82578) ||
-	    (hw->phy.type == e1000_phy_82577)) {
-		e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
-		if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
-			hw->mac.collision_delta = phy_data;
-	} else {
-		hw->mac.collision_delta = er32(COLC);
-	}
-	adapter->stats.colc += hw->mac.collision_delta;
 
 	adapter->stats.algnerrc += er32(ALGNERRC);
 	adapter->stats.rxerrc += er32(RXERRC);
-	if ((hw->phy.type == e1000_phy_82578) ||
-	    (hw->phy.type == e1000_phy_82577)) {
-		e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
-		if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
-			adapter->stats.tncrs += phy_data;
-	} else {
-		if ((hw->mac.type != e1000_82574) &&
-		    (hw->mac.type != e1000_82583))
-			adapter->stats.tncrs += er32(TNCRS);
-	}
 	adapter->stats.cexterr += er32(CEXTERR);
 	adapter->stats.tsctc += er32(TSCTC);
 	adapter->stats.tsctfc += er32(TSCTFC);
@@ -3865,7 +3971,7 @@
 	       ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
 }
 
-bool e1000e_has_link(struct e1000_adapter *adapter)
+static bool e1000e_has_link(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	bool link_active = 0;
@@ -4841,14 +4947,7 @@
 	int retval = 0;
 
 	/* copy MAC RARs to PHY RARs */
-	for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
-		mac_reg = er32(RAL(i));
-		e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
-		e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
-		mac_reg = er32(RAH(i));
-		e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
-		e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
-	}
+	e1000_copy_rx_addrs_to_phy_ich8lan(hw);
 
 	/* copy MAC MTA to PHY MTA */
 	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
@@ -5899,6 +5998,9 @@
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
 
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
+
 	{ }	/* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
@@ -5935,7 +6037,7 @@
 	int ret;
 	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
 		e1000e_driver_version);
-	pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
+	pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
 	ret = pci_register_driver(&e1000_driver);
 
 	return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a150e48..34aeec1 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index b4ac82d..3d3dc0c 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -2319,6 +2319,9 @@
 	case I82577_E_PHY_ID:
 		phy_type = e1000_phy_82577;
 		break;
+	case I82579_E_PHY_ID:
+		phy_type = e1000_phy_82579;
+		break;
 	default:
 		phy_type = e1000_phy_unknown;
 		break;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 8b92acb..3beba70 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -335,7 +335,7 @@
 
 	memset(stats, 0, sizeof(*stats));
 
-	cb2 = (void *)get_zeroed_page(GFP_ATOMIC);
+	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb2) {
 		ehea_error("no mem for cb2");
 		goto out;
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 882c50c..f608a6c 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -126,7 +126,7 @@
 			u8 immediate_data[SWQE2_MAX_IMM];
 			/* 0xd0 */
 			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
-		} immdata_desc __attribute__ ((packed));
+		} immdata_desc __packed;
 
 		/*  Send WQE Format 3 */
 		struct {
diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/enic/cq_desc.h
index 1eb289f..d6dd1b4 100644
--- a/drivers/net/enic/cq_desc.h
+++ b/drivers/net/enic/cq_desc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
index 337d194..c2c0680 100644
--- a/drivers/net/enic/cq_enet_desc.h
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -73,7 +73,16 @@
 #define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED             (0x1 << 14)
 #define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED         (0x1 << 15)
 
-#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS               4
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS          12
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK \
+	((1 << CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_CFI_MASK           (0x1 << 12)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS     3
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_MASK \
+	((1 << CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_SHIFT    13
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS               8
 #define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
 	((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
 #define CQ_ENET_RQ_DESC_FCOE_EOF_BITS               8
@@ -96,7 +105,7 @@
 	u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
 	u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
 	u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
-	u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
+	u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
 	u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
 	u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
 	u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
@@ -136,7 +145,10 @@
 	*vlan_stripped = (bytes_written_flags &
 		CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
 
-	*vlan = le16_to_cpu(desc->vlan);
+	/*
+	 * Tag Control Information (16) = user_priority(3) + cfi(1) + vlan(12)
+	 */
+	*vlan_tci = le16_to_cpu(desc->vlan);
 
 	if (*fcoe) {
 		*fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 45e86d1..f239aa8 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -20,8 +20,6 @@
 #ifndef _ENIC_H_
 #define _ENIC_H_
 
-#include <linux/inet_lro.h>
-
 #include "vnic_enet.h"
 #include "vnic_dev.h"
 #include "vnic_wq.h"
@@ -34,12 +32,8 @@
 
 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"1.3.1.1-pp"
-#define DRV_COPYRIGHT		"Copyright 2008-2009 Cisco Systems, Inc"
-#define PFX			DRV_NAME ": "
-
-#define ENIC_LRO_MAX_DESC	8
-#define ENIC_LRO_MAX_AGGR	64
+#define DRV_VERSION		"1.4.1.1"
+#define DRV_COPYRIGHT		"Copyright 2008-2010 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX		6
 
@@ -116,6 +110,8 @@
 	spinlock_t wq_lock[ENIC_WQ_MAX];
 	unsigned int wq_count;
 	struct vlan_group *vlan_group;
+	u16 loop_enable;
+	u16 loop_tag;
 
 	/* receive queue cache line section */
 	____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
@@ -124,8 +120,6 @@
 	u64 rq_truncated_pkts;
 	u64 rq_bad_fcs;
 	struct napi_struct napi;
-	struct net_lro_mgr lro_mgr;
-	struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC];
 
 	/* interrupt resource cache line section */
 	____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
@@ -137,4 +131,9 @@
 	unsigned int cq_count;
 };
 
+static inline struct device *enic_get_dev(struct enic *enic)
+{
+	return &(enic->pdev->dev);
+}
+
 #endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index bc7d6b9..77a7f87 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -29,12 +29,12 @@
 #include <linux/etherdevice.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
-#include <linux/if_link.h>
 #include <linux/ethtool.h>
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
+#include <linux/rtnetlink.h>
 #include <net/ip6_checksum.h>
 
 #include "cq_enet_desc.h"
@@ -145,15 +145,25 @@
 	return 0;
 }
 
+static int enic_dev_fw_info(struct enic *enic,
+	struct vnic_devcmd_fw_info **fw_info)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_fw_info(enic->vdev, fw_info);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
 static void enic_get_drvinfo(struct net_device *netdev,
 	struct ethtool_drvinfo *drvinfo)
 {
 	struct enic *enic = netdev_priv(netdev);
 	struct vnic_devcmd_fw_info *fw_info;
 
-	spin_lock(&enic->devcmd_lock);
-	vnic_dev_fw_info(enic->vdev, &fw_info);
-	spin_unlock(&enic->devcmd_lock);
+	enic_dev_fw_info(enic, &fw_info);
 
 	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
 	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
@@ -191,6 +201,17 @@
 	}
 }
 
+static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_stats_dump(enic->vdev, vstats);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
 static void enic_get_ethtool_stats(struct net_device *netdev,
 	struct ethtool_stats *stats, u64 *data)
 {
@@ -198,9 +219,7 @@
 	struct vnic_stats *vstats;
 	unsigned int i;
 
-	spin_lock(&enic->devcmd_lock);
-	vnic_dev_stats_dump(enic->vdev, &vstats);
-	spin_unlock(&enic->devcmd_lock);
+	enic_dev_stats_dump(enic, &vstats);
 
 	for (i = 0; i < enic_n_tx_stats; i++)
 		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
@@ -346,7 +365,6 @@
 	.get_coalesce = enic_get_coalesce,
 	.set_coalesce = enic_set_coalesce,
 	.get_flags = ethtool_op_get_flags,
-	.set_flags = ethtool_op_set_flags,
 };
 
 static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
@@ -399,15 +417,41 @@
 	for (i = 0; i < enic->wq_count; i++) {
 		error_status = vnic_wq_error_status(&enic->wq[i]);
 		if (error_status)
-			printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
-				enic->netdev->name, i, error_status);
+			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
+				i, error_status);
 	}
 
 	for (i = 0; i < enic->rq_count; i++) {
 		error_status = vnic_rq_error_status(&enic->rq[i]);
 		if (error_status)
-			printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
-				enic->netdev->name, i, error_status);
+			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
+				i, error_status);
+	}
+}
+
+static void enic_msglvl_check(struct enic *enic)
+{
+	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
+
+	if (msg_enable != enic->msg_enable) {
+		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
+			enic->msg_enable, msg_enable);
+		enic->msg_enable = msg_enable;
+	}
+}
+
+static void enic_mtu_check(struct enic *enic)
+{
+	u32 mtu = vnic_dev_mtu(enic->vdev);
+	struct net_device *netdev = enic->netdev;
+
+	if (mtu && mtu != enic->port_mtu) {
+		enic->port_mtu = mtu;
+		if (mtu < netdev->mtu)
+			netdev_warn(netdev,
+				"interface MTU (%d) set higher "
+				"than switch port MTU (%d)\n",
+				netdev->mtu, mtu);
 	}
 }
 
@@ -417,39 +461,14 @@
 	int carrier_ok = netif_carrier_ok(enic->netdev);
 
 	if (link_status && !carrier_ok) {
-		printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
+		netdev_info(enic->netdev, "Link UP\n");
 		netif_carrier_on(enic->netdev);
 	} else if (!link_status && carrier_ok) {
-		printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
+		netdev_info(enic->netdev, "Link DOWN\n");
 		netif_carrier_off(enic->netdev);
 	}
 }
 
-static void enic_mtu_check(struct enic *enic)
-{
-	u32 mtu = vnic_dev_mtu(enic->vdev);
-
-	if (mtu && mtu != enic->port_mtu) {
-		enic->port_mtu = mtu;
-		if (mtu < enic->netdev->mtu)
-			printk(KERN_WARNING PFX
-				"%s: interface MTU (%d) set higher "
-				"than switch port MTU (%d)\n",
-				enic->netdev->name, enic->netdev->mtu, mtu);
-	}
-}
-
-static void enic_msglvl_check(struct enic *enic)
-{
-	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
-
-	if (msg_enable != enic->msg_enable) {
-		printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
-			enic->netdev->name, enic->msg_enable, msg_enable);
-		enic->msg_enable = msg_enable;
-	}
-}
-
 static void enic_notify_check(struct enic *enic)
 {
 	enic_msglvl_check(enic);
@@ -574,7 +593,7 @@
 
 static inline void enic_queue_wq_skb_cont(struct enic *enic,
 	struct vnic_wq *wq, struct sk_buff *skb,
-	unsigned int len_left)
+	unsigned int len_left, int loopback)
 {
 	skb_frag_t *frag;
 
@@ -586,13 +605,14 @@
 				frag->page_offset, frag->size,
 				PCI_DMA_TODEVICE),
 			frag->size,
-			(len_left == 0));	/* EOP? */
+			(len_left == 0),	/* EOP? */
+			loopback);
 	}
 }
 
 static inline void enic_queue_wq_skb_vlan(struct enic *enic,
 	struct vnic_wq *wq, struct sk_buff *skb,
-	int vlan_tag_insert, unsigned int vlan_tag)
+	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
 {
 	unsigned int head_len = skb_headlen(skb);
 	unsigned int len_left = skb->len - head_len;
@@ -608,15 +628,15 @@
 			head_len, PCI_DMA_TODEVICE),
 		head_len,
 		vlan_tag_insert, vlan_tag,
-		eop);
+		eop, loopback);
 
 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 }
 
 static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
 	struct vnic_wq *wq, struct sk_buff *skb,
-	int vlan_tag_insert, unsigned int vlan_tag)
+	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
 {
 	unsigned int head_len = skb_headlen(skb);
 	unsigned int len_left = skb->len - head_len;
@@ -636,15 +656,15 @@
 		csum_offset,
 		hdr_len,
 		vlan_tag_insert, vlan_tag,
-		eop);
+		eop, loopback);
 
 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 }
 
 static inline void enic_queue_wq_skb_tso(struct enic *enic,
 	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
-	int vlan_tag_insert, unsigned int vlan_tag)
+	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
 {
 	unsigned int frag_len_left = skb_headlen(skb);
 	unsigned int len_left = skb->len - frag_len_left;
@@ -681,7 +701,7 @@
 			len,
 			mss, hdr_len,
 			vlan_tag_insert, vlan_tag,
-			eop && (len == frag_len_left));
+			eop && (len == frag_len_left), loopback);
 		frag_len_left -= len;
 		offset += len;
 	}
@@ -707,7 +727,8 @@
 				dma_addr,
 				len,
 				(len_left == 0) &&
-				(len == frag_len_left));	/* EOP? */
+				(len == frag_len_left),		/* EOP? */
+				loopback);
 			frag_len_left -= len;
 			offset += len;
 		}
@@ -720,22 +741,26 @@
 	unsigned int mss = skb_shinfo(skb)->gso_size;
 	unsigned int vlan_tag = 0;
 	int vlan_tag_insert = 0;
+	int loopback = 0;
 
 	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
 		/* VLAN tag from trunking driver */
 		vlan_tag_insert = 1;
 		vlan_tag = vlan_tx_tag_get(skb);
+	} else if (enic->loop_enable) {
+		vlan_tag = enic->loop_tag;
+		loopback = 1;
 	}
 
 	if (mss)
 		enic_queue_wq_skb_tso(enic, wq, skb, mss,
-			vlan_tag_insert, vlan_tag);
+			vlan_tag_insert, vlan_tag, loopback);
 	else if	(skb->ip_summed == CHECKSUM_PARTIAL)
 		enic_queue_wq_skb_csum_l4(enic, wq, skb,
-			vlan_tag_insert, vlan_tag);
+			vlan_tag_insert, vlan_tag, loopback);
 	else
 		enic_queue_wq_skb_vlan(enic, wq, skb,
-			vlan_tag_insert, vlan_tag);
+			vlan_tag_insert, vlan_tag, loopback);
 }
 
 /* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -769,8 +794,7 @@
 	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
 		netif_stop_queue(netdev);
 		/* This is a hard error, log it */
-		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
-			"queue awake!\n", netdev->name);
+		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
 		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
 		return NETDEV_TX_BUSY;
 	}
@@ -792,9 +816,7 @@
 	struct net_device_stats *net_stats = &netdev->stats;
 	struct vnic_stats *stats;
 
-	spin_lock(&enic->devcmd_lock);
-	vnic_dev_stats_dump(enic->vdev, &stats);
-	spin_unlock(&enic->devcmd_lock);
+	enic_dev_stats_dump(enic, &stats);
 
 	net_stats->tx_packets = stats->tx.tx_frames_ok;
 	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -812,9 +834,10 @@
 	return net_stats;
 }
 
-static void enic_reset_mcaddrs(struct enic *enic)
+static void enic_reset_multicast_list(struct enic *enic)
 {
 	enic->mc_count = 0;
+	enic->flags = 0;
 }
 
 static int enic_set_mac_addr(struct net_device *netdev, char *addr)
@@ -891,6 +914,41 @@
 	return -EOPNOTSUPP;
 }
 
+static int enic_dev_packet_filter(struct enic *enic, int directed,
+	int multicast, int broadcast, int promisc, int allmulti)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_packet_filter(enic->vdev, directed,
+		multicast, broadcast, promisc, allmulti);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_add_addr(enic->vdev, addr);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_del_addr(enic->vdev, addr);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
 /* netif_tx_lock held, BHs disabled */
 static void enic_set_multicast_list(struct net_device *netdev)
 {
@@ -910,11 +968,9 @@
 	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
 		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
 
-	spin_lock(&enic->devcmd_lock);
-
 	if (enic->flags != flags) {
 		enic->flags = flags;
-		vnic_dev_packet_filter(enic->vdev, directed,
+		enic_dev_packet_filter(enic, directed,
 			multicast, broadcast, promisc, allmulti);
 	}
 
@@ -937,7 +993,7 @@
 				mc_addr[j]) == 0)
 				break;
 		if (j == mc_count)
-			enic_del_multicast_addr(enic, enic->mc_addr[i]);
+			enic_dev_del_multicast_addr(enic, enic->mc_addr[i]);
 	}
 
 	for (i = 0; i < mc_count; i++) {
@@ -946,7 +1002,7 @@
 				enic->mc_addr[j]) == 0)
 				break;
 		if (j == enic->mc_count)
-			enic_add_multicast_addr(enic, mc_addr[i]);
+			enic_dev_add_multicast_addr(enic, mc_addr[i]);
 	}
 
 	/* Save the list to compare against next time
@@ -956,8 +1012,6 @@
 		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
 
 	enic->mc_count = mc_count;
-
-	spin_unlock(&enic->devcmd_lock);
 }
 
 /* rtnl lock is held */
@@ -1226,7 +1280,7 @@
 	struct enic *enic = vnic_dev_priv(rq->vdev);
 	struct net_device *netdev = enic->netdev;
 	struct sk_buff *skb;
-	unsigned int len = netdev->mtu + ETH_HLEN;
+	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
 	unsigned int os_buf_index = 0;
 	dma_addr_t dma_addr;
 
@@ -1263,12 +1317,24 @@
 	return 0;
 }
 
+static int enic_dev_hw_version(struct enic *enic,
+	enum vnic_dev_hw_version *hw_ver)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_hw_version(enic->vdev, hw_ver);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
 static int enic_set_rq_alloc_buf(struct enic *enic)
 {
 	enum vnic_dev_hw_version hw_ver;
 	int err;
 
-	err = vnic_dev_hw_version(enic->vdev, &hw_ver);
+	err = enic_dev_hw_version(enic, &hw_ver);
 	if (err)
 		return err;
 
@@ -1287,51 +1353,6 @@
 	return 0;
 }
 
-static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
-	void **tcph, u64 *hdr_flags, void *priv)
-{
-	struct cq_enet_rq_desc *cq_desc = priv;
-	unsigned int ip_len;
-	struct iphdr *iph;
-
-	u8 type, color, eop, sop, ingress_port, vlan_stripped;
-	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
-	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
-	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
-	u8 packet_error;
-	u16 q_number, completed_index, bytes_written, vlan, checksum;
-	u32 rss_hash;
-
-	cq_enet_rq_desc_dec(cq_desc,
-		&type, &color, &q_number, &completed_index,
-		&ingress_port, &fcoe, &eop, &sop, &rss_type,
-		&csum_not_calc, &rss_hash, &bytes_written,
-		&packet_error, &vlan_stripped, &vlan, &checksum,
-		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
-		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
-		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
-		&fcs_ok);
-
-	if (!(ipv4 && tcp && !ipv4_fragment))
-		return -1;
-
-	skb_reset_network_header(skb);
-	iph = ip_hdr(skb);
-
-	ip_len = ip_hdrlen(skb);
-	skb_set_transport_header(skb, ip_len);
-
-	/* check if ip header and tcp header are complete */
-	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
-		return -1;
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	*tcph = tcp_hdr(skb);
-	*iphdr = iph;
-
-	return 0;
-}
-
 static void enic_rq_indicate_buf(struct vnic_rq *rq,
 	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
 	int skipped, void *opaque)
@@ -1345,7 +1366,7 @@
 	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
 	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
 	u8 packet_error;
-	u16 q_number, completed_index, bytes_written, vlan, checksum;
+	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
 	u32 rss_hash;
 
 	if (skipped)
@@ -1360,7 +1381,7 @@
 		&type, &color, &q_number, &completed_index,
 		&ingress_port, &fcoe, &eop, &sop, &rss_type,
 		&csum_not_calc, &rss_hash, &bytes_written,
-		&packet_error, &vlan_stripped, &vlan, &checksum,
+		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
 		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
 		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
 		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
@@ -1395,20 +1416,20 @@
 
 		skb->dev = netdev;
 
-		if (enic->vlan_group && vlan_stripped) {
+		if (enic->vlan_group && vlan_stripped &&
+			(vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) {
 
-			if ((netdev->features & NETIF_F_LRO) && ipv4)
-				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
-					skb, enic->vlan_group,
-					vlan, cq_desc);
+			if (netdev->features & NETIF_F_GRO)
+				vlan_gro_receive(&enic->napi, enic->vlan_group,
+					vlan_tci, skb);
 			else
 				vlan_hwaccel_receive_skb(skb,
-					enic->vlan_group, vlan);
+					enic->vlan_group, vlan_tci);
 
 		} else {
 
-			if ((netdev->features & NETIF_F_LRO) && ipv4)
-				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
+			if (netdev->features & NETIF_F_GRO)
+				napi_gro_receive(&enic->napi, skb);
 			else
 				netif_receive_skb(skb);
 
@@ -1438,7 +1459,6 @@
 static int enic_poll(struct napi_struct *napi, int budget)
 {
 	struct enic *enic = container_of(napi, struct enic, napi);
-	struct net_device *netdev = enic->netdev;
 	unsigned int rq_work_to_do = budget;
 	unsigned int wq_work_to_do = -1; /* no limit */
 	unsigned int  work_done, rq_work_done, wq_work_done;
@@ -1478,12 +1498,9 @@
 	if (rq_work_done < rq_work_to_do) {
 
 		/* Some work done, but not enough to stay in polling,
-		 * flush all LROs and exit polling
+		 * exit polling
 		 */
 
-		if (netdev->features & NETIF_F_LRO)
-			lro_flush_all(&enic->lro_mgr);
-
 		napi_complete(napi);
 		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
 	}
@@ -1494,7 +1511,6 @@
 static int enic_poll_msix(struct napi_struct *napi, int budget)
 {
 	struct enic *enic = container_of(napi, struct enic, napi);
-	struct net_device *netdev = enic->netdev;
 	unsigned int work_to_do = budget;
 	unsigned int work_done;
 	int err;
@@ -1528,12 +1544,9 @@
 	if (work_done < work_to_do) {
 
 		/* Some work done, but not enough to stay in polling,
-		 * flush all LROs and exit polling
+		 * exit polling
 		 */
 
-		if (netdev->features & NETIF_F_LRO)
-			lro_flush_all(&enic->lro_mgr);
-
 		napi_complete(napi);
 		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
 	}
@@ -1655,7 +1668,7 @@
 	}
 }
 
-static int enic_notify_set(struct enic *enic)
+static int enic_dev_notify_set(struct enic *enic)
 {
 	int err;
 
@@ -1676,6 +1689,39 @@
 	return err;
 }
 
+static int enic_dev_notify_unset(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_notify_unset(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+static int enic_dev_enable(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_enable(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+static int enic_dev_disable(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_disable(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
 static void enic_notify_timer_start(struct enic *enic)
 {
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1697,16 +1743,14 @@
 
 	err = enic_request_intr(enic);
 	if (err) {
-		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
-			netdev->name);
+		netdev_err(netdev, "Unable to request irq.\n");
 		return err;
 	}
 
-	err = enic_notify_set(enic);
+	err = enic_dev_notify_set(enic);
 	if (err) {
-		printk(KERN_ERR PFX
-			"%s: Failed to alloc notify buffer, aborting.\n",
-			netdev->name);
+		netdev_err(netdev,
+			"Failed to alloc notify buffer, aborting.\n");
 		goto err_out_free_intr;
 	}
 
@@ -1714,9 +1758,7 @@
 		vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
 		/* Need at least one buffer on ring to get going */
 		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
-			printk(KERN_ERR PFX
-				"%s: Unable to alloc receive buffers.\n",
-				netdev->name);
+			netdev_err(netdev, "Unable to alloc receive buffers\n");
 			err = -ENOMEM;
 			goto err_out_notify_unset;
 		}
@@ -1732,9 +1774,7 @@
 
 	netif_wake_queue(netdev);
 	napi_enable(&enic->napi);
-	spin_lock(&enic->devcmd_lock);
-	vnic_dev_enable(enic->vdev);
-	spin_unlock(&enic->devcmd_lock);
+	enic_dev_enable(enic);
 
 	for (i = 0; i < enic->intr_count; i++)
 		vnic_intr_unmask(&enic->intr[i]);
@@ -1744,9 +1784,7 @@
 	return 0;
 
 err_out_notify_unset:
-	spin_lock(&enic->devcmd_lock);
-	vnic_dev_notify_unset(enic->vdev);
-	spin_unlock(&enic->devcmd_lock);
+	enic_dev_notify_unset(enic);
 err_out_free_intr:
 	enic_free_intr(enic);
 
@@ -1760,20 +1798,19 @@
 	unsigned int i;
 	int err;
 
-	for (i = 0; i < enic->intr_count; i++)
+	for (i = 0; i < enic->intr_count; i++) {
 		vnic_intr_mask(&enic->intr[i]);
+		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
+	}
 
 	enic_synchronize_irqs(enic);
 
 	del_timer_sync(&enic->notify_timer);
 
-	spin_lock(&enic->devcmd_lock);
-	vnic_dev_disable(enic->vdev);
-	spin_unlock(&enic->devcmd_lock);
+	enic_dev_disable(enic);
 	napi_disable(&enic->napi);
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
-
 	enic_dev_del_station_addr(enic);
 
 	for (i = 0; i < enic->wq_count; i++) {
@@ -1787,9 +1824,7 @@
 			return err;
 	}
 
-	spin_lock(&enic->devcmd_lock);
-	vnic_dev_notify_unset(enic->vdev);
-	spin_unlock(&enic->devcmd_lock);
+	enic_dev_notify_unset(enic);
 	enic_free_intr(enic);
 
 	for (i = 0; i < enic->wq_count; i++)
@@ -1818,10 +1853,9 @@
 	netdev->mtu = new_mtu;
 
 	if (netdev->mtu > enic->port_mtu)
-		printk(KERN_WARNING PFX
-			"%s: interface MTU (%d) set higher "
-			"than port MTU (%d)\n",
-			netdev->name, netdev->mtu, enic->port_mtu);
+		netdev_warn(netdev,
+			"interface MTU (%d) set higher than port MTU (%d)\n",
+			netdev->mtu, enic->port_mtu);
 
 	if (running)
 		enic_open(netdev);
@@ -1894,21 +1928,21 @@
 	err = enic_dev_wait(enic->vdev, vnic_dev_open,
 		vnic_dev_open_done, 0);
 	if (err)
-		printk(KERN_ERR PFX
-			"vNIC device open failed, err %d.\n", err);
+		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
+			err);
 
 	return err;
 }
 
-static int enic_dev_soft_reset(struct enic *enic)
+static int enic_dev_hang_reset(struct enic *enic)
 {
 	int err;
 
-	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
-		vnic_dev_soft_reset_done, 0);
+	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
+		vnic_dev_hang_reset_done, 0);
 	if (err)
-		printk(KERN_ERR PFX
-			"vNIC soft reset failed, err %d.\n", err);
+		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
+			err);
 
 	return err;
 }
@@ -1922,15 +1956,43 @@
 	const u8 rss_enable = 0;
 	const u8 tso_ipid_split_en = 0;
 	const u8 ig_vlan_strip_en = 1;
+	int err;
 
 	/* Enable VLAN tag stripping.  RSS not enabled (yet).
 	 */
 
-	return enic_set_nic_cfg(enic,
+	spin_lock(&enic->devcmd_lock);
+	err = enic_set_nic_cfg(enic,
 		rss_default_cpu, rss_hash_type,
 		rss_hash_bits, rss_base_cpu,
 		rss_enable, tso_ipid_split_en,
 		ig_vlan_strip_en);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+static int enic_dev_hang_notify(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_hang_notify(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+		IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
 }
 
 static void enic_reset(struct work_struct *work)
@@ -1942,16 +2004,13 @@
 
 	rtnl_lock();
 
-	spin_lock(&enic->devcmd_lock);
-	vnic_dev_hang_notify(enic->vdev);
-	spin_unlock(&enic->devcmd_lock);
-
+	enic_dev_hang_notify(enic);
 	enic_stop(enic->netdev);
-	enic_dev_soft_reset(enic);
-	vnic_dev_init(enic->vdev, 0);
-	enic_reset_mcaddrs(enic);
+	enic_dev_hang_reset(enic);
+	enic_reset_multicast_list(enic);
 	enic_init_vnic_resources(enic);
 	enic_set_niccfg(enic);
+	enic_dev_set_ig_vlan_rewrite_mode(enic);
 	enic_open(enic->netdev);
 
 	rtnl_unlock();
@@ -2087,8 +2146,8 @@
 	.ndo_start_xmit		= enic_hard_start_xmit,
 	.ndo_get_stats		= enic_get_stats,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_multicast_list	= enic_set_multicast_list,
 	.ndo_set_mac_address	= enic_set_mac_address,
+	.ndo_set_multicast_list	= enic_set_multicast_list,
 	.ndo_change_mtu		= enic_change_mtu,
 	.ndo_vlan_rx_register	= enic_vlan_rx_register,
 	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
@@ -2106,8 +2165,20 @@
 	enic_clear_intr_mode(enic);
 }
 
+static int enic_dev_stats_clear(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_stats_clear(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
 int enic_dev_init(struct enic *enic)
 {
+	struct device *dev = enic_get_dev(enic);
 	struct net_device *netdev = enic->netdev;
 	int err;
 
@@ -2116,8 +2187,7 @@
 
 	err = enic_get_vnic_config(enic);
 	if (err) {
-		printk(KERN_ERR PFX
-			"Get vNIC configuration failed, aborting.\n");
+		dev_err(dev, "Get vNIC configuration failed, aborting\n");
 		return err;
 	}
 
@@ -2132,9 +2202,8 @@
 
 	err = enic_set_intr_mode(enic);
 	if (err) {
-		printk(KERN_ERR PFX
-			"Failed to set intr mode based on resource "
-			"counts and system capabilities, aborting.\n");
+		dev_err(dev, "Failed to set intr mode based on resource "
+			"counts and system capabilities, aborting\n");
 		return err;
 	}
 
@@ -2143,24 +2212,32 @@
 
 	err = enic_alloc_vnic_resources(enic);
 	if (err) {
-		printk(KERN_ERR PFX
-			"Failed to alloc vNIC resources, aborting.\n");
+		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
 		goto err_out_free_vnic_resources;
 	}
 
 	enic_init_vnic_resources(enic);
 
+	/* Clear LIF stats
+	 */
+	enic_dev_stats_clear(enic);
+
 	err = enic_set_rq_alloc_buf(enic);
 	if (err) {
-		printk(KERN_ERR PFX
-			"Failed to set RQ buffer allocator, aborting.\n");
+		dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
 		goto err_out_free_vnic_resources;
 	}
 
 	err = enic_set_niccfg(enic);
 	if (err) {
-		printk(KERN_ERR PFX
-			"Failed to config nic, aborting.\n");
+		dev_err(dev, "Failed to config nic, aborting\n");
+		goto err_out_free_vnic_resources;
+	}
+
+	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
+	if (err) {
+		dev_err(dev, "Failed to set ingress vlan rewrite mode, aborting\n");
 		goto err_out_free_vnic_resources;
 	}
 
@@ -2194,6 +2271,7 @@
 static int __devinit enic_probe(struct pci_dev *pdev,
 	const struct pci_device_id *ent)
 {
+	struct device *dev = &pdev->dev;
 	struct net_device *netdev;
 	struct enic *enic;
 	int using_dac = 0;
@@ -2206,7 +2284,7 @@
 
 	netdev = alloc_etherdev(sizeof(struct enic));
 	if (!netdev) {
-		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+		pr_err("Etherdev alloc failed, aborting\n");
 		return -ENOMEM;
 	}
 
@@ -2221,17 +2299,15 @@
 	/* Setup PCI resources
 	 */
 
-	err = pci_enable_device(pdev);
+	err = pci_enable_device_mem(pdev);
 	if (err) {
-		printk(KERN_ERR PFX
-			"Cannot enable PCI device, aborting.\n");
+		dev_err(dev, "Cannot enable PCI device, aborting\n");
 		goto err_out_free_netdev;
 	}
 
 	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
-		printk(KERN_ERR PFX
-			"Cannot request PCI regions, aborting.\n");
+		dev_err(dev, "Cannot request PCI regions, aborting\n");
 		goto err_out_disable_device;
 	}
 
@@ -2246,23 +2322,20 @@
 	if (err) {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
-			printk(KERN_ERR PFX
-				"No usable DMA configuration, aborting.\n");
+			dev_err(dev, "No usable DMA configuration, aborting\n");
 			goto err_out_release_regions;
 		}
 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
-			printk(KERN_ERR PFX
-				"Unable to obtain 32-bit DMA "
-				"for consistent allocations, aborting.\n");
+			dev_err(dev, "Unable to obtain %u-bit DMA "
+				"for consistent allocations, aborting\n", 32);
 			goto err_out_release_regions;
 		}
 	} else {
 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
 		if (err) {
-			printk(KERN_ERR PFX
-				"Unable to obtain 40-bit DMA "
-				"for consistent allocations, aborting.\n");
+			dev_err(dev, "Unable to obtain %u-bit DMA "
+				"for consistent allocations, aborting\n", 40);
 			goto err_out_release_regions;
 		}
 		using_dac = 1;
@@ -2277,8 +2350,7 @@
 		enic->bar[i].len = pci_resource_len(pdev, i);
 		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
 		if (!enic->bar[i].vaddr) {
-			printk(KERN_ERR PFX
-				"Cannot memory-map BAR %d, aborting.\n", i);
+			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
 			err = -ENODEV;
 			goto err_out_iounmap;
 		}
@@ -2291,8 +2363,7 @@
 	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
 		ARRAY_SIZE(enic->bar));
 	if (!enic->vdev) {
-		printk(KERN_ERR PFX
-			"vNIC registration failed, aborting.\n");
+		dev_err(dev, "vNIC registration failed, aborting\n");
 		err = -ENODEV;
 		goto err_out_iounmap;
 	}
@@ -2302,8 +2373,7 @@
 
 	err = enic_dev_open(enic);
 	if (err) {
-		printk(KERN_ERR PFX
-			"vNIC dev open failed, aborting.\n");
+		dev_err(dev, "vNIC dev open failed, aborting\n");
 		goto err_out_vnic_unregister;
 	}
 
@@ -2317,23 +2387,31 @@
 
 	netif_carrier_off(netdev);
 
+	/* Do not call dev_init for a dynamic vnic.
+	 * For a dynamic vnic, init_prov_info will be
+	 * called later by an upper layer.
+	 */
+
 	if (!enic_is_dynamic(enic)) {
 		err = vnic_dev_init(enic->vdev, 0);
 		if (err) {
-			printk(KERN_ERR PFX
-				"vNIC dev init failed, aborting.\n");
+			dev_err(dev, "vNIC dev init failed, aborting\n");
 			goto err_out_dev_close;
 		}
 	}
 
+	/* Setup devcmd lock
+	 */
+
+	spin_lock_init(&enic->devcmd_lock);
+
 	err = enic_dev_init(enic);
 	if (err) {
-		printk(KERN_ERR PFX
-			"Device initialization failed, aborting.\n");
+		dev_err(dev, "Device initialization failed, aborting\n");
 		goto err_out_dev_close;
 	}
 
-	/* Setup notification timer, HW reset task, and locks
+	/* Setup notification timer, HW reset task, and wq locks
 	 */
 
 	init_timer(&enic->notify_timer);
@@ -2345,8 +2423,6 @@
 	for (i = 0; i < enic->wq_count; i++)
 		spin_lock_init(&enic->wq_lock[i]);
 
-	spin_lock_init(&enic->devcmd_lock);
-
 	/* Register net device
 	 */
 
@@ -2355,8 +2431,7 @@
 
 	err = enic_set_mac_addr(netdev, enic->mac_addr);
 	if (err) {
-		printk(KERN_ERR PFX
-			"Invalid MAC address, aborting.\n");
+		dev_err(dev, "Invalid MAC address, aborting\n");
 		goto err_out_dev_deinit;
 	}
 
@@ -2372,31 +2447,27 @@
 	netdev->ethtool_ops = &enic_ethtool_ops;
 
 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+	if (ENIC_SETTING(enic, LOOP)) {
+		netdev->features &= ~NETIF_F_HW_VLAN_TX;
+		enic->loop_enable = 1;
+		enic->loop_tag = enic->config.loop_tag;
+		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
+	}
 	if (ENIC_SETTING(enic, TXCSUM))
 		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
 	if (ENIC_SETTING(enic, TSO))
 		netdev->features |= NETIF_F_TSO |
 			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
 	if (ENIC_SETTING(enic, LRO))
-		netdev->features |= NETIF_F_LRO;
+		netdev->features |= NETIF_F_GRO;
 	if (using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
 	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
 
-	enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
-	enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
-	enic->lro_mgr.lro_arr = enic->lro_desc;
-	enic->lro_mgr.get_skb_header = enic_get_skb_header;
-	enic->lro_mgr.features	= LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
-	enic->lro_mgr.dev = netdev;
-	enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
-	enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
 	err = register_netdev(netdev);
 	if (err) {
-		printk(KERN_ERR PFX
-			"Cannot register net device, aborting.\n");
+		dev_err(dev, "Cannot register net device, aborting\n");
 		goto err_out_dev_deinit;
 	}
 
@@ -2450,7 +2521,7 @@
 
 static int __init enic_init_module(void)
 {
-	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
 
 	return pci_register_driver(&enic_driver);
 }
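
The enic_main.c hunks above add small enic_dev_*() wrappers that take enic->devcmd_lock around individual device commands, so callers such as enic_reset() no longer manage the lock themselves. A minimal sketch of that wrapper shape, assuming a placeholder vnic_dev_example_cmd() standing in for any devcmd helper (the placeholder is not part of this patch):

/* Illustrative only: vnic_dev_example_cmd() is a stand-in for any
 * vnic_dev_*() devcmd call; the real wrappers in enic_main.c follow
 * this same lock/call/unlock pattern around enic->devcmd_lock.
 */
static int enic_dev_example(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_example_cmd(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
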
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 9b18840..29ede8a 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -46,7 +46,8 @@
 
 	err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
 	if (err) {
-		printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err);
+		dev_err(enic_get_dev(enic),
+			"Error getting MAC addr, %d\n", err);
 		return err;
 	}
 
@@ -56,7 +57,7 @@
 			offsetof(struct vnic_enet_config, m), \
 			sizeof(c->m), &c->m); \
 		if (err) { \
-			printk(KERN_ERR PFX \
+			dev_err(enic_get_dev(enic), \
 				"Error getting %s, %d\n", #m, err); \
 			return err; \
 		} \
@@ -69,6 +70,7 @@
 	GET_CONFIG(intr_timer_type);
 	GET_CONFIG(intr_mode);
 	GET_CONFIG(intr_timer_usec);
+	GET_CONFIG(loop_tag);
 
 	c->wq_desc_count =
 		min_t(u32, ENIC_MAX_WQ_DESCS,
@@ -92,10 +94,10 @@
 		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
 		c->intr_timer_usec);
 
-	printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n",
+	dev_info(enic_get_dev(enic), "vNIC MAC addr %pM wq/rq %d/%d\n",
 		enic->mac_addr, c->wq_desc_count, c->rq_desc_count);
-	printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
-		"intr timer %d usec\n",
+	dev_info(enic_get_dev(enic), "vNIC mtu %d csum tx/rx %d/%d "
+		"tso/lro %d/%d intr timer %d usec\n",
 		c->mtu, ENIC_SETTING(enic, TXCSUM),
 		ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
 		ENIC_SETTING(enic, LRO), c->intr_timer_usec);
@@ -103,17 +105,7 @@
 	return 0;
 }
 
-void enic_add_multicast_addr(struct enic *enic, u8 *addr)
-{
-	vnic_dev_add_addr(enic->vdev, addr);
-}
-
-void enic_del_multicast_addr(struct enic *enic, u8 *addr)
-{
-	vnic_dev_del_addr(enic->vdev, addr);
-}
-
-void enic_add_vlan(struct enic *enic, u16 vlanid)
+int enic_add_vlan(struct enic *enic, u16 vlanid)
 {
 	u64 a0 = vlanid, a1 = 0;
 	int wait = 1000;
@@ -121,10 +113,12 @@
 
 	err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
 	if (err)
-		printk(KERN_ERR PFX "Can't add vlan id, %d\n", err);
+		dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err);
+
+	return err;
 }
 
-void enic_del_vlan(struct enic *enic, u16 vlanid)
+int enic_del_vlan(struct enic *enic, u16 vlanid)
 {
 	u64 a0 = vlanid, a1 = 0;
 	int wait = 1000;
@@ -132,7 +126,9 @@
 
 	err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
 	if (err)
-		printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err);
+		dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err);
+
+	return err;
 }
 
 int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
@@ -198,8 +194,8 @@
 		vnic_dev_get_res_count(enic->vdev, RES_TYPE_INTR_CTRL),
 		ENIC_INTR_MAX);
 
-	printk(KERN_INFO PFX "vNIC resources avail: "
-		"wq %d rq %d cq %d intr %d\n",
+	dev_info(enic_get_dev(enic),
+		"vNIC resources avail: wq %d rq %d cq %d intr %d\n",
 		enic->wq_count, enic->rq_count,
 		enic->cq_count, enic->intr_count);
 }
@@ -304,11 +300,6 @@
 			enic->config.intr_timer_type,
 			mask_on_assertion);
 	}
-
-	/* Clear LIF stats
-	 */
-
-	vnic_dev_stats_clear(enic->vdev);
 }
 
 int enic_alloc_vnic_resources(struct enic *enic)
@@ -319,15 +310,14 @@
 
 	intr_mode = vnic_dev_get_intr_mode(enic->vdev);
 
-	printk(KERN_INFO PFX "vNIC resources used:  "
+	dev_info(enic_get_dev(enic), "vNIC resources used:  "
 		"wq %d rq %d cq %d intr %d intr mode %s\n",
 		enic->wq_count, enic->rq_count,
 		enic->cq_count, enic->intr_count,
 		intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
 		intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
 		intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
-		"unknown"
-		);
+		"unknown");
 
 	/* Allocate queue resources
 	 */
@@ -373,7 +363,8 @@
 	enic->legacy_pba = vnic_dev_get_res(enic->vdev,
 		RES_TYPE_INTR_PBA_LEGACY, 0);
 	if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
-		printk(KERN_ERR PFX "Failed to hook legacy pba resource\n");
+		dev_err(enic_get_dev(enic),
+			"Failed to hook legacy pba resource\n");
 		err = -ENODEV;
 		goto err_out_cleanup;
 	}
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 494664f..83bd172 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -43,7 +43,7 @@
 	void *os_buf, dma_addr_t dma_addr, unsigned int len,
 	unsigned int mss_or_csum_offset, unsigned int hdr_len,
 	int vlan_tag_insert, unsigned int vlan_tag,
-	int offload_mode, int cq_entry, int sop, int eop)
+	int offload_mode, int cq_entry, int sop, int eop, int loopback)
 {
 	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
 
@@ -56,61 +56,62 @@
 		0, /* fcoe_encap */
 		(u8)vlan_tag_insert,
 		(u16)vlan_tag,
-		0 /* loopback */);
+		(u8)loopback);
 
 	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
 }
 
 static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
-	void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop)
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	int eop, int loopback)
 {
 	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
 		0, 0, 0, 0, 0,
-		eop, 0 /* !SOP */, eop);
+		eop, 0 /* !SOP */, eop, loopback);
 }
 
 static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
 	dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
-	unsigned int vlan_tag, int eop)
+	unsigned int vlan_tag, int eop, int loopback)
 {
 	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
 		0, 0, vlan_tag_insert, vlan_tag,
 		WQ_ENET_OFFLOAD_MODE_CSUM,
-		eop, 1 /* SOP */, eop);
+		eop, 1 /* SOP */, eop, loopback);
 }
 
 static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
 	void *os_buf, dma_addr_t dma_addr, unsigned int len,
 	int ip_csum, int tcpudp_csum, int vlan_tag_insert,
-	unsigned int vlan_tag, int eop)
+	unsigned int vlan_tag, int eop, int loopback)
 {
 	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
 		(ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
 		0, vlan_tag_insert, vlan_tag,
 		WQ_ENET_OFFLOAD_MODE_CSUM,
-		eop, 1 /* SOP */, eop);
+		eop, 1 /* SOP */, eop, loopback);
 }
 
 static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
 	void *os_buf, dma_addr_t dma_addr, unsigned int len,
 	unsigned int csum_offset, unsigned int hdr_len,
-	int vlan_tag_insert, unsigned int vlan_tag, int eop)
+	int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
 {
 	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
 		csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
 		WQ_ENET_OFFLOAD_MODE_CSUM_L4,
-		eop, 1 /* SOP */, eop);
+		eop, 1 /* SOP */, eop, loopback);
 }
 
 static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
 	void *os_buf, dma_addr_t dma_addr, unsigned int len,
 	unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
-	unsigned int vlan_tag, int eop)
+	unsigned int vlan_tag, int eop, int loopback)
 {
 	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
 		mss, hdr_len, vlan_tag_insert, vlan_tag,
 		WQ_ENET_OFFLOAD_MODE_TSO,
-		eop, 1 /* SOP */, eop);
+		eop, 1 /* SOP */, eop, loopback);
 }
 
 static inline void enic_queue_rq_desc(struct vnic_rq *rq,
@@ -131,10 +132,8 @@
 struct enic;
 
 int enic_get_vnic_config(struct enic *);
-void enic_add_multicast_addr(struct enic *enic, u8 *addr);
-void enic_del_multicast_addr(struct enic *enic, u8 *addr);
-void enic_add_vlan(struct enic *enic, u16 vlanid);
-void enic_del_vlan(struct enic *enic, u16 vlanid);
+int enic_add_vlan(struct enic *enic, u16 vlanid);
+int enic_del_vlan(struct enic *enic, u16 vlanid);
 int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
 	u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
 	u8 ig_vlan_strip_en);
diff --git a/drivers/net/enic/rq_enet_desc.h b/drivers/net/enic/rq_enet_desc.h
index a06e649..e6dd309 100644
--- a/drivers/net/enic/rq_enet_desc.h
+++ b/drivers/net/enic/rq_enet_desc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c
index 020ae6c..b86d6ef 100644
--- a/drivers/net/enic/vnic_cq.c
+++ b/drivers/net/enic/vnic_cq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -42,7 +42,7 @@
 
 	cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
 	if (!cq->ctrl) {
-		printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
+		pr_err("Failed to hook CQ[%d] resource\n", index);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h
index 114763c..552d3da 100644
--- a/drivers/net/enic/vnic_cq.h
+++ b/drivers/net/enic/vnic_cq.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index e0d3328..6a5b578 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -23,21 +23,23 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/if_ether.h>
-#include <linux/slab.h>
 
 #include "vnic_resource.h"
 #include "vnic_devcmd.h"
 #include "vnic_dev.h"
 #include "vnic_stats.h"
 
+enum vnic_proxy_type {
+	PROXY_NONE,
+	PROXY_BY_BDF,
+};
+
 struct vnic_res {
 	void __iomem *vaddr;
 	dma_addr_t bus_addr;
 	unsigned int count;
 };
 
-#define VNIC_DEV_CAP_INIT	0x0001
-
 struct vnic_dev {
 	void *priv;
 	struct pci_dev *pdev;
@@ -48,13 +50,14 @@
 	struct vnic_devcmd_notify notify_copy;
 	dma_addr_t notify_pa;
 	u32 notify_sz;
-	u32 *linkstatus;
 	dma_addr_t linkstatus_pa;
 	struct vnic_stats *stats;
 	dma_addr_t stats_pa;
 	struct vnic_devcmd_fw_info *fw_info;
 	dma_addr_t fw_info_pa;
-	u32 cap_flags;
+	enum vnic_proxy_type proxy;
+	u32 proxy_index;
+	u64 args[VNIC_DEVCMD_NARGS];
 };
 
 #define VNIC_MAX_RES_HDR_SIZE \
@@ -78,19 +81,19 @@
 		return -EINVAL;
 
 	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
-		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
+		pr_err("vNIC BAR0 res hdr length error\n");
 		return -EINVAL;
 	}
 
 	rh = bar->vaddr;
 	if (!rh) {
-		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
+		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
 		return -EINVAL;
 	}
 
 	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
 	    ioread32(&rh->version) != VNIC_RES_VERSION) {
-		printk(KERN_ERR "vNIC BAR0 res magic/version error "
+		pr_err("vNIC BAR0 res magic/version error "
 			"exp (%lx/%lx) curr (%x/%x)\n",
 			VNIC_RES_MAGIC, VNIC_RES_VERSION,
 			ioread32(&rh->magic), ioread32(&rh->version));
@@ -122,7 +125,7 @@
 			/* each count is stride bytes long */
 			len = count * VNIC_RES_STRIDE;
 			if (len + bar_offset > bar[bar_num].len) {
-				printk(KERN_ERR "vNIC BAR0 resource %d "
+				pr_err("vNIC BAR0 resource %d "
 					"out-of-bounds, offset 0x%x + "
 					"size 0x%x > bar len 0x%lx\n",
 					type, bar_offset,
@@ -229,8 +232,7 @@
 		&ring->base_addr_unaligned);
 
 	if (!ring->descs_unaligned) {
-		printk(KERN_ERR
-		  "Failed to allocate ring (size=%d), aborting\n",
+		pr_err("Failed to allocate ring (size=%d), aborting\n",
 			(int)ring->size);
 		return -ENOMEM;
 	}
@@ -258,23 +260,28 @@
 	}
 }
 
-int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
-	u64 *a0, u64 *a1, int wait)
+static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+	int wait)
 {
 	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
+	unsigned int i;
 	int delay;
 	u32 status;
 	int err;
 
 	status = ioread32(&devcmd->status);
+	if (status == 0xFFFFFFFF) {
+		/* PCI-e target device is gone */
+		return -ENODEV;
+	}
 	if (status & STAT_BUSY) {
-		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
+		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
 		return -EBUSY;
 	}
 
 	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
-		writeq(*a0, &devcmd->args[0]);
-		writeq(*a1, &devcmd->args[1]);
+		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+			writeq(vdev->args[i], &devcmd->args[i]);
 		wmb();
 	}
 
@@ -288,31 +295,110 @@
 		udelay(100);
 
 		status = ioread32(&devcmd->status);
+		if (status == 0xFFFFFFFF) {
+			/* PCI-e target device is gone */
+			return -ENODEV;
+		}
+
 		if (!(status & STAT_BUSY)) {
 
 			if (status & STAT_ERROR) {
 				err = (int)readq(&devcmd->args[0]);
 				if (err != ERR_ECMDUNKNOWN ||
 				    cmd != CMD_CAPABILITY)
-					printk(KERN_ERR "Error %d devcmd %d\n",
+					pr_err("Error %d devcmd %d\n",
 						err, _CMD_N(cmd));
 				return err;
 			}
 
 			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
 				rmb();
-				*a0 = readq(&devcmd->args[0]);
-				*a1 = readq(&devcmd->args[1]);
+				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+					vdev->args[i] = readq(&devcmd->args[i]);
 			}
 
 			return 0;
 		}
 	}
 
-	printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
+	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
 	return -ETIMEDOUT;
 }
 
+static int vnic_dev_cmd_proxy_by_bdf(struct vnic_dev *vdev,
+	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+	u32 status;
+	int err;
+
+	memset(vdev->args, 0, sizeof(vdev->args));
+
+	vdev->args[0] = vdev->proxy_index; /* bdf */
+	vdev->args[1] = cmd;
+	vdev->args[2] = *a0;
+	vdev->args[3] = *a1;
+
+	err = _vnic_dev_cmd(vdev, CMD_PROXY_BY_BDF, wait);
+	if (err)
+		return err;
+
+	status = (u32)vdev->args[0];
+	if (status & STAT_ERROR) {
+		err = (int)vdev->args[1];
+		if (err != ERR_ECMDUNKNOWN ||
+		    cmd != CMD_CAPABILITY)
+			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+		return err;
+	}
+
+	*a0 = vdev->args[1];
+	*a1 = vdev->args[2];
+
+	return 0;
+}
+
+static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+	int err;
+
+	vdev->args[0] = *a0;
+	vdev->args[1] = *a1;
+
+	err = _vnic_dev_cmd(vdev, cmd, wait);
+
+	*a0 = vdev->args[0];
+	*a1 = vdev->args[1];
+
+	return err;
+}
+
+void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf)
+{
+	vdev->proxy = PROXY_BY_BDF;
+	vdev->proxy_index = bdf;
+}
+
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
+{
+	vdev->proxy = PROXY_NONE;
+	vdev->proxy_index = 0;
+}
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+	u64 *a0, u64 *a1, int wait)
+{
+	memset(vdev->args, 0, sizeof(vdev->args));
+
+	switch (vdev->proxy) {
+	case PROXY_BY_BDF:
+		return vnic_dev_cmd_proxy_by_bdf(vdev, cmd, a0, a1, wait);
+	case PROXY_NONE:
+	default:
+		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
+	}
+}
+
 static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
 {
 	u64 a0 = (u32)cmd, a1 = 0;
@@ -431,6 +517,19 @@
 	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
 }
 
+int vnic_dev_enable_wait(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
+	if (err == ERR_ECMDUNKNOWN)
+		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+
+	return err;
+}
+
 int vnic_dev_disable(struct vnic_dev *vdev)
 {
 	u64 a0 = 0, a1 = 0;
@@ -486,6 +585,44 @@
 	return 0;
 }
 
+int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	err = vnic_dev_cmd(vdev, CMD_HANG_RESET, &a0, &a1, wait);
+	if (err == ERR_ECMDUNKNOWN) {
+		err = vnic_dev_soft_reset(vdev, arg);
+		if (err)
+			return err;
+
+		return vnic_dev_init(vdev, 0);
+	}
+
+	return err;
+}
+
+int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	*done = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, &a0, &a1, wait);
+	if (err) {
+		if (err == ERR_ECMDUNKNOWN)
+			return vnic_dev_soft_reset_done(vdev, done);
+		return err;
+	}
+
+	*done = (a0 == 0);
+
+	return 0;
+}
+
 int vnic_dev_hang_notify(struct vnic_dev *vdev)
 {
 	u64 a0, a1;
@@ -512,7 +649,7 @@
 	return 0;
 }
 
-void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
 	int broadcast, int promisc, int allmulti)
 {
 	u64 a0, a1 = 0;
@@ -527,7 +664,29 @@
 
 	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
 	if (err)
-		printk(KERN_ERR "Can't set packet filter\n");
+		pr_err("Can't set packet filter\n");
+
+	return err;
+}
+
+int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
+	int multicast, int broadcast, int promisc, int allmulti)
+{
+	u64 a0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
+	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER_ALL, &a0, &a1, wait);
+	if (err)
+		pr_err("Can't set packet filter\n");
+
+	return err;
 }
 
 int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
@@ -542,7 +701,7 @@
 
 	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
 	if (err)
-		printk(KERN_ERR "Can't add addr [%pM], %d\n", addr, err);
+		pr_err("Can't add addr [%pM], %d\n", addr, err);
 
 	return err;
 }
@@ -559,7 +718,21 @@
 
 	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
 	if (err)
-		printk(KERN_ERR "Can't del addr [%pM], %d\n", addr, err);
+		pr_err("Can't del addr [%pM], %d\n", addr, err);
+
+	return err;
+}
+
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+	u8 ig_vlan_rewrite_mode)
+{
+	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	err = vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait);
+	if (err == ERR_ECMDUNKNOWN)
+		return 0;
 
 	return err;
 }
@@ -572,8 +745,7 @@
 
 	err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait);
 	if (err)
-		printk(KERN_ERR "Failed to raise INTR[%d], err %d\n",
-			intr, err);
+		pr_err("Failed to raise INTR[%d], err %d\n", intr, err);
 
 	return err;
 }
@@ -604,8 +776,7 @@
 	dma_addr_t notify_pa;
 
 	if (vdev->notify || vdev->notify_pa) {
-		printk(KERN_ERR "notify block %p still allocated",
-			vdev->notify);
+		pr_err("notify block %p still allocated", vdev->notify);
 		return -EINVAL;
 	}
 
@@ -618,22 +789,25 @@
 	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
 }
 
-void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
+int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
 {
 	u64 a0, a1;
 	int wait = 1000;
+	int err;
 
 	a0 = 0;  /* paddr = 0 to unset notify buffer */
 	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
 	a1 += sizeof(struct vnic_devcmd_notify);
 
-	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
 	vdev->notify = NULL;
 	vdev->notify_pa = 0;
 	vdev->notify_sz = 0;
+
+	return err;
 }
 
-void vnic_dev_notify_unset(struct vnic_dev *vdev)
+int vnic_dev_notify_unset(struct vnic_dev *vdev)
 {
 	if (vdev->notify) {
 		pci_free_consistent(vdev->pdev,
@@ -642,7 +816,7 @@
 			vdev->notify_pa);
 	}
 
-	vnic_dev_notify_unsetcmd(vdev);
+	return vnic_dev_notify_unsetcmd(vdev);
 }
 
 static int vnic_dev_notify_ready(struct vnic_dev *vdev)
@@ -672,13 +846,14 @@
 	int wait = 1000;
 	int r = 0;
 
-	if (vdev->cap_flags & VNIC_DEV_CAP_INIT)
+	if (vnic_dev_capable(vdev, CMD_INIT))
 		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
 	else {
 		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
 		if (a0 & CMD_INITF_DEFAULT_MAC) {
-			// Emulate these for old CMD_INIT_v1 which
-			// didn't pass a0 so no CMD_INITF_*.
+			/* Emulate these for old CMD_INIT_v1 which
+			 * didn't pass a0 so no CMD_INITF_*.
+			 */
 			vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
 			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
 		}
@@ -700,7 +875,7 @@
 
 	*done = (a0 == 0);
 
-	*err = (a0 == 0) ? a1 : 0;
+	*err = (a0 == 0) ? (int)a1 : 0;
 
 	return 0;
 }
@@ -738,9 +913,6 @@
 
 int vnic_dev_link_status(struct vnic_dev *vdev)
 {
-	if (vdev->linkstatus)
-		return *vdev->linkstatus;
-
 	if (!vnic_dev_notify_ready(vdev))
 		return 0;
 
@@ -787,6 +959,14 @@
 	return vdev->notify_copy.status;
 }
 
+u32 vnic_dev_uif(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.uif;
+}
+
 void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
 	enum vnic_dev_intr_mode intr_mode)
 {
@@ -807,14 +987,9 @@
 				sizeof(struct vnic_devcmd_notify),
 				vdev->notify,
 				vdev->notify_pa);
-		if (vdev->linkstatus)
-			pci_free_consistent(vdev->pdev,
-				sizeof(u32),
-				vdev->linkstatus,
-				vdev->linkstatus_pa);
 		if (vdev->stats)
 			pci_free_consistent(vdev->pdev,
-				sizeof(struct vnic_dev),
+				sizeof(struct vnic_stats),
 				vdev->stats, vdev->stats_pa);
 		if (vdev->fw_info)
 			pci_free_consistent(vdev->pdev,
@@ -844,11 +1019,6 @@
 	if (!vdev->devcmd)
 		goto err_out;
 
-	vdev->cap_flags = 0;
-
-	if (vnic_dev_capable(vdev, CMD_INIT))
-		vdev->cap_flags |= VNIC_DEV_CAP_INIT;
-
 	return vdev;
 
 err_out:
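
The vnic_dev.c changes add a proxy path that wraps an ordinary devcmd inside CMD_PROXY_BY_BDF: the target's bus/dev/fn goes in args[0], the real command in args[1], its two arguments in args[2]/args[3], and the proxied status and results come back in args[0]..args[2]. A hedged usage sketch, assuming a caller that already knows the target BDF (the function name and the choice of CMD_MAC_ADDR as the proxied command are illustrative, not part of this patch):

/* Sketch: fetch the MAC address of another vNIC identified by bdf by
 * proxying CMD_MAC_ADDR through the local vNIC.  Everything except the
 * vnic_dev_cmd*() calls shown in this patch is an assumption.
 */
static int example_proxy_mac_addr(struct vnic_dev *vdev, u16 bdf, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	vnic_dev_cmd_proxy_by_bdf_start(vdev, bdf);	/* route devcmds via proxy */
	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
	vnic_dev_cmd_proxy_end(vdev);			/* back to direct devcmds */

	if (!err)
		memcpy(mac_addr, &a0, ETH_ALEN);	/* MAC is returned in a0 */

	return err;
}
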
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index caccce3..3a61873 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -41,6 +41,9 @@
 }
 #endif
 
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 enum vnic_dev_hw_version {
 	VNIC_DEV_HW_VER_UNKNOWN,
 	VNIC_DEV_HW_VER_A1,
@@ -92,6 +95,8 @@
 	struct vnic_dev_ring *ring);
 int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
 	u64 *a0, u64 *a1, int wait);
+void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
 int vnic_dev_fw_info(struct vnic_dev *vdev,
 	struct vnic_devcmd_fw_info **fw_info);
 int vnic_dev_hw_version(struct vnic_dev *vdev,
@@ -101,8 +106,10 @@
 int vnic_dev_stats_clear(struct vnic_dev *vdev);
 int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
 int vnic_dev_hang_notify(struct vnic_dev *vdev);
-void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
 	int broadcast, int promisc, int allmulti);
+int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
+	int multicast, int broadcast, int promisc, int allmulti);
 int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
 int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
 int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
@@ -110,16 +117,18 @@
 int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
 	void *notify_addr, dma_addr_t notify_pa, u16 intr);
 int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
-void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
-void vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
+int vnic_dev_notify_unset(struct vnic_dev *vdev);
 int vnic_dev_link_status(struct vnic_dev *vdev);
 u32 vnic_dev_port_speed(struct vnic_dev *vdev);
 u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
 u32 vnic_dev_mtu(struct vnic_dev *vdev);
 u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
 u32 vnic_dev_notify_status(struct vnic_dev *vdev);
+u32 vnic_dev_uif(struct vnic_dev *vdev);
 int vnic_dev_close(struct vnic_dev *vdev);
 int vnic_dev_enable(struct vnic_dev *vdev);
+int vnic_dev_enable_wait(struct vnic_dev *vdev);
 int vnic_dev_disable(struct vnic_dev *vdev);
 int vnic_dev_open(struct vnic_dev *vdev, int arg);
 int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
@@ -129,10 +138,14 @@
 int vnic_dev_deinit(struct vnic_dev *vdev);
 int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
 int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
 void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
 	enum vnic_dev_intr_mode intr_mode);
 enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
 void vnic_dev_unregister(struct vnic_dev *vdev);
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+	u8 ig_vlan_rewrite_mode);
 struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
 	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
 	unsigned int num_bars);
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index d78bbcc..2066175 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -98,6 +98,9 @@
 	/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
 	CMD_PACKET_FILTER	= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
 
+	/* set Rx packet filter for all: (u32)a0=filters (see CMD_PFILTER_*) */
+	CMD_PACKET_FILTER_ALL   = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
+
 	/* hang detection notification */
 	CMD_HANG_NOTIFY         = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
 
@@ -171,6 +174,9 @@
 	/* enable virtual link */
 	CMD_ENABLE		= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
 
+	/* enable virtual link, waiting variant. */
+	CMD_ENABLE_WAIT		= _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
 	/* disable virtual link */
 	CMD_DISABLE		= _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
 
@@ -211,6 +217,27 @@
 	 * in: (u16)a0=interrupt number to assert
 	 */
 	CMD_IAR			= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+	/* initiate hangreset, like softreset after hang detected */
+	CMD_HANG_RESET		= _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+	/* hangreset status:
+	 *    out: a0=0 reset complete, a0=1 reset in progress */
+	CMD_HANG_RESET_STATUS   = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+	/*
+	 * Set hw ingress packet vlan rewrite mode:
+	 * in:  (u32)a0=new vlan rewrite mode
+	 * out: (u32)a0=old vlan rewrite mode */
+	CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+	/*
+	 * in:  (u16)a0=bdf of target vnic
+	 *      (u32)a1=cmd to proxy
+	 *      a2-a15=args to cmd in a1
+	 * out: (u32)a0=status of proxied cmd
+	 *      a1-a15=out args of proxied cmd */
+	CMD_PROXY_BY_BDF =	_CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
 };
 
 /* flags for CMD_OPEN */
@@ -226,6 +253,12 @@
 #define CMD_PFILTER_PROMISCUOUS		0x08
 #define CMD_PFILTER_ALL_MULTICAST	0x10
 
+/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
+#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK              0
+#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN         1
+#define IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN  2
+#define IG_VLAN_REWRITE_MODE_PASS_THRU                  3
+
 enum vnic_devcmd_status {
 	STAT_NONE = 0,
 	STAT_BUSY = 1 << 0,	/* cmd in progress */
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 8eeb675..3b32912 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -35,6 +35,7 @@
 	u8 intr_mode;
 	char devname[16];
 	u32 intr_timer_usec;
+	u16 loop_tag;
 };
 
 #define VENETF_TSO		0x1	/* TSO enabled */
@@ -48,5 +49,6 @@
 #define VENETF_RSSHASH_TCPIPV6	0x100	/* Hash on TCP + IPv6 fields */
 #define VENETF_RSSHASH_IPV6_EX	0x200	/* Hash on IPv6 extended fields */
 #define VENETF_RSSHASH_TCPIPV6_EX 0x400	/* Hash on TCP + IPv6 ext. fields */
+#define VENETF_LOOP		0x800	/* Loopback enabled */
 
 #endif /* _VNIC_ENIC_H_ */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 3934309..52ab61a 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -39,8 +39,7 @@
 
 	intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
 	if (!intr->ctrl) {
-		printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
-			index);
+		pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index 2fe6c63..09dc0b7 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -61,7 +61,11 @@
 static inline void vnic_intr_mask(struct vnic_intr *intr)
 {
 	iowrite32(1, &intr->ctrl->mask);
-	(void)ioread32(&intr->ctrl->mask);
+}
+
+static inline int vnic_intr_masked(struct vnic_intr *intr)
+{
+	return ioread32(&intr->ctrl->mask);
 }
 
 static inline void vnic_intr_return_credits(struct vnic_intr *intr,
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
index cf80ab4..995a50d 100644
--- a/drivers/net/enic/vnic_nic.h
+++ b/drivers/net/enic/vnic_nic.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
index b61c22a..810287b 100644
--- a/drivers/net/enic/vnic_resource.h
+++ b/drivers/net/enic/vnic_resource.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index cc580cf..dbb2aca 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -37,23 +37,23 @@
 	vdev = rq->vdev;
 
 	for (i = 0; i < blks; i++) {
-		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
+		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
 		if (!rq->bufs[i]) {
-			printk(KERN_ERR "Failed to alloc rq_bufs\n");
+			pr_err("Failed to alloc rq_bufs\n");
 			return -ENOMEM;
 		}
 	}
 
 	for (i = 0; i < blks; i++) {
 		buf = rq->bufs[i];
-		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
-			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
+		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
+			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
 			buf->desc = (u8 *)rq->ring.descs +
 				rq->ring.desc_size * buf->index;
 			if (buf->index + 1 == count) {
 				buf->next = rq->bufs[0];
 				break;
-			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
+			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
 				buf->next = rq->bufs[i + 1];
 			} else {
 				buf->next = buf + 1;
@@ -94,7 +94,7 @@
 
 	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
 	if (!rq->ctrl) {
-		printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
+		pr_err("Failed to hook RQ[%d] resource\n", index);
 		return -EINVAL;
 	}
 
@@ -119,10 +119,11 @@
 	unsigned int error_interrupt_offset)
 {
 	u64 paddr;
+	unsigned int count = rq->ring.desc_count;
 
 	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
 	writeq(paddr, &rq->ctrl->ring_base);
-	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
+	iowrite32(count, &rq->ctrl->ring_size);
 	iowrite32(cq_index, &rq->ctrl->cq_index);
 	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
 	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
@@ -132,8 +133,8 @@
 	iowrite32(posted_index, &rq->ctrl->posted_index);
 
 	rq->to_use = rq->to_clean =
-		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
-			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
 }
 
 void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
@@ -145,6 +146,11 @@
 	/* Use current fetch_index as the ring starting point */
 	fetch_index = ioread32(&rq->ctrl->fetch_index);
 
+	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
+		/* Hardware surprise removal: reset fetch_index */
+		fetch_index = 0;
+	}
+
 	vnic_rq_init_start(rq, cq_index,
 		fetch_index, fetch_index,
 		error_interrupt_enable,
@@ -174,7 +180,7 @@
 		udelay(10);
 	}
 
-	printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
+	pr_err("Failed to disable RQ[%d]\n", rq->index);
 
 	return -ETIMEDOUT;
 }
@@ -184,8 +190,7 @@
 {
 	struct vnic_rq_buf *buf;
 	u32 fetch_index;
-
-	BUG_ON(ioread32(&rq->ctrl->enable));
+	unsigned int count = rq->ring.desc_count;
 
 	buf = rq->to_clean;
 
@@ -199,9 +204,14 @@
 
 	/* Use current fetch_index as the ring starting point */
 	fetch_index = ioread32(&rq->ctrl->fetch_index);
+
+	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
+		/* Hardware surprise removal: reset fetch_index */
+		fetch_index = 0;
+	}
 	rq->to_use = rq->to_clean =
-		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
-			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
 	iowrite32(fetch_index, &rq->ctrl->posted_index);
 
 	vnic_dev_clear_desc_ring(&rq->ring);
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 35e736c..2dc48f9 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008, 2009 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -52,12 +52,16 @@
 	u32 pad10;
 };
 
-/* Break the vnic_rq_buf allocations into blocks of 64 entries */
-#define VNIC_RQ_BUF_BLK_ENTRIES 64
-#define VNIC_RQ_BUF_BLK_SZ \
-	(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
+/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
+#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
+#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
+#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
+	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
+	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
+#define VNIC_RQ_BUF_BLK_SZ(entries) \
+	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
 #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
-	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
+	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
 #define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
 
 struct vnic_rq_buf {
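
The reworked block-size macros above let small rings use 32-entry buffer blocks instead of always carving 64-entry blocks. A worked example of the resulting sizing, computed directly from the macros (no new definitions introduced):

/* entries = 32   -> VNIC_RQ_BUF_BLK_ENTRIES = 32, VNIC_RQ_BUF_BLKS_NEEDED = 1
 * entries = 64   -> VNIC_RQ_BUF_BLK_ENTRIES = 64, VNIC_RQ_BUF_BLKS_NEEDED = 1
 * entries = 4096 -> VNIC_RQ_BUF_BLK_ENTRIES = 64, VNIC_RQ_BUF_BLKS_NEEDED = 64
 */
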
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h
index 5fbb3c9..f62d187 100644
--- a/drivers/net/enic/vnic_rss.h
+++ b/drivers/net/enic/vnic_rss.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_stats.h b/drivers/net/enic/vnic_stats.h
index 9ff9614..77750ec 100644
--- a/drivers/net/enic/vnic_stats.h
+++ b/drivers/net/enic/vnic_stats.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index d769772..197c9d2 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -25,9 +25,13 @@
 
 struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type)
 {
-	struct vic_provinfo *vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
+	struct vic_provinfo *vp;
 
-	if (!vp || !oui)
+	if (!oui)
+		return NULL;
+
+	vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
+	if (!vp)
 		return NULL;
 
 	memcpy(vp->oui, oui, sizeof(vp->oui));
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index 085c2a2..7e46e5e 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -44,7 +44,7 @@
 		u16 length;
 		u8 value[0];
 	} tlv[0];
-} __attribute__ ((packed));
+} __packed;
 
 #define VIC_PROVINFO_MAX_DATA		1385
 #define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index 1378afb..122e33b 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -37,23 +37,23 @@
 	vdev = wq->vdev;
 
 	for (i = 0; i < blks; i++) {
-		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
+		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
 		if (!wq->bufs[i]) {
-			printk(KERN_ERR "Failed to alloc wq_bufs\n");
+			pr_err("Failed to alloc wq_bufs\n");
 			return -ENOMEM;
 		}
 	}
 
 	for (i = 0; i < blks; i++) {
 		buf = wq->bufs[i];
-		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
-			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
+		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
+			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
 			buf->desc = (u8 *)wq->ring.descs +
 				wq->ring.desc_size * buf->index;
 			if (buf->index + 1 == count) {
 				buf->next = wq->bufs[0];
 				break;
-			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
+			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
 				buf->next = wq->bufs[i + 1];
 			} else {
 				buf->next = buf + 1;
@@ -94,7 +94,7 @@
 
 	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
 	if (!wq->ctrl) {
-		printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
+		pr_err("Failed to hook WQ[%d] resource\n", index);
 		return -EINVAL;
 	}
 
@@ -119,10 +119,11 @@
 	unsigned int error_interrupt_offset)
 {
 	u64 paddr;
+	unsigned int count = wq->ring.desc_count;
 
 	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
 	writeq(paddr, &wq->ctrl->ring_base);
-	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+	iowrite32(count, &wq->ctrl->ring_size);
 	iowrite32(fetch_index, &wq->ctrl->fetch_index);
 	iowrite32(posted_index, &wq->ctrl->posted_index);
 	iowrite32(cq_index, &wq->ctrl->cq_index);
@@ -131,8 +132,8 @@
 	iowrite32(0, &wq->ctrl->error_status);
 
 	wq->to_use = wq->to_clean =
-		&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES]
-			[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES];
+		&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
+			[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
 }
 
 void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
@@ -167,7 +168,7 @@
 		udelay(10);
 	}
 
-	printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
+	pr_err("Failed to disable WQ[%d]\n", wq->index);
 
 	return -ETIMEDOUT;
 }
@@ -177,8 +178,6 @@
 {
 	struct vnic_wq_buf *buf;
 
-	BUG_ON(ioread32(&wq->ctrl->enable));
-
 	buf = wq->to_clean;
 
 	while (vnic_wq_desc_used(wq) > 0) {
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
index 9c34d41..94ac462 100644
--- a/drivers/net/enic/vnic_wq.h
+++ b/drivers/net/enic/vnic_wq.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
@@ -60,12 +60,16 @@
 	void *desc;
 };
 
-/* Break the vnic_wq_buf allocations into blocks of 64 entries */
-#define VNIC_WQ_BUF_BLK_ENTRIES 64
-#define VNIC_WQ_BUF_BLK_SZ \
-	(VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
+/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
+#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
+#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
+	((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
+	VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
+#define VNIC_WQ_BUF_BLK_SZ(entries) \
+	(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
 #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
-	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
+	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
 #define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
 
 struct vnic_wq {
diff --git a/drivers/net/enic/wq_enet_desc.h b/drivers/net/enic/wq_enet_desc.h
index 483596c..c7021e3 100644
--- a/drivers/net/enic/wq_enet_desc.h
+++ b/drivers/net/enic/wq_enet_desc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
  *
  * This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 4c27465..57c8ac0 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -278,7 +278,6 @@
 	struct pci_dev *pci_dev;			/* PCI bus location. */
 	int chip_id, chip_flags;
 
-	struct net_device_stats stats;
 	struct timer_list timer;			/* Media selection timer. */
 	int tx_threshold;
 	unsigned char mc_filter[8];
@@ -770,7 +769,6 @@
 static void epic_pause(struct net_device *dev)
 {
 	long ioaddr = dev->base_addr;
-	struct epic_private *ep = netdev_priv(dev);
 
 	netif_stop_queue (dev);
 
@@ -781,9 +779,9 @@
 
 	/* Update the error counts. */
 	if (inw(ioaddr + COMMAND) != 0xffff) {
-		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
 	}
 
 	/* Remove the packets on the Rx queue. */
@@ -900,7 +898,7 @@
 		}
 	}
 	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
-		ep->stats.tx_fifo_errors++;
+		dev->stats.tx_fifo_errors++;
 		outl(RestartTx, ioaddr + COMMAND);
 	} else {
 		epic_restart(dev);
@@ -908,7 +906,7 @@
 	}
 
 	dev->trans_start = jiffies; /* prevent tx timeout */
-	ep->stats.tx_errors++;
+	dev->stats.tx_errors++;
 	if (!ep->tx_full)
 		netif_wake_queue(dev);
 }
@@ -1016,7 +1014,7 @@
 static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
 			  int status)
 {
-	struct net_device_stats *stats = &ep->stats;
+	struct net_device_stats *stats = &dev->stats;
 
 #ifndef final_version
 	/* There was an major error, log it. */
@@ -1053,9 +1051,9 @@
 			break;	/* It still hasn't been Txed */
 
 		if (likely(txstatus & 0x0001)) {
-			ep->stats.collisions += (txstatus >> 8) & 15;
-			ep->stats.tx_packets++;
-			ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
+			dev->stats.collisions += (txstatus >> 8) & 15;
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
 		} else
 			epic_tx_error(dev, ep, txstatus);
 
@@ -1125,12 +1123,12 @@
 			goto out;
 
 		/* Always update the error counts to avoid overhead later. */
-		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
 
 		if (status & TxUnderrun) { /* Tx FIFO underflow. */
-			ep->stats.tx_fifo_errors++;
+			dev->stats.tx_fifo_errors++;
 			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
 			/* Restart the transmit process. */
 			outl(RestartTx, ioaddr + COMMAND);
@@ -1183,10 +1181,10 @@
 			if (status & 0x2000) {
 				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
 					   "multiple buffers, status %4.4x!\n", dev->name, status);
-				ep->stats.rx_length_errors++;
+				dev->stats.rx_length_errors++;
 			} else if (status & 0x0006)
 				/* Rx Frame errors are counted in hardware. */
-				ep->stats.rx_errors++;
+				dev->stats.rx_errors++;
 		} else {
 			/* Malloc up new buffer, compatible with net-2e. */
 			/* Omit the four octet CRC from the length. */
@@ -1223,8 +1221,8 @@
 			}
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_receive_skb(skb);
-			ep->stats.rx_packets++;
-			ep->stats.rx_bytes += pkt_len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 		}
 		work_done++;
 		entry = (++ep->cur_rx) % RX_RING_SIZE;
@@ -1259,7 +1257,7 @@
 	if (status == EpicRemoved)
 		return;
 	if (status & RxOverflow) 	/* Missed a Rx frame. */
-		ep->stats.rx_errors++;
+		dev->stats.rx_errors++;
 	if (status & (RxOverflow | RxFull))
 		outw(RxQueued, ioaddr + COMMAND);
 }
@@ -1357,17 +1355,16 @@
 
 static struct net_device_stats *epic_get_stats(struct net_device *dev)
 {
-	struct epic_private *ep = netdev_priv(dev);
 	long ioaddr = dev->base_addr;
 
 	if (netif_running(dev)) {
 		/* Update the error counts. */
-		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
 	}
 
-	return &ep->stats;
+	return &dev->stats;
 }
 
 /* Set or clear the multicast filter for this adaptor.
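
Both the epic100 and ethoc conversions in this series replace a driver-private struct net_device_stats copy with the counters the core already embeds in struct net_device, so the hot paths bump dev->stats.* directly and get_stats() simply hands that structure back. A minimal sketch of the resulting accessor (the function name is illustrative):

static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	/* counters were already accumulated via dev->stats.xxx++ in the
	 * rx/tx paths, so there is nothing left to copy here */
	return &dev->stats;
}
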
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 6ed2df1..db519a8 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -180,9 +180,9 @@
  * @dty_tx:	last buffer actually sent
  * @num_rx:	number of receive buffers
  * @cur_rx:	current receive buffer
+ * @vma:        pointer to array of virtual memory addresses for buffers
  * @netdev:	pointer to network device structure
  * @napi:	NAPI structure
- * @stats:	network device statistics
  * @msg_enable:	device state flags
  * @rx_lock:	receive lock
  * @lock:	device lock
@@ -203,9 +203,10 @@
 	unsigned int num_rx;
 	unsigned int cur_rx;
 
+	void **vma;
+
 	struct net_device *netdev;
 	struct napi_struct napi;
-	struct net_device_stats stats;
 	u32 msg_enable;
 
 	spinlock_t rx_lock;
@@ -285,18 +286,22 @@
 	ethoc_write(dev, MODER, mode);
 }
 
-static int ethoc_init_ring(struct ethoc *dev)
+static int ethoc_init_ring(struct ethoc *dev, void *mem_start)
 {
 	struct ethoc_bd bd;
 	int i;
+	void *vma;
 
 	dev->cur_tx = 0;
 	dev->dty_tx = 0;
 	dev->cur_rx = 0;
 
+	ethoc_write(dev, TX_BD_NUM, dev->num_tx);
+
 	/* setup transmission buffers */
-	bd.addr = virt_to_phys(dev->membase);
+	bd.addr = mem_start;
 	bd.stat = TX_BD_IRQ | TX_BD_CRC;
+	vma = dev->membase;
 
 	for (i = 0; i < dev->num_tx; i++) {
 		if (i == dev->num_tx - 1)
@@ -304,6 +309,9 @@
 
 		ethoc_write_bd(dev, i, &bd);
 		bd.addr += ETHOC_BUFSIZ;
+
+		dev->vma[i] = vma;
+		vma += ETHOC_BUFSIZ;
 	}
 
 	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
@@ -314,6 +322,9 @@
 
 		ethoc_write_bd(dev, dev->num_tx + i, &bd);
 		bd.addr += ETHOC_BUFSIZ;
+
+		dev->vma[dev->num_tx + i] = vma;
+		vma += ETHOC_BUFSIZ;
 	}
 
 	return 0;
@@ -354,39 +365,39 @@
 
 	if (bd->stat & RX_BD_TL) {
 		dev_err(&netdev->dev, "RX: frame too long\n");
-		dev->stats.rx_length_errors++;
+		netdev->stats.rx_length_errors++;
 		ret++;
 	}
 
 	if (bd->stat & RX_BD_SF) {
 		dev_err(&netdev->dev, "RX: frame too short\n");
-		dev->stats.rx_length_errors++;
+		netdev->stats.rx_length_errors++;
 		ret++;
 	}
 
 	if (bd->stat & RX_BD_DN) {
 		dev_err(&netdev->dev, "RX: dribble nibble\n");
-		dev->stats.rx_frame_errors++;
+		netdev->stats.rx_frame_errors++;
 	}
 
 	if (bd->stat & RX_BD_CRC) {
 		dev_err(&netdev->dev, "RX: wrong CRC\n");
-		dev->stats.rx_crc_errors++;
+		netdev->stats.rx_crc_errors++;
 		ret++;
 	}
 
 	if (bd->stat & RX_BD_OR) {
 		dev_err(&netdev->dev, "RX: overrun\n");
-		dev->stats.rx_over_errors++;
+		netdev->stats.rx_over_errors++;
 		ret++;
 	}
 
 	if (bd->stat & RX_BD_MISS)
-		dev->stats.rx_missed_errors++;
+		netdev->stats.rx_missed_errors++;
 
 	if (bd->stat & RX_BD_LC) {
 		dev_err(&netdev->dev, "RX: late collision\n");
-		dev->stats.collisions++;
+		netdev->stats.collisions++;
 		ret++;
 	}
 
@@ -415,18 +426,18 @@
 			skb = netdev_alloc_skb_ip_align(dev, size);
 
 			if (likely(skb)) {
-				void *src = phys_to_virt(bd.addr);
+				void *src = priv->vma[entry];
 				memcpy_fromio(skb_put(skb, size), src, size);
 				skb->protocol = eth_type_trans(skb, dev);
-				priv->stats.rx_packets++;
-				priv->stats.rx_bytes += size;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += size;
 				netif_receive_skb(skb);
 			} else {
 				if (net_ratelimit())
 					dev_warn(&dev->dev, "low on memory - "
 							"packet dropped\n");
 
-				priv->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				break;
 			}
 		}
@@ -447,30 +458,30 @@
 
 	if (bd->stat & TX_BD_LC) {
 		dev_err(&netdev->dev, "TX: late collision\n");
-		dev->stats.tx_window_errors++;
+		netdev->stats.tx_window_errors++;
 	}
 
 	if (bd->stat & TX_BD_RL) {
 		dev_err(&netdev->dev, "TX: retransmit limit\n");
-		dev->stats.tx_aborted_errors++;
+		netdev->stats.tx_aborted_errors++;
 	}
 
 	if (bd->stat & TX_BD_UR) {
 		dev_err(&netdev->dev, "TX: underrun\n");
-		dev->stats.tx_fifo_errors++;
+		netdev->stats.tx_fifo_errors++;
 	}
 
 	if (bd->stat & TX_BD_CS) {
 		dev_err(&netdev->dev, "TX: carrier sense lost\n");
-		dev->stats.tx_carrier_errors++;
+		netdev->stats.tx_carrier_errors++;
 	}
 
 	if (bd->stat & TX_BD_STATS)
-		dev->stats.tx_errors++;
+		netdev->stats.tx_errors++;
 
-	dev->stats.collisions += (bd->stat >> 4) & 0xf;
-	dev->stats.tx_bytes += bd->stat >> 16;
-	dev->stats.tx_packets++;
+	netdev->stats.collisions += (bd->stat >> 4) & 0xf;
+	netdev->stats.tx_bytes += bd->stat >> 16;
+	netdev->stats.tx_packets++;
 	return 0;
 }
 
@@ -501,7 +512,7 @@
 
 static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
 {
-	struct net_device *dev = (struct net_device *)dev_id;
+	struct net_device *dev = dev_id;
 	struct ethoc *priv = netdev_priv(dev);
 	u32 pending;
 
@@ -516,7 +527,7 @@
 
 	if (pending & INT_MASK_BUSY) {
 		dev_err(&dev->dev, "packet dropped\n");
-		priv->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 	}
 
 	if (pending & INT_MASK_RX) {
@@ -600,8 +611,11 @@
 
 	while (time_before(jiffies, timeout)) {
 		u32 stat = ethoc_read(priv, MIISTATUS);
-		if (!(stat & MIISTATUS_BUSY))
+		if (!(stat & MIISTATUS_BUSY)) {
+			/* reset MII command register */
+			ethoc_write(priv, MIICOMMAND, 0);
 			return 0;
+		}
 
 		schedule();
 	}
@@ -622,21 +636,12 @@
 {
 	struct ethoc *priv = netdev_priv(dev);
 	struct phy_device *phy;
-	int i;
+	int err;
 
-	for (i = 0; i < PHY_MAX_ADDR; i++) {
-		phy = priv->mdio->phy_map[i];
-		if (phy) {
-			if (priv->phy_id != -1) {
-				/* attach to specified PHY */
-				if (priv->phy_id == phy->addr)
-					break;
-			} else {
-				/* autoselect PHY if none was specified */
-				if (phy->addr != 0)
-					break;
-			}
-		}
+	if (priv->phy_id != -1) {
+		phy = priv->mdio->phy_map[priv->phy_id];
+	} else {
+		phy = phy_find_first(priv->mdio);
 	}
 
 	if (!phy) {
@@ -644,11 +649,11 @@
 		return -ENXIO;
 	}
 
-	phy = phy_connect(dev, dev_name(&phy->dev), ethoc_mdio_poll, 0,
+	err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
 			PHY_INTERFACE_MODE_GMII);
-	if (IS_ERR(phy)) {
+	if (err) {
 		dev_err(&dev->dev, "could not attach to PHY\n");
-		return PTR_ERR(phy);
+		return err;
 	}
 
 	priv->phy = phy;
@@ -658,8 +663,6 @@
 static int ethoc_open(struct net_device *dev)
 {
 	struct ethoc *priv = netdev_priv(dev);
-	unsigned int min_tx = 2;
-	unsigned int num_bd;
 	int ret;
 
 	ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
@@ -667,14 +670,7 @@
 	if (ret)
 		return ret;
 
-	/* calculate the number of TX/RX buffers, maximum 128 supported */
-	num_bd = min_t(unsigned int,
-		128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
-	priv->num_tx = max(min_tx, num_bd / 4);
-	priv->num_rx = num_bd - priv->num_tx;
-	ethoc_write(priv, TX_BD_NUM, priv->num_tx);
-
-	ethoc_init_ring(priv);
+	ethoc_init_ring(priv, (void *)dev->mem_start);
 	ethoc_reset(priv);
 
 	if (netif_queue_stopped(dev)) {
@@ -812,8 +808,7 @@
 
 static struct net_device_stats *ethoc_stats(struct net_device *dev)
 {
-	struct ethoc *priv = netdev_priv(dev);
-	return &priv->stats;
+	return &dev->stats;
 }
 
 static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -824,7 +819,7 @@
 	void *dest;
 
 	if (unlikely(skb->len > ETHOC_BUFSIZ)) {
-		priv->stats.tx_errors++;
+		dev->stats.tx_errors++;
 		goto out;
 	}
 
@@ -838,7 +833,7 @@
 	else
 		bd.stat &= ~TX_BD_PAD;
 
-	dest = phys_to_virt(bd.addr);
+	dest = priv->vma[entry];
 	memcpy_toio(dest, skb->data, skb->len);
 
 	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -884,6 +879,7 @@
 	struct resource *mem = NULL;
 	struct ethoc *priv = NULL;
 	unsigned int phy;
+	int num_bd;
 	int ret = 0;
 
 	/* allocate networking device */
@@ -965,7 +961,7 @@
 		}
 	} else {
 		/* Allocate buffer memory */
-		priv->membase = dma_alloc_coherent(NULL,
+		priv->membase = dmam_alloc_coherent(&pdev->dev,
 			buffer_size, (void *)&netdev->mem_start,
 			GFP_KERNEL);
 		if (!priv->membase) {
@@ -978,6 +974,18 @@
 		priv->dma_alloc = buffer_size;
 	}
 
+	/* calculate the number of TX/RX buffers, maximum 128 supported */
+	num_bd = min_t(unsigned int,
+		128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
+	priv->num_tx = max(2, num_bd / 4);
+	priv->num_rx = num_bd - priv->num_tx;
+
+	priv->vma = devm_kzalloc(&pdev->dev, num_bd * sizeof(void *), GFP_KERNEL);
+	if (!priv->vma) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
 	/* Allow the platform setup code to pass in a MAC address. */
 	if (pdev->dev.platform_data) {
 		struct ethoc_platform_data *pdata =
@@ -1063,21 +1071,6 @@
 	kfree(priv->mdio->irq);
 	mdiobus_free(priv->mdio);
 free:
-	if (priv) {
-		if (priv->dma_alloc)
-			dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
-					  netdev->mem_start);
-		else if (priv->membase)
-			devm_iounmap(&pdev->dev, priv->membase);
-		if (priv->iobase)
-			devm_iounmap(&pdev->dev, priv->iobase);
-	}
-	if (mem)
-		devm_release_mem_region(&pdev->dev, mem->start,
-					mem->end - mem->start + 1);
-	if (mmio)
-		devm_release_mem_region(&pdev->dev, mmio->start,
-					mmio->end - mmio->start + 1);
 	free_netdev(netdev);
 out:
 	return ret;
@@ -1104,17 +1097,6 @@
 			kfree(priv->mdio->irq);
 			mdiobus_free(priv->mdio);
 		}
-		if (priv->dma_alloc)
-			dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
-				netdev->mem_start);
-		else {
-			devm_iounmap(&pdev->dev, priv->membase);
-			devm_release_mem_region(&pdev->dev, netdev->mem_start,
-				netdev->mem_end - netdev->mem_start + 1);
-		}
-		devm_iounmap(&pdev->dev, priv->iobase);
-		devm_release_mem_region(&pdev->dev, netdev->base_addr,
-			priv->io_region_size);
 		unregister_netdev(netdev);
 		free_netdev(netdev);
 	}
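
In the ethoc changes above, ethoc_init_ring() now records a CPU-visible
pointer per buffer descriptor in priv->vma[] while it programs the bus
address into bd.addr, so the RX/TX paths copy through priv->vma[entry]
instead of calling phys_to_virt() on a descriptor address; the descriptor
count itself (at most 128, limited by the window size divided by
ETHOC_BUFSIZ) is computed once at probe time. A standalone sketch of that
bookkeeping, with assumed sizes and a made-up bus base address:

/* Bus address and CPU pointer advance together, one slot per descriptor. */
#include <stdio.h>

#define ETHOC_BUFSIZ	1536	/* assumed buffer size, for illustration */
#define NUM_BD		8	/* assumed descriptor count */

int main(void)
{
	static char window[NUM_BD * ETHOC_BUFSIZ]; /* stand-in for packet RAM */
	unsigned int bus = 0x10000000;	/* stand-in for netdev->mem_start */
	unsigned int bd_addr[NUM_BD];	/* what bd.addr would be set to */
	void *vma[NUM_BD];		/* what priv->vma[] would hold */
	char *cpu = window;
	int i;

	for (i = 0; i < NUM_BD; i++) {
		bd_addr[i] = bus;	/* written into the descriptor */
		vma[i] = cpu;		/* used for memcpy to/from the skb */
		bus += ETHOC_BUFSIZ;
		cpu += ETHOC_BUFSIZ;
	}

	for (i = 0; i < NUM_BD; i++)
		printf("bd %d: bus 0x%08x cpu %p\n", i, bd_addr[i], vma[i]);
	return 0;
}
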
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 15f4f8d..d7e8f6b 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -382,8 +382,6 @@
 
 	spinlock_t lock;
 
-	struct net_device_stats stats;
-
 	/* Media monitoring timer. */
 	struct timer_list timer;
 
@@ -1234,7 +1232,7 @@
 	spin_unlock_irqrestore(&np->lock, flags);
 
 	dev->trans_start = jiffies; /* prevent tx timeout */
-	np->stats.tx_errors++;
+	dev->stats.tx_errors++;
 	netif_wake_queue(dev); /* or .._start_.. ?? */
 }
 
@@ -1479,10 +1477,11 @@
 
 		if (intr_status & CNTOVF) {
 			/* missed pkts */
-			np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
+			dev->stats.rx_missed_errors +=
+				ioread32(ioaddr + TALLY) & 0x7fff;
 
 			/* crc error */
-			np->stats.rx_crc_errors +=
+			dev->stats.rx_crc_errors +=
 			    (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
 		}
 
@@ -1513,30 +1512,30 @@
 
 			if (!(np->crvalue & CR_W_ENH)) {
 				if (tx_status & (CSL | LC | EC | UDF | HF)) {
-					np->stats.tx_errors++;
+					dev->stats.tx_errors++;
 					if (tx_status & EC)
-						np->stats.tx_aborted_errors++;
+						dev->stats.tx_aborted_errors++;
 					if (tx_status & CSL)
-						np->stats.tx_carrier_errors++;
+						dev->stats.tx_carrier_errors++;
 					if (tx_status & LC)
-						np->stats.tx_window_errors++;
+						dev->stats.tx_window_errors++;
 					if (tx_status & UDF)
-						np->stats.tx_fifo_errors++;
+						dev->stats.tx_fifo_errors++;
 					if ((tx_status & HF) && np->mii.full_duplex == 0)
-						np->stats.tx_heartbeat_errors++;
+						dev->stats.tx_heartbeat_errors++;
 
 				} else {
-					np->stats.tx_bytes +=
+					dev->stats.tx_bytes +=
 					    ((tx_control & PKTSMask) >> PKTSShift);
 
-					np->stats.collisions +=
+					dev->stats.collisions +=
 					    ((tx_status & NCRMask) >> NCRShift);
-					np->stats.tx_packets++;
+					dev->stats.tx_packets++;
 				}
 			} else {
-				np->stats.tx_bytes +=
+				dev->stats.tx_bytes +=
 				    ((tx_control & PKTSMask) >> PKTSShift);
-				np->stats.tx_packets++;
+				dev->stats.tx_packets++;
 			}
 
 			/* Free the original skb. */
@@ -1564,10 +1563,12 @@
 			long data;
 
 			data = ioread32(ioaddr + TSR);
-			np->stats.tx_errors += (data & 0xff000000) >> 24;
-			np->stats.tx_aborted_errors += (data & 0xff000000) >> 24;
-			np->stats.tx_window_errors += (data & 0x00ff0000) >> 16;
-			np->stats.collisions += (data & 0x0000ffff);
+			dev->stats.tx_errors += (data & 0xff000000) >> 24;
+			dev->stats.tx_aborted_errors +=
+				(data & 0xff000000) >> 24;
+			dev->stats.tx_window_errors +=
+				(data & 0x00ff0000) >> 16;
+			dev->stats.collisions += (data & 0x0000ffff);
 		}
 
 		if (--boguscnt < 0) {
@@ -1593,10 +1594,11 @@
 
 	/* read the tally counters */
 	/* missed pkts */
-	np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
+	dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
 
 	/* crc error */
-	np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+	dev->stats.rx_crc_errors +=
+		(ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
 
 	if (debug)
 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
@@ -1635,13 +1637,13 @@
 					       "%s: Receive error, Rx status %8.8x.\n",
 					       dev->name, rx_status);
 
-				np->stats.rx_errors++;	/* end of a packet. */
+				dev->stats.rx_errors++;	/* end of a packet. */
 				if (rx_status & (LONG | RUNT))
-					np->stats.rx_length_errors++;
+					dev->stats.rx_length_errors++;
 				if (rx_status & RXER)
-					np->stats.rx_frame_errors++;
+					dev->stats.rx_frame_errors++;
 				if (rx_status & CRC)
-					np->stats.rx_crc_errors++;
+					dev->stats.rx_crc_errors++;
 			} else {
 				int need_to_reset = 0;
 				int desno = 0;
@@ -1667,7 +1669,7 @@
 				if (need_to_reset == 0) {
 					int i;
 
-					np->stats.rx_length_errors++;
+					dev->stats.rx_length_errors++;
 
 					/* free all rx descriptors related this long pkt */
 					for (i = 0; i < desno; ++i) {
@@ -1733,8 +1735,8 @@
 			}
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
-			np->stats.rx_packets++;
-			np->stats.rx_bytes += pkt_len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 		}
 
 		np->cur_rx = np->cur_rx->next_desc_logical;
@@ -1754,11 +1756,13 @@
 
 	/* The chip only need report frame silently dropped. */
 	if (netif_running(dev)) {
-		np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
-		np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+		dev->stats.rx_missed_errors +=
+			ioread32(ioaddr + TALLY) & 0x7fff;
+		dev->stats.rx_crc_errors +=
+			(ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
 	}
 
-	return &np->stats;
+	return &dev->stats;
 }
 
 
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index edfff92..b4afd7a 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -210,7 +210,7 @@
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)
 
-static int
+static netdev_tx_t
 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
@@ -679,30 +679,24 @@
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 	struct phy_device *phy_dev = NULL;
-	int phy_addr;
+	int ret;
 
 	fep->phy_dev = NULL;
 
 	/* find the first phy */
-	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
-		if (fep->mii_bus->phy_map[phy_addr]) {
-			phy_dev = fep->mii_bus->phy_map[phy_addr];
-			break;
-		}
-	}
-
+	phy_dev = phy_find_first(fep->mii_bus);
 	if (!phy_dev) {
 		printk(KERN_ERR "%s: no PHY found\n", dev->name);
 		return -ENODEV;
 	}
 
 	/* attach the mac to the phy */
-	phy_dev = phy_connect(dev, dev_name(&phy_dev->dev),
+	ret = phy_connect_direct(dev, phy_dev,
 			     &fec_enet_adjust_link, 0,
 			     PHY_INTERFACE_MODE_MII);
-	if (IS_ERR(phy_dev)) {
+	if (ret) {
 		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
-		return PTR_ERR(phy_dev);
+		return ret;
 	}
 
 	/* mask with MAC supported features */
@@ -1365,10 +1359,11 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM
 static int
-fec_suspend(struct platform_device *dev, pm_message_t state)
+fec_suspend(struct device *dev)
 {
-	struct net_device *ndev = platform_get_drvdata(dev);
+	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep;
 
 	if (ndev) {
@@ -1381,9 +1376,9 @@
 }
 
 static int
-fec_resume(struct platform_device *dev)
+fec_resume(struct device *dev)
 {
-	struct net_device *ndev = platform_get_drvdata(dev);
+	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep;
 
 	if (ndev) {
@@ -1395,15 +1390,26 @@
 	return 0;
 }
 
+static const struct dev_pm_ops fec_pm_ops = {
+	.suspend	= fec_suspend,
+	.resume		= fec_resume,
+	.freeze		= fec_suspend,
+	.thaw		= fec_resume,
+	.poweroff	= fec_suspend,
+	.restore	= fec_resume,
+};
+#endif
+
 static struct platform_driver fec_driver = {
 	.driver	= {
-		.name    = "fec",
-		.owner	 = THIS_MODULE,
+		.name	= "fec",
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &fec_pm_ops,
+#endif
 	},
-	.probe   = fec_probe,
-	.remove  = __devexit_p(fec_drv_remove),
-	.suspend = fec_suspend,
-	.resume  = fec_resume,
+	.probe	= fec_probe,
+	.remove	= __devexit_p(fec_drv_remove),
 };
 
 static int __init
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 006f64d..dbaf72c 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -29,15 +29,14 @@
 		int reg, u32 value)
 {
 	struct mpc52xx_fec_mdio_priv *priv = bus->priv;
-	struct mpc52xx_fec __iomem *fec;
+	struct mpc52xx_fec __iomem *fec = priv->regs;
 	int tries = 3;
 
 	value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
 	value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
 
-	fec = priv->regs;
 	out_be32(&fec->ievent, FEC_IEVENT_MII);
-	out_be32(&priv->regs->mii_data, value);
+	out_be32(&fec->mii_data, value);
 
 	/* wait for it to finish, this takes about 23 us on lite5200b */
 	while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
@@ -47,7 +46,7 @@
 		return -ETIMEDOUT;
 
 	return value & FEC_MII_DATA_OP_RD ?
-		in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0;
+		in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0;
 }
 
 static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
@@ -69,9 +68,8 @@
 	struct device_node *np = of->dev.of_node;
 	struct mii_bus *bus;
 	struct mpc52xx_fec_mdio_priv *priv;
-	struct resource res = {};
+	struct resource res;
 	int err;
-	int i;
 
 	bus = mdiobus_alloc();
 	if (bus == NULL)
@@ -93,7 +91,7 @@
 	err = of_address_to_resource(np, 0, &res);
 	if (err)
 		goto out_free;
-	priv->regs = ioremap(res.start, res.end - res.start + 1);
+	priv->regs = ioremap(res.start, resource_size(&res));
 	if (priv->regs == NULL) {
 		err = -ENOMEM;
 		goto out_free;
@@ -118,10 +116,6 @@
  out_unmap:
 	iounmap(priv->regs);
  out_free:
-	for (i=0; i<PHY_MAX_ADDR; i++)
-		if (bus->irq[i] != PHY_POLL)
-			irq_dispose_mapping(bus->irq[i]);
-	kfree(bus->irq);
 	kfree(priv);
 	mdiobus_free(bus);
 
@@ -133,23 +127,16 @@
 	struct device *dev = &of->dev;
 	struct mii_bus *bus = dev_get_drvdata(dev);
 	struct mpc52xx_fec_mdio_priv *priv = bus->priv;
-	int i;
 
 	mdiobus_unregister(bus);
 	dev_set_drvdata(dev, NULL);
-
 	iounmap(priv->regs);
-	for (i=0; i<PHY_MAX_ADDR; i++)
-		if (bus->irq[i] != PHY_POLL)
-			irq_dispose_mapping(bus->irq[i]);
 	kfree(priv);
-	kfree(bus->irq);
 	mdiobus_free(bus);
 
 	return 0;
 }
 
-
 static struct of_device_id mpc52xx_fec_mdio_match[] = {
 	{ .compatible = "fsl,mpc5200b-mdio", },
 	{ .compatible = "fsl,mpc5200-mdio", },
@@ -171,5 +158,4 @@
 /* let fec driver call it, since this has to be registered before it */
 EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver);
 
-
 MODULE_LICENSE("Dual BSD/GPL");
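
The resource_size() conversion above replaces the open-coded
res.end - res.start + 1; a struct resource describes an inclusive
[start, end] range, so its size is end - start + 1. A standalone check of
that identity (the struct below is a stand-in, not the kernel type):

#include <stdio.h>

struct range { unsigned long start, end; };	/* inclusive bounds */

static unsigned long range_size(const struct range *r)
{
	return r->end - r->start + 1;	/* what resource_size() computes */
}

int main(void)
{
	struct range r = { .start = 0x80003000, .end = 0x80003fff };

	printf("size = 0x%lx\n", range_size(&r));	/* prints 0x1000 */
	return 0;
}
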
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 268ea4d..9ef6a9d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2468,7 +2468,7 @@
 	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
 
 	while ((np->get_tx.ex != np->put_tx.ex) &&
-	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
+	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
 	       (tx_work < limit)) {
 
 		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h
index 1f7d865..bd17a2a 100644
--- a/drivers/net/fsl_pq_mdio.h
+++ b/drivers/net/fsl_pq_mdio.h
@@ -39,7 +39,7 @@
 	u8 reserved[28];	/* Space holder */
 	u32 utbipar;		/* TBI phy address reg (only on UCC) */
 	u8 res4[2728];
-} __attribute__ ((packed));
+} __packed;
 
 int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
 int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 28b53d1..746a776 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -85,6 +85,7 @@
 #include <linux/net_tstamp.h>
 
 #include <asm/io.h>
+#include <asm/reg.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
@@ -685,8 +686,8 @@
 		priv->rx_queue[i] = NULL;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
-		priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kzalloc(
-				sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
+		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+					    GFP_KERNEL);
 		if (!priv->tx_queue[i]) {
 			err = -ENOMEM;
 			goto tx_alloc_failed;
@@ -698,8 +699,8 @@
 	}
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
-		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
-					sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
+		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+					    GFP_KERNEL);
 		if (!priv->rx_queue[i]) {
 			err = -ENOMEM;
 			goto rx_alloc_failed;
@@ -928,6 +929,34 @@
 	}
 }
 
+static void gfar_detect_errata(struct gfar_private *priv)
+{
+	struct device *dev = &priv->ofdev->dev;
+	unsigned int pvr = mfspr(SPRN_PVR);
+	unsigned int svr = mfspr(SPRN_SVR);
+	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
+	unsigned int rev = svr & 0xffff;
+
+	/* MPC8313 Rev 2.0 and higher; All MPC837x */
+	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
+			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+		priv->errata |= GFAR_ERRATA_74;
+
+	/* MPC8313 and MPC837x all rev */
+	if ((pvr == 0x80850010 && mod == 0x80b0) ||
+			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+		priv->errata |= GFAR_ERRATA_76;
+
+	/* MPC8313 and MPC837x all rev */
+	if ((pvr == 0x80850010 && mod == 0x80b0) ||
+			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+		priv->errata |= GFAR_ERRATA_A002;
+
+	if (priv->errata)
+		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
+			 priv->errata);
+}
+
 /* Set up the ethernet device structure, private data,
  * and anything else we need before we start */
 static int gfar_probe(struct of_device *ofdev,
@@ -960,6 +989,8 @@
 	dev_set_drvdata(&ofdev->dev, priv);
 	regs = priv->gfargrp[0].regs;
 
+	gfar_detect_errata(priv);
+
 	/* Stop the DMA engine now, in case it was running before */
 	/* (The firmware could have used it, and left it running). */
 	gfar_halt(dev);
@@ -974,7 +1005,10 @@
 	gfar_write(&regs->maccfg1, tempval);
 
 	/* Initialize MACCFG2. */
-	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
+	tempval = MACCFG2_INIT_SETTINGS;
+	if (gfar_has_errata(priv, GFAR_ERRATA_74))
+		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+	gfar_write(&regs->maccfg2, tempval);
 
 	/* Initialize ECNTRL */
 	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
@@ -1541,6 +1575,29 @@
 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
 }
 
+static int __gfar_is_rx_idle(struct gfar_private *priv)
+{
+	u32 res;
+
+	/*
+	 * Normally TSEC should not hang on GRS commands, so we should
+	 * actually wait for IEVENT_GRSC flag.
+	 */
+	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
+		return 0;
+
+	/*
+	 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
+	 * and the Rx can be safely reset.
+	 */
+	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
+	res &= 0x7f807f80;
+	if ((res & 0xffff) == (res >> 16))
+		return 1;
+
+	return 0;
+}
 
 /* Halt the receive and transmit queues */
 static void gfar_halt_nodisable(struct net_device *dev)
@@ -1564,12 +1621,18 @@
 	tempval = gfar_read(&regs->dmactrl);
 	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
 	    != (DMACTRL_GRS | DMACTRL_GTS)) {
+		int ret;
+
 		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
 		gfar_write(&regs->dmactrl, tempval);
 
-		spin_event_timeout(((gfar_read(&regs->ievent) &
-			 (IEVENT_GRSC | IEVENT_GTSC)) ==
-			 (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
+		do {
+			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
+				 (IEVENT_GRSC | IEVENT_GTSC)) ==
+				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
+			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
+				ret = __gfar_is_rx_idle(priv);
+		} while (!ret);
 	}
 }
 
@@ -1987,6 +2050,20 @@
 	unsigned int nr_frags, nr_txbds, length;
 	union skb_shared_tx *shtx;
 
+	/*
+	 * TOE=1 frames larger than 2500 bytes may see excess delays
+	 * before start of transmission.
+	 */
+	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
+			skb->ip_summed == CHECKSUM_PARTIAL &&
+			skb->len > 2500)) {
+		int ret;
+
+		ret = skb_checksum_help(skb);
+		if (ret)
+			return ret;
+	}
+
 	rq = skb->queue_mapping;
 	tx_queue = priv->tx_queue[rq];
 	txq = netdev_get_tx_queue(dev, rq);
@@ -2300,7 +2377,8 @@
 	 * to allow huge frames, and to check the length */
 	tempval = gfar_read(&regs->maccfg2);
 
-	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
+	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
+			gfar_has_errata(priv, GFAR_ERRATA_74))
 		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
 	else
 		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
@@ -2342,6 +2420,15 @@
 	schedule_work(&priv->reset_task);
 }
 
+static void gfar_align_skb(struct sk_buff *skb)
+{
+	/* We need the data buffer to be aligned properly.  We will reserve
+	 * as many bytes as needed to align the data properly
+	 */
+	skb_reserve(skb, RXBUF_ALIGNMENT -
+		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+}
+
 /* Interrupt Handler for Transmit complete */
 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
@@ -2426,9 +2513,10 @@
 		 */
 		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
 				skb_recycle_check(skb, priv->rx_buffer_size +
-					RXBUF_ALIGNMENT))
+					RXBUF_ALIGNMENT)) {
+			gfar_align_skb(skb);
 			__skb_queue_head(&priv->rx_recycle, skb);
-		else
+		} else
 			dev_kfree_skb_any(skb);
 
 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
@@ -2491,29 +2579,28 @@
 	gfar_init_rxbdp(rx_queue, bdp, buf);
 }
 
+static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct sk_buff *skb = NULL;
+
+	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
+	if (!skb)
+		return NULL;
+
+	gfar_align_skb(skb);
+
+	return skb;
+}
 
 struct sk_buff * gfar_new_skb(struct net_device *dev)
 {
-	unsigned int alignamount;
 	struct gfar_private *priv = netdev_priv(dev);
 	struct sk_buff *skb = NULL;
 
 	skb = __skb_dequeue(&priv->rx_recycle);
 	if (!skb)
-		skb = netdev_alloc_skb(dev,
-				priv->rx_buffer_size + RXBUF_ALIGNMENT);
-
-	if (!skb)
-		return NULL;
-
-	alignamount = RXBUF_ALIGNMENT -
-		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
-
-	/* We need the data buffer to be aligned properly.  We will reserve
-	 * as many bytes as needed to align the data properly
-	 */
-	skb_reserve(skb, alignamount);
-	GFAR_CB(skb)->alignamount = alignamount;
+		skb = gfar_alloc_skb(dev);
 
 	return skb;
 }
@@ -2666,17 +2753,8 @@
 
 			if (unlikely(!newskb))
 				newskb = skb;
-			else if (skb) {
-				/*
-				 * We need to un-reserve() the skb to what it
-				 * was before gfar_new_skb() re-aligned
-				 * it to an RXBUF_ALIGNMENT boundary
-				 * before we put the skb back on the
-				 * recycle list.
-				 */
-				skb_reserve(skb, -GFAR_CB(skb)->alignamount);
+			else if (skb)
 				__skb_queue_head(&priv->rx_recycle, skb);
-			}
 		} else {
 			/* Increment the number of packets */
 			rx_queue->stats.rx_packets++;
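
The GFAR_ERRATA_A002 workaround above decides whether the eTSEC receiver is
idle by reading the register at offset 0xD1C, masking the bits of interest
with 0x7f807f80 and comparing the upper and lower halfwords. A standalone
demonstration of that comparison (the sample register values are made up):

#include <stdio.h>
#include <stdint.h>

/* Same test as __gfar_is_rx_idle(): keep the masked bits of each halfword
 * and report idle only when the two halves agree. */
static int rx_idle(uint32_t reg)
{
	uint32_t res = reg & 0x7f807f80;

	return (res & 0xffff) == (res >> 16);
}

int main(void)
{
	printf("0x12801280 -> %d (idle)\n", rx_idle(0x12801280));
	printf("0x12803400 -> %d (busy)\n", rx_idle(0x12803400));
	return 0;
}
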
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index ac4a92e..710810e 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -1025,6 +1025,12 @@
 	char int_name_er[GFAR_INT_NAME_MAX];
 };
 
+enum gfar_errata {
+	GFAR_ERRATA_74		= 0x01,
+	GFAR_ERRATA_76		= 0x02,
+	GFAR_ERRATA_A002	= 0x04,
+};
+
 /* Struct stolen almost completely (and shamelessly) from the FCC enet source
  * (Ok, that's not so true anymore, but there is a family resemblence)
  * The GFAR buffer descriptors track the ring buffers.  The rx_bd_base
@@ -1049,6 +1055,7 @@
 	struct device_node *node;
 	struct net_device *ndev;
 	struct of_device *ofdev;
+	enum gfar_errata errata;
 
 	struct gfar_priv_grp gfargrp[MAXGROUPS];
 	struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
@@ -1111,6 +1118,12 @@
 extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
 extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
 
+static inline int gfar_has_errata(struct gfar_private *priv,
+				  enum gfar_errata err)
+{
+	return priv->errata & err;
+}
+
 static inline u32 gfar_read(volatile unsigned __iomem *addr)
 {
 	u32 val;
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 3a029d0..4d09eab 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1555,7 +1555,6 @@
 	}
 
 	/* setup NAPI */
-	memset(&greth->napi, 0, sizeof(greth->napi));
 	netif_napi_add(dev, &greth->napi, greth_poll, 64);
 
 	return 0;
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 61f2b1c..49aac70 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -492,7 +492,6 @@
 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 	dma_addr_t tx_ring_dma;
 	dma_addr_t rx_ring_dma;
-	struct net_device_stats stats;
 	struct timer_list timer;		/* Media selection timer. */
 	/* Frequently used and paired value: keep adjacent for cache effect. */
 	spinlock_t lock;
@@ -1036,7 +1035,7 @@
 		if (entry >= TX_RING_SIZE-1)
 			hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
 				cpu_to_le32(DescEndRing);
-		hmp->stats.tx_packets++;
+		dev->stats.tx_packets++;
 	}
 
 	return 0;
@@ -1167,7 +1166,7 @@
 
 	/* Trigger an immediate transmit demand. */
 	dev->trans_start = jiffies; /* prevent tx timeout */
-	hmp->stats.tx_errors++;
+	dev->stats.tx_errors++;
 
 	/* Restart the chip's Tx/Rx processes . */
 	writew(0x0002, ioaddr + TxCmd); /* STOP Tx */
@@ -1434,7 +1433,7 @@
 					if (entry >= TX_RING_SIZE-1)
 						hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
 							cpu_to_le32(DescEndRing);
-					hmp->stats.tx_packets++;
+					dev->stats.tx_packets++;
 				}
 				if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){
 					/* The ring is no longer full */
@@ -1525,18 +1524,22 @@
 				   le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000,
 				   le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff,
 				   le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length));
-			hmp->stats.rx_length_errors++;
+			dev->stats.rx_length_errors++;
 		} /* else  Omit for prototype errata??? */
 		if (frame_status & 0x00380000) {
 			/* There was an error. */
 			if (hamachi_debug > 2)
 				printk(KERN_DEBUG "  hamachi_rx() Rx error was %8.8x.\n",
 					   frame_status);
-			hmp->stats.rx_errors++;
-			if (frame_status & 0x00600000) hmp->stats.rx_length_errors++;
-			if (frame_status & 0x00080000) hmp->stats.rx_frame_errors++;
-			if (frame_status & 0x00100000) hmp->stats.rx_crc_errors++;
-			if (frame_status < 0) hmp->stats.rx_dropped++;
+			dev->stats.rx_errors++;
+			if (frame_status & 0x00600000)
+				dev->stats.rx_length_errors++;
+			if (frame_status & 0x00080000)
+				dev->stats.rx_frame_errors++;
+			if (frame_status & 0x00100000)
+				dev->stats.rx_crc_errors++;
+			if (frame_status < 0)
+				dev->stats.rx_dropped++;
 		} else {
 			struct sk_buff *skb;
 			/* Omit CRC */
@@ -1654,7 +1657,7 @@
 #endif  /* RX_CHECKSUM */
 
 			netif_rx(skb);
-			hmp->stats.rx_packets++;
+			dev->stats.rx_packets++;
 		}
 		entry = (++hmp->cur_rx) % RX_RING_SIZE;
 	}
@@ -1724,9 +1727,9 @@
 		       dev->name, intr_status);
 	/* Hmmmmm, it's not clear how to recover from PCI faults. */
 	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
-		hmp->stats.tx_fifo_errors++;
+		dev->stats.tx_fifo_errors++;
 	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
-		hmp->stats.rx_fifo_errors++;
+		dev->stats.rx_fifo_errors++;
 }
 
 static int hamachi_close(struct net_device *dev)
@@ -1828,19 +1831,27 @@
            so I think I'll comment it out here and see if better things
            happen.
         */
-	/* hmp->stats.tx_packets	= readl(ioaddr + 0x000); */
+	/* dev->stats.tx_packets	= readl(ioaddr + 0x000); */
 
-	hmp->stats.rx_bytes = readl(ioaddr + 0x330); /* Total Uni+Brd+Multi */
-	hmp->stats.tx_bytes = readl(ioaddr + 0x3B0); /* Total Uni+Brd+Multi */
-	hmp->stats.multicast		= readl(ioaddr + 0x320); /* Multicast Rx */
+	/* Total Uni+Brd+Multi */
+	dev->stats.rx_bytes = readl(ioaddr + 0x330);
+	/* Total Uni+Brd+Multi */
+	dev->stats.tx_bytes = readl(ioaddr + 0x3B0);
+	/* Multicast Rx */
+	dev->stats.multicast = readl(ioaddr + 0x320);
 
-	hmp->stats.rx_length_errors	= readl(ioaddr + 0x368); /* Over+Undersized */
-	hmp->stats.rx_over_errors	= readl(ioaddr + 0x35C); /* Jabber */
-	hmp->stats.rx_crc_errors	= readl(ioaddr + 0x360); /* Jabber */
-	hmp->stats.rx_frame_errors	= readl(ioaddr + 0x364); /* Symbol Errs */
-	hmp->stats.rx_missed_errors	= readl(ioaddr + 0x36C); /* Dropped */
+	/* Over+Undersized */
+	dev->stats.rx_length_errors = readl(ioaddr + 0x368);
+	/* Jabber */
+	dev->stats.rx_over_errors = readl(ioaddr + 0x35C);
+	/* Jabber */
+	dev->stats.rx_crc_errors = readl(ioaddr + 0x360);
+	/* Symbol Errs */
+	dev->stats.rx_frame_errors = readl(ioaddr + 0x364);
+	/* Dropped */
+	dev->stats.rx_missed_errors = readl(ioaddr + 0x36C);
 
-	return &hmp->stats;
+	return &dev->stats;
 }
 
 static void set_rx_mode(struct net_device *dev)
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 68e5ac8..acbf0d0 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -168,7 +168,6 @@
 	u_char mac1_mode;
 	u_char mac2_mode;
 	u_char hash_bytes[8];
-	struct net_device_stats stats;
 
 	/* Rings for busmaster mode: */
 	hp100_ring_t *rxrhead;	/* Head (oldest) index into rxring */
@@ -1582,8 +1581,8 @@
 	spin_unlock_irqrestore(&lp->lock, flags);
 
 	/* Update statistics */
-	lp->stats.tx_packets++;
-	lp->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
 
 	return NETDEV_TX_OK;
 
@@ -1740,8 +1739,8 @@
 
 	hp100_outb(HP100_TX_CMD | HP100_SET_LB, OPTION_MSW);	/* send packet */
 
-	lp->stats.tx_packets++;
-	lp->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
 	hp100_ints_on();
 	spin_unlock_irqrestore(&lp->lock, flags);
 
@@ -1822,7 +1821,7 @@
 			printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n",
 					     dev->name, pkt_len);
 #endif
-			lp->stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 		} else {	/* skb successfully allocated */
 
 			u_char *ptr;
@@ -1848,8 +1847,8 @@
 					ptr[9], ptr[10], ptr[11]);
 #endif
 			netif_rx(skb);
-			lp->stats.rx_packets++;
-			lp->stats.rx_bytes += pkt_len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 		}
 
 		/* Indicate the card that we have got the packet */
@@ -1858,7 +1857,7 @@
 		switch (header & 0x00070000) {
 		case (HP100_MULTI_ADDR_HASH << 16):
 		case (HP100_MULTI_ADDR_NO_HASH << 16):
-			lp->stats.multicast++;
+			dev->stats.multicast++;
 			break;
 		}
 	}			/* end of while(there are packets) loop */
@@ -1930,7 +1929,7 @@
 			if (ptr->skb == NULL) {
 				printk("hp100: %s: rx_bm: skb null\n", dev->name);
 				/* can happen if we only allocated room for the pdh due to memory shortage. */
-				lp->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 			} else {
 				skb_trim(ptr->skb, pkt_len);	/* Shorten it */
 				ptr->skb->protocol =
@@ -1938,14 +1937,14 @@
 
 				netif_rx(ptr->skb);	/* Up and away... */
 
-				lp->stats.rx_packets++;
-				lp->stats.rx_bytes += pkt_len;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += pkt_len;
 			}
 
 			switch (header & 0x00070000) {
 			case (HP100_MULTI_ADDR_HASH << 16):
 			case (HP100_MULTI_ADDR_NO_HASH << 16):
-				lp->stats.multicast++;
+				dev->stats.multicast++;
 				break;
 			}
 		} else {
@@ -1954,7 +1953,7 @@
 #endif
 			if (ptr->skb != NULL)
 				dev_kfree_skb_any(ptr->skb);
-			lp->stats.rx_errors++;
+			dev->stats.rx_errors++;
 		}
 
 		lp->rxrhead = lp->rxrhead->next;
@@ -1992,14 +1991,13 @@
 	hp100_update_stats(dev);
 	hp100_ints_on();
 	spin_unlock_irqrestore(&lp->lock, flags);
-	return &(lp->stats);
+	return &(dev->stats);
 }
 
 static void hp100_update_stats(struct net_device *dev)
 {
 	int ioaddr = dev->base_addr;
 	u_short val;
-	struct hp100_private *lp = netdev_priv(dev);
 
 #ifdef HP100_DEBUG_B
 	hp100_outw(0x4216, TRACE);
@@ -2009,14 +2007,14 @@
 	/* Note: Statistics counters clear when read. */
 	hp100_page(MAC_CTRL);
 	val = hp100_inw(DROPPED) & 0x0fff;
-	lp->stats.rx_errors += val;
-	lp->stats.rx_over_errors += val;
+	dev->stats.rx_errors += val;
+	dev->stats.rx_over_errors += val;
 	val = hp100_inb(CRC);
-	lp->stats.rx_errors += val;
-	lp->stats.rx_crc_errors += val;
+	dev->stats.rx_errors += val;
+	dev->stats.rx_crc_errors += val;
 	val = hp100_inb(ABORT);
-	lp->stats.tx_errors += val;
-	lp->stats.tx_aborted_errors += val;
+	dev->stats.tx_errors += val;
+	dev->stats.tx_aborted_errors += val;
 	hp100_page(PERFORMANCE);
 }
 
@@ -2025,7 +2023,6 @@
 #ifdef HP100_DEBUG_B
 	int ioaddr = dev->base_addr;
 #endif
-	struct hp100_private *lp = netdev_priv(dev);
 
 #ifdef HP100_DEBUG_B
 	int ioaddr = dev->base_addr;
@@ -2034,8 +2031,8 @@
 #endif
 
 	/* Note: Statistics counters clear when read. */
-	lp->stats.rx_errors++;
-	lp->stats.tx_errors++;
+	dev->stats.rx_errors++;
+	dev->stats.tx_errors++;
 }
 
 static void hp100_clear_stats(struct hp100_private *lp, int ioaddr)
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 86438b5..06251a9 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -295,6 +295,10 @@
 
 	if (hw->bus.func == E1000_FUNC_1)
 		mask = E1000_SWFW_PHY1_SM;
+	else if (hw->bus.func == E1000_FUNC_2)
+		mask = E1000_SWFW_PHY2_SM;
+	else if (hw->bus.func == E1000_FUNC_3)
+		mask = E1000_SWFW_PHY3_SM;
 
 	return igb_acquire_swfw_sync_82575(hw, mask);
 }
@@ -312,6 +316,10 @@
 
 	if (hw->bus.func == E1000_FUNC_1)
 		mask = E1000_SWFW_PHY1_SM;
+	else if (hw->bus.func == E1000_FUNC_2)
+		mask = E1000_SWFW_PHY2_SM;
+	else if (hw->bus.func == E1000_FUNC_3)
+		mask = E1000_SWFW_PHY3_SM;
 
 	igb_release_swfw_sync_82575(hw, mask);
 }
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 24d9be6..90bc29d 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -164,6 +164,8 @@
 #define E1000_SWFW_EEP_SM   0x1
 #define E1000_SWFW_PHY0_SM  0x2
 #define E1000_SWFW_PHY1_SM  0x4
+#define E1000_SWFW_PHY2_SM  0x20
+#define E1000_SWFW_PHY3_SM  0x40
 
 /* FACTPS Definitions */
 /* Device Control */
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index f2ebf927..26bf6a1 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1823,12 +1823,10 @@
 		dev_info(&adapter->pdev->dev, "online testing starting\n");
 
 		/* PHY is powered down when interface is down */
-		if (!netif_carrier_ok(netdev)) {
+		if (if_running && igb_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+		else
 			data[4] = 0;
-		} else {
-			if (igb_link_test(adapter, &data[4]))
-				eth_test->flags |= ETH_TEST_FL_FAILED;
-		}
 
 		/* Online tests aren't run; pass by default */
 		data[0] = 0;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 3881918..9465617 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -630,9 +630,6 @@
 			for (; i < adapter->rss_queues; i++)
 				adapter->rx_ring[i]->reg_idx = rbase_offset +
 				                               Q_IDX_82576(i);
-			for (; j < adapter->rss_queues; j++)
-				adapter->tx_ring[j]->reg_idx = rbase_offset +
-				                               Q_IDX_82576(j);
 		}
 	case e1000_82575:
 	case e1000_82580:
@@ -996,7 +993,10 @@
 
 	/* Number of supported queues. */
 	adapter->num_rx_queues = adapter->rss_queues;
-	adapter->num_tx_queues = adapter->rss_queues;
+	if (adapter->vfs_allocated_count)
+		adapter->num_tx_queues = 1;
+	else
+		adapter->num_tx_queues = adapter->rss_queues;
 
 	/* start with one vector for every rx queue */
 	numvecs = adapter->num_rx_queues;
@@ -2091,9 +2091,6 @@
 #ifdef CONFIG_PCI_IOV
 	struct pci_dev *pdev = adapter->pdev;
 
-	if (adapter->vfs_allocated_count > 7)
-		adapter->vfs_allocated_count = 7;
-
 	if (adapter->vfs_allocated_count) {
 		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
 		                           sizeof(struct vf_data_storage),
@@ -2258,7 +2255,7 @@
 
 #ifdef CONFIG_PCI_IOV
 	if (hw->mac.type == e1000_82576)
-		adapter->vfs_allocated_count = max_vfs;
+		adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
 
 #endif /* CONFIG_PCI_IOV */
 	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
@@ -2720,14 +2717,16 @@
 	}
 	igb_vmm_control(adapter);
 
-	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
-		 E1000_MRQC_RSS_FIELD_IPV4_TCP);
-	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
-		 E1000_MRQC_RSS_FIELD_IPV6_TCP);
-	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
-		 E1000_MRQC_RSS_FIELD_IPV6_UDP);
-	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
-		 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+	/*
+	 * Generate RSS hash based on TCP port numbers and/or
+	 * IPv4/v6 src and dst addresses since UDP cannot be
+	 * hashed reliably due to IP fragmentation
+	 */
+	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+		E1000_MRQC_RSS_FIELD_IPV4_TCP |
+		E1000_MRQC_RSS_FIELD_IPV6 |
+		E1000_MRQC_RSS_FIELD_IPV6_TCP |
+		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
 
 	wr32(E1000_MRQC, mrqc);
 }
@@ -4977,6 +4976,10 @@
 
 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
 {
+	/*
+	 * The VF MAC Address is stored in a packed array of bytes
+	 * starting at the second 32 bit word of the msg array
+	 */
 	unsigned char *addr = (char *)&msg[1];
 	int err = -1;
 
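
The comment added to igb_set_vf_mac_addr() above documents the mailbox
layout: the first 32-bit word of msg[] carries the request code and the six
MAC address bytes are packed starting at the second word, which is why the
code simply aliases &msg[1] as a byte pointer. A standalone model of that
extraction (the message contents are invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* msg[0] would hold the mailbox opcode; the MAC bytes follow. */
	uint32_t msg[3] = { 0x01, 0x674523a0, 0x0000cba9 };
	unsigned char *addr = (unsigned char *)&msg[1];

	/* On a little-endian host this prints a0:23:45:67:a9:cb. */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}
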
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index e3b5e94..0b3f6df 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -82,7 +82,6 @@
 	struct ioc3_etxd *txr;
 	struct sk_buff *rx_skbs[512];
 	struct sk_buff *tx_skbs[128];
-	struct net_device_stats stats;
 	int rx_ci;			/* RX consumer index */
 	int rx_pi;			/* RX producer index */
 	int tx_ci;			/* TX consumer index */
@@ -504,8 +503,8 @@
 	struct ioc3_private *ip = netdev_priv(dev);
 	struct ioc3 *ioc3 = ip->regs;
 
-	ip->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
-	return &ip->stats;
+	dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
+	return &dev->stats;
 }
 
 static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
@@ -576,8 +575,9 @@
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ioc3_rx(struct ioc3_private *ip)
+static inline void ioc3_rx(struct net_device *dev)
 {
+	struct ioc3_private *ip = netdev_priv(dev);
 	struct sk_buff *skb, *new_skb;
 	struct ioc3 *ioc3 = ip->regs;
 	int rx_entry, n_entry, len;
@@ -598,13 +598,13 @@
 		if (err & ERXBUF_GOODPKT) {
 			len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
 			skb_trim(skb, len);
-			skb->protocol = eth_type_trans(skb, priv_netdev(ip));
+			skb->protocol = eth_type_trans(skb, dev);
 
 			new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
 			if (!new_skb) {
 				/* Ouch, drop packet and just recycle packet
 				   to keep the ring filled.  */
-				ip->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				new_skb = skb;
 				goto next;
 			}
@@ -622,19 +622,19 @@
 			rxb = (struct ioc3_erxbuf *) new_skb->data;
 			skb_reserve(new_skb, RX_OFFSET);
 
-			ip->stats.rx_packets++;		/* Statistics */
-			ip->stats.rx_bytes += len;
+			dev->stats.rx_packets++;		/* Statistics */
+			dev->stats.rx_bytes += len;
 		} else {
- 			/* The frame is invalid and the skb never
-                           reached the network layer so we can just
-                           recycle it.  */
- 			new_skb = skb;
- 			ip->stats.rx_errors++;
+			/* The frame is invalid and the skb never
+			   reached the network layer so we can just
+			   recycle it.  */
+			new_skb = skb;
+			dev->stats.rx_errors++;
 		}
 		if (err & ERXBUF_CRCERR)	/* Statistics */
-			ip->stats.rx_crc_errors++;
+			dev->stats.rx_crc_errors++;
 		if (err & ERXBUF_FRAMERR)
-			ip->stats.rx_frame_errors++;
+			dev->stats.rx_frame_errors++;
 next:
 		ip->rx_skbs[n_entry] = new_skb;
 		rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
@@ -652,8 +652,9 @@
 	ip->rx_ci = rx_entry;
 }
 
-static inline void ioc3_tx(struct ioc3_private *ip)
+static inline void ioc3_tx(struct net_device *dev)
 {
+	struct ioc3_private *ip = netdev_priv(dev);
 	unsigned long packets, bytes;
 	struct ioc3 *ioc3 = ip->regs;
 	int tx_entry, o_entry;
@@ -681,12 +682,12 @@
 		tx_entry = (etcir >> 7) & 127;
 	}
 
-	ip->stats.tx_packets += packets;
-	ip->stats.tx_bytes += bytes;
+	dev->stats.tx_packets += packets;
+	dev->stats.tx_bytes += bytes;
 	ip->txqlen -= packets;
 
 	if (ip->txqlen < 128)
-		netif_wake_queue(priv_netdev(ip));
+		netif_wake_queue(dev);
 
 	ip->tx_ci = o_entry;
 	spin_unlock(&ip->ioc3_lock);
@@ -699,9 +700,9 @@
  * with such error interrupts if something really goes wrong, so we might
  * also consider to take the interface down.
  */
-static void ioc3_error(struct ioc3_private *ip, u32 eisr)
+static void ioc3_error(struct net_device *dev, u32 eisr)
 {
-	struct net_device *dev = priv_netdev(ip);
+	struct ioc3_private *ip = netdev_priv(dev);
 	unsigned char *iface = dev->name;
 
 	spin_lock(&ip->ioc3_lock);
@@ -747,11 +748,11 @@
 
 	if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
 	            EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
-		ioc3_error(ip, eisr);
+		ioc3_error(dev, eisr);
 	if (eisr & EISR_RXTIMERINT)
-		ioc3_rx(ip);
+		ioc3_rx(dev);
 	if (eisr & EISR_TXEXPLICIT)
-		ioc3_tx(ip);
+		ioc3_tx(dev);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 0dbd193..36c3060 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -273,7 +273,7 @@
   __u8 control;                 /*Slot control/status see below */
   __u32 address;                /*Slot buffer address */
 }
-__attribute__ ((packed));
+__packed;
 
 #define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
 
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index ac0443d..58ddb52 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -125,7 +125,7 @@
 	__u8  bmAdditionalBOFs;
 	__u8  bIrdaRateSniff;
 	__u8  bMaxUnicastList;
-} __attribute__ ((packed));
+} __packed;
 
 /* class specific interface request to get the IrDA-USB class descriptor
  * (6.2.5, USB-IrDA class spec 1.0) */
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index b54d3b4..1046014 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -154,7 +154,7 @@
 	__le32 baudrate;	/* baud rate, little endian */
 	__u8 flags;
 	__u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 #define KS_DATA_5_BITS 0x00
 #define KS_DATA_6_BITS 0x01
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 8d713eb..9cc142f 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -117,7 +117,7 @@
 	__le32 baudrate;	/* baud rate, little endian */
 	__u8 flags;
 	__u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 #define KS_DATA_5_BITS 0x00
 #define KS_DATA_6_BITS 0x01
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 3050d1a..3f24a1f3 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -544,9 +544,9 @@
 		struct {
 			u8		addr_res[3];
 			volatile u8	status;		/* descriptor status */
-		} __attribute__((packed)) rd_s;
-	} __attribute((packed)) rd_u;
-} __attribute__ ((packed));
+		} __packed rd_s;
+	} __packed rd_u;
+} __packed;
 
 #define rd_addr		rd_u.addr
 #define rd_status	rd_u.rd_s.status
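
The irda header changes above swap __attribute__ ((packed)) for the
kernel's __packed shorthand; both expand to the same GCC attribute and
remove padding between structure members. A standalone illustration using
a slot layout like the one in donauboe.h (the local __packed define mirrors
the kernel's, since this builds outside the kernel tree):

#include <stdio.h>
#include <stdint.h>

#define __packed __attribute__((packed))

struct slot_padded {
	uint8_t  control;
	uint32_t address;	/* normally preceded by 3 bytes of padding */
};

struct slot_packed {
	uint8_t  control;
	uint32_t address;
} __packed;

int main(void)
{
	/* Typically prints "padded: 8 bytes, packed: 5 bytes". */
	printf("padded: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct slot_padded), sizeof(struct slot_packed));
	return 0;
}
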
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ffae480..9e15eb9 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -44,11 +44,9 @@
 #include <linux/dca.h>
 #endif
 
-#define PFX "ixgbe: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
-	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
-	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
-		__func__ , ## args)))
+/* common prefix used by pr_<> macros */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 /* TX/RX descriptor defines */
 #define IXGBE_DEFAULT_TXD		    512
@@ -112,7 +110,6 @@
 	u16 vlans_enabled;
 	bool clear_to_send;
 	bool pf_set_mac;
-	int rar;
 	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
 	u16 pf_qos;
 };
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index a4e2901..3e06a61 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -206,6 +206,14 @@
 	s32 status = 0;
 	u32 autoc = 0;
 
+	/* Determine 1G link capabilities off of SFP+ type */
+	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		*negotiation = true;
+		goto out;
+	}
+
 	/*
 	 * Determine link capabilities based on the stored value of AUTOC,
 	 * which represents EEPROM defaults.  If AUTOC value has not been
@@ -707,9 +715,8 @@
 
 out:
 	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
-		netif_info(adapter, hw, adapter->netdev, "Smartspeed has"
-			" downgraded the link speed from the maximum"
-			" advertised\n");
+		e_info(hw, "Smartspeed has downgraded the link speed from "
+		       "the maximum advertised\n");
 	return status;
 }
 
@@ -2088,6 +2095,7 @@
 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
 	u16 ext_ability = 0;
 	u8 comp_codes_10g = 0;
+	u8 comp_codes_1g = 0;
 
 	hw->phy.ops.identify(hw);
 
@@ -2168,11 +2176,15 @@
 	case ixgbe_phy_sfp_intel:
 	case ixgbe_phy_sfp_unknown:
 		hw->phy.ops.read_i2c_eeprom(hw,
+		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+		hw->phy.ops.read_i2c_eeprom(hw,
 		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
 		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
 		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
 		break;
 	default:
 		break;
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 3080afb..5cf15aa 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -105,12 +105,23 @@
 
 #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
 
-#ifdef DEBUG
-extern char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw);
+extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
-	printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg)
-#else
-#define hw_dbg(hw, format, arg...) do {} while (0)
-#endif
-
+	netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
+#define e_dev_info(format, arg...) \
+	dev_info(&adapter->pdev->dev, format, ## arg)
+#define e_dev_warn(format, arg...) \
+	dev_warn(&adapter->pdev->dev, format, ## arg)
+#define e_dev_err(format, arg...) \
+	dev_err(&adapter->pdev->dev, format, ## arg)
+#define e_dev_notice(format, arg...) \
+	dev_notice(&adapter->pdev->dev, format, ## arg)
+#define e_info(msglvl, format, arg...) \
+	netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_err(msglvl, format, arg...) \
+	netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_warn(msglvl, format, arg...) \
+	netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_crit(msglvl, format, arg...) \
+	netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
 #endif /* IXGBE_COMMON */
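
The e_* macros introduced above expand against a local variable named
adapter, which is what lets the later hunks shrink the old DPRINTK calls to
one line. A kernel-style usage sketch (the function and its arguments are
hypothetical; only the macro names and msglvl tokens come from this patch):

/* Illustrative only: adapter must be in scope for the netif_* variants. */
static void example_report(struct ixgbe_adapter *adapter, int err)
{
	if (err)
		e_err(drv, "setup failed with code %d\n", err);
	else
		e_info(probe, "setup complete\n");

	/* Messages not gated on msg_enable go through the dev_* wrappers. */
	e_dev_info("device ready\n");
}
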
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 71da325..b53b465 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -121,7 +121,7 @@
 			goto out;
 
 		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
-			DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n");
+			e_err(drv, "Enable failed, needs MSI-X\n");
 			err = 1;
 			goto out;
 		}
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3a93a81..da54b38 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -55,7 +55,7 @@
 				offsetof(struct ixgbe_adapter, m)
 #define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
 				sizeof(((struct net_device *)0)->m), \
-				offsetof(struct net_device, m)
+				offsetof(struct net_device, m) - offsetof(struct net_device, stats)
 
 static struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)},
@@ -234,6 +234,13 @@
 		case ixgbe_sfp_type_not_present:
 			ecmd->port = PORT_NONE;
 			break;
+		case ixgbe_sfp_type_1g_cu_core0:
+		case ixgbe_sfp_type_1g_cu_core1:
+			ecmd->port = PORT_TP;
+			ecmd->supported = SUPPORTED_TP;
+			ecmd->advertising = (ADVERTISED_1000baseT_Full |
+			                     ADVERTISED_TP);
+			break;
 		case ixgbe_sfp_type_unknown:
 		default:
 			ecmd->port = PORT_OTHER;
@@ -294,8 +301,7 @@
 		hw->mac.autotry_restart = true;
 		err = hw->mac.ops.setup_link(hw, advertised, true, true);
 		if (err) {
-			DPRINTK(PROBE, INFO,
-			        "setup link failed with code %d\n", err);
+			e_info(probe, "setup link failed with code %d\n", err);
 			hw->mac.ops.setup_link(hw, old, true, true);
 		}
 	} else {
@@ -992,16 +998,18 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	u64 *queue_stat;
 	int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *net_stats;
 	int j, k;
 	int i;
 	char *p = NULL;
 
 	ixgbe_update_stats(adapter);
-	dev_get_stats(netdev);
+	net_stats = dev_get_stats(netdev, &temp);
 	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
 		switch (ixgbe_gstrings_stats[i].type) {
 		case NETDEV_STATS:
-			p = (char *) netdev +
+			p = (char *) net_stats +
 					ixgbe_gstrings_stats[i].stat_offset;
 			break;
 		case IXGBE_STATS:
@@ -1188,9 +1196,9 @@
 		writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
 		val = readl(adapter->hw.hw_addr + R);                         \
 		if (val != (_test[pat] & W & M)) {                            \
-			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\
-					  "0x%08X expected 0x%08X\n",         \
-				R, val, (_test[pat] & W & M));                \
+			e_err(drv, "pattern test reg %04X failed: got "   \
+			      "0x%08X expected 0x%08X\n",		      \
+			      R, val, (_test[pat] & W & M));                \
 			*data = R;                                            \
 			writel(before, adapter->hw.hw_addr + R);              \
 			return 1;                                             \
@@ -1206,8 +1214,8 @@
 	writel((W & M), (adapter->hw.hw_addr + R));                           \
 	val = readl(adapter->hw.hw_addr + R);                                 \
 	if ((W & M) != (val & M)) {                                           \
-		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
-				 "expected 0x%08X\n", R, (val & M), (W & M)); \
+		e_err(drv, "set/check reg %04X test failed: got 0x%08X "  \
+		      "expected 0x%08X\n", R, (val & M), (W & M));        \
 		*data = R;                                                    \
 		writel(before, (adapter->hw.hw_addr + R));                    \
 		return 1;                                                     \
@@ -1240,8 +1248,8 @@
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
 	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
 	if (value != after) {
-		DPRINTK(DRV, ERR, "failed STATUS register test got: "
-		        "0x%08X expected: 0x%08X\n", after, value);
+		e_err(drv, "failed STATUS register test got: 0x%08X "
+		      "expected: 0x%08X\n", after, value);
 		*data = 1;
 		return 1;
 	}
@@ -1341,8 +1349,8 @@
 		*data = 1;
 		return -1;
 	}
-	DPRINTK(HW, INFO, "testing %s interrupt\n",
-		(shared_int ? "shared" : "unshared"));
+	e_info(hw, "testing %s interrupt\n", shared_int ?
+	       "shared" : "unshared");
 
 	/* Disable all the interrupts */
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
@@ -1847,7 +1855,7 @@
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
-		DPRINTK(HW, INFO, "offline testing starting\n");
+		e_info(hw, "offline testing starting\n");
 
 		/* Link test performed before hardware reset so autoneg doesn't
 		 * interfere with test result */
@@ -1880,17 +1888,17 @@
 		else
 			ixgbe_reset(adapter);
 
-		DPRINTK(HW, INFO, "register testing starting\n");
+		e_info(hw, "register testing starting\n");
 		if (ixgbe_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		ixgbe_reset(adapter);
-		DPRINTK(HW, INFO, "eeprom testing starting\n");
+		e_info(hw, "eeprom testing starting\n");
 		if (ixgbe_eeprom_test(adapter, &data[1]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		ixgbe_reset(adapter);
-		DPRINTK(HW, INFO, "interrupt testing starting\n");
+		e_info(hw, "interrupt testing starting\n");
 		if (ixgbe_intr_test(adapter, &data[2]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1898,14 +1906,14 @@
 		 * loopback diagnostic. */
 		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
 				      IXGBE_FLAG_VMDQ_ENABLED)) {
-			DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
-				"mode\n");
+			e_info(hw, "Skip MAC loopback diagnostic in VT "
+			       "mode\n");
 			data[3] = 0;
 			goto skip_loopback;
 		}
 
 		ixgbe_reset(adapter);
-		DPRINTK(HW, INFO, "loopback testing starting\n");
+		e_info(hw, "loopback testing starting\n");
 		if (ixgbe_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1916,7 +1924,7 @@
 		if (if_running)
 			dev_open(netdev);
 	} else {
-		DPRINTK(HW, INFO, "online testing starting\n");
+		e_info(hw, "online testing starting\n");
 		/* Online tests */
 		if (ixgbe_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -2134,8 +2142,8 @@
 			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
 			if (netdev->features & NETIF_F_LRO) {
 				netdev->features &= ~NETIF_F_LRO;
-				DPRINTK(PROBE, INFO, "rx-usecs set to 0, "
-					"disabling LRO/RSC\n");
+				e_info(probe, "rx-usecs set to 0, "
+				       "disabling RSC\n");
 			}
 			need_reset = true;
 		}
@@ -2208,8 +2216,11 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	bool need_reset = false;
+	int rc;
 
-	ethtool_op_set_flags(netdev, data);
+	rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
+	if (rc)
+		return rc;
 
 	/* if state changes we need to update adapter->flags and reset */
 	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
@@ -2230,10 +2241,10 @@
 				break;
 			}
 		} else if (!adapter->rx_itr_setting) {
-			netdev->features &= ~ETH_FLAG_LRO;
+			netdev->features &= ~NETIF_F_LRO;
 			if (data & ETH_FLAG_LRO)
-				DPRINTK(PROBE, INFO, "rx-usecs set to 0, "
-					"LRO/RSC cannot be enabled.\n");
+				e_info(probe, "rx-usecs set to 0, "
+				       "LRO/RSC cannot be enabled.\n");
 		}
 	}
 
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 45182ab..f6ef4cd 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -25,7 +25,6 @@
 
 *******************************************************************************/
 
-
 #include "ixgbe.h"
 #ifdef CONFIG_IXGBE_DCB
 #include "ixgbe_dcb_82599.h"
@@ -165,20 +164,20 @@
 
 	adapter = netdev_priv(netdev);
 	if (xid >= IXGBE_FCOE_DDP_MAX) {
-		DPRINTK(DRV, WARNING, "xid=0x%x out-of-range\n", xid);
+		e_warn(drv, "xid=0x%x out-of-range\n", xid);
 		return 0;
 	}
 
 	fcoe = &adapter->fcoe;
 	if (!fcoe->pool) {
-		DPRINTK(DRV, WARNING, "xid=0x%x no ddp pool for fcoe\n", xid);
+		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
 		return 0;
 	}
 
 	ddp = &fcoe->ddp[xid];
 	if (ddp->sgl) {
-		DPRINTK(DRV, ERR, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
-			xid, ddp->sgl, ddp->sgc);
+		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
+		      xid, ddp->sgl, ddp->sgc);
 		return 0;
 	}
 	ixgbe_fcoe_clear_ddp(ddp);
@@ -186,14 +185,14 @@
 	/* setup dma from scsi command sgl */
 	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
 	if (dmacount == 0) {
-		DPRINTK(DRV, ERR, "xid 0x%x DMA map error\n", xid);
+		e_err(drv, "xid 0x%x DMA map error\n", xid);
 		return 0;
 	}
 
 	/* alloc the udl from our ddp pool */
 	ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp);
 	if (!ddp->udl) {
-		DPRINTK(DRV, ERR, "failed allocated ddp context\n");
+		e_err(drv, "failed to allocate ddp context\n");
 		goto out_noddp_unmap;
 	}
 	ddp->sgl = sgl;
@@ -206,10 +205,9 @@
 		while (len) {
 			/* max number of buffers allowed in one DDP context */
 			if (j >= IXGBE_BUFFCNT_MAX) {
-				netif_err(adapter, drv, adapter->netdev,
-					  "xid=%x:%d,%d,%d:addr=%llx "
-					  "not enough descriptors\n",
-					  xid, i, j, dmacount, (u64)addr);
+				e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+				      "not enough descriptors\n",
+				      xid, i, j, dmacount, (u64)addr);
 				goto out_noddp_free;
 			}
 
@@ -387,8 +385,8 @@
 	struct fc_frame_header *fh;
 
 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
-		DPRINTK(DRV, ERR, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
-			skb_shinfo(skb)->gso_type);
+		e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+		      skb_shinfo(skb)->gso_type);
 		return -EINVAL;
 	}
 
@@ -414,7 +412,7 @@
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
 		break;
 	default:
-		DPRINTK(DRV, WARNING, "unknown sof = 0x%x\n", sof);
+		e_warn(drv, "unknown sof = 0x%x\n", sof);
 		return -EINVAL;
 	}
 
@@ -441,7 +439,7 @@
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
 		break;
 	default:
-		DPRINTK(DRV, WARNING, "unknown eof = 0x%x\n", eof);
+		e_warn(drv, "unknown eof = 0x%x\n", eof);
 		return -EINVAL;
 	}
 
@@ -517,8 +515,7 @@
 					     adapter->pdev, IXGBE_FCPTR_MAX,
 					     IXGBE_FCPTR_ALIGN, PAGE_SIZE);
 		if (!fcoe->pool)
-			DPRINTK(DRV, ERR,
-				"failed to allocated FCoE DDP pool\n");
+			e_err(drv, "failed to allocate FCoE DDP pool\n");
 
 		spin_lock_init(&fcoe->lock);
 	}
@@ -614,7 +611,7 @@
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 		goto out_enable;
 
-	DPRINTK(DRV, INFO, "Enabling FCoE offload features.\n");
+	e_info(drv, "Enabling FCoE offload features.\n");
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_stop(netdev);
 
@@ -660,7 +657,7 @@
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 		goto out_disable;
 
-	DPRINTK(DRV, INFO, "Disabling FCoE offload features.\n");
+	e_info(drv, "Disabling FCoE offload features.\n");
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_stop(netdev);
 
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 7b5d976..b235aa1 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -696,19 +696,19 @@
 		/* detected Tx unit hang */
 		union ixgbe_adv_tx_desc *tx_desc;
 		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
-			"  Tx Queue             <%d>\n"
-			"  TDH, TDT             <%x>, <%x>\n"
-			"  next_to_use          <%x>\n"
-			"  next_to_clean        <%x>\n"
-			"tx_buffer_info[next_to_clean]\n"
-			"  time_stamp           <%lx>\n"
-			"  jiffies              <%lx>\n",
-			tx_ring->queue_index,
-			IXGBE_READ_REG(hw, tx_ring->head),
-			IXGBE_READ_REG(hw, tx_ring->tail),
-			tx_ring->next_to_use, eop,
-			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+		e_err(drv, "Detected Tx Unit Hang\n"
+		      "  Tx Queue             <%d>\n"
+		      "  TDH, TDT             <%x>, <%x>\n"
+		      "  next_to_use          <%x>\n"
+		      "  next_to_clean        <%x>\n"
+		      "tx_buffer_info[next_to_clean]\n"
+		      "  time_stamp           <%lx>\n"
+		      "  jiffies              <%lx>\n",
+		      tx_ring->queue_index,
+		      IXGBE_READ_REG(hw, tx_ring->head),
+		      IXGBE_READ_REG(hw, tx_ring->tail),
+		      tx_ring->next_to_use, eop,
+		      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
 		return true;
 	}
 
@@ -812,9 +812,8 @@
 	if (adapter->detect_tx_hung) {
 		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
 			/* schedule immediate reset if we believe we hung */
-			DPRINTK(PROBE, INFO,
-			        "tx hang %d detected, resetting adapter\n",
-			        adapter->tx_timeout_count + 1);
+			e_info(probe, "tx hang %d detected, resetting "
+			       "adapter\n", adapter->tx_timeout_count + 1);
 			ixgbe_tx_timeout(adapter->netdev);
 		}
 	}
@@ -1653,10 +1652,10 @@
 				return;
 			break;
 		}
-		DPRINTK(DRV, ERR, "Network adapter has been stopped because it "
-		        "has over heated. Restart the computer. If the problem "
-		        "persists, power off the system and replace the "
-		        "adapter\n");
+		e_crit(drv, "Network adapter has been stopped because it has "
+		       "over heated. Restart the computer. If the problem "
+		       "persists, power off the system and replace the "
+		       "adapter\n");
 		/* write to clear the interrupt */
 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
 	}
@@ -1668,7 +1667,7 @@
 
 	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
-		DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
+		e_crit(probe, "Fan has stopped, replace the adapter\n");
 		/* write to clear the interrupt */
 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
 	}
@@ -2154,9 +2153,8 @@
 		                  handler, 0, adapter->name[vector],
 		                  adapter->q_vector[vector]);
 		if (err) {
-			DPRINTK(PROBE, ERR,
-			        "request_irq failed for MSIX interrupt "
-			        "Error: %d\n", err);
+			e_err(probe, "request_irq failed for MSIX interrupt "
+			      "Error: %d\n", err);
 			goto free_queue_irqs;
 		}
 	}
@@ -2165,8 +2163,7 @@
 	err = request_irq(adapter->msix_entries[vector].vector,
 	                  ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
 	if (err) {
-		DPRINTK(PROBE, ERR,
-			"request_irq for msix_lsc failed: %d\n", err);
+		e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
 		goto free_queue_irqs;
 	}
 
@@ -2352,7 +2349,7 @@
 	}
 
 	if (err)
-		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
+		e_err(probe, "request_irq failed, Error %d\n", err);
 
 	return err;
 }
@@ -2423,7 +2420,7 @@
 	map_vector_to_rxq(adapter, 0, 0);
 	map_vector_to_txq(adapter, 0, 0);
 
-	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
+	e_info(hw, "Legacy interrupt IVAR setup done\n");
 }
 
 /**
@@ -2995,6 +2992,48 @@
 }
 
 /**
+ * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *                0 on no addresses written
+ *                X on writing X addresses to the RAR table
+ **/
+static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	unsigned int vfn = adapter->num_vfs;
+	unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
+	int count = 0;
+
+	/* return ENOMEM indicating insufficient memory for addresses */
+	if (netdev_uc_count(netdev) > rar_entries)
+		return -ENOMEM;
+
+	if (!netdev_uc_empty(netdev) && rar_entries) {
+		struct netdev_hw_addr *ha;
+		/* return error if we do not support writing to RAR table */
+		if (!hw->mac.ops.set_rar)
+			return -ENOMEM;
+
+		netdev_for_each_uc_addr(ha, netdev) {
+			if (!rar_entries)
+				break;
+			hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
+					    vfn, IXGBE_RAH_AV);
+			count++;
+		}
+	}
+	/* write the addresses in reverse order to avoid write combining */
+	for (; rar_entries > 0 ; rar_entries--)
+		hw->mac.ops.clear_rar(hw, rar_entries);
+
+	return count;
+}
+
+/**
  * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
  * @netdev: network interface device structure
  *
@@ -3007,38 +3046,58 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 fctrl;
+	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+	int count;
 
 	/* Check for Promiscuous and All Multicast modes */
 
 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 
+	/* clear the bits we are changing the status of */
+	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+
 	if (netdev->flags & IFF_PROMISC) {
 		hw->addr_ctrl.user_set_promisc = true;
 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
 		/* don't hardware filter vlans in promisc mode */
 		ixgbe_vlan_filter_disable(adapter);
 	} else {
 		if (netdev->flags & IFF_ALLMULTI) {
 			fctrl |= IXGBE_FCTRL_MPE;
-			fctrl &= ~IXGBE_FCTRL_UPE;
-		} else if (!hw->addr_ctrl.uc_set_promisc) {
-			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+			vmolr |= IXGBE_VMOLR_MPE;
+		} else {
+			/*
+			 * Write addresses to the MTA, if the attempt fails
+			 * then we should just turn on promiscuous mode so
+			 * that we can at least receive multicast traffic
+			 */
+			hw->mac.ops.update_mc_addr_list(hw, netdev);
+			vmolr |= IXGBE_VMOLR_ROMPE;
 		}
 		ixgbe_vlan_filter_enable(adapter);
 		hw->addr_ctrl.user_set_promisc = false;
+		/*
+		 * Write addresses to available RAR registers, if there is not
+		 * sufficient space to store all the addresses then enable
+		 * unicast promiscuous mode
+		 */
+		count = ixgbe_write_uc_addr_list(netdev);
+		if (count < 0) {
+			fctrl |= IXGBE_FCTRL_UPE;
+			vmolr |= IXGBE_VMOLR_ROPE;
+		}
+	}
+
+	if (adapter->num_vfs) {
+		ixgbe_restore_vf_multicasts(adapter);
+		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
+			   IXGBE_VMOLR_ROPE);
+		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-
-	/* reprogram secondary unicast list */
-	hw->mac.ops.update_uc_addr_list(hw, netdev);
-
-	/* reprogram multicast list */
-	hw->mac.ops.update_mc_addr_list(hw, netdev);
-
-	if (adapter->num_vfs)
-		ixgbe_restore_vf_multicasts(adapter);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -3257,8 +3316,8 @@
 			msleep(1);
 	}
 	if (k >= IXGBE_MAX_RX_DESC_POLL) {
-		DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
-		        "not set within the polling period\n", rxr);
+		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
+		      "the polling period\n", rxr);
 	}
 	ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
 	                      (adapter->rx_ring[rxr]->count - 1));
@@ -3387,8 +3446,7 @@
 			} while (--wait_loop &&
 			         !(txdctl & IXGBE_TXDCTL_ENABLE));
 			if (!wait_loop)
-				DPRINTK(DRV, ERR, "Could not enable "
-				        "Tx Queue %d\n", j);
+				e_err(drv, "Could not enable Tx Queue %d\n", j);
 		}
 	}
 
@@ -3436,8 +3494,7 @@
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
 		if (esdp & IXGBE_ESDP_SDP1)
-			DPRINTK(DRV, CRIT,
-				"Fan has stopped, replace the adapter\n");
+			e_crit(drv, "Fan has stopped, replace the adapter\n");
 	}
 
 	/*
@@ -3466,7 +3523,7 @@
 	} else {
 		err = ixgbe_non_sfp_link_config(hw);
 		if (err)
-			DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
+			e_err(probe, "link_config FAILED %d\n", err);
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3527,19 +3584,19 @@
 	case IXGBE_ERR_SFP_NOT_PRESENT:
 		break;
 	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
-		dev_err(&adapter->pdev->dev, "master disable timed out\n");
+		e_dev_err("master disable timed out\n");
 		break;
 	case IXGBE_ERR_EEPROM_VERSION:
 		/* We are running on a pre-production device, log a warning */
-		dev_warn(&adapter->pdev->dev, "This device is a pre-production "
-		         "adapter/LOM.  Please be aware there may be issues "
-		         "associated with your hardware.  If you are "
-		         "experiencing problems please contact your Intel or "
-		         "hardware representative who provided you with this "
-		         "hardware.\n");
+		e_dev_warn("This device is a pre-production adapter/LOM. "
+			   "Please be aware there may be issues associated with "
+			   "your hardware.  If you are experiencing problems "
+			   "please contact your Intel or hardware "
+			   "representative who provided you with this "
+			   "hardware.\n");
 		break;
 	default:
-		dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
+		e_dev_err("Hardware Error: %d\n", err);
 	}
 
 	/* reprogram the RAR[0] in case user changed it. */
@@ -3920,12 +3977,12 @@
 		adapter->num_tx_queues = 1;
 #ifdef CONFIG_IXGBE_DCB
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
+			e_info(probe, "FCoE enabled with DCB\n");
 			ixgbe_set_dcb_queues(adapter);
 		}
 #endif
 		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-			DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
+			e_info(probe, "FCoE enabled with RSS\n");
 			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
 			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
 				ixgbe_set_fdir_queues(adapter);
@@ -4038,7 +4095,8 @@
 		 * This just means we'll go with either a single MSI
 		 * vector or fall back to legacy interrupts.
 		 */
-		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
+		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+			     "Unable to allocate MSI-X interrupts\n");
 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 		kfree(adapter->msix_entries);
 		adapter->msix_entries = NULL;
@@ -4435,8 +4493,9 @@
 	if (!err) {
 		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
 	} else {
-		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
-		        "falling back to legacy.  Error: %d\n", err);
+		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+			     "Unable to allocate MSI interrupt, "
+			     "falling back to legacy.  Error: %d\n", err);
 		/* reset err */
 		err = 0;
 	}
@@ -4557,27 +4616,25 @@
 
 	err = ixgbe_set_interrupt_capability(adapter);
 	if (err) {
-		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
+		e_dev_err("Unable to setup interrupt capabilities\n");
 		goto err_set_interrupt;
 	}
 
 	err = ixgbe_alloc_q_vectors(adapter);
 	if (err) {
-		DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
-		        "vectors\n");
+		e_dev_err("Unable to allocate memory for queue vectors\n");
 		goto err_alloc_q_vectors;
 	}
 
 	err = ixgbe_alloc_queues(adapter);
 	if (err) {
-		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		e_dev_err("Unable to allocate memory for queues\n");
 		goto err_alloc_queues;
 	}
 
-	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
-	        "Tx Queue count = %u\n",
-	        (adapter->num_rx_queues > 1) ? "Enabled" :
-	        "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
+		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+		   adapter->num_rx_queues, adapter->num_tx_queues);
 
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
@@ -4648,15 +4705,13 @@
 			goto reschedule;
 		ret = hw->phy.ops.reset(hw);
 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-			dev_err(&adapter->pdev->dev, "failed to initialize "
-				"because an unsupported SFP+ module type "
-				"was detected.\n"
-				"Reload the driver after installing a "
-				"supported module.\n");
+			e_dev_err("failed to initialize because an unsupported "
+				  "SFP+ module type was detected.\n");
+			e_dev_err("Reload the driver after installing a "
+				  "supported module.\n");
 			unregister_netdev(adapter->netdev);
 		} else {
-			DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
-			        hw->phy.sfp_type);
+			e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
 		}
 		/* don't need this routine any more */
 		clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
@@ -4783,7 +4838,7 @@
 
 	/* initialize eeprom parameters */
 	if (ixgbe_init_eeprom_params_generic(hw)) {
-		dev_err(&pdev->dev, "EEPROM initialization failed\n");
+		e_dev_err("EEPROM initialization failed\n");
 		return -EIO;
 	}
 
@@ -4836,8 +4891,7 @@
 err:
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
-	DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
-	                    "descriptor ring\n");
+	e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
 	return -ENOMEM;
 }
 
@@ -4859,7 +4913,7 @@
 		err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
 		if (!err)
 			continue;
-		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
+		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
 		break;
 	}
 
@@ -4884,8 +4938,8 @@
 	if (!rx_ring->rx_buffer_info)
 		rx_ring->rx_buffer_info = vmalloc(size);
 	if (!rx_ring->rx_buffer_info) {
-		DPRINTK(PROBE, ERR,
-		        "vmalloc allocation failed for the rx desc ring\n");
+		e_err(probe, "vmalloc allocation failed for the Rx "
+		      "descriptor ring\n");
 		goto alloc_failed;
 	}
 	memset(rx_ring->rx_buffer_info, 0, size);
@@ -4898,8 +4952,8 @@
 					   &rx_ring->dma, GFP_KERNEL);
 
 	if (!rx_ring->desc) {
-		DPRINTK(PROBE, ERR,
-		        "Memory allocation failed for the rx desc ring\n");
+		e_err(probe, "Memory allocation failed for the Rx "
+		      "descriptor ring\n");
 		vfree(rx_ring->rx_buffer_info);
 		goto alloc_failed;
 	}
@@ -4932,7 +4986,7 @@
 		err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
 		if (!err)
 			continue;
-		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
+		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
 		break;
 	}
 
@@ -5031,8 +5085,7 @@
 	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
 		return -EINVAL;
 
-	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
-	        netdev->mtu, new_mtu);
+	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
 	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;
 
@@ -5145,8 +5198,7 @@
 
 	err = pci_enable_device_mem(pdev);
 	if (err) {
-		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
-				"suspend\n");
+		e_dev_err("Cannot enable PCI device from suspend\n");
 		return err;
 	}
 	pci_set_master(pdev);
@@ -5155,8 +5207,7 @@
 
 	err = ixgbe_init_interrupt_scheme(adapter);
 	if (err) {
-		printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
-		                "device\n");
+		e_dev_err("Cannot initialize interrupts for device\n");
 		return err;
 	}
 
@@ -5517,10 +5568,10 @@
 	err = hw->phy.ops.identify_sfp(hw);
 
 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-		dev_err(&adapter->pdev->dev, "failed to initialize because "
-			"an unsupported SFP+ module type was detected.\n"
-			"Reload the driver after installing a supported "
-			"module.\n");
+		e_dev_err("failed to initialize because an unsupported SFP+ "
+			  "module type was detected.\n");
+		e_dev_err("Reload the driver after installing a supported "
+			  "module.\n");
 		unregister_netdev(adapter->netdev);
 		return;
 	}
@@ -5549,8 +5600,8 @@
 			set_bit(__IXGBE_FDIR_INIT_DONE,
 			        &(adapter->tx_ring[i]->reinit_state));
 	} else {
-		DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
-			"ignored adding FDIR ATR filters\n");
+		e_err(probe, "failed to finish FDIR re-initialization, "
+		      "ignored adding FDIR ATR filters\n");
 	}
 	/* Done FDIR Re-initialization, enable transmits */
 	netif_tx_start_all_queues(adapter->netdev);
@@ -5621,16 +5672,14 @@
 				flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
 			}
 
-			printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
-			       "Flow Control: %s\n",
-			       netdev->name,
+			e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
 			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
-			        "10 Gbps" :
-			        (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
-			         "1 Gbps" : "unknown speed")),
+			       "10 Gbps" :
+			       (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
+			       "1 Gbps" : "unknown speed")),
 			       ((flow_rx && flow_tx) ? "RX/TX" :
-			        (flow_rx ? "RX" :
-			        (flow_tx ? "TX" : "None"))));
+			       (flow_rx ? "RX" :
+			       (flow_tx ? "TX" : "None"))));
 
 			netif_carrier_on(netdev);
 		} else {
@@ -5641,8 +5690,7 @@
 		adapter->link_up = false;
 		adapter->link_speed = 0;
 		if (netif_carrier_ok(netdev)) {
-			printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
-			       netdev->name);
+			e_info(drv, "NIC Link is Down\n");
 			netif_carrier_off(netdev);
 		}
 	}
@@ -5818,9 +5866,9 @@
 				break;
 			default:
 				if (unlikely(net_ratelimit())) {
-					DPRINTK(PROBE, WARNING,
-					 "partial checksum but proto=%x!\n",
-					 skb->protocol);
+					e_warn(probe, "partial checksum "
+					       "but proto=%x!\n",
+					       skb->protocol);
 				}
 				break;
 			}
@@ -5931,7 +5979,7 @@
 	return count;
 
 dma_error:
-	dev_err(&pdev->dev, "TX DMA map failed\n");
+	e_dev_err("TX DMA map failed\n");
 
 	/* clear timestamp and dma mappings for failed tx_buffer_info map */
 	tx_buffer_info->dma = 0;
@@ -6430,8 +6478,7 @@
 	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
 	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
 	if (err) {
-		DPRINTK(PROBE, ERR,
-			"Failed to enable PCI sriov: %d\n", err);
+		e_err(probe, "Failed to enable PCI sriov: %d\n", err);
 		goto err_novfs;
 	}
 	/* If call to enable VFs succeeded then allocate memory
@@ -6455,9 +6502,8 @@
 	}
 
 	/* Oh oh */
-	DPRINTK(PROBE, ERR,
-		"Unable to allocate memory for VF "
-		"Data Storage - SRIOV disabled\n");
+	e_err(probe, "Unable to allocate memory for VF Data Storage - "
+	      "SRIOV disabled\n");
 	pci_disable_sriov(adapter->pdev);
 
 err_novfs:
@@ -6505,8 +6551,8 @@
 			err = dma_set_coherent_mask(&pdev->dev,
 						    DMA_BIT_MASK(32));
 			if (err) {
-				dev_err(&pdev->dev, "No usable DMA "
-				        "configuration, aborting\n");
+				e_dev_err("No usable DMA configuration, "
+					  "aborting\n");
 				goto err_dma;
 			}
 		}
@@ -6516,8 +6562,7 @@
 	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
 	                                   IORESOURCE_MEM), ixgbe_driver_name);
 	if (err) {
-		dev_err(&pdev->dev,
-		        "pci_request_selected_regions failed 0x%x\n", err);
+		e_dev_err("pci_request_selected_regions failed 0x%x\n", err);
 		goto err_pci_reg;
 	}
 
@@ -6628,8 +6673,7 @@
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
 		if (esdp & IXGBE_ESDP_SDP1)
-			DPRINTK(PROBE, CRIT,
-				"Fan has stopped, replace the adapter\n");
+			e_crit(probe, "Fan has stopped, replace the adapter\n");
 	}
 
 	/* reset_hw fills in the perm_addr as well */
@@ -6648,13 +6692,13 @@
 			  round_jiffies(jiffies + (2 * HZ)));
 		err = 0;
 	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-		dev_err(&adapter->pdev->dev, "failed to initialize because "
-			"an unsupported SFP+ module type was detected.\n"
-			"Reload the driver after installing a supported "
-			"module.\n");
+		e_dev_err("failed to initialize because an unsupported SFP+ "
+			  "module type was detected.\n");
+		e_dev_err("Reload the driver after installing a supported "
+			  "module.\n");
 		goto err_sw_init;
 	} else if (err) {
-		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
+		e_dev_err("HW Init failed: %d\n", err);
 		goto err_sw_init;
 	}
 
@@ -6707,7 +6751,7 @@
 
 	/* make sure the EEPROM is good */
 	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
-		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
+		e_dev_err("The EEPROM Checksum Is Not Valid\n");
 		err = -EIO;
 		goto err_eeprom;
 	}
@@ -6716,7 +6760,7 @@
 	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
 
 	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
-		dev_err(&pdev->dev, "invalid MAC address\n");
+		e_dev_err("invalid MAC address\n");
 		err = -EIO;
 		goto err_eeprom;
 	}
@@ -6751,7 +6795,7 @@
 	hw->mac.ops.get_bus_info(hw);
 
 	/* print bus type/speed/width info */
-	dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
+	e_dev_info("(PCI Express:%s:%s) %pM\n",
 	        ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
 	         (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
 	        ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
@@ -6761,20 +6805,20 @@
 	        netdev->dev_addr);
 	ixgbe_read_pba_num_generic(hw, &part_num);
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-		dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
-		         hw->mac.type, hw->phy.type, hw->phy.sfp_type,
-		         (part_num >> 8), (part_num & 0xff));
+		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
+			   "PBA No: %06x-%03x\n",
+			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+			   (part_num >> 8), (part_num & 0xff));
 	else
-		dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
-		         hw->mac.type, hw->phy.type,
-		         (part_num >> 8), (part_num & 0xff));
+		e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
+			   hw->mac.type, hw->phy.type,
+			   (part_num >> 8), (part_num & 0xff));
 
 	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
-		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
-		         "this card is not sufficient for optimal "
-		         "performance.\n");
-		dev_warn(&pdev->dev, "For optimal performance a x8 "
-		         "PCI-Express slot is required.\n");
+		e_dev_warn("PCI-Express bandwidth available for this card is "
+			   "not sufficient for optimal performance.\n");
+		e_dev_warn("For optimal performance a x8 PCI-Express slot "
+			   "is required.\n");
 	}
 
 	/* save off EEPROM version number */
@@ -6785,12 +6829,12 @@
 
 	if (err == IXGBE_ERR_EEPROM_VERSION) {
 		/* We are running on a pre-production device, log a warning */
-		dev_warn(&pdev->dev, "This device is a pre-production "
-		         "adapter/LOM.  Please be aware there may be issues "
-		         "associated with your hardware.  If you are "
-		         "experiencing problems please contact your Intel or "
-		         "hardware representative who provided you with this "
-		         "hardware.\n");
+		e_dev_warn("This device is a pre-production adapter/LOM. "
+			   "Please be aware there may be issues associated "
+			   "with your hardware.  If you are experiencing "
+			   "problems please contact your Intel or hardware "
+			   "representative who provided you with this "
+			   "hardware.\n");
 	}
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
@@ -6813,8 +6857,7 @@
 	}
 #endif
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
-		DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
-			adapter->num_vfs);
+		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
 		for (i = 0; i < adapter->num_vfs; i++)
 			ixgbe_vf_configuration(pdev, (i | 0x10000000));
 	}
@@ -6822,7 +6865,7 @@
 	/* add san mac addr to netdev */
 	ixgbe_add_sanmac_netdev(netdev);
 
-	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
+	e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
 	cards_found++;
 	return 0;
 
@@ -6912,7 +6955,7 @@
 	pci_release_selected_regions(pdev, pci_select_bars(pdev,
 	                             IORESOURCE_MEM));
 
-	DPRINTK(PROBE, INFO, "complete\n");
+	e_dev_info("complete\n");
 
 	free_netdev(netdev);
 
@@ -6962,8 +7005,7 @@
 	int err;
 
 	if (pci_enable_device_mem(pdev)) {
-		DPRINTK(PROBE, ERR,
-		        "Cannot re-enable PCI device after reset.\n");
+		e_err(probe, "Cannot re-enable PCI device after reset.\n");
 		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
 		pci_set_master(pdev);
@@ -6979,8 +7021,8 @@
 
 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
 	if (err) {
-		dev_err(&pdev->dev,
-		  "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
+		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
+			  "failed 0x%0x\n", err);
 		/* non-fatal, continue */
 	}
 
@@ -7001,7 +7043,7 @@
 
 	if (netif_running(netdev)) {
 		if (ixgbe_up(adapter)) {
-			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
+			e_info(probe, "ixgbe_up failed after reset\n");
 			return;
 		}
 	}
@@ -7037,10 +7079,9 @@
 static int __init ixgbe_init_module(void)
 {
 	int ret;
-	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
-	       ixgbe_driver_string, ixgbe_driver_version);
-
-	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
+	pr_info("%s - version %s\n", ixgbe_driver_string,
+		   ixgbe_driver_version);
+	pr_info("%s\n", ixgbe_copyright);
 
 #ifdef CONFIG_IXGBE_DCA
 	dca_register_notify(&dca_notifier);
@@ -7079,18 +7120,17 @@
 }
 
 #endif /* CONFIG_IXGBE_DCA */
-#ifdef DEBUG
+
 /**
- * ixgbe_get_hw_dev_name - return device name string
+ * ixgbe_get_hw_dev - return device
  * used by hardware layer to print debugging information
  **/
-char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
 {
 	struct ixgbe_adapter *adapter = hw->back;
-	return adapter->netdev->name;
+	return adapter->netdev;
 }
 
-#endif
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 48325a5..6c0d42e 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -577,6 +577,8 @@
 		 * 6    SFP_SR/LR_CORE1 - 82599-specific
 		 * 7    SFP_act_lmt_DA_CORE0 - 82599-specific
 		 * 8    SFP_act_lmt_DA_CORE1 - 82599-specific
+		 * 9    SFP_1g_cu_CORE0 - 82599-specific
+		 * 10   SFP_1g_cu_CORE1 - 82599-specific
 		 */
 		if (hw->mac.type == ixgbe_mac_82598EB) {
 			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -625,6 +627,13 @@
 				else
 					hw->phy.sfp_type =
 					              ixgbe_sfp_type_srlr_core1;
+			else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+				if (hw->bus.lan_id == 0)
+					hw->phy.sfp_type =
+						ixgbe_sfp_type_1g_cu_core0;
+				else
+					hw->phy.sfp_type =
+						ixgbe_sfp_type_1g_cu_core1;
 			else
 				hw->phy.sfp_type = ixgbe_sfp_type_unknown;
 		}
@@ -696,8 +705,10 @@
 			goto out;
 		}
 
-		/* 1G SFP modules are not supported */
-		if (comp_codes_10g == 0) {
+		/* Verify supported 1G SFP modules */
+		if (comp_codes_10g == 0 &&
+		    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
 			hw->phy.type = ixgbe_phy_sfp_unsupported;
 			status = IXGBE_ERR_SFP_NOT_SUPPORTED;
 			goto out;
@@ -711,7 +722,9 @@
 
 		/* This is guaranteed to be 82599, no need to check for NULL */
 		hw->mac.ops.get_device_caps(hw, &enforce_sfp);
-		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+		    !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
+		      (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
 			/* Make sure we're a supported PHY type */
 			if (hw->phy.type == ixgbe_phy_sfp_intel) {
 				status = 0;
@@ -742,6 +755,7 @@
                                         u16 *data_offset)
 {
 	u16 sfp_id;
+	u16 sfp_type = hw->phy.sfp_type;
 
 	if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -753,6 +767,17 @@
 	    (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
 
+	/*
+	 * Limiting active cables and 1G Phys must be initialized as
+	 * SR modules
+	 */
+	if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
+	    sfp_type == ixgbe_sfp_type_1g_cu_core0)
+		sfp_type = ixgbe_sfp_type_srlr_core0;
+	else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
+	         sfp_type == ixgbe_sfp_type_1g_cu_core1)
+		sfp_type = ixgbe_sfp_type_srlr_core1;
+
 	/* Read offset to PHY init contents */
 	hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
 
@@ -769,7 +794,7 @@
 	hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
 
 	while (sfp_id != IXGBE_PHY_INIT_END_NL) {
-		if (sfp_id == hw->phy.sfp_type) {
+		if (sfp_id == sfp_type) {
 			(*list_offset)++;
 			hw->eeprom.ops.read(hw, *list_offset, data_offset);
 			if ((!*data_offset) || (*data_offset == 0xFFFF)) {
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index ef4ba83..fb3898f 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -48,6 +48,7 @@
 #define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING    0x4
 #define IXGBE_SFF_1GBASESX_CAPABLE           0x1
 #define IXGBE_SFF_1GBASELX_CAPABLE           0x2
+#define IXGBE_SFF_1GBASET_CAPABLE            0x8
 #define IXGBE_SFF_10GBASESR_CAPABLE          0x10
 #define IXGBE_SFF_10GBASELR_CAPABLE          0x20
 #define IXGBE_I2C_EEPROM_READ_MASK           0x100
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index f6cee94..49661a1 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -25,7 +25,6 @@
 
 *******************************************************************************/
 
-
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -138,6 +137,7 @@
 inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 
 	/* reset offloads to defaults */
 	if (adapter->vfinfo[vf].pf_vlan) {
@@ -159,26 +159,17 @@
 	/* Flush and reset the mta with the new values */
 	ixgbe_set_rx_mode(adapter->netdev);
 
-	if (adapter->vfinfo[vf].rar > 0) {
-		adapter->hw.mac.ops.clear_rar(&adapter->hw,
-		                              adapter->vfinfo[vf].rar);
-		adapter->vfinfo[vf].rar = -1;
-	}
+	hw->mac.ops.clear_rar(hw, rar_entry);
 }
 
 int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
                           int vf, unsigned char *mac_addr)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-
-	adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
-	                                              vf, IXGBE_RAH_AV);
-	if (adapter->vfinfo[vf].rar < 0) {
-		DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
-		return -1;
-	}
+	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 
 	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+	hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
 
 	return 0;
 }
@@ -194,11 +185,8 @@
 
 	if (enable) {
 		random_ether_addr(vf_mac_addr);
-		DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
-		       "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
-		       vfn,
-		       vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
-		       vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
+		e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
+		       vfn, vf_mac_addr);
 		/*
 		 * Store away the VF "permananet" MAC address, it will ask
 		 * for it later.
@@ -243,7 +231,7 @@
 	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
 
 	if (retval)
-		printk(KERN_ERR "Error receiving message from VF\n");
+		pr_err("Error receiving message from VF\n");
 
 	/* this is a message we already processed, do nothing */
 	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
@@ -257,7 +245,7 @@
 	if (msgbuf[0] == IXGBE_VF_RESET) {
 		unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
 		u8 *addr = (u8 *)(&msgbuf[1]);
-		DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
+		e_info(probe, "VF Reset msg received from vf %d\n", vf);
 		adapter->vfinfo[vf].clear_to_send = false;
 		ixgbe_vf_reset_msg(adapter, vf);
 		adapter->vfinfo[vf].clear_to_send = true;
@@ -310,7 +298,7 @@
 		retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
 		break;
 	default:
-		DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
+		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
 		retval = IXGBE_ERR_MBX;
 		break;
 	}
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index cdd1998..9587d97 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2214,6 +2214,8 @@
 	ixgbe_sfp_type_srlr_core1 = 6,
 	ixgbe_sfp_type_da_act_lmt_core0 = 7,
 	ixgbe_sfp_type_da_act_lmt_core1 = 8,
+	ixgbe_sfp_type_1g_cu_core0 = 9,
+	ixgbe_sfp_type_1g_cu_core1 = 10,
 	ixgbe_sfp_type_not_present = 0xFFFE,
 	ixgbe_sfp_type_unknown = 0xFFFF
 };
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index a16cff7..73f1e75 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -3411,6 +3411,7 @@
 	netdev->features |= NETIF_F_IPV6_CSUM;
 	netdev->features |= NETIF_F_TSO;
 	netdev->features |= NETIF_F_TSO6;
+	netdev->features |= NETIF_F_GRO;
 	netdev->vlan_features |= NETIF_F_TSO;
 	netdev->vlan_features |= NETIF_F_TSO6;
 	netdev->vlan_features |= NETIF_F_IP_CSUM;
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index f852ab3..d47bba9 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -119,7 +119,6 @@
 	int		irq;
 	struct tasklet_struct	tasklet;
 	spinlock_t	lock; /* spinlock to be interrupt safe */
-	struct platform_device *pdev;
 };
 
 static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
@@ -331,8 +330,7 @@
 	u32 *ptr = (u32 *)skb->data;
 	u32 ctrl;
 
-	dev_dbg(&adapter->pdev->dev,
-		"%s: len %u head %p data %p tail %p end %p\n",
+	netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n",
 		__func__, skb->len, skb->head, skb->data,
 		skb_tail_pointer(skb), skb_end_pointer(skb));
 
@@ -369,15 +367,13 @@
 
 	status &= 0xffff;
 
-	dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n",
-		__func__, status);
+	netdev_dbg(netdev, "%s - rx_data: status: %x\n", __func__, status);
 
 	/* check the status */
 	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
 		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
 
-		dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
-			__func__, len);
+		netdev_dbg(netdev, "%s, got packet, len: %d\n", __func__, len);
 		if (skb) {
 			u32 *data;
 
@@ -400,7 +396,7 @@
 		} else
 			netdev->stats.rx_dropped++;
 	} else {
-		dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status);
+		netdev_dbg(netdev, "RX error, status: %x\n", status);
 		netdev->stats.rx_errors++;
 		if (status & RXSR_TOO_LONG)
 			netdev->stats.rx_length_errors++;
@@ -423,8 +419,7 @@
 void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
 {
 	u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
-	dev_dbg(&adapter->pdev->dev, "%s Entry - rx_data: %d\n",
-		__func__, rx_data);
+	netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
 	while (rx_data) {
 		ks8842_rx_frame(netdev, adapter);
 		rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
@@ -434,7 +429,7 @@
 void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
 {
 	u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
-	dev_dbg(&adapter->pdev->dev, "%s - entry, sr: %x\n", __func__, sr);
+	netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
 	netdev->stats.tx_packets++;
 	if (netif_queue_stopped(netdev))
 		netif_wake_queue(netdev);
@@ -443,7 +438,7 @@
 void ks8842_handle_rx_overrun(struct net_device *netdev,
 	struct ks8842_adapter *adapter)
 {
-	dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+	netdev_dbg(netdev, "%s: entry\n", __func__);
 	netdev->stats.rx_errors++;
 	netdev->stats.rx_fifo_errors++;
 }
@@ -462,7 +457,7 @@
 	spin_unlock_irqrestore(&adapter->lock, flags);
 
 	isr = ks8842_read16(adapter, 18, REG_ISR);
-	dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
+	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
 
 	/* Ack */
 	ks8842_write16(adapter, 18, isr, REG_ISR);
@@ -501,13 +496,14 @@
 
 static irqreturn_t ks8842_irq(int irq, void *devid)
 {
-	struct ks8842_adapter *adapter = devid;
+	struct net_device *netdev = devid;
+	struct ks8842_adapter *adapter = netdev_priv(netdev);
 	u16 isr;
 	u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
 	irqreturn_t ret = IRQ_NONE;
 
 	isr = ks8842_read16(adapter, 18, REG_ISR);
-	dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
+	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
 
 	if (isr) {
 		/* disable IRQ */
@@ -532,7 +528,7 @@
 	struct ks8842_adapter *adapter = netdev_priv(netdev);
 	int err;
 
-	dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
+	netdev_dbg(netdev, "%s - entry\n", __func__);
 
 	/* reset the HW */
 	ks8842_reset_hw(adapter);
@@ -542,7 +538,7 @@
 	ks8842_update_link_status(netdev, adapter);
 
 	err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
-		adapter);
+		netdev);
 	if (err) {
 		pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
 		return err;
@@ -555,10 +551,10 @@
 {
 	struct ks8842_adapter *adapter = netdev_priv(netdev);
 
-	dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
+	netdev_dbg(netdev, "%s - entry\n", __func__);
 
 	/* free the irq */
-	free_irq(adapter->irq, adapter);
+	free_irq(adapter->irq, netdev);
 
 	/* disable the switch */
 	ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);
@@ -572,7 +568,7 @@
 	int ret;
 	struct ks8842_adapter *adapter = netdev_priv(netdev);
 
-	dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+	netdev_dbg(netdev, "%s: entry\n", __func__);
 
 	ret = ks8842_tx_frame(skb, netdev);
 
@@ -588,7 +584,7 @@
 	struct sockaddr *addr = p;
 	char *mac = (u8 *)addr->sa_data;
 
-	dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+	netdev_dbg(netdev, "%s: entry\n", __func__);
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
@@ -604,7 +600,7 @@
 	struct ks8842_adapter *adapter = netdev_priv(netdev);
 	unsigned long flags;
 
-	dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+	netdev_dbg(netdev, "%s: entry\n", __func__);
 
 	spin_lock_irqsave(&adapter->lock, flags);
 	/* disable interrupts */
@@ -663,8 +659,6 @@
 		goto err_get_irq;
 	}
 
-	adapter->pdev = pdev;
-
 	tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
 	spin_lock_init(&adapter->lock);
 
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 7805bbf..b3c010b 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -1457,7 +1457,6 @@
  * @adapter:		Adapter device information.
  * @port:		Port information.
  * @monitor_time_info:	Timer to monitor ports.
- * @stats:		Network statistics.
  * @proc_sem:		Semaphore for proc accessing.
  * @id:			Device ID.
  * @mii_if:		MII interface information.
@@ -1471,7 +1470,6 @@
 	struct dev_info *adapter;
 	struct ksz_port port;
 	struct ksz_timer_info monitor_timer_info;
-	struct net_device_stats stats;
 
 	struct semaphore proc_sem;
 	int id;
@@ -4751,8 +4749,8 @@
 	hw_send_pkt(hw);
 
 	/* Update transmit statistics. */
-	priv->stats.tx_packets++;
-	priv->stats.tx_bytes += len;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += len;
 }
 
 /**
@@ -5030,7 +5028,7 @@
 		/* skb->data != skb->head */
 		skb = dev_alloc_skb(packet_len + 2);
 		if (!skb) {
-			priv->stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 			return -ENOMEM;
 		}
 
@@ -5050,8 +5048,8 @@
 		csum_verified(skb);
 
 	/* Update receive statistics. */
-	priv->stats.rx_packets++;
-	priv->stats.rx_bytes += packet_len;
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += packet_len;
 
 	/* Notify upper layer for received packet. */
 	rx_status = netif_rx(skb);
@@ -5291,7 +5289,7 @@
 		}
 
 		if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
-			priv->stats.rx_fifo_errors++;
+			dev->stats.rx_fifo_errors++;
 			hw_resume_rx(hw);
 		}
 
@@ -5522,7 +5520,7 @@
 	priv->promiscuous = 0;
 
 	/* Reset device statistics. */
-	memset(&priv->stats, 0, sizeof(struct net_device_stats));
+	memset(&dev->stats, 0, sizeof(struct net_device_stats));
 	memset((void *) port->counter, 0,
 		(sizeof(u64) * OID_COUNTER_LAST));
 
@@ -5622,42 +5620,42 @@
 	int i;
 	int p;
 
-	priv->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
-	priv->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
+	dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
+	dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
 
 	/* Reset to zero to add count later. */
-	priv->stats.multicast = 0;
-	priv->stats.collisions = 0;
-	priv->stats.rx_length_errors = 0;
-	priv->stats.rx_crc_errors = 0;
-	priv->stats.rx_frame_errors = 0;
-	priv->stats.tx_window_errors = 0;
+	dev->stats.multicast = 0;
+	dev->stats.collisions = 0;
+	dev->stats.rx_length_errors = 0;
+	dev->stats.rx_crc_errors = 0;
+	dev->stats.rx_frame_errors = 0;
+	dev->stats.tx_window_errors = 0;
 
 	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
 		mib = &hw->port_mib[p];
 
-		priv->stats.multicast += (unsigned long)
+		dev->stats.multicast += (unsigned long)
 			mib->counter[MIB_COUNTER_RX_MULTICAST];
 
-		priv->stats.collisions += (unsigned long)
+		dev->stats.collisions += (unsigned long)
 			mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
 
-		priv->stats.rx_length_errors += (unsigned long)(
+		dev->stats.rx_length_errors += (unsigned long)(
 			mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
 			mib->counter[MIB_COUNTER_RX_FRAGMENT] +
 			mib->counter[MIB_COUNTER_RX_OVERSIZE] +
 			mib->counter[MIB_COUNTER_RX_JABBER]);
-		priv->stats.rx_crc_errors += (unsigned long)
+		dev->stats.rx_crc_errors += (unsigned long)
 			mib->counter[MIB_COUNTER_RX_CRC_ERR];
-		priv->stats.rx_frame_errors += (unsigned long)(
+		dev->stats.rx_frame_errors += (unsigned long)(
 			mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
 			mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
 
-		priv->stats.tx_window_errors += (unsigned long)
+		dev->stats.tx_window_errors += (unsigned long)
 			mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
 	}
 
-	return &priv->stats;
+	return &dev->stats;
 }
 
 /**
@@ -5718,7 +5716,7 @@
 		 * from the bridge.
 		 */
 		if ((hw->features & STP_SUPPORT) && !promiscuous &&
-				dev->br_port) {
+		    (dev->priv_flags & IFF_BRIDGE_PORT)) {
 			struct ksz_switch *sw = hw->ksz_switch;
 			int port = priv->port.first_port;
 
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 21f8ada..f06296b 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -248,7 +248,6 @@
 	int cur_rx, cur_tx;			/* The next free ring entry */
 	int dirty_rx, dirty_tx;		/* The ring entries to be free()ed. */
 	int dma;
-	struct net_device_stats stats;
 	unsigned char chip_version;	/* See lance_chip_type. */
 	spinlock_t devlock;
 };
@@ -925,7 +924,7 @@
 	printk ("%s: transmit timed out, status %4.4x, resetting.\n",
 		dev->name, inw (ioaddr + LANCE_DATA));
 	outw (0x0004, ioaddr + LANCE_DATA);
-	lp->stats.tx_errors++;
+	dev->stats.tx_errors++;
 #ifndef final_version
 	if (lance_debug > 3) {
 		int i;
@@ -989,7 +988,7 @@
 
 	lp->tx_ring[entry].misc = 0x0000;
 
-	lp->stats.tx_bytes += skb->len;
+	dev->stats.tx_bytes += skb->len;
 
 	/* If any part of this buffer is >16M we must copy it to a low-memory
 	   buffer. */
@@ -1062,13 +1061,16 @@
 				if (status & 0x40000000) {
 					/* There was an major error, log it. */
 					int err_status = lp->tx_ring[entry].misc;
-					lp->stats.tx_errors++;
-					if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
-					if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
-					if (err_status & 0x1000) lp->stats.tx_window_errors++;
+					dev->stats.tx_errors++;
+					if (err_status & 0x0400)
+						dev->stats.tx_aborted_errors++;
+					if (err_status & 0x0800)
+						dev->stats.tx_carrier_errors++;
+					if (err_status & 0x1000)
+						dev->stats.tx_window_errors++;
 					if (err_status & 0x4000) {
 						/* Ackk!  On FIFO errors the Tx unit is turned off! */
-						lp->stats.tx_fifo_errors++;
+						dev->stats.tx_fifo_errors++;
 						/* Remove this verbosity later! */
 						printk("%s: Tx FIFO error! Status %4.4x.\n",
 							   dev->name, csr0);
@@ -1077,8 +1079,8 @@
 					}
 				} else {
 					if (status & 0x18000000)
-						lp->stats.collisions++;
-					lp->stats.tx_packets++;
+						dev->stats.collisions++;
+					dev->stats.tx_packets++;
 				}
 
 				/* We must free the original skb if it's not a data-only copy
@@ -1108,8 +1110,10 @@
 		}
 
 		/* Log misc errors. */
-		if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
-		if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
+		if (csr0 & 0x4000)
+			dev->stats.tx_errors++; /* Tx babble. */
+		if (csr0 & 0x1000)
+			dev->stats.rx_errors++; /* Missed a Rx frame. */
 		if (csr0 & 0x0800) {
 			printk("%s: Bus master arbitration failure, status %4.4x.\n",
 				   dev->name, csr0);
@@ -1155,11 +1159,15 @@
 			   buffers it's possible for a jabber packet to use two
 			   buffers, with only the last correctly noting the error. */
 			if (status & 0x01)	/* Only count a general error at the */
-				lp->stats.rx_errors++; /* end of a packet.*/
-			if (status & 0x20) lp->stats.rx_frame_errors++;
-			if (status & 0x10) lp->stats.rx_over_errors++;
-			if (status & 0x08) lp->stats.rx_crc_errors++;
-			if (status & 0x04) lp->stats.rx_fifo_errors++;
+				dev->stats.rx_errors++; /* end of a packet.*/
+			if (status & 0x20)
+				dev->stats.rx_frame_errors++;
+			if (status & 0x10)
+				dev->stats.rx_over_errors++;
+			if (status & 0x08)
+				dev->stats.rx_crc_errors++;
+			if (status & 0x04)
+				dev->stats.rx_fifo_errors++;
 			lp->rx_ring[entry].base &= 0x03ffffff;
 		}
 		else
@@ -1171,7 +1179,7 @@
 			if(pkt_len<60)
 			{
 				printk("%s: Runt packet!\n",dev->name);
-				lp->stats.rx_errors++;
+				dev->stats.rx_errors++;
 			}
 			else
 			{
@@ -1185,7 +1193,7 @@
 
 					if (i > RX_RING_SIZE -2)
 					{
-						lp->stats.rx_dropped++;
+						dev->stats.rx_dropped++;
 						lp->rx_ring[entry].base |= 0x80000000;
 						lp->cur_rx++;
 					}
@@ -1198,8 +1206,8 @@
 					pkt_len);
 				skb->protocol=eth_type_trans(skb,dev);
 				netif_rx(skb);
-				lp->stats.rx_packets++;
-				lp->stats.rx_bytes+=pkt_len;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += pkt_len;
 			}
 		}
 		/* The docs say that the buffer length isn't touched, but Andrew Boyd
@@ -1225,7 +1233,7 @@
 
 	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
 		outw(112, ioaddr+LANCE_ADDR);
-		lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
 	}
 	outw(0, ioaddr+LANCE_ADDR);
 
@@ -1262,12 +1270,12 @@
 		spin_lock_irqsave(&lp->devlock, flags);
 		saved_addr = inw(ioaddr+LANCE_ADDR);
 		outw(112, ioaddr+LANCE_ADDR);
-		lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
 		outw(saved_addr, ioaddr+LANCE_ADDR);
 		spin_unlock_irqrestore(&lp->devlock, flags);
 	}
 
-	return &lp->stats;
+	return &dev->stats;
 }
 
 /* Set or clear the multicast filter for this adaptor.
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 6474c49..fa303c8 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -202,14 +202,29 @@
 	int i;
 
 	lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
+	if (!lp->rx_skb) {
+		dev_err(&ndev->dev,
+				"can't allocate memory for DMA RX buffer\n");
+		goto out;
+	}
 	/* allocate the tx and rx ring buffer descriptors. */
 	/* returns a virtual addres and a physical address. */
 	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 					 &lp->tx_bd_p, GFP_KERNEL);
+	if (!lp->tx_bd_v) {
+		dev_err(&ndev->dev,
+				"unable to allocate DMA TX buffer descriptors");
+		goto out;
+	}
 	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
 					 &lp->rx_bd_p, GFP_KERNEL);
+	if (!lp->rx_bd_v) {
+		dev_err(&ndev->dev,
+				"unable to allocate DMA RX buffer descriptors");
+		goto out;
+	}
 
 	memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
 	for (i = 0; i < TX_BD_NUM; i++) {
@@ -227,7 +242,7 @@
 
 		if (skb == 0) {
 			dev_err(&ndev->dev, "alloc_skb error %d\n", i);
-			return -1;
+			goto out;
 		}
 		lp->rx_skb[i] = skb;
 		/* returns physical address of skb->data */
@@ -258,6 +273,9 @@
 	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
 
 	return 0;
+
+out:
+	return -ENOMEM;
 }
 
 /* ---------------------------------------------------------------------
@@ -505,7 +523,10 @@
 	}
 	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
 
-	temac_dma_bd_init(ndev);
+	if (temac_dma_bd_init(ndev)) {
+		dev_err(&ndev->dev,
+				"temac_device_reset descriptor allocation failed\n");
+	}
 
 	temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
 	temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 72b7949..9a09967 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -58,11 +58,13 @@
 #include <linux/tcp.h>
 #include <linux/percpu.h>
 #include <net/net_namespace.h>
+#include <linux/u64_stats_sync.h>
 
 struct pcpu_lstats {
-	unsigned long packets;
-	unsigned long bytes;
-	unsigned long drops;
+	u64			packets;
+	u64			bytes;
+	struct u64_stats_sync	syncp;
+	unsigned long		drops;
 };
 
 /*
@@ -86,31 +88,40 @@
 
 	len = skb->len;
 	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
+		u64_stats_update_begin(&lb_stats->syncp);
 		lb_stats->bytes += len;
 		lb_stats->packets++;
+		u64_stats_update_end(&lb_stats->syncp);
 	} else
 		lb_stats->drops++;
 
 	return NETDEV_TX_OK;
 }
 
-static struct net_device_stats *loopback_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
+						      struct rtnl_link_stats64 *stats)
 {
 	const struct pcpu_lstats __percpu *pcpu_lstats;
-	struct net_device_stats *stats = &dev->stats;
-	unsigned long bytes = 0;
-	unsigned long packets = 0;
-	unsigned long drops = 0;
+	u64 bytes = 0;
+	u64 packets = 0;
+	u64 drops = 0;
 	int i;
 
 	pcpu_lstats = (void __percpu __force *)dev->ml_priv;
 	for_each_possible_cpu(i) {
 		const struct pcpu_lstats *lb_stats;
+		u64 tbytes, tpackets;
+		unsigned int start;
 
 		lb_stats = per_cpu_ptr(pcpu_lstats, i);
-		bytes   += lb_stats->bytes;
-		packets += lb_stats->packets;
+		do {
+			start = u64_stats_fetch_begin(&lb_stats->syncp);
+			tbytes = lb_stats->bytes;
+			tpackets = lb_stats->packets;
+		} while (u64_stats_fetch_retry(&lb_stats->syncp, start));
 		drops   += lb_stats->drops;
+		bytes   += tbytes;
+		packets += tpackets;
 	}
 	stats->rx_packets = packets;
 	stats->tx_packets = packets;
@@ -158,7 +169,7 @@
 static const struct net_device_ops loopback_ops = {
 	.ndo_init      = loopback_dev_init,
 	.ndo_start_xmit= loopback_xmit,
-	.ndo_get_stats = loopback_get_stats,
+	.ndo_get_stats64 = loopback_get_stats64,
 };
 
 /*
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 1136c9a..3832fa4 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -157,6 +157,8 @@
 #define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
 #define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))
 
+#define memcmp_withio(a, b, c)	memcmp((a), (void *)(b), (c))
+
 /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
 static void slow_sane_get_8390_hdr(struct net_device *dev,
 				   struct e8390_pkt_hdr *hdr, int ring_page);
@@ -164,8 +166,8 @@
 				  struct sk_buff *skb, int ring_offset);
 static void slow_sane_block_output(struct net_device *dev, int count,
 				   const unsigned char *buf, int start_page);
-static void word_memcpy_tocard(void *tp, const void *fp, int count);
-static void word_memcpy_fromcard(void *tp, const void *fp, int count);
+static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
+static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
 
 static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
 {
@@ -245,9 +247,9 @@
 	unsigned long outdata = 0xA5A0B5B0;
 	unsigned long indata =  0x00000000;
 	/* Try writing 32 bits */
-	memcpy(membase, &outdata, 4);
+	memcpy_toio(membase, &outdata, 4);
 	/* Now compare them */
-	if (memcmp((char *)&outdata, (char *)membase, 4) == 0)
+	if (memcmp_withio(&outdata, membase, 4) == 0)
 		return ACCESS_32;
 	/* Write 16 bit output */
 	word_memcpy_tocard(membase, &outdata, 4);
@@ -554,7 +556,7 @@
 	case MAC8390_APPLE:
 		switch (mac8390_testio(dev->mem_start)) {
 		case ACCESS_UNKNOWN:
-			pr_info("Don't know how to access card memory!\n");
+			pr_err("Don't know how to access card memory!\n");
 			return -ENODEV;
 			break;
 
@@ -641,12 +643,13 @@
 
 static int mac8390_open(struct net_device *dev)
 {
+	int err;
+
 	__ei_open(dev);
-	if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
-		pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
-		return -EAGAIN;
-	}
-	return 0;
+	err = request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev);
+	if (err)
+		pr_err("%s: unable to get IRQ %d\n", dev->name, dev->irq);
+	return err;
 }
 
 static int mac8390_close(struct net_device *dev)
@@ -731,7 +734,7 @@
 			      struct e8390_pkt_hdr *hdr, int ring_page)
 {
 	unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
-	memcpy_fromio((void *)hdr, (char *)dev->mem_start + hdr_start, 4);
+	memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
 	/* Fix endianness */
 	hdr->count = swab16(hdr->count);
 }
@@ -745,14 +748,13 @@
 	if (xfer_start + count > ei_status.rmem_end) {
 		/* We must wrap the input move. */
 		int semi_count = ei_status.rmem_end - xfer_start;
-		memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+		memcpy_fromio(skb->data, dev->mem_start + xfer_base,
 			      semi_count);
 		count -= semi_count;
-		memcpy_toio(skb->data + semi_count,
-			    (char *)ei_status.rmem_start, count);
-	} else {
-		memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+		memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
 			      count);
+	} else {
+		memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
 	}
 }
 
@@ -761,7 +763,7 @@
 {
 	long shmem = (start_page - WD_START_PG)<<8;
 
-	memcpy_toio((char *)dev->mem_start + shmem, buf, count);
+	memcpy_toio(dev->mem_start + shmem, buf, count);
 }
 
 /* dayna block input/output */
@@ -812,7 +814,7 @@
 				   int ring_page)
 {
 	unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
-	word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4);
+	word_memcpy_fromcard(hdr, dev->mem_start + hdr_start, 4);
 	/* Register endianism - fix here rather than 8390.c */
 	hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
 }
@@ -826,15 +828,14 @@
 	if (xfer_start + count > ei_status.rmem_end) {
 		/* We must wrap the input move. */
 		int semi_count = ei_status.rmem_end - xfer_start;
-		word_memcpy_fromcard(skb->data,
-				     (char *)dev->mem_start + xfer_base,
+		word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
 				     semi_count);
 		count -= semi_count;
 		word_memcpy_fromcard(skb->data + semi_count,
-				     (char *)ei_status.rmem_start, count);
+				     ei_status.rmem_start, count);
 	} else {
-		word_memcpy_fromcard(skb->data,
-				     (char *)dev->mem_start + xfer_base, count);
+		word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
+				     count);
 	}
 }
 
@@ -843,12 +844,12 @@
 {
 	long shmem = (start_page - WD_START_PG)<<8;
 
-	word_memcpy_tocard((char *)dev->mem_start + shmem, buf, count);
+	word_memcpy_tocard(dev->mem_start + shmem, buf, count);
 }
 
-static void word_memcpy_tocard(void *tp, const void *fp, int count)
+static void word_memcpy_tocard(unsigned long tp, const void *fp, int count)
 {
-	volatile unsigned short *to = tp;
+	volatile unsigned short *to = (void *)tp;
 	const unsigned short *from = fp;
 
 	count++;
@@ -858,10 +859,10 @@
 		*to++ = *from++;
 }
 
-static void word_memcpy_fromcard(void *tp, const void *fp, int count)
+static void word_memcpy_fromcard(void *tp, unsigned long fp, int count)
 {
 	unsigned short *to = tp;
-	const volatile unsigned short *from = fp;
+	const volatile unsigned short *from = (const void *)fp;
 
 	count++;
 	count /= 2;
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 69fa4ef6..669b317 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -110,7 +110,6 @@
 
 /* Information that need to be kept for each board. */
 struct net_local {
-	struct net_device_stats stats;
 	int chip_type;		/* one of: CS8900, CS8920, CS8920M */
 	char chip_revision;	/* revision letter of the chip ('A'...) */
 	int send_cmd;		/* the propercommand used to send a packet. */
@@ -444,13 +443,18 @@
 			net_rx(dev);
 			break;
 		case ISQ_TRANSMITTER_EVENT:
-			lp->stats.tx_packets++;
+			dev->stats.tx_packets++;
 			netif_wake_queue(dev);
-			if ((status & TX_OK) == 0) lp->stats.tx_errors++;
-			if (status & TX_LOST_CRS) lp->stats.tx_carrier_errors++;
-			if (status & TX_SQE_ERROR) lp->stats.tx_heartbeat_errors++;
-			if (status & TX_LATE_COL) lp->stats.tx_window_errors++;
-			if (status & TX_16_COL) lp->stats.tx_aborted_errors++;
+			if ((status & TX_OK) == 0)
+				dev->stats.tx_errors++;
+			if (status & TX_LOST_CRS)
+				dev->stats.tx_carrier_errors++;
+			if (status & TX_SQE_ERROR)
+				dev->stats.tx_heartbeat_errors++;
+			if (status & TX_LATE_COL)
+				dev->stats.tx_window_errors++;
+			if (status & TX_16_COL)
+				dev->stats.tx_aborted_errors++;
 			break;
 		case ISQ_BUFFER_EVENT:
 			if (status & READY_FOR_TX) {
@@ -469,10 +473,10 @@
                         }
 			break;
 		case ISQ_RX_MISS_EVENT:
-			lp->stats.rx_missed_errors += (status >>6);
+			dev->stats.rx_missed_errors += (status >> 6);
 			break;
 		case ISQ_TX_COL_EVENT:
-			lp->stats.collisions += (status >>6);
+			dev->stats.collisions += (status >> 6);
 			break;
 		}
 	}
@@ -483,19 +487,22 @@
 static void
 net_rx(struct net_device *dev)
 {
-	struct net_local *lp = netdev_priv(dev);
 	struct sk_buff *skb;
 	int status, length;
 
 	status = readreg(dev, PP_RxStatus);
 	if ((status & RX_OK) == 0) {
-		lp->stats.rx_errors++;
-		if (status & RX_RUNT) lp->stats.rx_length_errors++;
-		if (status & RX_EXTRA_DATA) lp->stats.rx_length_errors++;
-		if (status & RX_CRC_ERROR) if (!(status & (RX_EXTRA_DATA|RX_RUNT)))
+		dev->stats.rx_errors++;
+		if (status & RX_RUNT)
+				dev->stats.rx_length_errors++;
+		if (status & RX_EXTRA_DATA)
+				dev->stats.rx_length_errors++;
+		if ((status & RX_CRC_ERROR) &&
+		    !(status & (RX_EXTRA_DATA|RX_RUNT)))
 			/* per str 172 */
-			lp->stats.rx_crc_errors++;
-		if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++;
+			dev->stats.rx_crc_errors++;
+		if (status & RX_DRIBBLE)
+				dev->stats.rx_frame_errors++;
 		return;
 	}
 
@@ -504,7 +511,7 @@
 	skb = alloc_skb(length, GFP_ATOMIC);
 	if (skb == NULL) {
 		printk("%s: Memory squeeze, dropping packet.\n", dev->name);
-		lp->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 		return;
 	}
 	skb_put(skb, length);
@@ -519,8 +526,8 @@
 
         skb->protocol=eth_type_trans(skb,dev);
 	netif_rx(skb);
-	lp->stats.rx_packets++;
-	lp->stats.rx_bytes += length;
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += length;
 }
 
 /* The inverse routine to net_open(). */
@@ -548,16 +555,15 @@
 static struct net_device_stats *
 net_get_stats(struct net_device *dev)
 {
-	struct net_local *lp = netdev_priv(dev);
 	unsigned long flags;
 
 	local_irq_save(flags);
 	/* Update the statistics from the device registers. */
-	lp->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
-	lp->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
+	dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
+	dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
 	local_irq_restore(flags);
 
-	return &lp->stats;
+	return &dev->stats;
 }
 
 static void set_multicast_list(struct net_device *dev)
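Editor's note: the mac89x0 hunks above (like the natsemi, ni52 and ns83820 conversions further down) drop the driver-private copy of struct net_device_stats and count directly into the stats member embedded in struct net_device. A minimal sketch of the resulting shape, for a hypothetical driver with no hardware counters to fold in:

/* Counters live in the netdev itself; no private struct to allocate. */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);		/* placeholder for the real TX path */
	return NETDEV_TX_OK;
}

static struct net_device_stats *foo_get_stats(struct net_device *dev)
{
	/* only needed when hardware counters must be folded in first;
	 * otherwise dev_get_stats() falls back to dev->stats on its own */
	return &dev->stats;
}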
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 87e8d4c..6112f14 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -37,8 +37,14 @@
 	struct net_device	*dev;
 	struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE];
 	struct list_head	vlans;
+	struct rcu_head		rcu;
 };
 
+#define macvlan_port_get_rcu(dev) \
+	((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
+#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
+#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
+
 static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
 					       const unsigned char *addr)
 {
@@ -145,15 +151,16 @@
 }
 
 /* called under rcu_read_lock() from netif_receive_skb */
-static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port,
-					    struct sk_buff *skb)
+static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
 {
+	struct macvlan_port *port;
 	const struct ethhdr *eth = eth_hdr(skb);
 	const struct macvlan_dev *vlan;
 	const struct macvlan_dev *src;
 	struct net_device *dev;
 	unsigned int len;
 
+	port = macvlan_port_get_rcu(skb->dev);
 	if (is_multicast_ether_addr(eth->h_dest)) {
 		src = macvlan_hash_lookup(port, eth->h_source);
 		if (!src)
@@ -424,29 +431,38 @@
 	free_percpu(vlan->rx_stats);
 }
 
-static struct net_device_stats *macvlan_dev_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
+							 struct rtnl_link_stats64 *stats)
 {
-	struct net_device_stats *stats = &dev->stats;
 	struct macvlan_dev *vlan = netdev_priv(dev);
 
-	dev_txq_stats_fold(dev, stats);
+	dev_txq_stats_fold(dev, (struct net_device_stats *)stats);
 
 	if (vlan->rx_stats) {
-		struct macvlan_rx_stats *p, rx = {0};
+		struct macvlan_rx_stats *p, accum = {0};
+		u64 rx_packets, rx_bytes, rx_multicast;
+		unsigned int start;
 		int i;
 
 		for_each_possible_cpu(i) {
 			p = per_cpu_ptr(vlan->rx_stats, i);
-			rx.rx_packets += p->rx_packets;
-			rx.rx_bytes   += p->rx_bytes;
-			rx.rx_errors  += p->rx_errors;
-			rx.multicast  += p->multicast;
+			do {
+				start = u64_stats_fetch_begin_bh(&p->syncp);
+				rx_packets	= p->rx_packets;
+				rx_bytes	= p->rx_bytes;
+				rx_multicast	= p->rx_multicast;
+			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+			accum.rx_packets	+= rx_packets;
+			accum.rx_bytes		+= rx_bytes;
+			accum.rx_multicast	+= rx_multicast;
+			/* rx_errors is a ulong, updated without syncp protection */
+			accum.rx_errors		+= p->rx_errors;
 		}
-		stats->rx_packets = rx.rx_packets;
-		stats->rx_bytes   = rx.rx_bytes;
-		stats->rx_errors  = rx.rx_errors;
-		stats->rx_dropped = rx.rx_errors;
-		stats->multicast  = rx.multicast;
+		stats->rx_packets = accum.rx_packets;
+		stats->rx_bytes   = accum.rx_bytes;
+		stats->rx_errors  = accum.rx_errors;
+		stats->rx_dropped = accum.rx_errors;
+		stats->multicast  = accum.rx_multicast;
 	}
 	return stats;
 }
@@ -495,7 +511,7 @@
 	.ndo_change_rx_flags	= macvlan_change_rx_flags,
 	.ndo_set_mac_address	= macvlan_set_mac_address,
 	.ndo_set_multicast_list	= macvlan_set_multicast_list,
-	.ndo_get_stats		= macvlan_dev_get_stats,
+	.ndo_get_stats64	= macvlan_dev_get_stats64,
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
@@ -515,6 +531,7 @@
 {
 	struct macvlan_port *port;
 	unsigned int i;
+	int err;
 
 	if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
 		return -EINVAL;
@@ -527,17 +544,30 @@
 	INIT_LIST_HEAD(&port->vlans);
 	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
 		INIT_HLIST_HEAD(&port->vlan_hash[i]);
-	rcu_assign_pointer(dev->macvlan_port, port);
-	return 0;
+
+	err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
+	if (err)
+		kfree(port);
+	else
+		dev->priv_flags |= IFF_MACVLAN_PORT;
+	return err;
+}
+
+static void macvlan_port_rcu_free(struct rcu_head *head)
+{
+	struct macvlan_port *port;
+
+	port = container_of(head, struct macvlan_port, rcu);
+	kfree(port);
 }
 
 static void macvlan_port_destroy(struct net_device *dev)
 {
-	struct macvlan_port *port = dev->macvlan_port;
+	struct macvlan_port *port = macvlan_port_get(dev);
 
-	rcu_assign_pointer(dev->macvlan_port, NULL);
-	synchronize_rcu();
-	kfree(port);
+	dev->priv_flags &= ~IFF_MACVLAN_PORT;
+	netdev_rx_handler_unregister(dev);
+	call_rcu(&port->rcu, macvlan_port_rcu_free);
 }
 
 static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -615,12 +645,12 @@
 	if (!tb[IFLA_ADDRESS])
 		random_ether_addr(dev->dev_addr);
 
-	if (lowerdev->macvlan_port == NULL) {
+	if (!macvlan_port_exists(lowerdev)) {
 		err = macvlan_port_create(lowerdev);
 		if (err < 0)
 			return err;
 	}
-	port = lowerdev->macvlan_port;
+	port = macvlan_port_get(lowerdev);
 
 	vlan->lowerdev = lowerdev;
 	vlan->dev      = dev;
@@ -730,10 +760,11 @@
 	struct macvlan_dev *vlan, *next;
 	struct macvlan_port *port;
 
-	port = dev->macvlan_port;
-	if (port == NULL)
+	if (!macvlan_port_exists(dev))
 		return NOTIFY_DONE;
 
+	port = macvlan_port_get(dev);
+
 	switch (event) {
 	case NETDEV_CHANGE:
 		list_for_each_entry(vlan, &port->vlans, list)
@@ -767,14 +798,12 @@
 	int err;
 
 	register_netdevice_notifier(&macvlan_notifier_block);
-	macvlan_handle_frame_hook = macvlan_handle_frame;
 
 	err = macvlan_link_register(&macvlan_link_ops);
 	if (err < 0)
 		goto err1;
 	return 0;
 err1:
-	macvlan_handle_frame_hook = NULL;
 	unregister_netdevice_notifier(&macvlan_notifier_block);
 	return err;
 }
@@ -782,7 +811,6 @@
 static void __exit macvlan_cleanup_module(void)
 {
 	rtnl_link_unregister(&macvlan_link_ops);
-	macvlan_handle_frame_hook = NULL;
 	unregister_netdevice_notifier(&macvlan_notifier_block);
 }
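Editor's note: the macvlan changes above retire the dedicated macvlan_handle_frame_hook in favour of the generic rx_handler attached to the lower device. A hedged sketch of that registration pattern with hypothetical names; at this stage of the API the handler returns the skb to keep it flowing through the stack, or NULL once it has been consumed:

struct foo_port {
	struct net_device	*dev;	/* per lower-device state */
};

/* Runs from netif_receive_skb() under rcu_read_lock(). */
static struct sk_buff *foo_handle_frame(struct sk_buff *skb)
{
	struct foo_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (!port)
		return skb;	/* nothing attached, let the stack have it */

	/* ... classify or redirect the skb using @port ... */
	return skb;
}

static int foo_port_attach(struct net_device *lowerdev, struct foo_port *port)
{
	/* caller holds RTNL; -EBUSY if another handler is already registered */
	return netdev_rx_handler_register(lowerdev, foo_handle_frame, port);
}

static void foo_port_detach(struct net_device *lowerdev)
{
	netdev_rx_handler_unregister(lowerdev);
	/* free the port state only after a grace period, as macvlan does
	 * above with call_rcu(), since readers may still be looking at it */
}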
 
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index d5afd03..b275238 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -387,6 +387,42 @@
 	param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
 }
 
+static int mlx4_ethtool_op_set_flags(struct net_device *dev, u32 data)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int rc = 0;
+	int changed = 0;
+
+	if (data & ~ETH_FLAG_LRO)
+		return -EOPNOTSUPP;
+
+	if (data & ETH_FLAG_LRO) {
+		if (mdev->profile.num_lro == 0)
+			return -EOPNOTSUPP;
+		if (!(dev->features & NETIF_F_LRO))
+			changed = 1;
+	} else if (dev->features & NETIF_F_LRO) {
+		changed = 1;
+	}
+
+	if (changed) {
+		if (netif_running(dev)) {
+			mutex_lock(&mdev->state_lock);
+			mlx4_en_stop_port(dev);
+		}
+		dev->features ^= NETIF_F_LRO;
+		if (netif_running(dev)) {
+			rc = mlx4_en_start_port(dev);
+			if (rc)
+				en_err(priv, "Failed to restart port\n");
+			mutex_unlock(&mdev->state_lock);
+		}
+	}
+
+	return rc;
+}
+
 const struct ethtool_ops mlx4_en_ethtool_ops = {
 	.get_drvinfo = mlx4_en_get_drvinfo,
 	.get_settings = mlx4_en_get_settings,
@@ -415,7 +451,7 @@
 	.get_ringparam = mlx4_en_get_ringparam,
 	.set_ringparam = mlx4_en_set_ringparam,
 	.get_flags = ethtool_op_get_flags,
-	.set_flags = ethtool_op_set_flags,
+	.set_flags = mlx4_ethtool_op_set_flags,
 };
 
 
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 96180c0..a0d8a26 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -961,6 +961,7 @@
 	}
 
 	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
+	dev->dev_id = port - 1;
 
 	/*
 	 * Initialize driver private data
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 4230534..22d0b3b 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -110,7 +110,7 @@
 		u32		raw[6];
 		struct {
 			__be32	cqn;
-		} __attribute__((packed)) comp;
+		} __packed comp;
 		struct {
 			u16	reserved1;
 			__be16	token;
@@ -118,27 +118,27 @@
 			u8	reserved3[3];
 			u8	status;
 			__be64	out_param;
-		} __attribute__((packed)) cmd;
+		} __packed cmd;
 		struct {
 			__be32	qpn;
-		} __attribute__((packed)) qp;
+		} __packed qp;
 		struct {
 			__be32	srqn;
-		} __attribute__((packed)) srq;
+		} __packed srq;
 		struct {
 			__be32	cqn;
 			u32	reserved1;
 			u8	reserved2[3];
 			u8	syndrome;
-		} __attribute__((packed)) cq_err;
+		} __packed cq_err;
 		struct {
 			u32	reserved1[2];
 			__be32	port;
-		} __attribute__((packed)) port_change;
+		} __packed port_change;
 	}			event;
 	u8			reserved3[3];
 	u8			owner;
-} __attribute__((packed));
+} __packed;
 
 static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 {
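Editor's note: the mlx4 hunks here (and the ps3_gelic_wireless.h ones later in this series) only change spelling — the kernel's __packed macro from <linux/compiler.h> expands to __attribute__((packed)), so the structure layout is untouched. For reference, a made-up structure written in the new form:

#include <linux/compiler.h>
#include <linux/types.h>

/* Identical layout to ending the definition with __attribute__((packed)). */
struct foo_event {
	__be32	id;		/* big-endian on the wire */
	u8	flags;
	u8	reserved[3];
} __packed;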
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 3dc69be..9c188bd 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -58,7 +58,7 @@
 	__be32 mtt_sz;
 	__be32 entity_size;
 	__be32 first_byte_offset;
-} __attribute__((packed));
+} __packed;
 
 #define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
 #define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 73bb8ea..af075af 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1640,6 +1640,11 @@
 	}
 }
 
+static int mv643xx_eth_set_flags(struct net_device *dev, u32 data)
+{
+	return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO);
+}
+
 static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
 {
 	if (sset == ETH_SS_STATS)
@@ -1665,7 +1670,7 @@
 	.get_strings		= mv643xx_eth_get_strings,
 	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
 	.get_flags		= ethtool_op_get_flags,
-	.set_flags		= ethtool_op_set_flags,
+	.set_flags		= mv643xx_eth_set_flags,
 	.get_sset_count		= mv643xx_eth_get_sset_count,
 };
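Editor's note: the mv643xx_eth wrapper above (and the myri10ge and niu ones below) exists because ethtool_op_set_flags() now takes a third argument naming the ETH_FLAG_* bits the driver actually supports; anything outside that mask is rejected with -EINVAL before dev->features is touched. The shared pattern, sketched for a hypothetical driver that supports LRO and receive hashing:

static int foo_set_flags(struct net_device *dev, u32 data)
{
	return ethtool_op_set_flags(dev, data,
				    ETH_FLAG_LRO | ETH_FLAG_RXHASH);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_flags	= ethtool_op_get_flags,
	.set_flags	= foo_set_flags,
	/* remaining ethtool callbacks elided */
};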
 
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index e0b47cc..d771d16 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1730,8 +1730,7 @@
 	if (csum_enabled)
 		mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
 	else {
-		u32 flags = ethtool_op_get_flags(netdev);
-		err = ethtool_op_set_flags(netdev, (flags & ~ETH_FLAG_LRO));
+		netdev->features &= ~NETIF_F_LRO;
 		mgp->csum_flag = 0;
 
 	}
@@ -1900,6 +1899,11 @@
 	return mgp->msg_enable;
 }
 
+static int myri10ge_set_flags(struct net_device *netdev, u32 value)
+{
+	return ethtool_op_set_flags(netdev, value, ETH_FLAG_LRO);
+}
+
 static const struct ethtool_ops myri10ge_ethtool_ops = {
 	.get_settings = myri10ge_get_settings,
 	.get_drvinfo = myri10ge_get_drvinfo,
@@ -1920,7 +1924,7 @@
 	.set_msglevel = myri10ge_set_msglevel,
 	.get_msglevel = myri10ge_get_msglevel,
 	.get_flags = ethtool_op_get_flags,
-	.set_flags = ethtool_op_set_flags
+	.set_flags = myri10ge_set_flags
 };
 
 static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 2a17b50..a6033d4 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -548,7 +548,6 @@
 	dma_addr_t tx_dma[TX_RING_SIZE];
 	struct net_device *dev;
 	struct napi_struct napi;
-	struct net_device_stats stats;
 	/* Media monitoring timer */
 	struct timer_list timer;
 	/* Frequently used values: keep some adjacent for cache effect */
@@ -1906,7 +1905,7 @@
 	enable_irq(dev->irq);
 
 	dev->trans_start = jiffies; /* prevent tx timeout */
-	np->stats.tx_errors++;
+	dev->stats.tx_errors++;
 	netif_wake_queue(dev);
 }
 
@@ -2009,7 +2008,7 @@
 				np->tx_dma[i], np->tx_skbuff[i]->len,
 				PCI_DMA_TODEVICE);
 			dev_kfree_skb(np->tx_skbuff[i]);
-			np->stats.tx_dropped++;
+			dev->stats.tx_dropped++;
 		}
 		np->tx_skbuff[i] = NULL;
 	}
@@ -2115,7 +2114,7 @@
 		writel(TxOn, ioaddr + ChipCmd);
 	} else {
 		dev_kfree_skb_irq(skb);
-		np->stats.tx_dropped++;
+		dev->stats.tx_dropped++;
 	}
 	spin_unlock_irqrestore(&np->lock, flags);
 
@@ -2140,20 +2139,20 @@
 					dev->name, np->dirty_tx,
 					le32_to_cpu(np->tx_ring[entry].cmd_status));
 		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
-			np->stats.tx_packets++;
-			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
 		} else { /* Various Tx errors */
 			int tx_status =
 				le32_to_cpu(np->tx_ring[entry].cmd_status);
 			if (tx_status & (DescTxAbort|DescTxExcColl))
-				np->stats.tx_aborted_errors++;
+				dev->stats.tx_aborted_errors++;
 			if (tx_status & DescTxFIFO)
-				np->stats.tx_fifo_errors++;
+				dev->stats.tx_fifo_errors++;
 			if (tx_status & DescTxCarrier)
-				np->stats.tx_carrier_errors++;
+				dev->stats.tx_carrier_errors++;
 			if (tx_status & DescTxOOWCol)
-				np->stats.tx_window_errors++;
-			np->stats.tx_errors++;
+				dev->stats.tx_window_errors++;
+			dev->stats.tx_errors++;
 		}
 		pci_unmap_single(np->pci_dev,np->tx_dma[entry],
 					np->tx_skbuff[entry]->len,
@@ -2301,7 +2300,7 @@
 						"buffers, entry %#08x "
 						"status %#08x.\n", dev->name,
 						np->cur_rx, desc_status);
-				np->stats.rx_length_errors++;
+				dev->stats.rx_length_errors++;
 
 				/* The RX state machine has probably
 				 * locked up beneath us.  Follow the
@@ -2321,15 +2320,15 @@
 
 			} else {
 				/* There was an error. */
-				np->stats.rx_errors++;
+				dev->stats.rx_errors++;
 				if (desc_status & (DescRxAbort|DescRxOver))
-					np->stats.rx_over_errors++;
+					dev->stats.rx_over_errors++;
 				if (desc_status & (DescRxLong|DescRxRunt))
-					np->stats.rx_length_errors++;
+					dev->stats.rx_length_errors++;
 				if (desc_status & (DescRxInvalid|DescRxAlign))
-					np->stats.rx_frame_errors++;
+					dev->stats.rx_frame_errors++;
 				if (desc_status & DescRxCRC)
-					np->stats.rx_crc_errors++;
+					dev->stats.rx_crc_errors++;
 			}
 		} else if (pkt_len > np->rx_buf_sz) {
 			/* if this is the tail of a double buffer
@@ -2364,8 +2363,8 @@
 			}
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_receive_skb(skb);
-			np->stats.rx_packets++;
-			np->stats.rx_bytes += pkt_len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 		}
 		entry = (++np->cur_rx) % RX_RING_SIZE;
 		np->rx_head_desc = &np->rx_ring[entry];
@@ -2428,17 +2427,17 @@
 			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
 				dev->name);
 		}
-		np->stats.rx_fifo_errors++;
-		np->stats.rx_errors++;
+		dev->stats.rx_fifo_errors++;
+		dev->stats.rx_errors++;
 	}
 	/* Hmmmmm, it's not clear how to recover from PCI faults. */
 	if (intr_status & IntrPCIErr) {
 		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
 			intr_status & IntrPCIErr);
-		np->stats.tx_fifo_errors++;
-		np->stats.tx_errors++;
-		np->stats.rx_fifo_errors++;
-		np->stats.rx_errors++;
+		dev->stats.tx_fifo_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.rx_fifo_errors++;
+		dev->stats.rx_errors++;
 	}
 	spin_unlock(&np->lock);
 }
@@ -2446,11 +2445,10 @@
 static void __get_stats(struct net_device *dev)
 {
 	void __iomem * ioaddr = ns_ioaddr(dev);
-	struct netdev_private *np = netdev_priv(dev);
 
 	/* The chip only need report frame silently dropped. */
-	np->stats.rx_crc_errors	+= readl(ioaddr + RxCRCErrs);
-	np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+	dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
+	dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
 }
 
 static struct net_device_stats *get_stats(struct net_device *dev)
@@ -2463,7 +2461,7 @@
 		__get_stats(dev);
 	spin_unlock_irq(&np->lock);
 
-	return &np->stats;
+	return &dev->stats;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 20f7c58..b30de24 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -887,12 +887,19 @@
 	struct netxen_adapter *adapter = netdev_priv(netdev);
 	int hw_lro;
 
+	if (data & ~ETH_FLAG_LRO)
+		return -EINVAL;
+
 	if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
 		return -EINVAL;
 
-	ethtool_op_set_flags(netdev, data);
-
-	hw_lro = (data & ETH_FLAG_LRO) ? NETXEN_NIC_LRO_ENABLED : 0;
+	if (data & ETH_FLAG_LRO) {
+		hw_lro = NETXEN_NIC_LRO_ENABLED;
+		netdev->features |= NETIF_F_LRO;
+	} else {
+		hw_lro = 0;
+		netdev->features &= ~NETIF_F_LRO;
+	}
 
 	if (netxen_config_hw_lro(adapter, hw_lro))
 		return -EIO;
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 9bddb5f..33618edc 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -185,7 +185,6 @@
 static void    ni52_rnr_int(struct net_device *dev);
 
 struct priv {
-	struct net_device_stats stats;
 	char __iomem *base;
 	char __iomem *mapped;
 	char __iomem *memtop;
@@ -972,10 +971,10 @@
 					memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen);
 					skb->protocol = eth_type_trans(skb, dev);
 					netif_rx(skb);
-					p->stats.rx_packets++;
-					p->stats.rx_bytes += totlen;
+					dev->stats.rx_packets++;
+					dev->stats.rx_bytes += totlen;
 				} else
-					p->stats.rx_dropped++;
+					dev->stats.rx_dropped++;
 			} else {
 				int rstat;
 				 /* free all RBD's until RBD_LAST is set */
@@ -993,12 +992,12 @@
 				writew(0, &rbd->status);
 				printk(KERN_ERR "%s: received oversized frame! length: %d\n",
 					dev->name, totlen);
-				p->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 			 }
 		} else {/* frame !(ok), only with 'save-bad-frames' */
 			printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n",
 				dev->name, status);
-			p->stats.rx_errors++;
+			dev->stats.rx_errors++;
 		}
 		writeb(0, &p->rfd_top->stat_high);
 		writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */
@@ -1043,7 +1042,7 @@
 {
 	struct priv *p = netdev_priv(dev);
 
-	p->stats.rx_errors++;
+	dev->stats.rx_errors++;
 
 	wait_for_scb_cmd(dev);		/* wait for the last cmd, WAIT_4_FULLSTAT?? */
 	writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */
@@ -1076,29 +1075,29 @@
 		printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
 
 	if (status & STAT_OK) {
-		p->stats.tx_packets++;
-		p->stats.collisions += (status & TCMD_MAXCOLLMASK);
+		dev->stats.tx_packets++;
+		dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
 	} else {
-		p->stats.tx_errors++;
+		dev->stats.tx_errors++;
 		if (status & TCMD_LATECOLL) {
 			printk(KERN_ERR "%s: late collision detected.\n",
 				dev->name);
-			p->stats.collisions++;
+			dev->stats.collisions++;
 		} else if (status & TCMD_NOCARRIER) {
-			p->stats.tx_carrier_errors++;
+			dev->stats.tx_carrier_errors++;
 			printk(KERN_ERR "%s: no carrier detected.\n",
 				dev->name);
 		} else if (status & TCMD_LOSTCTS)
 			printk(KERN_ERR "%s: loss of CTS detected.\n",
 				dev->name);
 		else if (status & TCMD_UNDERRUN) {
-			p->stats.tx_fifo_errors++;
+			dev->stats.tx_fifo_errors++;
 			printk(KERN_ERR "%s: DMA underrun detected.\n",
 				dev->name);
 		} else if (status & TCMD_MAXCOLL) {
 			printk(KERN_ERR "%s: Max. collisions exceeded.\n",
 				dev->name);
-			p->stats.collisions += 16;
+			dev->stats.collisions += 16;
 		}
 	}
 #if (NUM_XMIT_BUFFS > 1)
@@ -1286,12 +1285,12 @@
 	ovrn = readw(&p->scb->ovrn_errs);
 	writew(0, &p->scb->ovrn_errs);
 
-	p->stats.rx_crc_errors += crc;
-	p->stats.rx_fifo_errors += ovrn;
-	p->stats.rx_frame_errors += aln;
-	p->stats.rx_dropped += rsc;
+	dev->stats.rx_crc_errors += crc;
+	dev->stats.rx_fifo_errors += ovrn;
+	dev->stats.rx_frame_errors += aln;
+	dev->stats.rx_dropped += rsc;
 
-	return &p->stats;
+	return &dev->stats;
 }
 
 /********************************************************
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 63e8e38..3d523cb 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -7920,14 +7920,7 @@
 
 static int niu_set_flags(struct net_device *dev, u32 data)
 {
-	if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE))
-		return -EOPNOTSUPP;
-
-	if (data & ETH_FLAG_RXHASH)
-		dev->features |= NETIF_F_RXHASH;
-	else
-		dev->features &= ~NETIF_F_RXHASH;
-	return 0;
+	return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
 }
 
 static const struct ethtool_ops niu_ethtool_ops = {
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index e88e97c..5a3488f 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -424,7 +424,6 @@
 
 
 struct ns83820 {
-	struct net_device_stats	stats;
 	u8			__iomem *base;
 
 	struct pci_dev		*pci_dev;
@@ -918,9 +917,9 @@
 			if (unlikely(!skb))
 				goto netdev_mangle_me_harder_failed;
 			if (cmdsts & CMDSTS_DEST_MULTI)
-				dev->stats.multicast ++;
-			dev->stats.rx_packets ++;
-			dev->stats.rx_bytes += len;
+				ndev->stats.multicast++;
+			ndev->stats.rx_packets++;
+			ndev->stats.rx_bytes += len;
 			if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 			} else {
@@ -940,7 +939,7 @@
 #endif
 			if (NET_RX_DROP == rx_rc) {
 netdev_mangle_me_harder_failed:
-				dev->stats.rx_dropped ++;
+				ndev->stats.rx_dropped++;
 			}
 		} else {
 			kfree_skb(skb);
@@ -1008,11 +1007,11 @@
 		dma_addr_t addr;
 
 		if (cmdsts & CMDSTS_ERR)
-			dev->stats.tx_errors ++;
+			ndev->stats.tx_errors++;
 		if (cmdsts & CMDSTS_OK)
-			dev->stats.tx_packets ++;
+			ndev->stats.tx_packets++;
 		if (cmdsts & CMDSTS_OK)
-			dev->stats.tx_bytes += cmdsts & 0xffff;
+			ndev->stats.tx_bytes += cmdsts & 0xffff;
 
 		dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
 			tx_done_idx, dev->tx_free_idx, cmdsts);
@@ -1212,20 +1211,21 @@
 
 static void ns83820_update_stats(struct ns83820 *dev)
 {
+	struct net_device *ndev = dev->ndev;
 	u8 __iomem *base = dev->base;
 
 	/* the DP83820 will freeze counters, so we need to read all of them */
-	dev->stats.rx_errors		+= readl(base + 0x60) & 0xffff;
-	dev->stats.rx_crc_errors	+= readl(base + 0x64) & 0xffff;
-	dev->stats.rx_missed_errors	+= readl(base + 0x68) & 0xffff;
-	dev->stats.rx_frame_errors	+= readl(base + 0x6c) & 0xffff;
-	/*dev->stats.rx_symbol_errors +=*/ readl(base + 0x70);
-	dev->stats.rx_length_errors	+= readl(base + 0x74) & 0xffff;
-	dev->stats.rx_length_errors	+= readl(base + 0x78) & 0xffff;
-	/*dev->stats.rx_badopcode_errors += */ readl(base + 0x7c);
-	/*dev->stats.rx_pause_count += */  readl(base + 0x80);
-	/*dev->stats.tx_pause_count += */  readl(base + 0x84);
-	dev->stats.tx_carrier_errors	+= readl(base + 0x88) & 0xff;
+	ndev->stats.rx_errors		+= readl(base + 0x60) & 0xffff;
+	ndev->stats.rx_crc_errors	+= readl(base + 0x64) & 0xffff;
+	ndev->stats.rx_missed_errors	+= readl(base + 0x68) & 0xffff;
+	ndev->stats.rx_frame_errors	+= readl(base + 0x6c) & 0xffff;
+	/*ndev->stats.rx_symbol_errors +=*/ readl(base + 0x70);
+	ndev->stats.rx_length_errors	+= readl(base + 0x74) & 0xffff;
+	ndev->stats.rx_length_errors	+= readl(base + 0x78) & 0xffff;
+	/*ndev->stats.rx_badopcode_errors += */ readl(base + 0x7c);
+	/*ndev->stats.rx_pause_count += */  readl(base + 0x80);
+	/*ndev->stats.tx_pause_count += */  readl(base + 0x84);
+	ndev->stats.tx_carrier_errors	+= readl(base + 0x88) & 0xff;
 }
 
 static struct net_device_stats *ns83820_get_stats(struct net_device *ndev)
@@ -1237,7 +1237,7 @@
 	ns83820_update_stats(dev);
 	spin_unlock_irq(&dev->misc_lock);
 
-	return &dev->stats;
+	return &ndev->stats;
 }
 
 /* Let ethtool retrieve info */
@@ -1464,12 +1464,12 @@
 
 	if (unlikely(ISR_RXSOVR & isr)) {
 		//printk("overrun: rxsovr\n");
-		dev->stats.rx_fifo_errors ++;
+		ndev->stats.rx_fifo_errors++;
 	}
 
 	if (unlikely(ISR_RXORN & isr)) {
 		//printk("overrun: rxorn\n");
-		dev->stats.rx_fifo_errors ++;
+		ndev->stats.rx_fifo_errors++;
 	}
 
 	if ((ISR_RXRCMP & isr) && dev->rx_info.up)
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 000e792..f4a0f08 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -1067,7 +1067,7 @@
 #endif
 };
 
-static int __init octeon_mgmt_probe(struct platform_device *pdev)
+static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
 {
 	struct resource *res_irq;
 	struct net_device *netdev;
@@ -1124,7 +1124,7 @@
 	return -ENOENT;
 }
 
-static int __exit octeon_mgmt_remove(struct platform_device *pdev)
+static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
 {
 	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
 
@@ -1139,7 +1139,7 @@
 		.owner		= THIS_MODULE,
 	},
 	.probe		= octeon_mgmt_probe,
-	.remove		= __exit_p(octeon_mgmt_remove),
+	.remove		= __devexit_p(octeon_mgmt_remove),
 };
 
 extern void octeon_mdiobus_force_mod_depencency(void);
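Editor's note: the octeon_mgmt retagging above (mirrored in mdio-octeon below) moves probe/remove from the boot-only __init/__exit sections to the hotplug-safe __devinit/__devexit ones, and wraps the remove pointer in __devexit_p() so it degrades to NULL when hotplug support is compiled out. A bare platform-driver skeleton following the same convention, names purely illustrative:

#include <linux/platform_device.h>

static int __devinit foo_probe(struct platform_device *pdev)
{
	/* may run long after boot, so it must not live in .init.text */
	return 0;
}

static int __devexit foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
	.probe	= foo_probe,
	.remove	= __devexit_p(foo_remove),	/* NULL if CONFIG_HOTPLUG=n */
};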
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index cecdbbd..4accd83 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -685,7 +685,7 @@
 }
 
 static struct phy_driver bcm5411_driver = {
-	.phy_id		= 0x00206070,
+	.phy_id		= PHY_ID_BCM5411,
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5411",
 	.features	= PHY_GBIT_FEATURES |
@@ -700,7 +700,7 @@
 };
 
 static struct phy_driver bcm5421_driver = {
-	.phy_id		= 0x002060e0,
+	.phy_id		= PHY_ID_BCM5421,
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5421",
 	.features	= PHY_GBIT_FEATURES |
@@ -715,7 +715,7 @@
 };
 
 static struct phy_driver bcm5461_driver = {
-	.phy_id		= 0x002060c0,
+	.phy_id		= PHY_ID_BCM5461,
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5461",
 	.features	= PHY_GBIT_FEATURES |
@@ -730,7 +730,7 @@
 };
 
 static struct phy_driver bcm5464_driver = {
-	.phy_id		= 0x002060b0,
+	.phy_id		= PHY_ID_BCM5464,
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5464",
 	.features	= PHY_GBIT_FEATURES |
@@ -745,7 +745,7 @@
 };
 
 static struct phy_driver bcm5481_driver = {
-	.phy_id		= 0x0143bca0,
+	.phy_id		= PHY_ID_BCM5481,
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5481",
 	.features	= PHY_GBIT_FEATURES |
@@ -760,7 +760,7 @@
 };
 
 static struct phy_driver bcm5482_driver = {
-	.phy_id		= 0x0143bcb0,
+	.phy_id		= PHY_ID_BCM5482,
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5482",
 	.features	= PHY_GBIT_FEATURES |
@@ -834,6 +834,21 @@
 	.driver		= { .owner = THIS_MODULE },
 };
 
+static struct phy_driver bcm5241_driver = {
+	.phy_id		= PHY_ID_BCM5241,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Broadcom BCM5241",
+	.features	= PHY_BASIC_FEATURES |
+			  SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.config_init	= brcm_fet_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= brcm_fet_ack_interrupt,
+	.config_intr	= brcm_fet_config_intr,
+	.driver		= { .owner = THIS_MODULE },
+};
+
 static int __init broadcom_init(void)
 {
 	int ret;
@@ -868,8 +883,13 @@
 	ret = phy_driver_register(&bcmac131_driver);
 	if (ret)
 		goto out_ac131;
+	ret = phy_driver_register(&bcm5241_driver);
+	if (ret)
+		goto out_5241;
 	return ret;
 
+out_5241:
+	phy_driver_unregister(&bcmac131_driver);
 out_ac131:
 	phy_driver_unregister(&bcm57780_driver);
 out_57780:
@@ -894,6 +914,7 @@
 
 static void __exit broadcom_exit(void)
 {
+	phy_driver_unregister(&bcm5241_driver);
 	phy_driver_unregister(&bcmac131_driver);
 	phy_driver_unregister(&bcm57780_driver);
 	phy_driver_unregister(&bcm50610m_driver);
@@ -910,16 +931,17 @@
 module_exit(broadcom_exit);
 
 static struct mdio_device_id broadcom_tbl[] = {
-	{ 0x00206070, 0xfffffff0 },
-	{ 0x002060e0, 0xfffffff0 },
-	{ 0x002060c0, 0xfffffff0 },
-	{ 0x002060b0, 0xfffffff0 },
-	{ 0x0143bca0, 0xfffffff0 },
-	{ 0x0143bcb0, 0xfffffff0 },
+	{ PHY_ID_BCM5411, 0xfffffff0 },
+	{ PHY_ID_BCM5421, 0xfffffff0 },
+	{ PHY_ID_BCM5461, 0xfffffff0 },
+	{ PHY_ID_BCM5464, 0xfffffff0 },
+	{ PHY_ID_BCM5481, 0xfffffff0 },
+	{ PHY_ID_BCM5482, 0xfffffff0 },
 	{ PHY_ID_BCM50610, 0xfffffff0 },
 	{ PHY_ID_BCM50610M, 0xfffffff0 },
 	{ PHY_ID_BCM57780, 0xfffffff0 },
 	{ PHY_ID_BCMAC131, 0xfffffff0 },
+	{ PHY_ID_BCM5241, 0xfffffff0 },
 	{ }
 };
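Editor's note: converting broadcom_tbl to the PHY_ID_* constants keeps each entry a plain { id, mask } pair of struct mdio_device_id; such a table is normally exported with MODULE_DEVICE_TABLE(mdio, ...) so the module can be autoloaded from the ID read off the MDIO bus. A minimal hedged sketch with a made-up PHY ID:

static struct mdio_device_id foo_phy_tbl[] = {
	/* { 32-bit PHY identifier, mask of bits that must match } */
	{ 0x00112230, 0xfffffff0 },	/* hypothetical PHY, any revision */
	{ }				/* sentinel */
};

MODULE_DEVICE_TABLE(mdio, foo_phy_tbl);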
 
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index f443d43..bd12ba9 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -85,7 +85,7 @@
 	return 0;
 }
 
-static int __init octeon_mdiobus_probe(struct platform_device *pdev)
+static int __devinit octeon_mdiobus_probe(struct platform_device *pdev)
 {
 	struct octeon_mdiobus *bus;
 	union cvmx_smix_en smi_en;
@@ -143,7 +143,7 @@
 	return err;
 }
 
-static int __exit octeon_mdiobus_remove(struct platform_device *pdev)
+static int __devexit octeon_mdiobus_remove(struct platform_device *pdev)
 {
 	struct octeon_mdiobus *bus;
 	union cvmx_smix_en smi_en;
@@ -163,7 +163,7 @@
 		.owner		= THIS_MODULE,
 	},
 	.probe		= octeon_mdiobus_probe,
-	.remove		= __exit_p(octeon_mdiobus_remove),
+	.remove		= __devexit_p(octeon_mdiobus_remove),
 };
 
 void octeon_mdiobus_force_mod_depencency(void)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0692f75..8bb7db6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -12,7 +12,8 @@
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  *
- * Support : ksz9021 , vsc8201, ks8001
+ * Support : ksz9021 1000/100/10 phy from Micrel
+ *		ks8001, ks8737, ks8721, ks8041, ks8051 100/10 phy
  */
 
 #include <linux/kernel.h>
@@ -20,37 +21,146 @@
 #include <linux/phy.h>
 
 #define	PHY_ID_KSZ9021			0x00221611
-#define	PHY_ID_VSC8201			0x000FC413
+#define	PHY_ID_KS8737			0x00221720
+#define	PHY_ID_KS8041			0x00221510
+#define	PHY_ID_KS8051			0x00221550
+/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
 #define	PHY_ID_KS8001			0x0022161A
 
+/* general Interrupt control/status reg in vendor specific block. */
+#define MII_KSZPHY_INTCS			0x1B
+#define	KSZPHY_INTCS_JABBER			(1 << 15)
+#define	KSZPHY_INTCS_RECEIVE_ERR		(1 << 14)
+#define	KSZPHY_INTCS_PAGE_RECEIVE		(1 << 13)
+#define	KSZPHY_INTCS_PARELLEL			(1 << 12)
+#define	KSZPHY_INTCS_LINK_PARTNER_ACK		(1 << 11)
+#define	KSZPHY_INTCS_LINK_DOWN			(1 << 10)
+#define	KSZPHY_INTCS_REMOTE_FAULT		(1 << 9)
+#define	KSZPHY_INTCS_LINK_UP			(1 << 8)
+#define	KSZPHY_INTCS_ALL			(KSZPHY_INTCS_LINK_UP |\
+						KSZPHY_INTCS_LINK_DOWN)
+
+/* general PHY control reg in vendor specific block. */
+#define	MII_KSZPHY_CTRL			0x1F
+/* bitmap of PHY register to set interrupt mode */
+#define KSZPHY_CTRL_INT_ACTIVE_HIGH		(1 << 9)
+#define KSZ9021_CTRL_INT_ACTIVE_HIGH		(1 << 14)
+#define KS8737_CTRL_INT_ACTIVE_HIGH		(1 << 14)
+
+static int kszphy_ack_interrupt(struct phy_device *phydev)
+{
+	/* bit[7..0] int status, which is a read and clear register. */
+	int rc;
+
+	rc = phy_read(phydev, MII_KSZPHY_INTCS);
+
+	return (rc < 0) ? rc : 0;
+}
+
+static int kszphy_set_interrupt(struct phy_device *phydev)
+{
+	int temp;
+	temp = (PHY_INTERRUPT_ENABLED == phydev->interrupts) ?
+		KSZPHY_INTCS_ALL : 0;
+	return phy_write(phydev, MII_KSZPHY_INTCS, temp);
+}
+
+static int kszphy_config_intr(struct phy_device *phydev)
+{
+	int temp, rc;
+
+	/* set the interrupt pin active low */
+	temp = phy_read(phydev, MII_KSZPHY_CTRL);
+	temp &= ~KSZPHY_CTRL_INT_ACTIVE_HIGH;
+	phy_write(phydev, MII_KSZPHY_CTRL, temp);
+	rc = kszphy_set_interrupt(phydev);
+	return rc < 0 ? rc : 0;
+}
+
+static int ksz9021_config_intr(struct phy_device *phydev)
+{
+	int temp, rc;
+
+	/* set the interrupt pin active low */
+	temp = phy_read(phydev, MII_KSZPHY_CTRL);
+	temp &= ~KSZ9021_CTRL_INT_ACTIVE_HIGH;
+	phy_write(phydev, MII_KSZPHY_CTRL, temp);
+	rc = kszphy_set_interrupt(phydev);
+	return rc < 0 ? rc : 0;
+}
+
+static int ks8737_config_intr(struct phy_device *phydev)
+{
+	int temp, rc;
+
+	/* set the interrupt pin active low */
+	temp = phy_read(phydev, MII_KSZPHY_CTRL);
+	temp &= ~KS8737_CTRL_INT_ACTIVE_HIGH;
+	phy_write(phydev, MII_KSZPHY_CTRL, temp);
+	rc = kszphy_set_interrupt(phydev);
+	return rc < 0 ? rc : 0;
+}
 
 static int kszphy_config_init(struct phy_device *phydev)
 {
 	return 0;
 }
 
-
-static struct phy_driver ks8001_driver = {
-	.phy_id		= PHY_ID_KS8001,
-	.name		= "Micrel KS8001",
+static struct phy_driver ks8737_driver = {
+	.phy_id		= PHY_ID_KS8737,
 	.phy_id_mask	= 0x00fffff0,
-	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_POLL,
+	.name		= "Micrel KS8737",
+	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
+	.ack_interrupt	= kszphy_ack_interrupt,
+	.config_intr	= ks8737_config_intr,
 	.driver		= { .owner = THIS_MODULE,},
 };
 
-static struct phy_driver vsc8201_driver = {
-	.phy_id		= PHY_ID_VSC8201,
-	.name		= "Micrel VSC8201",
+static struct phy_driver ks8041_driver = {
+	.phy_id		= PHY_ID_KS8041,
 	.phy_id_mask	= 0x00fffff0,
-	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_POLL,
+	.name		= "Micrel KS8041",
+	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
+				| SUPPORTED_Asym_Pause),
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
+	.ack_interrupt	= kszphy_ack_interrupt,
+	.config_intr	= kszphy_config_intr,
+	.driver		= { .owner = THIS_MODULE,},
+};
+
+static struct phy_driver ks8051_driver = {
+	.phy_id		= PHY_ID_KS8051,
+	.phy_id_mask	= 0x00fffff0,
+	.name		= "Micrel KS8051",
+	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
+				| SUPPORTED_Asym_Pause),
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.config_init	= kszphy_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= kszphy_ack_interrupt,
+	.config_intr	= kszphy_config_intr,
+	.driver		= { .owner = THIS_MODULE,},
+};
+
+static struct phy_driver ks8001_driver = {
+	.phy_id		= PHY_ID_KS8001,
+	.name		= "Micrel KS8001 or KS8721",
+	.phy_id_mask	= 0x00fffff0,
+	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.config_init	= kszphy_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= kszphy_ack_interrupt,
+	.config_intr	= kszphy_config_intr,
 	.driver		= { .owner = THIS_MODULE,},
 };
 
@@ -58,11 +168,14 @@
 	.phy_id		= PHY_ID_KSZ9021,
 	.phy_id_mask	= 0x000fff10,
 	.name		= "Micrel KSZ9021 Gigabit PHY",
-	.features	= PHY_GBIT_FEATURES | SUPPORTED_Pause,
-	.flags		= PHY_POLL,
+	.features	= (PHY_GBIT_FEATURES | SUPPORTED_Pause
+				| SUPPORTED_Asym_Pause),
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
+	.ack_interrupt	= kszphy_ack_interrupt,
+	.config_intr	= ksz9021_config_intr,
 	.driver		= { .owner = THIS_MODULE, },
 };
 
@@ -73,17 +186,29 @@
 	ret = phy_driver_register(&ks8001_driver);
 	if (ret)
 		goto err1;
-	ret = phy_driver_register(&vsc8201_driver);
-	if (ret)
-		goto err2;
 
 	ret = phy_driver_register(&ksz9021_driver);
 	if (ret)
+		goto err2;
+
+	ret = phy_driver_register(&ks8737_driver);
+	if (ret)
 		goto err3;
+	ret = phy_driver_register(&ks8041_driver);
+	if (ret)
+		goto err4;
+	ret = phy_driver_register(&ks8051_driver);
+	if (ret)
+		goto err5;
+
 	return 0;
 
+err5:
+	phy_driver_unregister(&ks8041_driver);
+err4:
+	phy_driver_unregister(&ks8737_driver);
 err3:
-	phy_driver_unregister(&vsc8201_driver);
+	phy_driver_unregister(&ksz9021_driver);
 err2:
 	phy_driver_unregister(&ks8001_driver);
 err1:
@@ -93,8 +218,10 @@
 static void __exit ksphy_exit(void)
 {
 	phy_driver_unregister(&ks8001_driver);
-	phy_driver_unregister(&vsc8201_driver);
+	phy_driver_unregister(&ks8737_driver);
 	phy_driver_unregister(&ksz9021_driver);
+	phy_driver_unregister(&ks8041_driver);
+	phy_driver_unregister(&ks8051_driver);
 }
 
 module_init(ksphy_init);
@@ -106,8 +233,10 @@
 
 static struct mdio_device_id micrel_tbl[] = {
 	{ PHY_ID_KSZ9021, 0x000fff10 },
-	{ PHY_ID_VSC8201, 0x00fffff0 },
 	{ PHY_ID_KS8001, 0x00fffff0 },
+	{ PHY_ID_KS8737, 0x00fffff0 },
+	{ PHY_ID_KS8041, 0x00fffff0 },
+	{ PHY_ID_KS8051, 0x00fffff0 },
 	{ }
 };
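Editor's note: the micrel rewrite above moves these PHYs from pure polling to interrupt support by filling in the two callbacks phylib expects — .ack_interrupt clears whatever is pending (here a read-to-clear status register) and .config_intr enables or disables the sources according to phydev->interrupts. A hedged sketch against a hypothetical vendor register layout, not the Micrel one:

#define FOO_INT_STATUS	0x1b			/* read-to-clear, assumed */
#define FOO_INT_ENABLE	0x1c			/* source enable bits, assumed */
#define FOO_INT_LINK	((1 << 8) | (1 << 10))	/* link up/down, assumed */

static int foo_ack_interrupt(struct phy_device *phydev)
{
	int rc = phy_read(phydev, FOO_INT_STATUS);

	return (rc < 0) ? rc : 0;
}

static int foo_config_intr(struct phy_device *phydev)
{
	u16 mask = (phydev->interrupts == PHY_INTERRUPT_ENABLED) ?
			FOO_INT_LINK : 0;

	return phy_write(phydev, FOO_INT_ENABLE, mask);
}

static struct phy_driver foo_phy_driver = {
	.phy_id		= 0x00112230,		/* made up */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Foo 10/100 PHY",
	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.ack_interrupt	= foo_ack_interrupt,
	.config_intr	= foo_config_intr,
	.driver		= { .owner = THIS_MODULE },
};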
 
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 1b2c291..e7b4187 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -69,7 +69,6 @@
 
 #define MPHDRLEN	6	/* multilink protocol header length */
 #define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
-#define MIN_FRAG_SIZE	64
 
 /*
  * An instance of /dev/ppp can be associated with either a ppp
@@ -539,14 +538,9 @@
 	}
 
 	len = uprog.len * sizeof(struct sock_filter);
-	code = kmalloc(len, GFP_KERNEL);
-	if (code == NULL)
-		return -ENOMEM;
-
-	if (copy_from_user(code, uprog.filter, len)) {
-		kfree(code);
-		return -EFAULT;
-	}
+	code = memdup_user(uprog.filter, len);
+	if (IS_ERR(code))
+		return PTR_ERR(code);
 
 	err = sk_chk_filter(code, uprog.len);
 	if (err) {
@@ -1933,9 +1927,9 @@
 	/* If the queue is getting long, don't wait any longer for packets
 	   before the start of the queue. */
 	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
-		struct sk_buff *skb = skb_peek(&ppp->mrq);
-		if (seq_before(ppp->minseq, skb->sequence))
-			ppp->minseq = skb->sequence;
+		struct sk_buff *mskb = skb_peek(&ppp->mrq);
+		if (seq_before(ppp->minseq, mskb->sequence))
+			ppp->minseq = mskb->sequence;
 	}
 
 	/* Pull completed packets off the queue and receive them. */
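Editor's note: the ppp_generic filter-copy change above swaps an open-coded kmalloc() + copy_from_user() + error-unwind sequence for memdup_user(), which performs those steps itself and reports failure as an ERR_PTR value rather than NULL. The resulting calling pattern, in general form (hypothetical function and names):

static int foo_copy_config(const void __user *uptr, size_t len)
{
	void *buf;

	/* allocation and copy folded into one call; failures come back as
	 * ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT), never as NULL */
	buf = memdup_user(uptr, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... validate and consume buf ... */

	kfree(buf);
	return 0;
}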
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 805b64d..344ef33 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -89,7 +89,6 @@
 #define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
 #define PPPOE_HASH_MASK	(PPPOE_HASH_SIZE - 1)
 
-static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
 static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
 
 static const struct proto_ops pppoe_ops;
@@ -949,7 +948,7 @@
 
 abort:
 	kfree_skb(skb);
-	return 1;
+	return 0;
 }
 
 /************************************************************************
diff --git a/drivers/net/ps3_gelic_wireless.h b/drivers/net/ps3_gelic_wireless.h
index 0a88b53..f7e51b7 100644
--- a/drivers/net/ps3_gelic_wireless.h
+++ b/drivers/net/ps3_gelic_wireless.h
@@ -74,7 +74,7 @@
 	u16 bss_type;    /* infra or adhoc */
 	u16 auth_method; /* shared key or open */
 	u16 op_mode; /* B/G */
-} __attribute__((packed));
+} __packed;
 
 
 /* for GELIC_EURUS_CMD_WEP_CFG */
@@ -88,7 +88,7 @@
 	/* all fields are big endian */
 	u16 security;
 	u8 key[4][16];
-} __attribute__((packed));
+} __packed;
 
 /* for GELIC_EURUS_CMD_WPA_CFG */
 enum gelic_eurus_wpa_security {
@@ -120,7 +120,7 @@
 	u16 security;
 	u16 psk_type; /* psk key encoding type */
 	u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */
-} __attribute__((packed));
+} __packed;
 
 /* for GELIC_EURUS_CMD_{START,GET}_SCAN */
 enum gelic_eurus_scan_capability {
@@ -171,7 +171,7 @@
 	__be32 reserved3;
 	__be32 reserved4;
 	u8 elements[0]; /* ie */
-} __attribute__ ((packed));
+} __packed;
 
 /* the hypervisor returns bbs up to 16 */
 #define GELIC_EURUS_MAX_SCAN  (16)
@@ -193,7 +193,7 @@
 struct gelic_eurus_rssi_info {
 	/* big endian */
 	__be16 rssi;
-} __attribute__ ((packed));
+} __packed;
 
 
 /* for 'stat' member of gelic_wl_info */
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 896d40d..60ea7cb 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 2
-#define QLCNIC_LINUX_VERSIONID  "5.0.2"
+#define _QLCNIC_LINUX_SUBVERSION 6
+#define QLCNIC_LINUX_VERSIONID  "5.0.6"
 #define QLCNIC_DRV_IDC_VER  0x01
 
 #define QLCNIC_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
@@ -68,6 +68,7 @@
 #define QLCNIC_DECODE_VERSION(v) \
 	QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
 
+#define QLCNIC_MIN_FW_VERSION     QLCNIC_VERSION_CODE(4, 4, 2)
 #define QLCNIC_NUM_FLASH_SECTORS (64)
 #define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
 #define QLCNIC_FLASH_TOTAL_SIZE  (QLCNIC_NUM_FLASH_SECTORS \
@@ -112,8 +113,10 @@
 #define TX_UDPV6_PKT	0x0c
 
 /* Tx defines */
-#define MAX_BUFFERS_PER_CMD	32
-#define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + 4)
+#define MAX_TSO_HEADER_DESC	2
+#define MGMT_CMD_DESC_RESV	4
+#define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+							+ MGMT_CMD_DESC_RESV)
 #define QLCNIC_MAX_TX_TIMEOUTS	2
 
 /*
@@ -197,8 +200,7 @@
 
 	__le64 addr_buffer4;
 
-	__le32 reserved2;
-	__le16 reserved;
+	u8 eth_addr[ETH_ALEN];
 	__le16 vlan_TCI;
 
 } __attribute__ ((aligned(64)));
@@ -315,6 +317,8 @@
 #define QLCNIC_BRDTYPE_P3_10G_XFP	0x0032
 #define QLCNIC_BRDTYPE_P3_10G_TP	0x0080
 
+#define QLCNIC_MSIX_TABLE_OFFSET	0x44
+
 /* Flash memory map */
 #define QLCNIC_BRDCFG_START	0x4000		/* board config */
 #define QLCNIC_BOOTLD_START	0x10000		/* bootld */
@@ -367,7 +371,7 @@
  */
 struct qlcnic_cmd_buffer {
 	struct sk_buff *skb;
-	struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+	struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
 	u32 frag_count;
 };
 
@@ -377,7 +381,6 @@
 	struct sk_buff *skb;
 	u64 dma;
 	u16 ref_handle;
-	u16 state;
 };
 
 /* Board types */
@@ -419,7 +422,6 @@
 	u64  xmit_on;
 	u64  xmit_off;
 	u64  skb_alloc_failure;
-	u64  null_skb;
 	u64  null_rxbuf;
 	u64  rx_dma_map_error;
 	u64  tx_dma_map_error;
@@ -542,7 +544,17 @@
 #define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS	0x0000001c
 #define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES	0x0000001d
 #define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD	0x0000001e
-#define QLCNIC_CDRP_CMD_MAX			0x0000001f
+#define QLCNIC_CDRP_CMD_MAC_ADDRESS		0x0000001f
+
+#define QLCNIC_CDRP_CMD_GET_PCI_INFO		0x00000020
+#define QLCNIC_CDRP_CMD_GET_NIC_INFO		0x00000021
+#define QLCNIC_CDRP_CMD_SET_NIC_INFO		0x00000022
+#define QLCNIC_CDRP_CMD_RESET_NPAR		0x00000023
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY	0x00000024
+#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH		0x00000025
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS	0x00000026
+#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING	0x00000027
+#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH	0x00000028
 
 #define QLCNIC_RCODE_SUCCESS		0
 #define QLCNIC_RCODE_TIMEOUT		17
@@ -556,12 +568,12 @@
 #define QLCNIC_CAP0_LSO 		(1 << 6)
 #define QLCNIC_CAP0_JUMBO_CONTIGUOUS	(1 << 7)
 #define QLCNIC_CAP0_LRO_CONTIGUOUS	(1 << 8)
+#define QLCNIC_CAP0_VALIDOFF		(1 << 11)
 
 /*
  * Context state
  */
-#define QLCHAL_VERSION	1
-
+#define QLCNIC_HOST_CTX_STATE_FREED	0
 #define QLCNIC_HOST_CTX_STATE_ACTIVE	2
 
 /*
@@ -592,9 +604,10 @@
 	__le32 sds_ring_offset;	/* Offset to SDS config */
 	__le16 num_rds_rings;	/* Count of RDS rings */
 	__le16 num_sds_rings;	/* Count of SDS rings */
-	__le16 rsvd1;		/* Padding */
-	__le16 rsvd2;		/* Padding */
-	u8  reserved[128]; 	/* reserve space for future expansion*/
+	__le16 valid_field_offset;
+	u8  txrx_sds_binding;
+	u8  msix_handler;
+	u8  reserved[128];      /* reserve space for future expansion*/
 	/* MUST BE 64-bit aligned.
 	   The following is packed:
 	   - N hostrq_rds_rings
@@ -881,12 +894,14 @@
 #define QLCNIC_LRO_ENABLED		0x08
 #define QLCNIC_BRIDGE_ENABLED       	0X10
 #define QLCNIC_DIAG_ENABLED		0x20
+#define QLCNIC_ESWITCH_ENABLED		0x40
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
 	((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
 
 #define MSIX_ENTRIES_PER_ADAPTER	NUM_STS_DESC_RINGS
 #define QLCNIC_MSIX_TBL_SPACE		8192
 #define QLCNIC_PCI_REG_MSIX_TBL 	0x44
+#define QLCNIC_MSIX_TBL_PGSIZE		4096
 
 #define QLCNIC_NETDEV_WEIGHT	128
 #define QLCNIC_ADAPTER_UP_MAGIC 777
@@ -919,11 +934,11 @@
 	u8 rx_csum;
 	u8 portnum;
 	u8 physical_port;
+	u8 reset_context;
 
 	u8 mc_enabled;
 	u8 max_mc_count;
 	u8 rss_supported;
-	u8 rsrvd1;
 	u8 fw_wait_cnt;
 	u8 fw_fail_cnt;
 	u8 tx_timeo_cnt;
@@ -940,6 +955,13 @@
 	u16 link_autoneg;
 	u16 module_type;
 
+	u16 op_mode;
+	u16 switch_mode;
+	u16 max_tx_ques;
+	u16 max_rx_ques;
+	u16 max_mtu;
+
+	u32 fw_hal_version;
 	u32 capabilities;
 	u32 flags;
 	u32 irq;
@@ -948,18 +970,22 @@
 	u32 int_vec_bit;
 	u32 heartbit;
 
+	u8 max_mac_filters;
 	u8 dev_state;
 	u8 diag_test;
 	u8 diag_cnt;
 	u8 reset_ack_timeo;
 	u8 dev_init_timeo;
-	u8 rsrd1;
 	u16 msg_enable;
 
 	u8 mac_addr[ETH_ALEN];
 
 	u64 dev_rst_time;
 
+	struct qlcnic_npar_info *npars;
+	struct qlcnic_eswitch *eswitch;
+	struct qlcnic_nic_template *nic_ops;
+
 	struct qlcnic_adapter_stats stats;
 
 	struct qlcnic_recv_context recv_ctx;
@@ -974,8 +1000,6 @@
 
 	struct delayed_work fw_work;
 
-	struct work_struct  tx_timeout_task;
-
 	struct qlcnic_nic_intr_coalesce coal;
 
 	unsigned long state;
@@ -984,6 +1008,122 @@
 	const struct firmware *fw;
 };
 
+struct qlcnic_info {
+	__le16	pci_func;
+	__le16	op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
+	__le16	phys_port;
+	__le16	switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
+
+	__le32	capabilities;
+	u8	max_mac_filters;
+	u8	reserved1;
+	__le16	max_mtu;
+
+	__le16	max_tx_ques;
+	__le16	max_rx_ques;
+	__le16	min_tx_bw;
+	__le16	max_tx_bw;
+	u8	reserved2[104];
+};
+
+struct qlcnic_pci_info {
+	__le16	id; /* pci function id */
+	__le16	active; /* 1 = Enabled */
+	__le16	type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
+	__le16	default_port; /* default port number */
+
+	__le16	tx_min_bw; /* Multiple of 100mbpc */
+	__le16	tx_max_bw;
+	__le16	reserved1[2];
+
+	u8	mac[ETH_ALEN];
+	u8	reserved2[106];
+};
+
+struct qlcnic_npar_info {
+	u16	vlan_id;
+	u8	phy_port;
+	u8	type;
+	u8	active;
+	u8	enable_pm;
+	u8	dest_npar;
+	u8	host_vlan_tag;
+	u8	promisc_mode;
+	u8	discard_tagged;
+	u8	mac_learning;
+};
+struct qlcnic_eswitch {
+	u8	port;
+	u8	active_vports;
+	u8	active_vlans;
+	u8	active_ucast_filters;
+	u8	max_ucast_filters;
+	u8	max_active_vlans;
+
+	u32	flags;
+#define QLCNIC_SWITCH_ENABLE		BIT_1
+#define QLCNIC_SWITCH_VLAN_FILTERING	BIT_2
+#define QLCNIC_SWITCH_PROMISC_MODE	BIT_3
+#define QLCNIC_SWITCH_PORT_MIRRORING	BIT_4
+};
+
+
+/* Return codes for Error handling */
+#define QL_STATUS_INVALID_PARAM	-1
+
+#define MAX_BW			10000
+#define MIN_BW			100
+#define MAX_VLAN_ID		4095
+#define MIN_VLAN_ID		2
+#define MAX_TX_QUEUES		1
+#define MAX_RX_QUEUES		4
+#define DEFAULT_MAC_LEARN	1
+
+#define IS_VALID_VLAN(vlan)	(vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
+#define IS_VALID_BW(bw)		(bw >= MIN_BW && bw <= MAX_BW \
+							&& (bw % 100) == 0)
+#define IS_VALID_TX_QUEUES(que)	(que > 0 && que <= MAX_TX_QUEUES)
+#define IS_VALID_RX_QUEUES(que)	(que > 0 && que <= MAX_RX_QUEUES)
+#define IS_VALID_MODE(mode)	(mode == 0 || mode == 1)
+
+struct qlcnic_pci_func_cfg {
+	u16	func_type;
+	u16	min_bw;
+	u16	max_bw;
+	u16	port_num;
+	u8	pci_func;
+	u8	func_state;
+	u8	def_mac_addr[6];
+};
+
+struct qlcnic_npar_func_cfg {
+	u32	fw_capab;
+	u16	port_num;
+	u16	min_bw;
+	u16	max_bw;
+	u16	max_tx_queues;
+	u16	max_rx_queues;
+	u8	pci_func;
+	u8	op_mode;
+};
+
+struct qlcnic_pm_func_cfg {
+	u8	pci_func;
+	u8	action;
+	u8	dest_npar;
+	u8	reserved[5];
+};
+
+struct qlcnic_esw_func_cfg {
+	u16	vlan_id;
+	u8	pci_func;
+	u8	host_vlan_tag;
+	u8	promisc_mode;
+	u8	discard_tagged;
+	u8	mac_learning;
+	u8	reserved;
+};
+
 int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
 int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
 
@@ -1031,13 +1171,13 @@
 int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
 
 /* Functions from qlcnic_init.c */
-int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
 int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
 int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
 void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
 void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
 int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
 int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
+int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
 
 int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
 int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
@@ -1050,6 +1190,10 @@
 int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
 void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
 
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter);
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
 void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
 void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
 
@@ -1070,13 +1214,14 @@
 int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
 int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
 int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
-int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
 int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
 void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
 		struct qlcnic_host_tx_ring *tx_ring);
-int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac);
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac);
 void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
 int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
+void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
 
 /* Functions from qlcnic_main.c */
 int qlcnic_reset_context(struct qlcnic_adapter *);
@@ -1088,6 +1233,25 @@
 netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
 
+/* Management functions */
+int qlcnic_set_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_reset_partition(struct qlcnic_adapter *, u8);
+
+/*  eSwitch management functions */
+int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8,
+				struct qlcnic_eswitch *);
+int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8,
+				struct qlcnic_eswitch *);
+int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8);
+int qlcnic_config_switch_port(struct qlcnic_adapter *, u8, int, u8, u8,
+			u8, u8, u16);
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
+extern int qlcnic_config_tso;
+
 /*
  * QLOGIC Board information
  */
@@ -1131,6 +1295,15 @@
 
 extern const struct ethtool_ops qlcnic_ethtool_ops;
 
+struct qlcnic_nic_template {
+	int (*get_mac_addr) (struct qlcnic_adapter *, u8*);
+	int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
+	int (*config_led) (struct qlcnic_adapter *, u32, u32);
+	int (*set_ilb_mode) (struct qlcnic_adapter *);
+	void (*clear_ilb_mode) (struct qlcnic_adapter *);
+	int (*start_firmware) (struct qlcnic_adapter *);
+};
+
 #define QLCDB(adapter, lvl, _fmt, _args...) do {	\
 	if (NETIF_MSG_##lvl & adapter->msg_enable)	\
 		printk(KERN_INFO "%s: %s: " _fmt,	\
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index c2c1f5c..cdd44b4 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -88,12 +88,12 @@
 
 	if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
 		if (qlcnic_issue_cmd(adapter,
-				adapter->ahw.pci_func,
-				QLCHAL_VERSION,
-				recv_ctx->context_id,
-				mtu,
-				0,
-				QLCNIC_CDRP_CMD_SET_MTU)) {
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			recv_ctx->context_id,
+			mtu,
+			0,
+			QLCNIC_CDRP_CMD_SET_MTU)) {
 
 			dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
 			return -EIO;
@@ -121,7 +121,7 @@
 
 	int i, nrds_rings, nsds_rings;
 	size_t rq_size, rsp_size;
-	u32 cap, reg, val;
+	u32 cap, reg, val, reg2;
 	int err;
 
 	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -152,9 +152,14 @@
 
 	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
 
-	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
+	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
+						| QLCNIC_CAP0_VALIDOFF);
 	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
 
+	prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
+							 msix_handler);
+	prq->txrx_sds_binding = nsds_rings - 1;
+
 	prq->capabilities[0] = cpu_to_le32(cap);
 	prq->host_int_crb_mode =
 		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
@@ -175,6 +180,7 @@
 	for (i = 0; i < nrds_rings; i++) {
 
 		rds_ring = &recv_ctx->rds_rings[i];
+		rds_ring->producer = 0;
 
 		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
 		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
@@ -188,6 +194,8 @@
 	for (i = 0; i < nsds_rings; i++) {
 
 		sds_ring = &recv_ctx->sds_rings[i];
+		sds_ring->consumer = 0;
+		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
 
 		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
 		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
@@ -197,7 +205,7 @@
 	phys_addr = hostrq_phys_addr;
 	err = qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			(u32)(phys_addr >> 32),
 			(u32)(phys_addr & 0xffffffff),
 			rq_size,
@@ -216,8 +224,7 @@
 		rds_ring = &recv_ctx->rds_rings[i];
 
 		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
-		rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
-				QLCNIC_REG(reg - 0x200));
+		rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 + reg;
 	}
 
 	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -227,12 +234,10 @@
 		sds_ring = &recv_ctx->sds_rings[i];
 
 		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
-		sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
-				QLCNIC_REG(reg - 0x200));
+		reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
 
-		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
-		sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
-				QLCNIC_REG(reg - 0x200));
+		sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 + reg;
+		sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2;
 	}
 
 	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
@@ -253,7 +258,7 @@
 
 	if (qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			recv_ctx->context_id,
 			QLCNIC_DESTROY_CTX_RESET,
 			0,
@@ -262,6 +267,8 @@
 		dev_err(&adapter->pdev->dev,
 			"Failed to destroy rx ctx in firmware\n");
 	}
+
+	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
 }
 
 static int
@@ -278,6 +285,11 @@
 	dma_addr_t	rq_phys_addr, rsp_phys_addr;
 	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
 
+	/* reset host resources */
+	tx_ring->producer = 0;
+	tx_ring->sw_consumer = 0;
+	*(tx_ring->hw_consumer) = 0;
+
 	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
 	rq_addr = pci_alloc_consistent(adapter->pdev,
 		rq_size, &rq_phys_addr);
@@ -319,7 +331,7 @@
 	phys_addr = rq_phys_addr;
 	err = qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			(u32)(phys_addr >> 32),
 			((u32)phys_addr & 0xffffffff),
 			rq_size,
@@ -327,8 +339,7 @@
 
 	if (err == QLCNIC_RCODE_SUCCESS) {
 		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
-		tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
-				QLCNIC_REG(temp - 0x200));
+		tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 + temp;
 
 		adapter->tx_context_id =
 			le16_to_cpu(prsp->context_id);
@@ -351,7 +362,7 @@
 {
 	if (qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			adapter->tx_context_id,
 			QLCNIC_DESTROY_CTX_RESET,
 			0,
@@ -368,7 +379,7 @@
 
 	if (qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			reg,
 			0,
 			0,
@@ -385,7 +396,7 @@
 {
 	return qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			reg,
 			val,
 			0,
@@ -457,15 +468,6 @@
 		sds_ring->desc_head = (struct status_desc *)addr;
 	}
 
-
-	err = qlcnic_fw_cmd_create_rx_ctx(adapter);
-	if (err)
-		goto err_out_free;
-	err = qlcnic_fw_cmd_create_tx_ctx(adapter);
-	if (err)
-		goto err_out_free;
-
-	set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
 	return 0;
 
 err_out_free:
@@ -473,6 +475,36 @@
 	return err;
 }
 
+
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
+{
+	int err;
+
+	err = qlcnic_fw_cmd_create_rx_ctx(adapter);
+	if (err)
+		return err;
+
+	err = qlcnic_fw_cmd_create_tx_ctx(adapter);
+	if (err) {
+		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+		return err;
+	}
+
+	set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+	return 0;
+}
+
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
+{
+	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
+		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+		qlcnic_fw_cmd_destroy_tx_ctx(adapter);
+
+		/* Allow dma queues to drain after context reset */
+		msleep(20);
+	}
+}
+
 void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_recv_context *recv_ctx;
@@ -481,15 +513,6 @@
 	struct qlcnic_host_tx_ring *tx_ring;
 	int ring;
 
-
-	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
-		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
-		qlcnic_fw_cmd_destroy_tx_ctx(adapter);
-
-		/* Allow dma queues to drain after context reset */
-		msleep(20);
-	}
-
 	recv_ctx = &adapter->recv_ctx;
 
 	tx_ring = adapter->tx_ring;
@@ -533,3 +556,428 @@
 	}
 }
 
+/* Set MAC address of a NIC partition */
+int qlcnic_set_mac_address(struct qlcnic_adapter *adapter, u8* mac)
+{
+	int err = 0;
+	u32 arg1, arg2, arg3;
+
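+	/* the "set" request carries BIT_9 in arg1; the "get" request below uses BIT_8 */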
+	arg1 = adapter->ahw.pci_func | BIT_9;
+	arg2 = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	arg3 = mac[4] | (mac[5] << 16);
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			arg2,
+			arg3,
+			QLCNIC_CDRP_CMD_MAC_ADDRESS);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to set mac address, err=%d\n", err);
+		err = -EIO;
+	}
+
+	return err;
+}
+
+/* Get MAC address of a NIC partition */
+int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+{
+	int err;
+	u32 arg1;
+
+	arg1 = adapter->ahw.pci_func | BIT_8;
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_MAC_ADDRESS);
+
+	if (err == QLCNIC_RCODE_SUCCESS)
+		qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
+				QLCNIC_ARG2_CRB_OFFSET, 0, mac);
+	else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get mac address, err=%d\n", err);
+		err = -EIO;
+	}
+
+	return err;
+}
+
+/* Get info of a NIC partition */
+int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
+				struct qlcnic_info *npar_info, u8 func_id)
+{
+	int	err;
+	dma_addr_t nic_dma_t;
+	struct qlcnic_info *nic_info;
+	void *nic_info_addr;
+	size_t	nic_size = sizeof(struct qlcnic_info);
+
+	nic_info_addr = pci_alloc_consistent(adapter->pdev,
+		nic_size, &nic_dma_t);
+	if (!nic_info_addr)
+		return -ENOMEM;
+	memset(nic_info_addr, 0, nic_size);
+
+	nic_info = (struct qlcnic_info *) nic_info_addr;
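+	/* the firmware fills this DMA buffer; fields are converted from little endian below */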
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			MSD(nic_dma_t),
+			LSD(nic_dma_t),
+			((func_id << 16) | nic_size),
+			QLCNIC_CDRP_CMD_GET_NIC_INFO);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
+		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
+		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
+		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
+		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
+		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
+		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
+
+		dev_info(&adapter->pdev->dev,
+			"phy port: %d switch_mode: %d,\n"
+			"\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
+			"\tmax_tx_bw: 0x%x max_mtu: 0x%x, capabilities: 0x%x\n",
+			npar_info->phys_port, npar_info->switch_mode,
+			npar_info->max_tx_ques, npar_info->max_rx_ques,
+			npar_info->min_tx_bw, npar_info->max_tx_bw,
+			npar_info->max_mtu, npar_info->capabilities);
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get nic info, err=%d\n", err);
+		err = -EIO;
+	}
+
+	pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
+	return err;
+}
+
+/* Configure a NIC partition */
+int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
+{
+	int err = -EIO;
+	dma_addr_t nic_dma_t;
+	void *nic_info_addr;
+	struct qlcnic_info *nic_info;
+	size_t nic_size = sizeof(struct qlcnic_info);
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size,
+			&nic_dma_t);
+	if (!nic_info_addr)
+		return -ENOMEM;
+
+	memset(nic_info_addr, 0, nic_size);
+	nic_info = (struct qlcnic_info *)nic_info_addr;
+
+	nic_info->pci_func = cpu_to_le16(nic->pci_func);
+	nic_info->op_mode = cpu_to_le16(nic->op_mode);
+	nic_info->phys_port = cpu_to_le16(nic->phys_port);
+	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
+	nic_info->capabilities = cpu_to_le32(nic->capabilities);
+	nic_info->max_mac_filters = nic->max_mac_filters;
+	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
+	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
+	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
+	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			MSD(nic_dma_t),
+			LSD(nic_dma_t),
+			((nic->pci_func << 16) | nic_size),
+			QLCNIC_CDRP_CMD_SET_NIC_INFO);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to set nic info, err=%d\n", err);
+		err = -EIO;
+	}
+
+	pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
+	return err;
+}
+
+/* Get PCI Info of a partition */
+int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
+				struct qlcnic_pci_info *pci_info)
+{
+	int err = 0, i;
+	dma_addr_t pci_info_dma_t;
+	struct qlcnic_pci_info *npar;
+	void *pci_info_addr;
+	size_t npar_size = sizeof(struct qlcnic_pci_info);
+	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
+
+	pci_info_addr = pci_alloc_consistent(adapter->pdev, pci_size,
+			&pci_info_dma_t);
+	if (!pci_info_addr)
+		return -ENOMEM;
+	memset(pci_info_addr, 0, pci_size);
+
+	npar = (struct qlcnic_pci_info *) pci_info_addr;
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			MSD(pci_info_dma_t),
+			LSD(pci_info_dma_t),
+			pci_size,
+			QLCNIC_CDRP_CMD_GET_PCI_INFO);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
+			pci_info->id = le32_to_cpu(npar->id);
+			pci_info->active = le32_to_cpu(npar->active);
+			pci_info->type = le32_to_cpu(npar->type);
+			pci_info->default_port =
+				le32_to_cpu(npar->default_port);
+			pci_info->tx_min_bw =
+				le32_to_cpu(npar->tx_min_bw);
+			pci_info->tx_max_bw =
+				le32_to_cpu(npar->tx_max_bw);
+			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
+		}
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get PCI info, err=%d\n", err);
+		err = -EIO;
+	}
+
+	pci_free_consistent(adapter->pdev, pci_size, pci_info_addr,
+		pci_info_dma_t);
+	return err;
+}
+
+/* Reset a NIC partition */
+
+int qlcnic_reset_partition(struct qlcnic_adapter *adapter, u8 func_no)
+{
+	int err = -EIO;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			func_no,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_RESET_NPAR);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to issue reset partition, err=%d\n", err);
+		err = -EIO;
+	}
+
+	return err;
+}
+
+/* Get eSwitch Capabilities */
+int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port,
+					struct qlcnic_eswitch *eswitch)
+{
+	int err = -EIO;
+	u32 arg1, arg2;
+
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
+		return err;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			port,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+		arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+
+		eswitch->port = arg1 & 0xf;
+		eswitch->active_vports = LSB(arg2);
+		eswitch->max_ucast_filters = MSB(arg2);
+		eswitch->max_active_vlans = LSB(MSW(arg2));
+		if (arg1 & BIT_6)
+			eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
+		if (arg1 & BIT_7)
+			eswitch->flags |= QLCNIC_SWITCH_PROMISC_MODE;
+		if (arg1 & BIT_8)
+			eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get eswitch capabilities, err=%d\n", err);
+	}
+
+	return err;
+}
+
+/* Get current status of eswitch */
+int qlcnic_get_eswitch_status(struct qlcnic_adapter *adapter, u8 port,
+				struct qlcnic_eswitch *eswitch)
+{
+	int err = -EIO;
+	u32 arg1, arg2;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			port,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+		arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+
+		eswitch->port = arg1 & 0xf;
+		eswitch->active_vports = LSB(arg2);
+		eswitch->active_ucast_filters = MSB(arg2);
+		eswitch->active_vlans = LSB(MSW(arg2));
+		if (arg1 & BIT_6)
+			eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
+		if (arg1 & BIT_8)
+			eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
+
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get eswitch status, err=%d\n", err);
+	}
+
+	return err;
+}
+
+/* Enable/Disable eSwitch */
+int qlcnic_toggle_eswitch(struct qlcnic_adapter *adapter, u8 id, u8 enable)
+{
+	int err = -EIO;
+	u32 arg1, arg2;
+	struct qlcnic_eswitch *eswitch;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	eswitch = &adapter->eswitch[id];
+	if (!eswitch)
+		return err;
+
+	arg1 = eswitch->port | (enable ? BIT_4 : 0);
+	arg2 = eswitch->active_vports | (eswitch->max_ucast_filters << 8) |
+		(eswitch->max_active_vlans << 16);
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			arg2,
+			0,
+			QLCNIC_CDRP_CMD_TOGGLE_ESWITCH);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to %s eSwitch port %d\n",
+			enable ? "enable" : "disable", eswitch->port);
+		eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
+		err = -EIO;
+	} else {
+		eswitch->flags |= QLCNIC_SWITCH_ENABLE;
+		dev_info(&adapter->pdev->dev,
+			"%s eSwitch for port %d\n",
+			enable ? "Enabled" : "Disabled", eswitch->port);
+	}
+
+	return err;
+}
+
+/* Configure eSwitch for port mirroring */
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
+				u8 enable_mirroring, u8 pci_func)
+{
+	int err = -EIO;
+	u32 arg1;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
+		!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+		return err;
+
+	arg1 = id | (enable_mirroring ? BIT_4 : 0);
+	arg1 |= pci_func << 8;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to configure port mirroring for func %d on eSwitch %d\n",
+			pci_func, id);
+	} else {
+		dev_info(&adapter->pdev->dev,
+			"Configured eSwitch %d for port mirroring of func %d\n",
+			id, pci_func);
+	}
+
+	return err;
+}
+
+/* Configure eSwitch port */
+int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id,
+		int vlan_tagging, u8 discard_tagged, u8 promisc_mode,
+		u8 mac_learn, u8 pci_func, u16 vlan_id)
+{
+	int err = -EIO;
+	u32 arg1;
+	struct qlcnic_eswitch *eswitch;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	eswitch = &adapter->eswitch[id];
+	if (!(eswitch->flags & QLCNIC_SWITCH_ENABLE))
+		return err;
+
+	arg1 = eswitch->port | (discard_tagged ? BIT_4 : 0);
+	arg1 |= (promisc_mode ? BIT_6 : 0) | (mac_learn ? BIT_7 : 0);
+	arg1 |= pci_func << 8;
+	if (vlan_tagging)
+		arg1 |= BIT_5 | (vlan_id << 16);
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to configure eSwitch port %d\n", eswitch->port);
+	} else {
+		dev_info(&adapter->pdev->dev,
+			"Configured eSwitch for port %d\n", eswitch->port);
+	}
+
+	return err;
+}
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 3bd514e..f8e39e4 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -69,8 +69,6 @@
 		QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
 	{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
 		QLC_OFF(stats.skb_alloc_failure)},
-	{"null skb",
-		QLC_SIZEOF(stats.null_skb), QLC_OFF(stats.null_skb)},
 	{"null rxbuf",
 		QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
 	{"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
@@ -350,7 +348,7 @@
 	for (i = 0; diag_registers[i] != -1; i++)
 		regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
 
-	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
 		return;
 
 	regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
@@ -683,13 +681,13 @@
 	if (ret)
 		goto clear_it;
 
-	ret = qlcnic_set_ilb_mode(adapter);
+	ret = adapter->nic_ops->set_ilb_mode(adapter);
 	if (ret)
 		goto done;
 
 	ret = qlcnic_do_ilb_test(adapter);
 
-	qlcnic_clear_ilb_mode(adapter);
+	adapter->nic_ops->clear_ilb_mode(adapter);
 
 done:
 	qlcnic_diag_free_res(netdev, max_sds_rings);
@@ -715,7 +713,8 @@
 
 	adapter->diag_cnt = 0;
 	ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
-			QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011);
+			adapter->fw_hal_version, adapter->portnum,
+			0, 0, 0x00000011);
 	if (ret)
 		goto done;
 
@@ -834,7 +833,10 @@
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
 	int ret;
 
-	ret = qlcnic_config_led(adapter, 1, 0xf);
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+		return -EIO;
+
+	ret = adapter->nic_ops->config_led(adapter, 1, 0xf);
 	if (ret) {
 		dev_err(&adapter->pdev->dev,
 			"Failed to set LED blink state.\n");
@@ -843,7 +845,7 @@
 
 	msleep_interruptible(val * 1000);
 
-	ret = qlcnic_config_led(adapter, 0, 0xf);
+	ret = adapter->nic_ops->config_led(adapter, 0, 0xf);
 	if (ret) {
 		dev_err(&adapter->pdev->dev,
 			"Failed to reset LED blink state.\n");
@@ -905,7 +907,7 @@
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
 		return -EINVAL;
 
 	/*
@@ -981,12 +983,19 @@
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	int hw_lro;
 
+	if (data & ~ETH_FLAG_LRO)
+		return -EINVAL;
+
 	if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
 		return -EINVAL;
 
-	ethtool_op_set_flags(netdev, data);
-
-	hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0;
+	if (data & ETH_FLAG_LRO) {
+		hw_lro = QLCNIC_LRO_ENABLED;
+		netdev->features |= NETIF_F_LRO;
+	} else {
+		hw_lro = 0;
+		netdev->features &= ~NETIF_F_LRO;
+	}
 
 	if (qlcnic_config_hw_lro(adapter, hw_lro))
 		return -EIO;
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index ad9d167..15fc320 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -208,6 +208,39 @@
 	QLCNIC_HW_PX_MAP_CRB_PGR0
 };
 
+#define	BIT_0	0x1
+#define	BIT_1	0x2
+#define	BIT_2	0x4
+#define	BIT_3	0x8
+#define	BIT_4	0x10
+#define	BIT_5	0x20
+#define	BIT_6	0x40
+#define	BIT_7	0x80
+#define	BIT_8	0x100
+#define	BIT_9	0x200
+#define	BIT_10	0x400
+#define	BIT_11	0x800
+#define	BIT_12	0x1000
+#define	BIT_13	0x2000
+#define	BIT_14	0x4000
+#define	BIT_15	0x8000
+#define	BIT_16	0x10000
+#define	BIT_17	0x20000
+#define	BIT_18	0x40000
+#define	BIT_19	0x80000
+#define	BIT_20	0x100000
+#define	BIT_21	0x200000
+#define	BIT_22	0x400000
+#define	BIT_23	0x800000
+#define	BIT_24	0x1000000
+#define	BIT_25	0x2000000
+#define	BIT_26	0x4000000
+#define	BIT_27	0x8000000
+#define	BIT_28	0x10000000
+#define	BIT_29	0x20000000
+#define	BIT_30	0x40000000
+#define	BIT_31	0x80000000
+
 /*  This field defines CRB adr [31:20] of the agents */
 
 #define QLCNIC_HW_CRB_HUB_AGT_ADR_MN	\
@@ -668,10 +701,11 @@
 #define QLCNIC_CRB_DEV_REF_COUNT	(QLCNIC_CAM_RAM(0x138))
 #define QLCNIC_CRB_DEV_STATE		(QLCNIC_CAM_RAM(0x140))
 
-#define QLCNIC_CRB_DRV_STATE               (QLCNIC_CAM_RAM(0x144))
-#define QLCNIC_CRB_DRV_SCRATCH             (QLCNIC_CAM_RAM(0x148))
-#define QLCNIC_CRB_DEV_PARTITION_INFO      (QLCNIC_CAM_RAM(0x14c))
+#define QLCNIC_CRB_DRV_STATE		(QLCNIC_CAM_RAM(0x144))
+#define QLCNIC_CRB_DRV_SCRATCH		(QLCNIC_CAM_RAM(0x148))
+#define QLCNIC_CRB_DEV_PARTITION_INFO	(QLCNIC_CAM_RAM(0x14c))
 #define QLCNIC_CRB_DRV_IDC_VER		(QLCNIC_CAM_RAM(0x174))
+#define QLCNIC_CRB_DEV_NPAR_STATE	(QLCNIC_CAM_RAM(0x19c))
 #define QLCNIC_ROM_DEV_INIT_TIMEOUT	(0x3e885c)
 #define QLCNIC_ROM_DRV_RESET_TIMEOUT	(0x3e8860)
 
@@ -684,15 +718,26 @@
 #define QLCNIC_DEV_FAILED		0x6
 #define QLCNIC_DEV_QUISCENT		0x7
 
+#define QLCNIC_DEV_NPAR_NOT_RDY	0
+#define QLCNIC_DEV_NPAR_RDY		1
+
+#define QLC_DEV_CHECK_ACTIVE(VAL, FN)		((VAL) &= (1 << (FN * 4)))
 #define QLC_DEV_SET_REF_CNT(VAL, FN)		((VAL) |= (1 << (FN * 4)))
 #define QLC_DEV_CLR_REF_CNT(VAL, FN)		((VAL) &= ~(1 << (FN * 4)))
 #define QLC_DEV_SET_RST_RDY(VAL, FN)		((VAL) |= (1 << (FN * 4)))
 #define QLC_DEV_SET_QSCNT_RDY(VAL, FN)		((VAL) |= (2 << (FN * 4)))
 #define QLC_DEV_CLR_RST_QSCNT(VAL, FN)		((VAL) &= ~(3 << (FN * 4)))
 
+#define QLC_DEV_GET_DRV(VAL, FN)		(0xf & ((VAL) >> (FN * 4)))
+#define QLC_DEV_SET_DRV(VAL, FN)		((VAL) << (FN * 4))
+
+#define QLCNIC_TYPE_NIC		1
+#define QLCNIC_TYPE_FCOE		2
+#define QLCNIC_TYPE_ISCSI		3
+
 #define QLCNIC_RCODE_DRIVER_INFO		0x20000000
-#define QLCNIC_RCODE_DRIVER_CAN_RELOAD		0x40000000
-#define QLCNIC_RCODE_FATAL_ERROR		0x80000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD		BIT_30
+#define QLCNIC_RCODE_FATAL_ERROR		BIT_31
 #define QLCNIC_FWERROR_PEGNUM(code)		((code) & 0xff)
 #define QLCNIC_FWERROR_CODE(code)		((code >> 8) & 0xfffff)
 
@@ -721,6 +766,29 @@
 	u32	pci_int_reg;
 };
 
+#define QLCNIC_FW_API		0x1b216c
+#define QLCNIC_DRV_OP_MODE	0x1b2170
+#define QLCNIC_MSIX_BASE	0x132110
+#define QLCNIC_MAX_PCI_FUNC	8
+
+/* PCI function operational mode */
+enum {
+	QLCNIC_MGMT_FUNC	= 0,
+	QLCNIC_PRIV_FUNC	= 1,
+	QLCNIC_NON_PRIV_FUNC	= 2
+};
+
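+/* default (unprogrammed) value of QLCNIC_DRV_OP_MODE; the driver maps it to QLCNIC_MGMT_FUNC */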
+#define QLC_DEV_DRV_DEFAULT 0x11111111
+
+#define LSB(x)	((uint8_t)(x))
+#define MSB(x)	((uint8_t)((uint16_t)(x) >> 8))
+
+#define LSW(x)  ((uint16_t)((uint32_t)(x)))
+#define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))
+
+#define LSD(x)  ((uint32_t)((uint64_t)(x)))
+#define MSD(x)  ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
+
 #define	QLCNIC_LEGACY_INTR_CONFIG					\
 {									\
 	{								\
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 0c2e1f0..e08c8b0 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -327,7 +327,7 @@
 
 	i = 0;
 
-	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 		return -EIO;
 
 	tx_ring = adapter->tx_ring;
@@ -338,9 +338,15 @@
 
 	if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
 		netif_tx_stop_queue(tx_ring->txq);
-		__netif_tx_unlock_bh(tx_ring->txq);
-		adapter->stats.xmit_off++;
-		return -EBUSY;
+		smp_mb();
+		if (qlcnic_tx_avail(tx_ring) > nr_desc) {
+			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+				netif_tx_wake_queue(tx_ring->txq);
+		} else {
+			adapter->stats.xmit_off++;
+			__netif_tx_unlock_bh(tx_ring->txq);
+			return -EBUSY;
+		}
 	}
 
 	do {
@@ -407,10 +413,15 @@
 		return -ENOMEM;
 	}
 	memcpy(cur->mac_addr, addr, ETH_ALEN);
-	list_add_tail(&cur->list, &adapter->mac_list);
 
-	return qlcnic_sre_macaddr_change(adapter,
-				cur->mac_addr, QLCNIC_MAC_ADD);
+	if (qlcnic_sre_macaddr_change(adapter,
+				cur->mac_addr, QLCNIC_MAC_ADD)) {
+		kfree(cur);
+		return -EIO;
+	}
+
+	list_add_tail(&cur->list, &adapter->mac_list);
+	return 0;
 }
 
 void qlcnic_set_multi(struct net_device *netdev)
@@ -420,7 +431,7 @@
 	u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 	u32 mode = VPORT_MISS_MODE_DROP;
 
-	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 		return;
 
 	qlcnic_nic_add_mac(adapter, adapter->mac_addr);
@@ -538,7 +549,7 @@
 	return rv;
 }
 
-int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
 {
 	struct qlcnic_nic_req req;
 	u64 word;
@@ -704,21 +715,15 @@
 	return rc;
 }
 
-int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac)
 {
-	u32 crbaddr, mac_hi, mac_lo;
+	u32 crbaddr;
 	int pci_func = adapter->ahw.pci_func;
 
 	crbaddr = CRB_MAC_BLOCK_START +
 		(4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
 
-	mac_lo = QLCRD32(adapter, crbaddr);
-	mac_hi = QLCRD32(adapter, crbaddr+4);
-
-	if (pci_func & 1)
-		*mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
-	else
-		*mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+	qlcnic_fetch_mac(adapter, crbaddr, crbaddr+4, pci_func & 1, mac);
 
 	return 0;
 }
@@ -766,7 +771,7 @@
  * Out: 'off' is 2M pci map addr
  * side effect: lock crb window
  */
-static void
+static int
 qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
 {
 	u32 window;
@@ -775,6 +780,10 @@
 	off -= QLCNIC_PCI_CRBSPACE;
 
 	window = CRB_HI(off);
+	if (window == 0) {
+		dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
+		return -EIO;
+	}
 
 	writel(window, addr);
 	if (readl(addr) != window) {
@@ -782,7 +791,9 @@
 			dev_warn(&adapter->pdev->dev,
 				"failed to set CRB window to %d off 0x%lx\n",
 				window, off);
+		return -EIO;
 	}
+	return 0;
 }
 
 int
@@ -803,11 +814,12 @@
 		/* indirect access */
 		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
 		crb_win_lock(adapter);
-		qlcnic_pci_set_crbwindow_2M(adapter, off);
-		writel(data, addr);
+		rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
+		if (!rv)
+			writel(data, addr);
 		crb_win_unlock(adapter);
 		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
-		return 0;
+		return rv;
 	}
 
 	dev_err(&adapter->pdev->dev,
@@ -821,7 +833,7 @@
 {
 	unsigned long flags;
 	int rv;
-	u32 data;
+	u32 data = -1;
 	void __iomem *addr = NULL;
 
 	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
@@ -833,8 +845,8 @@
 		/* indirect access */
 		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
 		crb_win_lock(adapter);
-		qlcnic_pci_set_crbwindow_2M(adapter, off);
-		data = readl(addr);
+		if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
+			data = readl(addr);
 		crb_win_unlock(adapter);
 		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
 		return data;
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 71a4e66..75ba744 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -112,18 +112,45 @@
 		rds_ring = &recv_ctx->rds_rings[ring];
 		for (i = 0; i < rds_ring->num_desc; ++i) {
 			rx_buf = &(rds_ring->rx_buf_arr[i]);
-			if (rx_buf->state == QLCNIC_BUFFER_FREE)
+			if (rx_buf->skb == NULL)
 				continue;
+
 			pci_unmap_single(adapter->pdev,
 					rx_buf->dma,
 					rds_ring->dma_size,
 					PCI_DMA_FROMDEVICE);
-			if (rx_buf->skb != NULL)
-				dev_kfree_skb_any(rx_buf->skb);
+
+			dev_kfree_skb_any(rx_buf->skb);
 		}
 	}
 }
 
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
+{
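+	/* return every rx buffer to its ring's free list so the rings can be re-posted after a firmware context reset */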
+	struct qlcnic_recv_context *recv_ctx;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct qlcnic_rx_buffer *rx_buf;
+	int i, ring;
+
+	recv_ctx = &adapter->recv_ctx;
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
+
+		spin_lock(&rds_ring->lock);
+
+		INIT_LIST_HEAD(&rds_ring->free_list);
+
+		rx_buf = rds_ring->rx_buf_arr;
+		for (i = 0; i < rds_ring->num_desc; i++) {
+			list_add_tail(&rx_buf->list,
+					&rds_ring->free_list);
+			rx_buf++;
+		}
+
+		spin_unlock(&rds_ring->lock);
+	}
+}
+
 void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_cmd_buffer *cmd_buf;
@@ -181,7 +208,9 @@
 
 	tx_ring = adapter->tx_ring;
 	vfree(tx_ring->cmd_buf_arr);
+	tx_ring->cmd_buf_arr = NULL;
 	kfree(adapter->tx_ring);
+	adapter->tx_ring = NULL;
 }
 
 int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
@@ -264,7 +293,6 @@
 			list_add_tail(&rx_buf->list,
 					&rds_ring->free_list);
 			rx_buf->ref_handle = i;
-			rx_buf->state = QLCNIC_BUFFER_FREE;
 			rx_buf++;
 		}
 		spin_lock_init(&rds_ring->lock);
@@ -413,7 +441,7 @@
 
 	/* resetall */
 	qlcnic_rom_lock(adapter);
-	QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
+	QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
 	qlcnic_rom_unlock(adapter);
 
 	if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
@@ -521,16 +549,13 @@
 	u32 val;
 
 	val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
-	val = (val >> (adapter->portnum * 4)) & 0xf;
-
-	if ((val & 0x3) != 1) {
-		dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n",
-									val);
+	val = QLC_DEV_GET_DRV(val, adapter->portnum);
+	if ((val & 0x3) != QLCNIC_TYPE_NIC) {
+		dev_err(&adapter->pdev->dev,
+			"Not an Ethernet NIC func=%u\n", val);
 		return -EIO;
 	}
-
 	adapter->physical_port = (val >> 2);
-
 	if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
 		timeo = 30;
 
@@ -544,16 +569,34 @@
 	return 0;
 }
 
+int
+qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
+{
+	u32 ver = -1, min_ver;
+
+	qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
+
+	ver = QLCNIC_DECODE_VERSION(ver);
+	min_ver = QLCNIC_MIN_FW_VERSION;
+
+	if (ver < min_ver) {
+		dev_err(&adapter->pdev->dev,
+			"firmware version %d.%d.%d unsupported. "
+			"Minimum supported version is %d.%d.%d\n",
+			_major(ver), _minor(ver), _build(ver),
+			_major(min_ver), _minor(min_ver), _build(min_ver));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int
 qlcnic_has_mn(struct qlcnic_adapter *adapter)
 {
-	u32 capability, flashed_ver;
+	u32 capability;
 	capability = 0;
 
-	qlcnic_rom_fast_read(adapter,
-			QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
-	flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
-
 	capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
 	if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
 		return 1;
@@ -1007,7 +1050,7 @@
 qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
 {
 	__le32 val;
-	u32 ver, min_ver, bios, min_size;
+	u32 ver, bios, min_size;
 	struct pci_dev *pdev = adapter->pdev;
 	const struct firmware *fw = adapter->fw;
 	u8 fw_type = adapter->fw_type;
@@ -1029,12 +1072,9 @@
 		return -EINVAL;
 
 	val = qlcnic_get_fw_version(adapter);
-
-	min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
-
 	ver = QLCNIC_DECODE_VERSION(val);
 
-	if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
+	if (ver < QLCNIC_MIN_FW_VERSION) {
 		dev_err(&pdev->dev,
 				"%s: firmware version %d.%d.%d unsupported\n",
 		fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
@@ -1122,7 +1162,7 @@
 	adapter->fw = NULL;
 }
 
-int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
+static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
 {
 	u32 val;
 	int retries = 60;
@@ -1147,7 +1187,8 @@
 	QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
 
 out_err:
-	dev_err(&adapter->pdev->dev, "firmware init failed\n");
+	dev_err(&adapter->pdev->dev, "Command Peg initialization not "
+		      "complete, state: 0x%x.\n", val);
 	return -EIO;
 }
 
@@ -1180,6 +1221,10 @@
 {
 	int err;
 
+	err = qlcnic_cmd_peg_ready(adapter);
+	if (err)
+		return err;
+
 	err = qlcnic_receive_peg_ready(adapter);
 	if (err)
 		return err;
@@ -1265,14 +1310,12 @@
 	dma_addr_t dma;
 	struct pci_dev *pdev = adapter->pdev;
 
-	buffer->skb = dev_alloc_skb(rds_ring->skb_size);
-	if (!buffer->skb) {
+	skb = dev_alloc_skb(rds_ring->skb_size);
+	if (!skb) {
 		adapter->stats.skb_alloc_failure++;
 		return -ENOMEM;
 	}
 
-	skb = buffer->skb;
-
 	skb_reserve(skb, 2);
 
 	dma = pci_map_single(pdev, skb->data,
@@ -1281,13 +1324,11 @@
 	if (pci_dma_mapping_error(pdev, dma)) {
 		adapter->stats.rx_dma_map_error++;
 		dev_kfree_skb_any(skb);
-		buffer->skb = NULL;
 		return -ENOMEM;
 	}
 
 	buffer->skb = skb;
 	buffer->dma = dma;
-	buffer->state = QLCNIC_BUFFER_BUSY;
 
 	return 0;
 }
@@ -1300,14 +1341,15 @@
 
 	buffer = &rds_ring->rx_buf_arr[index];
 
+	if (unlikely(buffer->skb == NULL)) {
+		WARN_ON(1);
+		return NULL;
+	}
+
 	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
 			PCI_DMA_FROMDEVICE);
 
 	skb = buffer->skb;
-	if (!skb) {
-		adapter->stats.null_skb++;
-		goto no_skb;
-	}
 
 	if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
 		adapter->stats.csummed++;
@@ -1319,8 +1361,7 @@
 	skb->dev = adapter->netdev;
 
 	buffer->skb = NULL;
-no_skb:
-	buffer->state = QLCNIC_BUFFER_FREE;
+
 	return skb;
 }
 
@@ -1495,7 +1536,7 @@
 
 		WARN_ON(desc_cnt > 1);
 
-		if (rxbuf)
+		if (likely(rxbuf))
 			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
 		else
 			adapter->stats.null_rxbuf++;
@@ -1701,3 +1742,24 @@
 	sds_ring->consumer = consumer;
 	writel(consumer, sds_ring->crb_sts_consumer);
 }
+
+void
+qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
+			u8 alt_mac, u8 *mac)
+{
+	u32 mac_low, mac_high;
+	int i;
+
+	mac_low = QLCRD32(adapter, off1);
+	mac_high = QLCRD32(adapter, off2);
+
+	if (alt_mac) {
+		mac_low |= (mac_low >> 16) | (mac_high << 16);
+		mac_high >>= 16;
+	}
+
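+	/* mac[0..1] come from the low 16 bits of the high register, mac[2..5] from the low register, most significant byte first */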
+	for (i = 0; i < 2; i++)
+		mac[i] = (u8)(mac_high >> ((1 - i) * 8));
+	for (i = 2; i < 6; i++)
+		mac[i] = (u8)(mac_low >> ((5 - i) * 8));
+}
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 23ea9ca..18e2b2e 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -35,14 +35,14 @@
 #include <linux/inetdevice.h>
 #include <linux/sysfs.h>
 
-MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
+MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
 MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
 
 char qlcnic_driver_name[] = "qlcnic";
-static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
-    QLCNIC_LINUX_VERSIONID;
+static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
+	"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
 
 static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
 
@@ -65,13 +65,16 @@
 module_param(load_fw_file, int, 0644);
 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
 
+static int qlcnic_config_npars;
+module_param(qlcnic_config_npars, int, 0644);
+MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
+
 static int __devinit qlcnic_probe(struct pci_dev *pdev,
 		const struct pci_device_id *ent);
 static void __devexit qlcnic_remove(struct pci_dev *pdev);
 static int qlcnic_open(struct net_device *netdev);
 static int qlcnic_close(struct net_device *netdev);
 static void qlcnic_tx_timeout(struct net_device *netdev);
-static void qlcnic_tx_timeout_task(struct work_struct *work);
 static void qlcnic_attach_work(struct work_struct *work);
 static void qlcnic_fwinit_work(struct work_struct *work);
 static void qlcnic_fw_poll_work(struct work_struct *work);
@@ -79,6 +82,7 @@
 		work_func_t func, int delay);
 static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
 static int qlcnic_poll(struct napi_struct *napi, int budget);
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev);
 #endif
@@ -99,7 +103,14 @@
 
 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
 static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
+static int qlcnic_start_firmware(struct qlcnic_adapter *);
 
+static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
+static void qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *);
+static int qlcnicvf_set_ilb_mode(struct qlcnic_adapter *);
+static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
+static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
+static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
 /*  PCI Device ID Table  */
 #define ENTRY(device) \
 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -120,12 +131,6 @@
 		struct qlcnic_host_tx_ring *tx_ring)
 {
 	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
-
-	if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
-		netif_stop_queue(adapter->netdev);
-		smp_mb();
-		adapter->stats.xmit_off++;
-	}
 }
 
 static const u32 msi_tgt_status[8] = {
@@ -184,8 +189,13 @@
 
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
-		netif_napi_add(netdev, &sds_ring->napi,
-				qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
+
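+		/* the last SDS ring also services tx completions (qlcnic_poll); the others are rx-only (qlcnic_rx_poll) */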
+		if (ring == adapter->max_sds_rings - 1)
+			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
+				QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
+		else
+			netif_napi_add(netdev, &sds_ring->napi,
+				qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
 	}
 
 	return 0;
@@ -307,19 +317,14 @@
 static int
 qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
 {
-	int i;
-	unsigned char *p;
-	u64 mac_addr;
+	u8 mac_addr[ETH_ALEN];
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 
-	if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
+	if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
 		return -EIO;
 
-	p = (unsigned char *)&mac_addr;
-	for (i = 0; i < 6; i++)
-		netdev->dev_addr[i] = *(p + 5 - i);
-
+	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
 	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
 	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
 
@@ -340,7 +345,7 @@
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EINVAL;
 
-	if (netif_running(netdev)) {
+	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
 		netif_device_detach(netdev);
 		qlcnic_napi_disable(adapter);
 	}
@@ -349,7 +354,7 @@
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	qlcnic_set_multi(adapter->netdev);
 
-	if (netif_running(netdev)) {
+	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
 		netif_device_attach(netdev);
 		qlcnic_napi_enable(adapter);
 	}
@@ -371,6 +376,24 @@
 #endif
 };
 
+static struct qlcnic_nic_template qlcnic_ops = {
+	.get_mac_addr = qlcnic_get_mac_address,
+	.config_bridged_mode = qlcnic_config_bridged_mode,
+	.config_led = qlcnic_config_led,
+	.set_ilb_mode = qlcnic_set_ilb_mode,
+	.clear_ilb_mode = qlcnic_clear_ilb_mode,
+	.start_firmware = qlcnic_start_firmware
+};
+
+static struct qlcnic_nic_template qlcnic_vf_ops = {
+	.get_mac_addr = qlcnic_get_mac_address,
+	.config_bridged_mode = qlcnicvf_config_bridged_mode,
+	.config_led = qlcnicvf_config_led,
+	.set_ilb_mode = qlcnicvf_set_ilb_mode,
+	.clear_ilb_mode = qlcnicvf_clear_ilb_mode,
+	.start_firmware = qlcnicvf_start_firmware
+};
+
 static void
 qlcnic_setup_intr(struct qlcnic_adapter *adapter)
 {
@@ -453,6 +476,167 @@
 }
 
 static int
+qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
+	int i, ret = 0;
+	u8 pfn;
+
+	if (!adapter->npars)
+		adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
+				QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
+	if (!adapter->npars)
+		return -ENOMEM;
+
+	if (!adapter->eswitch)
+		adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+				QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
+	if (!adapter->eswitch) {
+		ret = -ENOMEM;
+		goto err_eswitch;
+	}
+
+	ret = qlcnic_get_pci_info(adapter, pci_info);
+	if (!ret) {
+		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+			pfn = pci_info[i].id;
+			if (pfn >= QLCNIC_MAX_PCI_FUNC)
+				return QL_STATUS_INVALID_PARAM;
+			adapter->npars[pfn].active = pci_info[i].active;
+			adapter->npars[pfn].type = pci_info[i].type;
+			adapter->npars[pfn].phy_port = pci_info[i].default_port;
+			adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
+		}
+
+		for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+			adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+
+		return ret;
+	}
+
+	kfree(adapter->eswitch);
+	adapter->eswitch = NULL;
+err_eswitch:
+	kfree(adapter->npars);
+
+	return ret;
+}
+
+static int
+qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
+{
+	u8 id;
+	u32 ref_count;
+	int i, ret = 1;
+	u32 data = QLCNIC_MGMT_FUNC;
+	void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+
+	/* If other drivers are not in use set their privilege level */
+	ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+	ret = qlcnic_api_lock(adapter);
+	if (ret)
+		goto err_lock;
+	if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
+		goto err_npar;
+
+	if (qlcnic_config_npars) {
+		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+			id = i;
+			if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
+				id == adapter->ahw.pci_func)
+				continue;
+			data |= (qlcnic_config_npars &
+					QLC_DEV_SET_DRV(0xf, id));
+		}
+	} else {
+		data = readl(priv_op);
+		data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
+			(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
+			adapter->ahw.pci_func));
+	}
+	writel(data, priv_op);
+err_npar:
+	qlcnic_api_unlock(adapter);
+err_lock:
+	return ret;
+}
+
+static u32
+qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
+{
+	void __iomem *msix_base_addr;
+	void __iomem *priv_op;
+	struct qlcnic_info nic_info;
+	u32 func;
+	u32 msix_base;
+	u32 op_mode, priv_level;
+
+	/* Determine FW API version */
+	adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
+
+	/* Find PCI function number */
+	pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
+	msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
+	msix_base = readl(msix_base_addr);
+	func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
+	adapter->ahw.pci_func = func;
+
+	if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
+		adapter->capabilities = nic_info.capabilities;
+
+		if (adapter->capabilities & BIT_6)
+			adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+		else
+			adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+	}
+
+	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))	{
+		adapter->nic_ops = &qlcnic_ops;
+		return adapter->fw_hal_version;
+	}
+
+	/* Determine function privilege level */
+	priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+	op_mode = readl(priv_op);
+	if (op_mode == QLC_DEV_DRV_DEFAULT)
+		priv_level = QLCNIC_MGMT_FUNC;
+	else
+		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+
+	switch (priv_level) {
+	case QLCNIC_MGMT_FUNC:
+		adapter->op_mode = QLCNIC_MGMT_FUNC;
+		adapter->nic_ops = &qlcnic_ops;
+		qlcnic_init_pci_info(adapter);
+		/* Set privilege level for other functions */
+		qlcnic_set_function_modes(adapter);
+		dev_info(&adapter->pdev->dev,
+			"HAL Version: %d, Management function\n",
+			adapter->fw_hal_version);
+		break;
+	case QLCNIC_PRIV_FUNC:
+		adapter->op_mode = QLCNIC_PRIV_FUNC;
+		dev_info(&adapter->pdev->dev,
+			"HAL Version: %d, Privileged function\n",
+			adapter->fw_hal_version);
+		adapter->nic_ops = &qlcnic_ops;
+		break;
+	case QLCNIC_NON_PRIV_FUNC:
+		adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
+		dev_info(&adapter->pdev->dev,
+			"HAL Version: %d Non Privileged function\n",
+			adapter->fw_hal_version);
+		adapter->nic_ops = &qlcnic_vf_ops;
+		break;
+	default:
+		dev_err(&adapter->pdev->dev, "Unknown function mode: %d\n",
+			priv_level);
+		return 0;
+	}
+	return adapter->fw_hal_version;
+}
+
+static int
 qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
 {
 	void __iomem *mem_ptr0 = NULL;
@@ -460,7 +644,6 @@
 	unsigned long mem_len, pci_len0 = 0;
 
 	struct pci_dev *pdev = adapter->pdev;
-	int pci_func = adapter->ahw.pci_func;
 
 	/* remap phys address */
 	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
@@ -483,8 +666,13 @@
 	adapter->ahw.pci_base0 = mem_ptr0;
 	adapter->ahw.pci_len0 = pci_len0;
 
+	if (!qlcnic_get_driver_mode(adapter)) {
+		iounmap(adapter->ahw.pci_base0);
+		return -EIO;
+	}
+
 	adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
-		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
 
 	return 0;
 }
@@ -509,7 +697,7 @@
 	}
 
 	if (!found)
-		name = "Unknown";
+		sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
 }
 
 static void
@@ -521,7 +709,7 @@
 	int i, offset, val;
 	int *ptr32;
 	struct pci_dev *pdev = adapter->pdev;
-
+	struct qlcnic_info nic_info;
 	adapter->driver_mismatch = 0;
 
 	ptr32 = (int *)&serial_num;
@@ -553,8 +741,6 @@
 	dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
 			fw_major, fw_minor, fw_build);
 
-	adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
-
 	adapter->flags &= ~QLCNIC_LRO_ENABLED;
 
 	if (adapter->ahw.port_type == QLCNIC_XGBE) {
@@ -565,6 +751,16 @@
 		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
 	}
 
+	if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
+		adapter->physical_port = nic_info.phys_port;
+		adapter->switch_mode = nic_info.switch_mode;
+		adapter->max_tx_ques = nic_info.max_tx_ques;
+		adapter->max_rx_ques = nic_info.max_rx_ques;
+		adapter->capabilities = nic_info.capabilities;
+		adapter->max_mac_filters = nic_info.max_mac_filters;
+		adapter->max_mtu = nic_info.max_mtu;
+	}
+
 	adapter->msix_supported = !!use_msi_x;
 	adapter->rss_supported = !!use_msi_x;
 
@@ -591,8 +787,12 @@
 
 	if (load_fw_file)
 		qlcnic_request_firmware(adapter);
-	else
+	else {
+		if (qlcnic_check_flash_fw_ver(adapter))
+			goto err_out;
+
 		adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
+	}
 
 	err = qlcnic_need_fw_reset(adapter);
 	if (err < 0)
@@ -602,6 +802,7 @@
 
 	if (first_boot != 0x55555555) {
 		QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
+		QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
 		qlcnic_pinit_from_rom(adapter);
 		msleep(1);
 	}
@@ -624,7 +825,7 @@
 
 wait_init:
 	/* Handshake with the card before we register the devices. */
-	err = qlcnic_phantom_init(adapter);
+	err = qlcnic_init_firmware(adapter);
 	if (err)
 		goto err_out;
 
@@ -633,6 +834,10 @@
 
 	qlcnic_check_options(adapter);
 
+	if (adapter->flags & QLCNIC_ESWITCH_ENABLED &&
+		adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
+		qlcnic_dev_set_npar_ready(adapter);
+
 	adapter->need_fw_reset = 0;
 
 	qlcnic_release_firmware(adapter);
@@ -716,9 +921,23 @@
 static int
 __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 {
+	int ring;
+	struct qlcnic_host_rds_ring *rds_ring;
+
 	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
 		return -EIO;
 
+	if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+		return 0;
+
+	if (qlcnic_fw_create_ctx(adapter))
+		return -EIO;
+
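+	/* rx buffers must be re-posted after each firmware context creation; the down path returns them to the free lists */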
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &adapter->recv_ctx.rds_rings[ring];
+		qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+	}
+
 	qlcnic_set_multi(netdev);
 	qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
 
@@ -736,6 +955,7 @@
 
 	qlcnic_linkevent_request(adapter, 1);
 
+	adapter->reset_context = 0;
 	set_bit(__QLCNIC_DEV_UP, &adapter->state);
 	return 0;
 }
@@ -775,6 +995,9 @@
 
 	qlcnic_napi_disable(adapter);
 
+	qlcnic_fw_destroy_ctx(adapter);
+
+	qlcnic_reset_rx_buffers_list(adapter);
 	qlcnic_release_tx_buffers(adapter);
 	spin_unlock(&adapter->tx_clean_lock);
 }
@@ -796,16 +1019,11 @@
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
-	int err, ring;
-	struct qlcnic_host_rds_ring *rds_ring;
+	int err;
 
 	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
 		return 0;
 
-	err = qlcnic_init_firmware(adapter);
-	if (err)
-		return err;
-
 	err = qlcnic_napi_add(adapter, netdev);
 	if (err)
 		return err;
@@ -813,7 +1031,7 @@
 	err = qlcnic_alloc_sw_resources(adapter);
 	if (err) {
 		dev_err(&pdev->dev, "Error in setting sw resources\n");
-		return err;
+		goto err_out_napi_del;
 	}
 
 	err = qlcnic_alloc_hw_resources(adapter);
@@ -822,16 +1040,10 @@
 		goto err_out_free_sw;
 	}
 
-
-	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
-		rds_ring = &adapter->recv_ctx.rds_rings[ring];
-		qlcnic_post_rx_buffers(adapter, ring, rds_ring);
-	}
-
 	err = qlcnic_request_irq(adapter);
 	if (err) {
 		dev_err(&pdev->dev, "failed to setup interrupt\n");
-		goto err_out_free_rxbuf;
+		goto err_out_free_hw;
 	}
 
 	qlcnic_init_coalesce_defaults(adapter);
@@ -841,11 +1053,12 @@
 	adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
 	return 0;
 
-err_out_free_rxbuf:
-	qlcnic_release_rx_buffers(adapter);
+err_out_free_hw:
 	qlcnic_free_hw_resources(adapter);
 err_out_free_sw:
 	qlcnic_free_sw_resources(adapter);
+err_out_napi_del:
+	qlcnic_napi_del(adapter);
 	return err;
 }
 
@@ -880,6 +1093,8 @@
 		}
 	}
 
+	qlcnic_fw_destroy_ctx(adapter);
+
 	qlcnic_detach(adapter);
 
 	adapter->diag_test = 0;
@@ -898,6 +1113,7 @@
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_host_rds_ring *rds_ring;
 	int ring;
 	int ret;
 
@@ -917,6 +1133,17 @@
 		return ret;
 	}
 
+	ret = qlcnic_fw_create_ctx(adapter);
+	if (ret) {
+		qlcnic_detach(adapter);
+		return ret;
+	}
+
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &adapter->recv_ctx.rds_rings[ring];
+		qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+	}
+
 	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
 		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 			sds_ring = &adapter->recv_ctx.sds_rings[ring];
@@ -928,6 +1155,27 @@
 	return 0;
 }
 
+/* Reset context in hardware only */
+static int
+qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+		return -EBUSY;
+
+	netif_device_detach(netdev);
+
+	qlcnic_down(adapter, netdev);
+
+	qlcnic_up(adapter, netdev);
+
+	netif_device_attach(netdev);
+
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+	return 0;
+}
+
 int
 qlcnic_reset_context(struct qlcnic_adapter *adapter)
 {
@@ -971,18 +1219,17 @@
 	adapter->max_mc_count = 38;
 
 	netdev->netdev_ops	   = &qlcnic_netdev_ops;
-	netdev->watchdog_timeo     = 2*HZ;
+	netdev->watchdog_timeo     = 5*HZ;
 
 	qlcnic_change_mtu(netdev, netdev->mtu);
 
 	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
 
-	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
-	netdev->features |= (NETIF_F_GRO);
-	netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+		NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6);
 
-	netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
-	netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+	netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+		NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6);
 
 	if (pci_using_dac) {
 		netdev->features |= NETIF_F_HIGHDMA;
@@ -997,8 +1244,6 @@
 
 	netdev->irq = adapter->msix_entries[0].vector;
 
-	INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
-
 	if (qlcnic_read_mac_addr(adapter))
 		dev_warn(&pdev->dev, "failed to read mac addr\n");
 
@@ -1036,7 +1281,6 @@
 	struct net_device *netdev = NULL;
 	struct qlcnic_adapter *adapter = NULL;
 	int err;
-	int pci_func_id = PCI_FUNC(pdev->devfn);
 	uint8_t revision_id;
 	uint8_t pci_using_dac;
 
@@ -1072,7 +1316,6 @@
 	adapter->netdev  = netdev;
 	adapter->pdev    = pdev;
 	adapter->dev_rst_time = jiffies;
-	adapter->ahw.pci_func  = pci_func_id;
 
 	revision_id = pdev->revision;
 	adapter->ahw.revision_id = revision_id;
@@ -1088,7 +1331,7 @@
 		goto err_out_free_netdev;
 
 	/* This will be reset for mezz cards  */
-	adapter->portnum = pci_func_id;
+	adapter->portnum = adapter->ahw.pci_func;
 
 	err = qlcnic_get_board_info(adapter);
 	if (err) {
@@ -1102,7 +1345,7 @@
 	if (qlcnic_setup_idc_param(adapter))
 		goto err_out_iounmap;
 
-	err = qlcnic_start_firmware(adapter);
+	err = adapter->nic_ops->start_firmware(adapter);
 	if (err) {
 		dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
 		goto err_out_decr_ref;
@@ -1171,10 +1414,13 @@
 
 	unregister_netdev(netdev);
 
-	cancel_work_sync(&adapter->tx_timeout_task);
-
 	qlcnic_detach(adapter);
 
+	if (adapter->npars != NULL)
+		kfree(adapter->npars);
+	if (adapter->eswitch != NULL)
+		kfree(adapter->eswitch);
+
 	qlcnic_clr_all_drv_state(adapter);
 
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1206,10 +1452,6 @@
 	if (netif_running(netdev))
 		qlcnic_down(adapter, netdev);
 
-	cancel_work_sync(&adapter->tx_timeout_task);
-
-	qlcnic_detach(adapter);
-
 	qlcnic_clr_all_drv_state(adapter);
 
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1263,35 +1505,23 @@
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
 
-	err = qlcnic_start_firmware(adapter);
+	err = adapter->nic_ops->start_firmware(adapter);
 	if (err) {
 		dev_err(&pdev->dev, "failed to start firmware\n");
 		return err;
 	}
 
 	if (netif_running(netdev)) {
-		err = qlcnic_attach(adapter);
-		if (err)
-			goto err_out;
-
 		err = qlcnic_up(adapter, netdev);
 		if (err)
-			goto err_out_detach;
-
+			goto done;
 
 		qlcnic_config_indev_addr(netdev, NETDEV_UP);
 	}
-
+done:
 	netif_device_attach(netdev);
 	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
 	return 0;
-
-err_out_detach:
-	qlcnic_detach(adapter);
-err_out:
-	qlcnic_clr_all_drv_state(adapter);
-	netif_device_attach(netdev);
-	return err;
 }
 #endif
 
@@ -1340,11 +1570,11 @@
 	u8 opcode = TX_ETHER_PKT;
 	__be16 protocol = skb->protocol;
 	u16 flags = 0, vid = 0;
-	u32 producer;
 	int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
 	struct cmd_desc_type0 *hwdesc;
 	struct vlan_ethhdr *vh;
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	u32 producer = tx_ring->producer;
 
 	if (protocol == cpu_to_be16(ETH_P_8021Q)) {
 
@@ -1360,6 +1590,11 @@
 		vlan_oob = 1;
 	}
 
+	if (*(skb->data) & BIT_0) {
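+		/* BIT_0 of the first address byte is the Ethernet group/multicast bit; flag it and copy the destination MAC into the descriptor */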
+		flags |= BIT_0;
+		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
+	}
+
 	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
 			skb_shinfo(skb)->gso_size > 0) {
 
@@ -1409,7 +1644,6 @@
 	/* For LSO, we need to copy the MAC/IP/TCP headers into
 	 * the descriptor ring
 	 */
-	producer = tx_ring->producer;
 	copied = 0;
 	offset = 2;
 
@@ -1537,10 +1771,15 @@
 	/* 4 fragments per cmd des */
 	no_of_desc = (frag_count + 3) >> 2;
 
-	if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
+	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
 		netif_stop_queue(netdev);
-		adapter->stats.xmit_off++;
-		return NETDEV_TX_BUSY;
+		smp_mb();
+		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+			netif_start_queue(netdev);
+		} else {
+			adapter->stats.xmit_off++;
+			return NETDEV_TX_BUSY;
+		}
 	}
 
 	producer = tx_ring->producer;
@@ -1675,35 +1914,11 @@
 		return;
 
 	dev_err(&netdev->dev, "transmit timeout, resetting.\n");
-	schedule_work(&adapter->tx_timeout_task);
-}
-
-static void qlcnic_tx_timeout_task(struct work_struct *work)
-{
-	struct qlcnic_adapter *adapter =
-		container_of(work, struct qlcnic_adapter, tx_timeout_task);
-
-	if (!netif_running(adapter->netdev))
-		return;
-
-	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
-		return;
 
 	if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
-		goto request_reset;
-
-	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-	if (!qlcnic_reset_context(adapter)) {
-		adapter->netdev->trans_start = jiffies;
-		return;
-
-		/* context reset failed, fall through for fw reset */
-	}
-
-request_reset:
-	adapter->need_fw_reset = 1;
-	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-	QLCDB(adapter, DRV, "Resetting adapter\n");
+		adapter->need_fw_reset = 1;
+	else
+		adapter->reset_context = 1;
 }
 
 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -1846,14 +2061,12 @@
 		smp_mb();
 
 		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
-			__netif_tx_lock(tx_ring->txq, smp_processor_id());
 			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
 				netif_wake_queue(netdev);
-				adapter->tx_timeo_cnt = 0;
 				adapter->stats.xmit_on++;
 			}
-			__netif_tx_unlock(tx_ring->txq);
 		}
+		adapter->tx_timeo_cnt = 0;
 	}
 	/*
 	 * If everything is freed up to consumer then check if the ring is full
@@ -1898,6 +2111,25 @@
 	return work_done;
 }
 
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct qlcnic_host_sds_ring *sds_ring =
+		container_of(napi, struct qlcnic_host_sds_ring, napi);
+
+	struct qlcnic_adapter *adapter = sds_ring->adapter;
+	int work_done;
+
+	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+	if (work_done < budget) {
+		napi_complete(&sds_ring->napi);
+		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+			qlcnic_enable_int(sds_ring);
+	}
+
+	return work_done;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev)
 {
@@ -2109,7 +2341,7 @@
 {
 	struct qlcnic_adapter *adapter = container_of(work,
 			struct qlcnic_adapter, fw_work.work);
-	u32 dev_state = 0xf;
+	u32 dev_state = 0xf, npar_state;
 
 	if (qlcnic_api_lock(adapter))
 		goto err_ret;
@@ -2122,6 +2354,19 @@
 		return;
 	}
 
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+		npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+		if (npar_state == QLCNIC_DEV_NPAR_RDY) {
+			qlcnic_api_unlock(adapter);
+			goto wait_npar;
+		} else {
+			qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+				FW_POLL_DELAY);
+			qlcnic_api_unlock(adapter);
+			return;
+		}
+	}
+
 	if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
 		dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
 					adapter->reset_ack_timeo);
@@ -2154,7 +2399,7 @@
 
 		qlcnic_api_unlock(adapter);
 
-		if (!qlcnic_start_firmware(adapter)) {
+		if (!adapter->nic_ops->start_firmware(adapter)) {
 			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
 			return;
 		}
@@ -2163,6 +2408,7 @@
 
 	qlcnic_api_unlock(adapter);
 
+wait_npar:
 	dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 	QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
 
@@ -2177,7 +2423,7 @@
 		break;
 
 	default:
-		if (!qlcnic_start_firmware(adapter)) {
+		if (!adapter->nic_ops->start_firmware(adapter)) {
 			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
 			return;
 		}
@@ -2202,10 +2448,6 @@
 
 	qlcnic_down(adapter, netdev);
 
-	rtnl_lock();
-	qlcnic_detach(adapter);
-	rtnl_unlock();
-
 	status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
 
 	if (status & QLCNIC_RCODE_FATAL_ERROR)
@@ -2251,6 +2493,26 @@
 	qlcnic_api_unlock(adapter);
 }
 
+/* Transition to the NPAR READY state from the NPAR NOT READY state */
+static void
+qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
+{
+	u32 state;
+
+	if (qlcnic_api_lock(adapter))
+		return;
+
+	state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+
+	if (state != QLCNIC_DEV_NPAR_RDY) {
+		QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+			QLCNIC_DEV_NPAR_RDY);
+		QLCDB(adapter, DRV, "NPAR READY state set\n");
+	}
+
+	qlcnic_api_unlock(adapter);
+}
+
 static void
 qlcnic_schedule_work(struct qlcnic_adapter *adapter,
 		work_func_t func, int delay)
@@ -2274,19 +2536,11 @@
 	struct qlcnic_adapter *adapter = container_of(work,
 				struct qlcnic_adapter, fw_work.work);
 	struct net_device *netdev = adapter->netdev;
-	int err;
 
 	if (netif_running(netdev)) {
-		err = qlcnic_attach(adapter);
-		if (err)
+		if (qlcnic_up(adapter, netdev))
 			goto done;
 
-		err = qlcnic_up(adapter, netdev);
-		if (err) {
-			qlcnic_detach(adapter);
-			goto done;
-		}
-
 		qlcnic_config_indev_addr(netdev, NETDEV_UP);
 	}
 
@@ -2322,6 +2576,12 @@
 		adapter->fw_fail_cnt = 0;
 		if (adapter->need_fw_reset)
 			goto detach;
+
+		if (adapter->reset_context) {
+			qlcnic_reset_hw_context(adapter);
+			adapter->netdev->trans_start = jiffies;
+		}
+
 		return 0;
 	}
 
@@ -2365,6 +2625,46 @@
 	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
 }
 
+static int
+qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
+{
+	int err;
+
+	err = qlcnic_can_start_firmware(adapter);
+	if (err)
+		return err;
+
+	qlcnic_check_options(adapter);
+
+	adapter->need_fw_reset = 0;
+
+	return err;
+}
+
+static int
+qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+	return -EOPNOTSUPP;
+}
+
+static int
+qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+	return -EOPNOTSUPP;
+}
+
+static int
+qlcnicvf_set_ilb_mode(struct qlcnic_adapter *adapter)
+{
+	return -EOPNOTSUPP;
+}
+
+static void
+qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *adapter)
+{
+}
+
 static ssize_t
 qlcnic_store_bridged_mode(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
@@ -2376,13 +2676,13 @@
 	if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
 		goto err_out;
 
-	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
 		goto err_out;
 
 	if (strict_strtoul(buf, 2, &new))
 		goto err_out;
 
-	if (!qlcnic_config_bridged_mode(adapter, !!new))
+	if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
 		ret = len;
 
 err_out:
@@ -2585,6 +2885,364 @@
 	.write = qlcnic_sysfs_write_mem,
 };
 
+static int
+validate_pm_config(struct qlcnic_adapter *adapter,
+			struct qlcnic_pm_func_cfg *pm_cfg, int count)
+{
+	u8 src_pci_func, s_esw_id, d_esw_id;
+	u8 dest_pci_func;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		src_pci_func = pm_cfg[i].pci_func;
+		dest_pci_func = pm_cfg[i].dest_npar;
+		if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
+				|| dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (!IS_VALID_MODE(pm_cfg[i].action))
+			return QL_STATUS_INVALID_PARAM;
+
+		s_esw_id = adapter->npars[src_pci_func].phy_port;
+		d_esw_id = adapter->npars[dest_pci_func].phy_port;
+
+		if (s_esw_id != d_esw_id)
+			return QL_STATUS_INVALID_PARAM;
+	}
+	return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_pm_func_cfg *pm_cfg;
+	u32 id, action, pci_func;
+	int count, rem, i, ret;
+
+	count	= size / sizeof(struct qlcnic_pm_func_cfg);
+	rem	= size % sizeof(struct qlcnic_pm_func_cfg);
+	if (rem)
+		return QL_STATUS_INVALID_PARAM;
+
+	pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
+
+	ret = validate_pm_config(adapter, pm_cfg, count);
+	if (ret)
+		return ret;
+	for (i = 0; i < count; i++) {
+		pci_func = pm_cfg[i].pci_func;
+		action = pm_cfg[i].action;
+		id = adapter->npars[pci_func].phy_port;
+		ret = qlcnic_config_port_mirroring(adapter, id,
+						action, pci_func);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < count; i++) {
+		pci_func = pm_cfg[i].pci_func;
+		id = adapter->npars[pci_func].phy_port;
+		adapter->npars[pci_func].enable_pm = pm_cfg[i].action;
+		adapter->npars[pci_func].dest_npar = id;
+	}
+	return size;
+}
+
+static ssize_t
+qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
+	int i;
+
+	if (size != sizeof(pm_cfg))
+		return QL_STATUS_INVALID_PARAM;
+
+	memset(&pm_cfg, 0, sizeof(pm_cfg));
+
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+			continue;
+		pm_cfg[i].action = adapter->npars[i].enable_pm;
+		pm_cfg[i].dest_npar = 0;
+		pm_cfg[i].pci_func = i;
+	}
+	memcpy(buf, &pm_cfg, size);
+
+	return size;
+}
+
+static int
+validate_esw_config(struct qlcnic_adapter *adapter,
+			struct qlcnic_esw_func_cfg *esw_cfg, int count)
+{
+	u8 pci_func;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		pci_func = esw_cfg[i].pci_func;
+		if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (esw_cfg[i].host_vlan_tag == 1)
+			if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
+				return QL_STATUS_INVALID_PARAM;
+
+		if (!IS_VALID_MODE(esw_cfg[i].promisc_mode)
+				|| !IS_VALID_MODE(esw_cfg[i].host_vlan_tag)
+				|| !IS_VALID_MODE(esw_cfg[i].mac_learning)
+				|| !IS_VALID_MODE(esw_cfg[i].discard_tagged))
+			return QL_STATUS_INVALID_PARAM;
+	}
+
+	return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_esw_func_cfg *esw_cfg;
+	u8 id, discard_tagged, promsc_mode, mac_learn;
+	u8 vlan_tagging, pci_func, vlan_id;
+	int count, rem, i, ret;
+
+	count	= size / sizeof(struct qlcnic_esw_func_cfg);
+	rem	= size % sizeof(struct qlcnic_esw_func_cfg);
+	if (rem)
+		return QL_STATUS_INVALID_PARAM;
+
+	esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
+	ret = validate_esw_config(adapter, esw_cfg, count);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < count; i++) {
+		pci_func = esw_cfg[i].pci_func;
+		id = adapter->npars[pci_func].phy_port;
+		vlan_tagging = esw_cfg[i].host_vlan_tag;
+		promsc_mode = esw_cfg[i].promisc_mode;
+		mac_learn = esw_cfg[i].mac_learning;
+		vlan_id	= esw_cfg[i].vlan_id;
+		discard_tagged = esw_cfg[i].discard_tagged;
+		ret = qlcnic_config_switch_port(adapter, id, vlan_tagging,
+						discard_tagged,
+						promsc_mode,
+						mac_learn,
+						pci_func,
+						vlan_id);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < count; i++) {
+		pci_func = esw_cfg[i].pci_func;
+		adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode;
+		adapter->npars[pci_func].mac_learning =	esw_cfg[i].mac_learning;
+		adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id;
+		adapter->npars[pci_func].discard_tagged	=
+						esw_cfg[i].discard_tagged;
+		adapter->npars[pci_func].host_vlan_tag =
+						esw_cfg[i].host_vlan_tag;
+	}
+
+	return size;
+}
+
+static ssize_t
+qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
+	int i;
+
+	if (size != sizeof(esw_cfg))
+		return QL_STATUS_INVALID_PARAM;
+
+	memset(&esw_cfg, 0, sizeof(esw_cfg));
+
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+			continue;
+
+		esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag;
+		esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode;
+		esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
+		esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
+		esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
+	}
+	memcpy(buf, &esw_cfg, size);
+
+	return size;
+}
+
+static int
+validate_npar_config(struct qlcnic_adapter *adapter,
+				struct qlcnic_npar_func_cfg *np_cfg, int count)
+{
+	u8 pci_func, i;
+
+	for (i = 0; i < count; i++) {
+		pci_func = np_cfg[i].pci_func;
+		if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (!IS_VALID_BW(np_cfg[i].min_bw)
+				|| !IS_VALID_BW(np_cfg[i].max_bw)
+				|| !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
+				|| !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
+			return QL_STATUS_INVALID_PARAM;
+	}
+	return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_info nic_info;
+	struct qlcnic_npar_func_cfg *np_cfg;
+	int i, count, rem, ret;
+	u8 pci_func;
+
+	count	= size / sizeof(struct qlcnic_npar_func_cfg);
+	rem	= size % sizeof(struct qlcnic_npar_func_cfg);
+	if (rem)
+		return QL_STATUS_INVALID_PARAM;
+
+	np_cfg = (struct qlcnic_npar_func_cfg *) buf;
+	ret = validate_npar_config(adapter, np_cfg, count);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < count; i++) {
+		pci_func = np_cfg[i].pci_func;
+		ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+		if (ret)
+			return ret;
+		nic_info.pci_func = pci_func;
+		nic_info.min_tx_bw = np_cfg[i].min_bw;
+		nic_info.max_tx_bw = np_cfg[i].max_bw;
+		ret = qlcnic_set_nic_info(adapter, &nic_info);
+		if (ret)
+			return ret;
+	}
+
+	return size;
+}
+
+static ssize_t
+qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_info nic_info;
+	struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+	int i, ret;
+
+	if (size != sizeof(np_cfg))
+		return QL_STATUS_INVALID_PARAM;
+
+	memset(&np_cfg, 0, sizeof(np_cfg));
+
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+			continue;
+		ret = qlcnic_get_nic_info(adapter, &nic_info, i);
+		if (ret)
+			return ret;
+
+		np_cfg[i].pci_func = i;
+		np_cfg[i].op_mode = nic_info.op_mode;
+		np_cfg[i].port_num = nic_info.phys_port;
+		np_cfg[i].fw_capab = nic_info.capabilities;
+		np_cfg[i].min_bw = nic_info.min_tx_bw;
+		np_cfg[i].max_bw = nic_info.max_tx_bw;
+		np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
+		np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
+	}
+	memcpy(buf, &np_cfg, size);
+	return size;
+}
+
+static ssize_t
+qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+	struct qlcnic_pci_info	pci_info[QLCNIC_MAX_PCI_FUNC];
+	int i, ret;
+
+	if (size != sizeof(pci_cfg))
+		return QL_STATUS_INVALID_PARAM;
+
+	ret = qlcnic_get_pci_info(adapter, pci_info);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+		pci_cfg[i].pci_func = pci_info[i].id;
+		pci_cfg[i].func_type = pci_info[i].type;
+		pci_cfg[i].port_num = pci_info[i].default_port;
+		pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
+		pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
+		memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
+	}
+	memcpy(buf, &pci_cfg, size);
+	return size;
+}
+
+static struct bin_attribute bin_attr_npar_config = {
+	.attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_npar_config,
+	.write = qlcnic_sysfs_write_npar_config,
+};
+
+static struct bin_attribute bin_attr_pci_config = {
+	.attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_pci_config,
+	.write = NULL,
+};
+
+static struct bin_attribute bin_attr_esw_config = {
+	.attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_esw_config,
+	.write = qlcnic_sysfs_write_esw_config,
+};
+
+static struct bin_attribute bin_attr_pm_config = {
+	.attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_pm_config,
+	.write = qlcnic_sysfs_write_pm_config,
+};
+
 static void
 qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
 {
@@ -2616,6 +3274,18 @@
 		dev_info(dev, "failed to create crb sysfs entry\n");
 	if (device_create_bin_file(dev, &bin_attr_mem))
 		dev_info(dev, "failed to create mem sysfs entry\n");
+	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+			adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return;
+	if (device_create_bin_file(dev, &bin_attr_pci_config))
+		dev_info(dev, "failed to create pci config sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_npar_config))
+		dev_info(dev, "failed to create npar config sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_esw_config))
+		dev_info(dev, "failed to create esw config sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_pm_config))
+		dev_info(dev, "failed to create pm config sysfs entry\n");
 }
 
 
@@ -2627,6 +3297,13 @@
 	device_remove_file(dev, &dev_attr_diag_mode);
 	device_remove_bin_file(dev, &bin_attr_crb);
 	device_remove_bin_file(dev, &bin_attr_mem);
+	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+			adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return;
+	device_remove_bin_file(dev, &bin_attr_pci_config);
+	device_remove_bin_file(dev, &bin_attr_npar_config);
+	device_remove_bin_file(dev, &bin_attr_esw_config);
+	device_remove_bin_file(dev, &bin_attr_pm_config);
 }
 
 #ifdef CONFIG_INET
@@ -2684,7 +3361,7 @@
 	if (!adapter)
 		goto done;
 
-	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
 		goto done;
 
 	qlcnic_config_indev_addr(dev, event);
@@ -2720,7 +3397,7 @@
 	if (!adapter)
 		goto done;
 
-	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
 		goto done;
 
 	switch (event) {
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 20624ba..06b2188 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
  */
 #define DRV_NAME  	"qlge"
 #define DRV_STRING 	"QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION	"v1.00.00.23.00.00-01"
+#define DRV_VERSION	"v1.00.00.25.00.00-01"
 
 #define PFX "qlge: "
 
@@ -1062,7 +1062,7 @@
 #define TX_DESC_LEN_MASK	0x000fffff
 #define TX_DESC_C	0x40000000
 #define TX_DESC_E	0x80000000
-} __attribute((packed));
+} __packed;
 
 /*
  * IOCB Definitions...
@@ -1095,7 +1095,7 @@
 	__le16 vlan_tci;
 	__le16 reserved4;
 	struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __attribute((packed));
+} __packed;
 
 struct ob_mac_iocb_rsp {
 	u8 opcode;		/* */
@@ -1112,7 +1112,7 @@
 	u32 tid;
 	u32 txq_idx;
 	__le32 reserved[13];
-} __attribute((packed));
+} __packed;
 
 struct ob_mac_tso_iocb_req {
 	u8 opcode;
@@ -1140,7 +1140,7 @@
 	__le16 vlan_tci;
 	__le16 mss;
 	struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __attribute((packed));
+} __packed;
 
 struct ob_mac_tso_iocb_rsp {
 	u8 opcode;
@@ -1157,7 +1157,7 @@
 	u32 tid;
 	u32 txq_idx;
 	__le32 reserved2[13];
-} __attribute((packed));
+} __packed;
 
 struct ib_mac_iocb_rsp {
 	u8 opcode;		/* 0x20 */
@@ -1216,7 +1216,7 @@
 #define IB_MAC_IOCB_RSP_HL	0x80
 	__le32 hdr_len;		/* */
 	__le64 hdr_addr;	/* */
-} __attribute((packed));
+} __packed;
 
 struct ib_ae_iocb_rsp {
 	u8 opcode;
@@ -1237,7 +1237,7 @@
 #define PCI_ERR_ANON_BUF_RD        0x40
 	u8 q_id;
 	__le32 reserved[15];
-} __attribute((packed));
+} __packed;
 
 /*
  * These three structures are for generic
@@ -1249,7 +1249,7 @@
 	__le16 length;
 	__le32 tid;
 	__le32 reserved[14];
-} __attribute((packed));
+} __packed;
 
 struct net_req_iocb {
 	u8 opcode;
@@ -1257,7 +1257,7 @@
 	__le16 flags1;
 	__le32 tid;
 	__le32 reserved1[30];
-} __attribute((packed));
+} __packed;
 
 /*
  * tx ring initialization control block for chip.
@@ -1283,7 +1283,7 @@
 	__le16 rid;
 	__le64 addr;
 	__le64 cnsmr_idx_addr;
-} __attribute((packed));
+} __packed;
 
 /*
  * rx ring initialization control block for chip.
@@ -1317,7 +1317,7 @@
 	__le64 sbq_addr;
 	__le16 sbq_buf_size;
 	__le16 sbq_len;		/* entry count */
-} __attribute((packed));
+} __packed;
 
 struct ricb {
 	u8 base_cq;
@@ -1335,7 +1335,7 @@
 	u8 hash_cq_id[1024];
 	__le32 ipv6_hash_key[10];
 	__le32 ipv4_hash_key[4];
-} __attribute((packed));
+} __packed;
 
 /* SOFTWARE/DRIVER DATA STRUCTURES. */
 
@@ -2227,7 +2227,6 @@
 		u32 ram_addr, int word_count);
 int ql_core_dump(struct ql_adapter *qdev,
 		struct ql_mpi_coredump *mpi_coredump);
-int ql_mb_sys_err(struct ql_adapter *qdev);
 int ql_mb_about_fw(struct ql_adapter *qdev);
 int ql_wol(struct ql_adapter *qdev);
 int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
@@ -2246,6 +2245,7 @@
 void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
 int ql_own_firmware(struct ql_adapter *qdev);
 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
+void qlge_set_multicast_list(struct net_device *ndev);
 
 #if 1
 #define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 68a1c9b..548e901 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1237,12 +1237,7 @@
 			  "Force Coredump can only be done from interface that is up.\n");
 		return;
 	}
-
-	if (ql_mb_sys_err(qdev)) {
-		netif_err(qdev, ifup, qdev->ndev,
-			  "Fail force coredump with ql_mb_sys_err().\n");
-		return;
-	}
+	ql_queue_fw_error(qdev);
 }
 
 void ql_gen_reg_dump(struct ql_adapter *qdev,
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index d10bcef..8d63f69 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -574,6 +574,22 @@
 			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
 			break;
 		}
+	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
+		{
+			value = RT_IDX_DST_DFLT_Q | /* dest */
+				RT_IDX_TYPE_NICQ | /* type */
+				(RT_IDX_IP_CSUM_ERR_SLOT <<
+				RT_IDX_IDX_SHIFT); /* index */
+			break;
+		}
+	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
+		{
+			value = RT_IDX_DST_DFLT_Q | /* dest */
+				RT_IDX_TYPE_NICQ | /* type */
+				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
+				RT_IDX_IDX_SHIFT); /* index */
+			break;
+		}
 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
 		{
 			value = RT_IDX_DST_DFLT_Q |	/* dest */
@@ -1521,7 +1537,7 @@
 
 	/* Frame error, so drop the packet. */
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
-		netif_err(qdev, drv, qdev->ndev,
+		netif_info(qdev, drv, qdev->ndev,
 			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
 		rx_ring->rx_errors++;
 		goto err_out;
@@ -1618,7 +1634,7 @@
 
 	/* Frame error, so drop the packet. */
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
-		netif_err(qdev, drv, qdev->ndev,
+		netif_info(qdev, drv, qdev->ndev,
 			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
 		dev_kfree_skb_any(skb);
 		rx_ring->rx_errors++;
@@ -1677,7 +1693,7 @@
 			/* Unfragmented ipv4 UDP frame. */
 			struct iphdr *iph = (struct iphdr *) skb->data;
 			if (!(iph->frag_off &
-				cpu_to_be16(IP_MF|IP_OFFSET))) {
+				ntohs(IP_MF|IP_OFFSET))) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 				netif_printk(qdev, rx_status, KERN_DEBUG,
 					     qdev->ndev,
@@ -1939,7 +1955,7 @@
 
 	/* Frame error, so drop the packet. */
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
-		netif_err(qdev, drv, qdev->ndev,
+		netif_info(qdev, drv, qdev->ndev,
 			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
 		dev_kfree_skb_any(skb);
 		rx_ring->rx_errors++;
@@ -1997,7 +2013,7 @@
 		/* Unfragmented ipv4 UDP frame. */
 			struct iphdr *iph = (struct iphdr *) skb->data;
 			if (!(iph->frag_off &
-				cpu_to_be16(IP_MF|IP_OFFSET))) {
+				ntohs(IP_MF|IP_OFFSET))) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 					     "TCP checksum done!\n");
@@ -3587,10 +3603,20 @@
 	if (status)
 		return status;
 
-	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
+	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
+						RT_IDX_IP_CSUM_ERR, 1);
 	if (status) {
 		netif_err(qdev, ifup, qdev->ndev,
-			  "Failed to init routing register for error packets.\n");
+			"Failed to init routing register "
+			"for IP CSUM error packets.\n");
+		goto exit;
+	}
+	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
+						RT_IDX_TU_CSUM_ERR, 1);
+	if (status) {
+		netif_err(qdev, ifup, qdev->ndev,
+			"Failed to init routing register "
+			"for TCP/UDP CSUM error packets.\n");
 		goto exit;
 	}
 	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
@@ -3919,6 +3945,11 @@
 	if ((ql_read32(qdev, STS) & qdev->port_init) &&
 			(ql_read32(qdev, STS) & qdev->port_link_up))
 		ql_link_on(qdev);
+	/* Restore rx mode. */
+	clear_bit(QL_ALLMULTI, &qdev->flags);
+	clear_bit(QL_PROMISCUOUS, &qdev->flags);
+	qlge_set_multicast_list(qdev->ndev);
+
 	ql_enable_interrupts(qdev);
 	ql_enable_all_completion_interrupts(qdev);
 	netif_tx_start_all_queues(qdev->ndev);
@@ -4204,7 +4235,7 @@
 	return &ndev->stats;
 }
 
-static void qlge_set_multicast_list(struct net_device *ndev)
+void qlge_set_multicast_list(struct net_device *ndev)
 {
 	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
 	struct netdev_hw_addr *ha;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 3c00462..f84e857 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -606,23 +606,6 @@
 	return status;
 }
 
-int ql_mb_sys_err(struct ql_adapter *qdev)
-{
-	struct mbox_params mbc;
-	struct mbox_params *mbcp = &mbc;
-	int status;
-
-	memset(mbcp, 0, sizeof(struct mbox_params));
-
-	mbcp->in_count = 1;
-	mbcp->out_count = 0;
-
-	mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR;
-
-	status = ql_mailbox_command(qdev, mbcp);
-	return status;
-}
-
 /* Get MPI firmware version. This will be used for
  * driver banner and for ethtool info.
  * Returns zero on success.
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 9a251ac..7d482a2 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -44,12 +44,13 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/uaccess.h>
+#include <linux/phy.h>
 
 #include <asm/processor.h>
 
 #define DRV_NAME	"r6040"
-#define DRV_VERSION	"0.25"
-#define DRV_RELDATE	"20Aug2009"
+#define DRV_VERSION	"0.26"
+#define DRV_RELDATE	"30May2010"
 
 /* PHY CHIP Address */
 #define PHY1_ADDR	1	/* For MAC1 */
@@ -179,7 +180,6 @@
 
 struct r6040_private {
 	spinlock_t lock;		/* driver lock */
-	struct timer_list timer;
 	struct pci_dev *pdev;
 	struct r6040_descriptor *rx_insert_ptr;
 	struct r6040_descriptor *rx_remove_ptr;
@@ -189,13 +189,15 @@
 	struct r6040_descriptor *tx_ring;
 	dma_addr_t rx_ring_dma;
 	dma_addr_t tx_ring_dma;
-	u16	tx_free_desc, phy_addr, phy_mode;
+	u16	tx_free_desc, phy_addr;
 	u16	mcr0, mcr1;
-	u16	switch_sig;
 	struct net_device *dev;
-	struct mii_if_info mii_if;
+	struct mii_bus *mii_bus;
 	struct napi_struct napi;
 	void __iomem *base;
+	struct phy_device *phydev;
+	int old_link;
+	int old_duplex;
 };
 
 static char version[] __devinitdata = KERN_INFO DRV_NAME
@@ -238,20 +240,30 @@
 	}
 }
 
-static int r6040_mdio_read(struct net_device *dev, int mii_id, int reg)
+static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
 {
+	struct net_device *dev = bus->priv;
 	struct r6040_private *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
 
-	return (r6040_phy_read(ioaddr, lp->phy_addr, reg));
+	return r6040_phy_read(ioaddr, phy_addr, reg);
 }
 
-static void r6040_mdio_write(struct net_device *dev, int mii_id, int reg, int val)
+static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
+						int reg, u16 value)
 {
+	struct net_device *dev = bus->priv;
 	struct r6040_private *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
 
-	r6040_phy_write(ioaddr, lp->phy_addr, reg, val);
+	r6040_phy_write(ioaddr, phy_addr, reg, value);
+
+	return 0;
+}
+
+static int r6040_mdiobus_reset(struct mii_bus *bus)
+{
+	return 0;
 }
 
 static void r6040_free_txbufs(struct net_device *dev)
@@ -408,10 +420,9 @@
 	void __iomem *ioaddr = priv->base;
 
 	netdev_warn(dev, "transmit timed out, int enable %4.4x "
-		"status %4.4x, PHY status %4.4x\n",
+		"status %4.4x\n",
 		ioread16(ioaddr + MIER),
-		ioread16(ioaddr + MISR),
-		r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
+		ioread16(ioaddr + MISR));
 
 	dev->stats.tx_errors++;
 
@@ -463,9 +474,6 @@
 	struct r6040_private *lp = netdev_priv(dev);
 	struct pci_dev *pdev = lp->pdev;
 
-	/* deleted timer */
-	del_timer_sync(&lp->timer);
-
 	spin_lock_irq(&lp->lock);
 	napi_disable(&lp->napi);
 	netif_stop_queue(dev);
@@ -495,64 +503,14 @@
 	return 0;
 }
 
-/* Status of PHY CHIP */
-static int r6040_phy_mode_chk(struct net_device *dev)
-{
-	struct r6040_private *lp = netdev_priv(dev);
-	void __iomem *ioaddr = lp->base;
-	int phy_dat;
-
-	/* PHY Link Status Check */
-	phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
-	if (!(phy_dat & 0x4))
-		phy_dat = 0x8000;	/* Link Failed, full duplex */
-
-	/* PHY Chip Auto-Negotiation Status */
-	phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
-	if (phy_dat & 0x0020) {
-		/* Auto Negotiation Mode */
-		phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 5);
-		phy_dat &= r6040_phy_read(ioaddr, lp->phy_addr, 4);
-		if (phy_dat & 0x140)
-			/* Force full duplex */
-			phy_dat = 0x8000;
-		else
-			phy_dat = 0;
-	} else {
-		/* Force Mode */
-		phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 0);
-		if (phy_dat & 0x100)
-			phy_dat = 0x8000;
-		else
-			phy_dat = 0x0000;
-	}
-
-	return phy_dat;
-};
-
-static void r6040_set_carrier(struct mii_if_info *mii)
-{
-	if (r6040_phy_mode_chk(mii->dev)) {
-		/* autoneg is off: Link is always assumed to be up */
-		if (!netif_carrier_ok(mii->dev))
-			netif_carrier_on(mii->dev);
-	} else
-		r6040_phy_mode_chk(mii->dev);
-}
-
 static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct r6040_private *lp = netdev_priv(dev);
-	struct mii_ioctl_data *data = if_mii(rq);
-	int rc;
 
-	if (!netif_running(dev))
+	if (!lp->phydev)
 		return -EINVAL;
-	spin_lock_irq(&lp->lock);
-	rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
-	spin_unlock_irq(&lp->lock);
-	r6040_set_carrier(&lp->mii_if);
-	return rc;
+
+	return phy_mii_ioctl(lp->phydev, if_mii(rq), cmd);
 }
 
 static int r6040_rx(struct net_device *dev, int limit)
@@ -751,26 +709,6 @@
 	if (ret)
 		return ret;
 
-	/* Read the PHY ID */
-	lp->switch_sig = r6040_phy_read(ioaddr, 0, 2);
-
-	if (lp->switch_sig  == ICPLUS_PHY_ID) {
-		r6040_phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */
-		lp->phy_mode = 0x8000;
-	} else {
-		/* PHY Mode Check */
-		r6040_phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP);
-		r6040_phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE);
-
-		if (PHY_MODE == 0x3100)
-			lp->phy_mode = r6040_phy_mode_chk(dev);
-		else
-			lp->phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
-	}
-
-	/* Set duplex mode */
-	lp->mcr0 |= lp->phy_mode;
-
 	/* improve performance (by RDC guys) */
 	r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
 	r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
@@ -783,35 +721,6 @@
 	return 0;
 }
 
-/*
-  A periodic timer routine
-	Polling PHY Chip Link Status
-*/
-static void r6040_timer(unsigned long data)
-{
-	struct net_device *dev = (struct net_device *)data;
-	struct r6040_private *lp = netdev_priv(dev);
-	void __iomem *ioaddr = lp->base;
-	u16 phy_mode;
-
-	/* Polling PHY Chip Status */
-	if (PHY_MODE == 0x3100)
-		phy_mode = r6040_phy_mode_chk(dev);
-	else
-		phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
-
-	if (phy_mode != lp->phy_mode) {
-		lp->phy_mode = phy_mode;
-		lp->mcr0 = (lp->mcr0 & 0x7fff) | phy_mode;
-		iowrite16(lp->mcr0, ioaddr);
-	}
-
-	/* Timer active again */
-	mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
-
-	/* Check media */
-	mii_check_media(&lp->mii_if, 1, 1);
-}
 
 /* Read/set MAC address routines */
 static void r6040_mac_address(struct net_device *dev)
@@ -873,10 +782,6 @@
 	napi_enable(&lp->napi);
 	netif_start_queue(dev);
 
-	/* set and active a timer process */
-	setup_timer(&lp->timer, r6040_timer, (unsigned long) dev);
-	if (lp->switch_sig != ICPLUS_PHY_ID)
-		mod_timer(&lp->timer, jiffies + HZ);
 	return 0;
 }
 
@@ -1015,40 +920,22 @@
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct r6040_private *rp = netdev_priv(dev);
-	int rc;
 
-	spin_lock_irq(&rp->lock);
-	rc = mii_ethtool_gset(&rp->mii_if, cmd);
-	spin_unlock_irq(&rp->lock);
-
-	return rc;
+	return phy_ethtool_gset(rp->phydev, cmd);
 }
 
 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct r6040_private *rp = netdev_priv(dev);
-	int rc;
 
-	spin_lock_irq(&rp->lock);
-	rc = mii_ethtool_sset(&rp->mii_if, cmd);
-	spin_unlock_irq(&rp->lock);
-	r6040_set_carrier(&rp->mii_if);
-
-	return rc;
-}
-
-static u32 netdev_get_link(struct net_device *dev)
-{
-	struct r6040_private *rp = netdev_priv(dev);
-
-	return mii_link_ok(&rp->mii_if);
+	return phy_ethtool_sset(rp->phydev, cmd);
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
 	.get_drvinfo		= netdev_get_drvinfo,
 	.get_settings		= netdev_get_settings,
 	.set_settings		= netdev_set_settings,
-	.get_link		= netdev_get_link,
+	.get_link		= ethtool_op_get_link,
 };
 
 static const struct net_device_ops r6040_netdev_ops = {
@@ -1067,6 +954,79 @@
 #endif
 };
 
+static void r6040_adjust_link(struct net_device *dev)
+{
+	struct r6040_private *lp = netdev_priv(dev);
+	struct phy_device *phydev = lp->phydev;
+	int status_changed = 0;
+	void __iomem *ioaddr = lp->base;
+
+	BUG_ON(!phydev);
+
+	if (lp->old_link != phydev->link) {
+		status_changed = 1;
+		lp->old_link = phydev->link;
+	}
+
+	/* reflect duplex change */
+	if (phydev->link && (lp->old_duplex != phydev->duplex)) {
+		lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
+		iowrite16(lp->mcr0, ioaddr);
+
+		status_changed = 1;
+		lp->old_duplex = phydev->duplex;
+	}
+
+	if (status_changed) {
+		pr_info("%s: link %s", dev->name, phydev->link ?
+			"UP" : "DOWN");
+		if (phydev->link)
+			pr_cont(" - %d/%s", phydev->speed,
+			DUPLEX_FULL == phydev->duplex ? "full" : "half");
+		pr_cont("\n");
+	}
+}
+
+static int r6040_mii_probe(struct net_device *dev)
+{
+	struct r6040_private *lp = netdev_priv(dev);
+	struct phy_device *phydev = NULL;
+
+	phydev = phy_find_first(lp->mii_bus);
+	if (!phydev) {
+		dev_err(&lp->pdev->dev, "no PHY found\n");
+		return -ENODEV;
+	}
+
+	phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link,
+				0, PHY_INTERFACE_MODE_MII);
+
+	if (IS_ERR(phydev)) {
+		dev_err(&lp->pdev->dev, "could not attach to PHY\n");
+		return PTR_ERR(phydev);
+	}
+
+	/* mask with MAC supported features */
+	phydev->supported &= (SUPPORTED_10baseT_Half
+				| SUPPORTED_10baseT_Full
+				| SUPPORTED_100baseT_Half
+				| SUPPORTED_100baseT_Full
+				| SUPPORTED_Autoneg
+				| SUPPORTED_MII
+				| SUPPORTED_TP);
+
+	phydev->advertising = phydev->supported;
+	lp->phydev = phydev;
+	lp->old_link = 0;
+	lp->old_duplex = -1;
+
+	dev_info(&lp->pdev->dev, "attached PHY driver [%s] "
+		"(mii_bus:phy_addr=%s)\n",
+		phydev->drv->name, dev_name(&phydev->dev));
+
+	return 0;
+}
+
 static int __devinit r6040_init_one(struct pci_dev *pdev,
 					 const struct pci_device_id *ent)
 {
@@ -1077,6 +1037,7 @@
 	static int card_idx = -1;
 	int bar = 0;
 	u16 *adrp;
+	int i;
 
 	printk("%s\n", version);
 
@@ -1163,7 +1124,6 @@
 	/* Init RDC private data */
 	lp->mcr0 = 0x1002;
 	lp->phy_addr = phy_table[card_idx];
-	lp->switch_sig = 0;
 
 	/* The RDC-specific entries in the device structure. */
 	dev->netdev_ops = &r6040_netdev_ops;
@@ -1171,28 +1131,54 @@
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	netif_napi_add(dev, &lp->napi, r6040_poll, 64);
-	lp->mii_if.dev = dev;
-	lp->mii_if.mdio_read = r6040_mdio_read;
-	lp->mii_if.mdio_write = r6040_mdio_write;
-	lp->mii_if.phy_id = lp->phy_addr;
-	lp->mii_if.phy_id_mask = 0x1f;
-	lp->mii_if.reg_num_mask = 0x1f;
 
-	/* Check the vendor ID on the PHY, if 0xffff assume none attached */
-	if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) {
-		dev_err(&pdev->dev, "Failed to detect an attached PHY\n");
-		err = -ENODEV;
+	lp->mii_bus = mdiobus_alloc();
+	if (!lp->mii_bus) {
+		dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
 		goto err_out_unmap;
 	}
 
+	lp->mii_bus->priv = dev;
+	lp->mii_bus->read = r6040_mdiobus_read;
+	lp->mii_bus->write = r6040_mdiobus_write;
+	lp->mii_bus->reset = r6040_mdiobus_reset;
+	lp->mii_bus->name = "r6040_eth_mii";
+	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
+	lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!lp->mii_bus->irq) {
+		dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
+		err = -ENOMEM;
+		goto err_out_mdio;
+	}
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		lp->mii_bus->irq[i] = PHY_POLL;
+
+	err = mdiobus_register(lp->mii_bus);
+	if (err) {
+		dev_err(&pdev->dev, "failed to register MII bus\n");
+		goto err_out_mdio_irq;
+	}
+
+	err = r6040_mii_probe(dev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to probe MII bus\n");
+		goto err_out_mdio_unregister;
+	}
+
 	/* Register net device. After this dev->name assign */
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed to register net device\n");
-		goto err_out_unmap;
+		goto err_out_mdio_unregister;
 	}
 	return 0;
 
+err_out_mdio_unregister:
+	mdiobus_unregister(lp->mii_bus);
+err_out_mdio_irq:
+	kfree(lp->mii_bus->irq);
+err_out_mdio:
+	mdiobus_free(lp->mii_bus);
 err_out_unmap:
 	pci_iounmap(pdev, ioaddr);
 err_out_free_res:
@@ -1206,8 +1192,12 @@
 static void __devexit r6040_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
+	struct r6040_private *lp = netdev_priv(dev);
 
 	unregister_netdev(dev);
+	mdiobus_unregister(lp->mii_bus);
+	kfree(lp->mii_bus->irq);
+	mdiobus_free(lp->mii_bus);
 	pci_release_regions(pdev);
 	free_netdev(dev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 96b6cfb..239d7ef 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -88,7 +88,7 @@
 #define RTL_W32(reg, val32)	writel ((val32), ioaddr + (reg))
 #define RTL_R8(reg)		readb (ioaddr + (reg))
 #define RTL_R16(reg)		readw (ioaddr + (reg))
-#define RTL_R32(reg)		((unsigned long) readl (ioaddr + (reg)))
+#define RTL_R32(reg)		readl (ioaddr + (reg))
 
 enum mac_version {
 	RTL_GIGA_MAC_NONE   = 0x00,
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 1d37f0c..d0af924 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -38,7 +38,7 @@
  * Tx descriptors that can be associated with each corresponding FIFO.
  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
  *     2(MSI_X). Default value is '2(MSI_X)'
- * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
+ * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
- *     Possible values '1' for enable '0' for disable. Default is '0'
+ *     Possible values '1' for enable '0' for disable. Default is '1'
  * lro_max_pkts: This parameter defines maximum number of packets can be
  *     aggregated as a single large packet
@@ -90,7 +90,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.26.25"
+#define DRV_VERSION "2.0.26.26"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -496,7 +496,7 @@
 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
 S2IO_PARM_INT(intr_type, 2);
 /* Large receive offload feature */
-static unsigned int lro_enable;
+static unsigned int lro_enable = 1;
 module_param_named(lro, lro_enable, uint, 0);
 
 /* Max pkts to be aggregated by LRO at one time. If not specified,
@@ -795,7 +795,6 @@
 		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
 		ring->nic = nic;
 		ring->ring_no = i;
-		ring->lro = lro_enable;
 
 		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
 		/*  Allocating all the Rx blocks */
@@ -6707,6 +6706,7 @@
 {
 	return (dev->features & NETIF_F_TSO) != 0;
 }
+
 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
 {
 	if (data)
@@ -6717,6 +6717,42 @@
 	return 0;
 }
 
+static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
+{
+	struct s2io_nic *sp = netdev_priv(dev);
+	int rc = 0;
+	int changed = 0;
+
+	if (data & ~ETH_FLAG_LRO)
+		return -EINVAL;
+
+	if (data & ETH_FLAG_LRO) {
+		if (lro_enable) {
+			if (!(dev->features & NETIF_F_LRO)) {
+				dev->features |= NETIF_F_LRO;
+				changed = 1;
+			}
+		} else {
+			rc = -EINVAL;
+		}
+	} else if (dev->features & NETIF_F_LRO) {
+		dev->features &= ~NETIF_F_LRO;
+		changed = 1;
+	}
+
+	if (changed && netif_running(dev)) {
+		s2io_stop_all_tx_queue(sp);
+		s2io_card_down(sp);
+		sp->lro = !!(dev->features & NETIF_F_LRO);
+		rc = s2io_card_up(sp);
+		if (rc)
+			s2io_reset(sp);
+		else
+			s2io_start_all_tx_queue(sp);
+	}
+
+	return rc;
+}
+
 static const struct ethtool_ops netdev_ethtool_ops = {
 	.get_settings = s2io_ethtool_gset,
 	.set_settings = s2io_ethtool_sset,
@@ -6733,6 +6769,8 @@
 	.get_rx_csum = s2io_ethtool_get_rx_csum,
 	.set_rx_csum = s2io_ethtool_set_rx_csum,
 	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
+	.set_flags = s2io_ethtool_set_flags,
+	.get_flags = ethtool_op_get_flags,
 	.set_sg = ethtool_op_set_sg,
 	.get_tso = s2io_ethtool_op_get_tso,
 	.set_tso = s2io_ethtool_op_set_tso,
@@ -7261,6 +7299,7 @@
 		struct ring_info *ring = &mac_control->rings[i];
 
 		ring->mtu = dev->mtu;
+		ring->lro = sp->lro;
 		ret = fill_rx_buffers(sp, ring, 1);
 		if (ret) {
 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
@@ -8001,7 +8040,8 @@
 	dev->netdev_ops = &s2io_netdev_ops;
 	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-
+	if (lro_enable)
+		dev->features |= NETIF_F_LRO;
 	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 	if (sp->high_dma_flag == true)
 		dev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 1564605..ba674c5 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -27,6 +27,7 @@
 #include "nic.h"
 
 #include "mcdi.h"
+#include "workarounds.h"
 
 /**************************************************************************
  *
@@ -92,13 +93,6 @@
 
 #define EFX_MAX_MTU (9 * 1024)
 
-/* RX slow fill workqueue. If memory allocation fails in the fast path,
- * a work item is pushed onto this work queue to retry the allocation later,
- * to avoid the NIC being starved of RX buffers. Since this is a per cpu
- * workqueue, there is nothing to be gained in making it per NIC
- */
-static struct workqueue_struct *refill_workqueue;
-
 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
  * queued onto this work queue. This is not a per-nic work queue, because
  * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -195,6 +189,13 @@
 MODULE_PARM_DESC(irq_adapt_high_thresh,
 		 "Threshold score for increasing IRQ moderation");
 
+static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
 /**************************************************************************
  *
  * Utility functions and prototypes
@@ -278,16 +279,16 @@
 {
 	struct efx_channel *channel =
 		container_of(napi, struct efx_channel, napi_str);
+	struct efx_nic *efx = channel->efx;
 	int spent;
 
-	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
-		  channel->channel, raw_smp_processor_id());
+	netif_vdbg(efx, intr, efx->net_dev,
+		   "channel %d NAPI poll executing on CPU %d\n",
+		   channel->channel, raw_smp_processor_id());
 
 	spent = efx_process_channel(channel, budget);
 
 	if (spent < budget) {
-		struct efx_nic *efx = channel->efx;
-
 		if (channel->channel < efx->n_rx_channels &&
 		    efx->irq_rx_adaptive &&
 		    unlikely(++channel->irq_count == 1000)) {
@@ -363,7 +364,8 @@
  */
 static int efx_probe_eventq(struct efx_channel *channel)
 {
-	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
+	netif_dbg(channel->efx, probe, channel->efx->net_dev,
+		  "chan %d create event queue\n", channel->channel);
 
 	return efx_nic_probe_eventq(channel);
 }
@@ -371,7 +373,8 @@
 /* Prepare channel's event queue */
 static void efx_init_eventq(struct efx_channel *channel)
 {
-	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
+	netif_dbg(channel->efx, drv, channel->efx->net_dev,
+		  "chan %d init event queue\n", channel->channel);
 
 	channel->eventq_read_ptr = 0;
 
@@ -380,14 +383,16 @@
 
 static void efx_fini_eventq(struct efx_channel *channel)
 {
-	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
+	netif_dbg(channel->efx, drv, channel->efx->net_dev,
+		  "chan %d fini event queue\n", channel->channel);
 
 	efx_nic_fini_eventq(channel);
 }
 
 static void efx_remove_eventq(struct efx_channel *channel)
 {
-	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
+	netif_dbg(channel->efx, drv, channel->efx->net_dev,
+		  "chan %d remove event queue\n", channel->channel);
 
 	efx_nic_remove_eventq(channel);
 }
@@ -404,7 +409,8 @@
 	struct efx_rx_queue *rx_queue;
 	int rc;
 
-	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);
+	netif_dbg(channel->efx, probe, channel->efx->net_dev,
+		  "creating channel %d\n", channel->channel);
 
 	rc = efx_probe_eventq(channel);
 	if (rc)
@@ -474,12 +480,15 @@
 	 */
 	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
 			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+			      efx->type->rx_buffer_hash_size +
 			      efx->type->rx_buffer_padding);
-	efx->rx_buffer_order = get_order(efx->rx_buffer_len);
+	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
+					 sizeof(struct efx_rx_page_state));
 
 	/* Initialise the channels */
 	efx_for_each_channel(channel, efx) {
-		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
+		netif_dbg(channel->efx, drv, channel->efx->net_dev,
+			  "init chan %d\n", channel->channel);
 
 		efx_init_eventq(channel);
 
@@ -506,7 +515,8 @@
 {
 	struct efx_rx_queue *rx_queue;
 
-	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
+	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+		  "starting chan %d\n", channel->channel);
 
 	/* The interrupt handler for this channel may set work_pending
 	 * as soon as we enable it.  Make sure it's cleared before
@@ -515,11 +525,11 @@
 	channel->enabled = true;
 	smp_wmb();
 
-	napi_enable(&channel->napi_str);
-
-	/* Load up RX descriptors */
+	/* Fill the queues before enabling NAPI */
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_fast_push_rx_descriptors(rx_queue);
+
+	napi_enable(&channel->napi_str);
 }
 
 /* This disables event queue processing and packet transmission.
@@ -528,21 +538,14 @@
  */
 static void efx_stop_channel(struct efx_channel *channel)
 {
-	struct efx_rx_queue *rx_queue;
-
 	if (!channel->enabled)
 		return;
 
-	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
+	netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
+		  "stop chan %d\n", channel->channel);
 
 	channel->enabled = false;
 	napi_disable(&channel->napi_str);
-
-	/* Ensure that any worker threads have exited or will be no-ops */
-	efx_for_each_channel_rx_queue(rx_queue, channel) {
-		spin_lock_bh(&rx_queue->add_lock);
-		spin_unlock_bh(&rx_queue->add_lock);
-	}
 }
 
 static void efx_fini_channels(struct efx_nic *efx)
@@ -556,13 +559,24 @@
 	BUG_ON(efx->port_enabled);
 
 	rc = efx_nic_flush_queues(efx);
-	if (rc)
-		EFX_ERR(efx, "failed to flush queues\n");
-	else
-		EFX_LOG(efx, "successfully flushed all queues\n");
+	if (rc && EFX_WORKAROUND_7803(efx)) {
+		/* Schedule a reset to recover from the flush failure. The
+		 * descriptor caches reference memory we're about to free,
+		 * but falcon_reconfigure_mac_wrapper() won't reconnect
+		 * the MACs because of the pending reset. */
+		netif_err(efx, drv, efx->net_dev,
+			  "Resetting to recover from flush failure\n");
+		efx_schedule_reset(efx, RESET_TYPE_ALL);
+	} else if (rc) {
+		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+	} else {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "successfully flushed all queues\n");
+	}
 
 	efx_for_each_channel(channel, efx) {
-		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
+		netif_dbg(channel->efx, drv, channel->efx->net_dev,
+			  "shut down chan %d\n", channel->channel);
 
 		efx_for_each_channel_rx_queue(rx_queue, channel)
 			efx_fini_rx_queue(rx_queue);
@@ -577,7 +591,8 @@
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 
-	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);
+	netif_dbg(channel->efx, drv, channel->efx->net_dev,
+		  "destroy chan %d\n", channel->channel);
 
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_remove_rx_queue(rx_queue);
@@ -586,9 +601,9 @@
 	efx_remove_eventq(channel);
 }
 
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 {
-	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
+	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
 }
 
 /**************************************************************************
@@ -628,12 +643,13 @@
 
 	/* Status message for kernel log */
 	if (link_state->up) {
-		EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
-			 link_state->speed, link_state->fd ? "full" : "half",
-			 efx->net_dev->mtu,
-			 (efx->promiscuous ? " [PROMISC]" : ""));
+		netif_info(efx, link, efx->net_dev,
+			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
+			   link_state->speed, link_state->fd ? "full" : "half",
+			   efx->net_dev->mtu,
+			   (efx->promiscuous ? " [PROMISC]" : ""));
 	} else {
-		EFX_INFO(efx, "link down\n");
+		netif_info(efx, link, efx->net_dev, "link down\n");
 	}
 
 }
@@ -737,7 +753,7 @@
 {
 	int rc;
 
-	EFX_LOG(efx, "create port\n");
+	netif_dbg(efx, probe, efx->net_dev, "create port\n");
 
 	if (phy_flash_cfg)
 		efx->phy_mode = PHY_MODE_SPECIAL;
@@ -751,15 +767,16 @@
 	if (is_valid_ether_addr(efx->mac_address)) {
 		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
 	} else {
-		EFX_ERR(efx, "invalid MAC address %pM\n",
-			efx->mac_address);
+		netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
+			  efx->mac_address);
 		if (!allow_bad_hwaddr) {
 			rc = -EINVAL;
 			goto err;
 		}
 		random_ether_addr(efx->net_dev->dev_addr);
-		EFX_INFO(efx, "using locally-generated MAC %pM\n",
-			 efx->net_dev->dev_addr);
+		netif_info(efx, probe, efx->net_dev,
+			   "using locally-generated MAC %pM\n",
+			   efx->net_dev->dev_addr);
 	}
 
 	return 0;
@@ -773,7 +790,7 @@
 {
 	int rc;
 
-	EFX_LOG(efx, "init port\n");
+	netif_dbg(efx, drv, efx->net_dev, "init port\n");
 
 	mutex_lock(&efx->mac_lock);
 
@@ -804,7 +821,7 @@
 
 static void efx_start_port(struct efx_nic *efx)
 {
-	EFX_LOG(efx, "start port\n");
+	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
 	BUG_ON(efx->port_enabled);
 
 	mutex_lock(&efx->mac_lock);
@@ -821,7 +838,7 @@
 /* Prevent efx_mac_work() and efx_monitor() from working */
 static void efx_stop_port(struct efx_nic *efx)
 {
-	EFX_LOG(efx, "stop port\n");
+	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
 
 	mutex_lock(&efx->mac_lock);
 	efx->port_enabled = false;
@@ -836,7 +853,7 @@
 
 static void efx_fini_port(struct efx_nic *efx)
 {
-	EFX_LOG(efx, "shut down port\n");
+	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
 
 	if (!efx->port_initialized)
 		return;
@@ -850,7 +867,7 @@
 
 static void efx_remove_port(struct efx_nic *efx)
 {
-	EFX_LOG(efx, "destroying port\n");
+	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
 
 	efx->type->remove_port(efx);
 }
@@ -868,11 +885,12 @@
 	dma_addr_t dma_mask = efx->type->max_dma_mask;
 	int rc;
 
-	EFX_LOG(efx, "initialising I/O\n");
+	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
 
 	rc = pci_enable_device(pci_dev);
 	if (rc) {
-		EFX_ERR(efx, "failed to enable PCI device\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "failed to enable PCI device\n");
 		goto fail1;
 	}
 
@@ -890,39 +908,45 @@
 		dma_mask >>= 1;
 	}
 	if (rc) {
-		EFX_ERR(efx, "could not find a suitable DMA mask\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "could not find a suitable DMA mask\n");
 		goto fail2;
 	}
-	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
+	netif_dbg(efx, probe, efx->net_dev,
+		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
 	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
 	if (rc) {
 		/* pci_set_consistent_dma_mask() is not *allowed* to
 		 * fail with a mask that pci_set_dma_mask() accepted,
 		 * but just in case...
 		 */
-		EFX_ERR(efx, "failed to set consistent DMA mask\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "failed to set consistent DMA mask\n");
 		goto fail2;
 	}
 
 	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
 	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
 	if (rc) {
-		EFX_ERR(efx, "request for memory BAR failed\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "request for memory BAR failed\n");
 		rc = -EIO;
 		goto fail3;
 	}
 	efx->membase = ioremap_nocache(efx->membase_phys,
 				       efx->type->mem_map_size);
 	if (!efx->membase) {
-		EFX_ERR(efx, "could not map memory BAR at %llx+%x\n",
-			(unsigned long long)efx->membase_phys,
-			efx->type->mem_map_size);
+		netif_err(efx, probe, efx->net_dev,
+			  "could not map memory BAR at %llx+%x\n",
+			  (unsigned long long)efx->membase_phys,
+			  efx->type->mem_map_size);
 		rc = -ENOMEM;
 		goto fail4;
 	}
-	EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n",
-		(unsigned long long)efx->membase_phys,
-		efx->type->mem_map_size, efx->membase);
+	netif_dbg(efx, probe, efx->net_dev,
+		  "memory BAR at %llx+%x (virtual %p)\n",
+		  (unsigned long long)efx->membase_phys,
+		  efx->type->mem_map_size, efx->membase);
 
 	return 0;
 
@@ -938,7 +962,7 @@
 
 static void efx_fini_io(struct efx_nic *efx)
 {
-	EFX_LOG(efx, "shutting down I/O\n");
+	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
 
 	if (efx->membase) {
 		iounmap(efx->membase);
@@ -1002,9 +1026,11 @@
 			xentries[i].entry = i;
 		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
 		if (rc > 0) {
-			EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
-				" available (%d < %d).\n", rc, n_channels);
-			EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
+			netif_err(efx, drv, efx->net_dev,
+				  "WARNING: Insufficient MSI-X vectors"
+				  " available (%d < %d).\n", rc, n_channels);
+			netif_err(efx, drv, efx->net_dev,
+				  "WARNING: Performance may be reduced.\n");
 			EFX_BUG_ON_PARANOID(rc >= n_channels);
 			n_channels = rc;
 			rc = pci_enable_msix(efx->pci_dev, xentries,
@@ -1028,7 +1054,8 @@
 		} else {
 			/* Fall back to single channel MSI */
 			efx->interrupt_mode = EFX_INT_MODE_MSI;
-			EFX_ERR(efx, "could not enable MSI-X\n");
+			netif_err(efx, drv, efx->net_dev,
+				  "could not enable MSI-X\n");
 		}
 	}
 
@@ -1041,7 +1068,8 @@
 		if (rc == 0) {
 			efx->channel[0].irq = efx->pci_dev->irq;
 		} else {
-			EFX_ERR(efx, "could not enable MSI\n");
+			netif_err(efx, drv, efx->net_dev,
+				  "could not enable MSI\n");
 			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
 		}
 	}
@@ -1093,9 +1121,10 @@
 
 static int efx_probe_nic(struct efx_nic *efx)
 {
+	size_t i;
 	int rc;
 
-	EFX_LOG(efx, "creating NIC\n");
+	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
 
 	/* Carry out hardware-type specific initialisation */
 	rc = efx->type->probe(efx);
@@ -1106,6 +1135,11 @@
 	 * in MSI-X interrupts. */
 	efx_probe_interrupts(efx);
 
+	if (efx->n_channels > 1)
+		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
+	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+		efx->rx_indir_table[i] = i % efx->n_rx_channels;
+
 	efx_set_channels(efx);
 	efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
 
@@ -1117,7 +1151,7 @@
 
 static void efx_remove_nic(struct efx_nic *efx)
 {
-	EFX_LOG(efx, "destroying NIC\n");
+	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
 
 	efx_remove_interrupts(efx);
 	efx->type->remove(efx);
@@ -1137,14 +1171,14 @@
 	/* Create NIC */
 	rc = efx_probe_nic(efx);
 	if (rc) {
-		EFX_ERR(efx, "failed to create NIC\n");
+		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
 		goto fail1;
 	}
 
 	/* Create port */
 	rc = efx_probe_port(efx);
 	if (rc) {
-		EFX_ERR(efx, "failed to create port\n");
+		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
 		goto fail2;
 	}
 
@@ -1152,8 +1186,9 @@
 	efx_for_each_channel(channel, efx) {
 		rc = efx_probe_channel(channel);
 		if (rc) {
-			EFX_ERR(efx, "failed to create channel %d\n",
-				channel->channel);
+			netif_err(efx, probe, efx->net_dev,
+				  "failed to create channel %d\n",
+				  channel->channel);
 			goto fail3;
 		}
 	}
@@ -1233,15 +1268,8 @@
  * since we're holding the rtnl_lock at this point. */
 static void efx_flush_all(struct efx_nic *efx)
 {
-	struct efx_rx_queue *rx_queue;
-
 	/* Make sure the hardware monitor is stopped */
 	cancel_delayed_work_sync(&efx->monitor_work);
-
-	/* Ensure that all RX slow refills are complete. */
-	efx_for_each_rx_queue(rx_queue, efx)
-		cancel_delayed_work_sync(&rx_queue->work);
-
 	/* Stop scheduled port reconfigurations */
 	cancel_work_sync(&efx->mac_work);
 }
@@ -1356,8 +1384,9 @@
 	struct efx_nic *efx = container_of(data, struct efx_nic,
 					   monitor_work.work);
 
-	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
-		  raw_smp_processor_id());
+	netif_vdbg(efx, timer, efx->net_dev,
+		   "hardware monitor executing on CPU %d\n",
+		   raw_smp_processor_id());
 	BUG_ON(efx->type->monitor == NULL);
 
 	/* If the mac_lock is already held then it is likely a port
@@ -1464,8 +1493,8 @@
 	struct efx_nic *efx = netdev_priv(net_dev);
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
-	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
-		raw_smp_processor_id());
+	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
+		  raw_smp_processor_id());
 
 	if (efx->state == STATE_DISABLED)
 		return -EIO;
@@ -1490,8 +1519,8 @@
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
-	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
-		raw_smp_processor_id());
+	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+		  raw_smp_processor_id());
 
 	if (efx->state != STATE_DISABLED) {
 		/* Stop the device and flush all the channels */
@@ -1504,11 +1533,10 @@
 }
 
 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
+static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
-	struct net_device_stats *stats = &net_dev->stats;
 
 	spin_lock_bh(&efx->stats_lock);
 	efx->type->update_stats(efx);
@@ -1530,11 +1558,8 @@
 	stats->tx_window_errors = mac_stats->tx_late_collision;
 
 	stats->rx_errors = (stats->rx_length_errors +
-			    stats->rx_over_errors +
 			    stats->rx_crc_errors +
 			    stats->rx_frame_errors +
-			    stats->rx_fifo_errors +
-			    stats->rx_missed_errors +
 			    mac_stats->rx_symbol_error);
 	stats->tx_errors = (stats->tx_window_errors +
 			    mac_stats->tx_bad);
@@ -1547,8 +1572,9 @@
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
-	EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
-		efx->port_enabled);
+	netif_err(efx, tx_err, efx->net_dev,
+		  "TX stuck with port_enabled=%d: resetting channels\n",
+		  efx->port_enabled);
 
 	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
 }
@@ -1567,7 +1593,7 @@
 
 	efx_stop_all(efx);
 
-	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
+	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
 	efx_fini_channels(efx);
 
@@ -1593,8 +1619,9 @@
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
 	if (!is_valid_ether_addr(new_addr)) {
-		EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
-			new_addr);
+		netif_err(efx, drv, efx->net_dev,
+			  "invalid ethernet MAC address requested: %pM\n",
+			  new_addr);
 		return -EINVAL;
 	}
 
@@ -1645,7 +1672,7 @@
 static const struct net_device_ops efx_netdev_ops = {
 	.ndo_open		= efx_net_open,
 	.ndo_stop		= efx_net_stop,
-	.ndo_get_stats		= efx_net_stats,
+	.ndo_get_stats64	= efx_net_stats,
 	.ndo_tx_timeout		= efx_watchdog,
 	.ndo_start_xmit		= efx_hard_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -1697,7 +1724,6 @@
 	net_dev->watchdog_timeo = 5 * HZ;
 	net_dev->irq = efx->pci_dev->irq;
 	net_dev->netdev_ops = &efx_netdev_ops;
-	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
 	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
 
 	/* Clear MAC statistics */
@@ -1722,7 +1748,8 @@
 
 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
 	if (rc) {
-		EFX_ERR(efx, "failed to init net dev attributes\n");
+		netif_err(efx, drv, efx->net_dev,
+			  "failed to init net dev attributes\n");
 		goto fail_registered;
 	}
 
@@ -1730,7 +1757,7 @@
 
 fail_locked:
 	rtnl_unlock();
-	EFX_ERR(efx, "could not register net dev\n");
+	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
 	return rc;
 
 fail_registered:
@@ -1795,7 +1822,7 @@
 
 	rc = efx->type->init(efx);
 	if (rc) {
-		EFX_ERR(efx, "failed to initialise NIC\n");
+		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
 		goto fail;
 	}
 
@@ -1807,7 +1834,8 @@
 		if (rc)
 			goto fail;
 		if (efx->phy_op->reconfigure(efx))
-			EFX_ERR(efx, "could not restore PHY settings\n");
+			netif_err(efx, drv, efx->net_dev,
+				  "could not restore PHY settings\n");
 	}
 
 	efx->mac_op->reconfigure(efx);
@@ -1840,13 +1868,14 @@
 	int rc, rc2;
 	bool disabled;
 
-	EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method));
+	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+		   RESET_TYPE(method));
 
 	efx_reset_down(efx, method);
 
 	rc = efx->type->reset(efx, method);
 	if (rc) {
-		EFX_ERR(efx, "failed to reset hardware\n");
+		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
 		goto out;
 	}
 
@@ -1871,10 +1900,10 @@
 
 	if (disabled) {
 		dev_close(efx->net_dev);
-		EFX_ERR(efx, "has been disabled\n");
+		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
 		efx->state = STATE_DISABLED;
 	} else {
-		EFX_LOG(efx, "reset complete\n");
+		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
 	}
 	return rc;
 }
@@ -1886,10 +1915,14 @@
 {
 	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
 
+	if (efx->reset_pending == RESET_TYPE_NONE)
+		return;
+
 	/* If we're not RUNNING then don't reset. Leave the reset_pending
 	 * flag set so that efx_pci_probe_main will be retried */
 	if (efx->state != STATE_RUNNING) {
-		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
+		netif_info(efx, drv, efx->net_dev,
+			   "scheduled reset quenched. NIC not RUNNING\n");
 		return;
 	}
 
@@ -1903,7 +1936,8 @@
 	enum reset_type method;
 
 	if (efx->reset_pending != RESET_TYPE_NONE) {
-		EFX_INFO(efx, "quenching already scheduled reset\n");
+		netif_info(efx, drv, efx->net_dev,
+			   "quenching already scheduled reset\n");
 		return;
 	}
 
@@ -1927,10 +1961,12 @@
 	}
 
 	if (method != type)
-		EFX_LOG(efx, "scheduling %s reset for %s\n",
-			RESET_TYPE(method), RESET_TYPE(type));
+		netif_dbg(efx, drv, efx->net_dev,
+			  "scheduling %s reset for %s\n",
+			  RESET_TYPE(method), RESET_TYPE(type));
 	else
-		EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method));
+		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+			  RESET_TYPE(method));
 
 	efx->reset_pending = method;
 
@@ -2017,6 +2053,7 @@
 	INIT_WORK(&efx->reset_work, efx_reset_work);
 	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
 	efx->pci_dev = pci_dev;
+	efx->msg_enable = debug;
 	efx->state = STATE_INIT;
 	efx->reset_pending = RESET_TYPE_NONE;
 	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
@@ -2052,8 +2089,8 @@
 		rx_queue->queue = i;
 		rx_queue->channel = &efx->channel[0]; /* for safety */
 		rx_queue->buffer = NULL;
-		spin_lock_init(&rx_queue->add_lock);
-		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
+		setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+			    (unsigned long)rx_queue);
 	}
 
 	efx->type = type;
@@ -2136,7 +2173,7 @@
 	efx_pci_remove_main(efx);
 
 	efx_fini_io(efx);
-	EFX_LOG(efx, "shutdown successful\n");
+	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
 
 	pci_set_drvdata(pci_dev, NULL);
 	efx_fini_struct(efx);
@@ -2161,13 +2198,15 @@
 
 	rc = efx->type->init(efx);
 	if (rc) {
-		EFX_ERR(efx, "failed to initialise NIC\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "failed to initialise NIC\n");
 		goto fail3;
 	}
 
 	rc = efx_init_port(efx);
 	if (rc) {
-		EFX_ERR(efx, "failed to initialise port\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "failed to initialise port\n");
 		goto fail4;
 	}
 
@@ -2223,11 +2262,13 @@
 				   NETIF_F_HIGHDMA | NETIF_F_TSO);
 	efx = netdev_priv(net_dev);
 	pci_set_drvdata(pci_dev, efx);
+	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
 	rc = efx_init_struct(efx, type, pci_dev, net_dev);
 	if (rc)
 		goto fail1;
 
-	EFX_INFO(efx, "Solarflare Communications NIC detected\n");
+	netif_info(efx, probe, efx->net_dev,
+		   "Solarflare Communications NIC detected\n");
 
 	/* Set up basic I/O (BAR mappings etc) */
 	rc = efx_init_io(efx);
@@ -2265,7 +2306,7 @@
 	}
 
 	if (rc) {
-		EFX_ERR(efx, "Could not reset NIC\n");
+		netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
 		goto fail4;
 	}
 
@@ -2277,7 +2318,7 @@
 	if (rc)
 		goto fail5;
 
-	EFX_LOG(efx, "initialisation successful\n");
+	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
 
 	rtnl_lock();
 	efx_mtd_probe(efx); /* allowed to fail */
@@ -2293,7 +2334,7 @@
 	efx_fini_struct(efx);
  fail1:
 	WARN_ON(rc > 0);
-	EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
+	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
 	free_netdev(net_dev);
 	return rc;
 }
@@ -2332,6 +2373,9 @@
 
 	efx->type->resume_wol(efx);
 
+	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
+	queue_work(reset_workqueue, &efx->reset_work);
+
 	return 0;
 }
 
@@ -2394,7 +2438,7 @@
 };
 
 static struct pci_driver efx_pci_driver = {
-	.name		= EFX_DRIVER_NAME,
+	.name		= KBUILD_MODNAME,
 	.id_table	= efx_pci_table,
 	.probe		= efx_pci_probe,
 	.remove		= efx_pci_remove,
@@ -2421,11 +2465,6 @@
 	if (rc)
 		goto err_notifier;
 
-	refill_workqueue = create_workqueue("sfc_refill");
-	if (!refill_workqueue) {
-		rc = -ENOMEM;
-		goto err_refill;
-	}
 	reset_workqueue = create_singlethread_workqueue("sfc_reset");
 	if (!reset_workqueue) {
 		rc = -ENOMEM;
@@ -2441,8 +2480,6 @@
  err_pci:
 	destroy_workqueue(reset_workqueue);
  err_reset:
-	destroy_workqueue(refill_workqueue);
- err_refill:
 	unregister_netdevice_notifier(&efx_netdev_notifier);
  err_notifier:
 	return rc;
@@ -2454,7 +2491,6 @@
 
 	pci_unregister_driver(&efx_pci_driver);
 	destroy_workqueue(reset_workqueue);
-	destroy_workqueue(refill_workqueue);
 	unregister_netdevice_notifier(&efx_netdev_notifier);
 
 }
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index ffd708c..060dc95 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -47,12 +47,12 @@
 extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
 extern void efx_rx_strategy(struct efx_channel *channel);
 extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_work(struct work_struct *data);
+extern void efx_rx_slow_fill(unsigned long context);
 extern void __efx_rx_packet(struct efx_channel *channel,
 			    struct efx_rx_buffer *rx_buf, bool checksummed);
 extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 			  unsigned int len, bool checksummed, bool discard);
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
+extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_RXQ_SIZE 1024
 #define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
 
@@ -106,8 +106,9 @@
 
 static inline void efx_schedule_channel(struct efx_channel *channel)
 {
-	EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
-		  channel->channel, raw_smp_processor_id());
+	netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+		   "channel %d scheduling NAPI poll on CPU%d\n",
+		   channel->channel, raw_smp_processor_id());
 	channel->work_pending = true;
 
 	napi_schedule(&channel->napi_str);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 22026bf..fd19d6a 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -218,8 +218,8 @@
 
 	/* GMAC does not support 1000Mbps HD */
 	if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
-		EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
-			" setting\n");
+		netif_dbg(efx, drv, efx->net_dev,
+			  "rejecting unsupported 1000Mbps HD setting\n");
 		return -EINVAL;
 	}
 
@@ -234,7 +234,7 @@
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
-	strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
 	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
 	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
 		siena_print_fwver(efx, info->fw_version,
@@ -242,6 +242,32 @@
 	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
 }
 
+static int efx_ethtool_get_regs_len(struct net_device *net_dev)
+{
+	return efx_nic_get_regs_len(netdev_priv(net_dev));
+}
+
+static void efx_ethtool_get_regs(struct net_device *net_dev,
+				 struct ethtool_regs *regs, void *buf)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	regs->version = efx->type->revision;
+	efx_nic_get_regs(efx, buf);
+}
+
+static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	return efx->msg_enable;
+}
+
+static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	efx->msg_enable = msg_enable;
+}
+
 /**
  * efx_fill_test - fill in an individual self-test entry
  * @test_index:		Index of the test
@@ -443,12 +469,13 @@
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
 	struct efx_ethtool_stat *stat;
 	struct efx_channel *channel;
+	struct rtnl_link_stats64 temp;
 	int i;
 
 	EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
 
 	/* Update MAC and NIC statistics */
-	dev_get_stats(net_dev);
+	dev_get_stats(net_dev, &temp);
 
 	/* Fill detailed statistics buffer */
 	for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
@@ -520,6 +547,14 @@
 	return efx->rx_checksum_enabled;
 }
 
+static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	u32 supported = efx->type->offload_features & ETH_FLAG_RXHASH;
+
+	return ethtool_op_set_flags(net_dev, data, supported);
+}
+
 static void efx_ethtool_self_test(struct net_device *net_dev,
 				  struct ethtool_test *test, u64 *data)
 {
@@ -539,7 +574,8 @@
 	if (!already_up) {
 		rc = dev_open(efx->net_dev);
 		if (rc) {
-			EFX_ERR(efx, "failed opening device.\n");
+			netif_err(efx, drv, efx->net_dev,
+				  "failed opening device.\n");
 			goto fail2;
 		}
 	}
@@ -551,9 +587,9 @@
 	if (!already_up)
 		dev_close(efx->net_dev);
 
-	EFX_LOG(efx, "%s %sline self-tests\n",
-		rc == 0 ? "passed" : "failed",
-		(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+	netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+		  rc == 0 ? "passed" : "failed",
+		  (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
 
  fail2:
  fail1:
@@ -679,8 +715,8 @@
 		return -EOPNOTSUPP;
 
 	if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
-		EFX_ERR(efx, "invalid coalescing setting. "
-			"Only rx/tx_coalesce_usecs_irq are supported\n");
+		netif_err(efx, drv, efx->net_dev, "invalid coalescing setting. "
+			  "Only rx/tx_coalesce_usecs_irq are supported\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -692,8 +728,8 @@
 	efx_for_each_tx_queue(tx_queue, efx) {
 		if ((tx_queue->channel->channel < efx->n_rx_channels) &&
 		    tx_usecs) {
-			EFX_ERR(efx, "Channel is shared. "
-				"Only RX coalescing may be set\n");
+			netif_err(efx, drv, efx->net_dev, "Channel is shared. "
+				  "Only RX coalescing may be set\n");
 			return -EOPNOTSUPP;
 		}
 	}
@@ -721,13 +757,15 @@
 		     (pause->autoneg ? EFX_FC_AUTO : 0));
 
 	if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
-		EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n");
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Flow control unsupported: tx ON rx OFF\n");
 		rc = -EINVAL;
 		goto out;
 	}
 
 	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
-		EFX_LOG(efx, "Autonegotiation is disabled\n");
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Autonegotiation is disabled\n");
 		rc = -EINVAL;
 		goto out;
 	}
@@ -758,8 +796,9 @@
 	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
 		rc = efx->phy_op->reconfigure(efx);
 		if (rc) {
-			EFX_ERR(efx, "Unable to advertise requested flow "
-				"control setting\n");
+			netif_err(efx, drv, efx->net_dev,
+				  "Unable to advertise requested flow "
+				  "control setting\n");
 			goto out;
 		}
 	}
@@ -830,10 +869,101 @@
 	return efx_reset(efx, method);
 }
 
+static int
+efx_ethtool_get_rxnfc(struct net_device *net_dev,
+		      struct ethtool_rxnfc *info, void *rules __always_unused)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = efx->n_rx_channels;
+		return 0;
+
+	case ETHTOOL_GRXFH: {
+		unsigned min_revision = 0;
+
+		info->data = 0;
+		switch (info->flow_type) {
+		case TCP_V4_FLOW:
+			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+			/* fall through */
+		case UDP_V4_FLOW:
+		case SCTP_V4_FLOW:
+		case AH_ESP_V4_FLOW:
+		case IPV4_FLOW:
+			info->data |= RXH_IP_SRC | RXH_IP_DST;
+			min_revision = EFX_REV_FALCON_B0;
+			break;
+		case TCP_V6_FLOW:
+			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+			/* fall through */
+		case UDP_V6_FLOW:
+		case SCTP_V6_FLOW:
+		case AH_ESP_V6_FLOW:
+		case IPV6_FLOW:
+			info->data |= RXH_IP_SRC | RXH_IP_DST;
+			min_revision = EFX_REV_SIENA_A0;
+			break;
+		default:
+			break;
+		}
+		if (efx_nic_rev(efx) < min_revision)
+			info->data = 0;
+		return 0;
+	}
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
+				      struct ethtool_rxfh_indir *indir)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	size_t copy_size =
+		min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+		return -EOPNOTSUPP;
+
+	indir->size = ARRAY_SIZE(efx->rx_indir_table);
+	memcpy(indir->ring_index, efx->rx_indir_table,
+	       copy_size * sizeof(indir->ring_index[0]));
+	return 0;
+}
+
+static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
+				      const struct ethtool_rxfh_indir *indir)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	size_t i;
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+		return -EOPNOTSUPP;
+
+	/* Validate size and indices */
+	if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
+		return -EINVAL;
+	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+		if (indir->ring_index[i] >= efx->n_rx_channels)
+			return -EINVAL;
+
+	memcpy(efx->rx_indir_table, indir->ring_index,
+	       sizeof(efx->rx_indir_table));
+	efx_nic_push_rx_indir_table(efx);
+	return 0;
+}
+
 const struct ethtool_ops efx_ethtool_ops = {
 	.get_settings		= efx_ethtool_get_settings,
 	.set_settings		= efx_ethtool_set_settings,
 	.get_drvinfo		= efx_ethtool_get_drvinfo,
+	.get_regs_len		= efx_ethtool_get_regs_len,
+	.get_regs		= efx_ethtool_get_regs,
+	.get_msglevel		= efx_ethtool_get_msglevel,
+	.set_msglevel		= efx_ethtool_set_msglevel,
 	.nway_reset		= efx_ethtool_nway_reset,
 	.get_link		= efx_ethtool_get_link,
 	.get_eeprom_len		= efx_ethtool_get_eeprom_len,
@@ -854,7 +984,7 @@
 	/* Need to enable/disable TSO-IPv6 too */
 	.set_tso		= efx_ethtool_set_tso,
 	.get_flags		= ethtool_op_get_flags,
-	.set_flags		= ethtool_op_set_flags,
+	.set_flags		= efx_ethtool_set_flags,
 	.get_sset_count		= efx_ethtool_get_sset_count,
 	.self_test		= efx_ethtool_self_test,
 	.get_strings		= efx_ethtool_get_strings,
@@ -863,4 +993,7 @@
 	.get_wol                = efx_ethtool_get_wol,
 	.set_wol                = efx_ethtool_set_wol,
 	.reset			= efx_ethtool_reset,
+	.get_rxnfc		= efx_ethtool_get_rxnfc,
+	.get_rxfh_indir		= efx_ethtool_get_rxfh_indir,
+	.set_rxfh_indir		= efx_ethtool_set_rxfh_indir,
 };
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 655b697..4f9d33f 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -167,13 +167,15 @@
 	 * exit without having touched the hardware.
 	 */
 	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
-		EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
-			  raw_smp_processor_id());
+		netif_vdbg(efx, intr, efx->net_dev,
+			   "IRQ %d on CPU %d not for me\n", irq,
+			   raw_smp_processor_id());
 		return IRQ_NONE;
 	}
 	efx->last_irq_cpu = raw_smp_processor_id();
-	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
-		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+	netif_vdbg(efx, intr, efx->net_dev,
+		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
 
 	/* Determine interrupting queues, clear interrupt status
 	 * register and acknowledge the device interrupt.
@@ -239,7 +241,8 @@
 		if (!falcon_spi_poll(efx))
 			return 0;
 		if (time_after_eq(jiffies, timeout)) {
-			EFX_ERR(efx, "timed out waiting for SPI\n");
+			netif_err(efx, hw, efx->net_dev,
+				  "timed out waiting for SPI\n");
 			return -ETIMEDOUT;
 		}
 		schedule_timeout_uninterruptible(1);
@@ -333,9 +336,10 @@
 		if (!(status & SPI_STATUS_NRDY))
 			return 0;
 		if (time_after_eq(jiffies, timeout)) {
-			EFX_ERR(efx, "SPI write timeout on device %d"
-				" last status=0x%02x\n",
-				spi->device_id, status);
+			netif_err(efx, hw, efx->net_dev,
+				  "SPI write timeout on device %d"
+				  " last status=0x%02x\n",
+				  spi->device_id, status);
 			return -ETIMEDOUT;
 		}
 		schedule_timeout_uninterruptible(1);
@@ -469,7 +473,8 @@
 				udelay(10);
 			}
 
-			EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
+			netif_err(efx, hw, efx->net_dev,
+				  "timed out waiting for XMAC core reset\n");
 		}
 	}
 
@@ -492,12 +497,13 @@
 		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
 		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
 		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
-			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
-				count);
+			netif_dbg(efx, hw, efx->net_dev,
+				  "Completed MAC reset after %d loops\n",
+				  count);
 			break;
 		}
 		if (count > 20) {
-			EFX_ERR(efx, "MAC reset failed\n");
+			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
 			break;
 		}
 		count++;
@@ -548,7 +554,9 @@
 {
 	struct efx_link_state *link_state = &efx->link_state;
 	efx_oword_t reg;
-	int link_speed;
+	int link_speed, isolate;
+
+	isolate = (efx->reset_pending != RESET_TYPE_NONE);
 
 	switch (link_state->speed) {
 	case 10000: link_speed = 3; break;
@@ -570,7 +578,7 @@
 	 * discarded. */
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
 		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
-				    !link_state->up);
+				    !link_state->up || isolate);
 	}
 
 	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
@@ -584,7 +592,7 @@
 	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
 	/* Unisolate the MAC -> RX */
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
-		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
 	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
 }
 
@@ -625,7 +633,8 @@
 		rmb(); /* read the done flag before the stats */
 		efx->mac_op->update_stats(efx);
 	} else {
-		EFX_ERR(efx, "timed out waiting for statistics\n");
+		netif_err(efx, hw, efx->net_dev,
+			  "timed out waiting for statistics\n");
 	}
 }
 
@@ -715,16 +724,17 @@
 		if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
 			if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
 			    EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
-				EFX_ERR(efx, "error from GMII access "
-					EFX_OWORD_FMT"\n",
-					EFX_OWORD_VAL(md_stat));
+				netif_err(efx, hw, efx->net_dev,
+					  "error from GMII access "
+					  EFX_OWORD_FMT"\n",
+					  EFX_OWORD_VAL(md_stat));
 				return -EIO;
 			}
 			return 0;
 		}
 		udelay(10);
 	}
-	EFX_ERR(efx, "timed out waiting for GMII\n");
+	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
 	return -ETIMEDOUT;
 }
 
@@ -736,7 +746,8 @@
 	efx_oword_t reg;
 	int rc;
 
-	EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing MDIO %d register %d.%d with 0x%04x\n",
 		    prtad, devad, addr, value);
 
 	mutex_lock(&efx->mdio_lock);
@@ -810,8 +821,9 @@
 	if (rc == 0) {
 		efx_reado(efx, &reg, FR_AB_MD_RXD);
 		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
-		EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
-			    prtad, devad, addr, rc);
+		netif_vdbg(efx, hw, efx->net_dev,
+			   "read from MDIO %d register %d.%d, got %04x\n",
+			   prtad, devad, addr, rc);
 	} else {
 		/* Abort the read operation */
 		EFX_POPULATE_OWORD_2(reg,
@@ -819,8 +831,9 @@
 				     FRF_AB_MD_GC, 1);
 		efx_writeo(efx, &reg, FR_AB_MD_CS);
 
-		EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
-			prtad, devad, addr, rc);
+		netif_dbg(efx, hw, efx->net_dev,
+			  "read from MDIO %d register %d.%d, got error %d\n",
+			  prtad, devad, addr, rc);
 	}
 
 out:
@@ -871,7 +884,8 @@
 
 	falcon_clock_mac(efx);
 
-	EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
+	netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n",
+		  EFX_IS10G(efx) ? 'X' : 'G');
 	/* Not all macs support a mac-level link state */
 	efx->xmac_poll_required = false;
 	falcon_reset_macs(efx);
@@ -895,8 +909,8 @@
 		efx->phy_op = &falcon_qt202x_phy_ops;
 		break;
 	default:
-		EFX_ERR(efx, "Unknown PHY type %d\n",
-			efx->phy_type);
+		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
+			  efx->phy_type);
 		return -ENODEV;
 	}
 
@@ -924,10 +938,11 @@
 				  FALCON_MAC_STATS_SIZE);
 	if (rc)
 		return rc;
-	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
-		(u64)efx->stats_buffer.dma_addr,
-		efx->stats_buffer.addr,
-		(u64)virt_to_phys(efx->stats_buffer.addr));
+	netif_dbg(efx, probe, efx->net_dev,
+		  "stats buffer at %llx (virt %p phys %llx)\n",
+		  (u64)efx->stats_buffer.dma_addr,
+		  efx->stats_buffer.addr,
+		  (u64)virt_to_phys(efx->stats_buffer.addr));
 
 	return 0;
 }
@@ -967,8 +982,8 @@
 	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
 	mutex_unlock(&efx->spi_lock);
 	if (rc) {
-		EFX_ERR(efx, "Failed to read %s\n",
-			efx->spi_flash ? "flash" : "EEPROM");
+		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
+			  efx->spi_flash ? "flash" : "EEPROM");
 		rc = -EIO;
 		goto out;
 	}
@@ -978,11 +993,13 @@
 
 	rc = -EINVAL;
 	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
-		EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
+		netif_err(efx, hw, efx->net_dev,
+			  "NVRAM bad magic 0x%x\n", magic_num);
 		goto out;
 	}
 	if (struct_ver < 2) {
-		EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
+		netif_err(efx, hw, efx->net_dev,
+			  "NVRAM has ancient version 0x%x\n", struct_ver);
 		goto out;
 	} else if (struct_ver < 4) {
 		word = &nvconfig->board_magic_num;
@@ -995,7 +1012,8 @@
 		csum += le16_to_cpu(*word);
 
 	if (~csum & 0xffff) {
-		EFX_ERR(efx, "NVRAM has incorrect checksum\n");
+		netif_err(efx, hw, efx->net_dev,
+			  "NVRAM has incorrect checksum\n");
 		goto out;
 	}
 
@@ -1073,22 +1091,25 @@
 	efx_oword_t glb_ctl_reg_ker;
 	int rc;
 
-	EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));
+	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
+		  RESET_TYPE(method));
 
 	/* Initiate device reset */
 	if (method == RESET_TYPE_WORLD) {
 		rc = pci_save_state(efx->pci_dev);
 		if (rc) {
-			EFX_ERR(efx, "failed to backup PCI state of primary "
-				"function prior to hardware reset\n");
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to backup PCI state of primary "
+				  "function prior to hardware reset\n");
 			goto fail1;
 		}
 		if (efx_nic_is_dual_func(efx)) {
 			rc = pci_save_state(nic_data->pci_dev2);
 			if (rc) {
-				EFX_ERR(efx, "failed to backup PCI state of "
-					"secondary function prior to "
-					"hardware reset\n");
+				netif_err(efx, drv, efx->net_dev,
+					  "failed to backup PCI state of "
+					  "secondary function prior to "
+					  "hardware reset\n");
 				goto fail2;
 			}
 		}
@@ -1113,7 +1134,7 @@
 	}
 	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
 
-	EFX_LOG(efx, "waiting for hardware reset\n");
+	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
 	schedule_timeout_uninterruptible(HZ / 20);
 
 	/* Restore PCI configuration if needed */
@@ -1121,28 +1142,32 @@
 		if (efx_nic_is_dual_func(efx)) {
 			rc = pci_restore_state(nic_data->pci_dev2);
 			if (rc) {
-				EFX_ERR(efx, "failed to restore PCI config for "
-					"the secondary function\n");
+				netif_err(efx, drv, efx->net_dev,
+					  "failed to restore PCI config for "
+					  "the secondary function\n");
 				goto fail3;
 			}
 		}
 		rc = pci_restore_state(efx->pci_dev);
 		if (rc) {
-			EFX_ERR(efx, "failed to restore PCI config for the "
-				"primary function\n");
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to restore PCI config for the "
+				  "primary function\n");
 			goto fail4;
 		}
-		EFX_LOG(efx, "successfully restored PCI config\n");
+		netif_dbg(efx, drv, efx->net_dev,
+			  "successfully restored PCI config\n");
 	}
 
 	/* Assert that reset complete */
 	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
 	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
 		rc = -ETIMEDOUT;
-		EFX_ERR(efx, "timed out waiting for hardware reset\n");
+		netif_err(efx, hw, efx->net_dev,
+			  "timed out waiting for hardware reset\n");
 		goto fail5;
 	}
-	EFX_LOG(efx, "hardware reset complete\n");
+	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
 
 	return 0;
 
@@ -1165,8 +1190,9 @@
 
 	rc = falcon_board(efx)->type->monitor(efx);
 	if (rc) {
-		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
-			(rc == -ERANGE) ? "reported fault" : "failed");
+		netif_err(efx, hw, efx->net_dev,
+			  "Board sensor %s; shutting down PHY\n",
+			  (rc == -ERANGE) ? "reported fault" : "failed");
 		efx->phy_mode |= PHY_MODE_LOW_POWER;
 		rc = __efx_reconfigure_port(efx);
 		WARN_ON(rc);
@@ -1217,7 +1243,8 @@
 	/* Wait for SRAM reset to complete */
 	count = 0;
 	do {
-		EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
+		netif_dbg(efx, hw, efx->net_dev,
+			  "waiting for SRAM reset (attempt %d)...\n", count);
 
 		/* SRAM reset is slow; expect around 16ms */
 		schedule_timeout_uninterruptible(HZ / 50);
@@ -1225,13 +1252,14 @@
 		/* Check for reset complete */
 		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
 		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
-			EFX_LOG(efx, "SRAM reset complete\n");
+			netif_dbg(efx, hw, efx->net_dev,
+				  "SRAM reset complete\n");
 
 			return 0;
 		}
 	} while (++count < 20);	/* wait up to 0.4 sec */
 
-	EFX_ERR(efx, "timed out waiting for SRAM reset\n");
+	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
 	return -ETIMEDOUT;
 }
 
@@ -1290,7 +1318,8 @@
 
 	rc = falcon_read_nvram(efx, nvconfig);
 	if (rc == -EINVAL) {
-		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "NVRAM is invalid therefore using defaults\n");
 		efx->phy_type = PHY_TYPE_NONE;
 		efx->mdio.prtad = MDIO_PRTAD_NONE;
 		board_rev = 0;
@@ -1324,7 +1353,8 @@
 	/* Read the MAC addresses */
 	memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
 
-	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
+	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
+		  efx->phy_type, efx->mdio.prtad);
 
 	rc = falcon_probe_board(efx, board_rev);
 	if (rc)
@@ -1353,14 +1383,16 @@
 	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
 		boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
 			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
-		EFX_LOG(efx, "Booted from %s\n",
-			boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
+		netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
+			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
+			  "flash" : "EEPROM");
 	} else {
 		/* Disable VPD and set clock dividers to safe
 		 * values for initial programming. */
 		boot_dev = -1;
-		EFX_LOG(efx, "Booted from internal ASIC settings;"
-			" setting SPI config\n");
+		netif_dbg(efx, probe, efx->net_dev,
+			  "Booted from internal ASIC settings;"
+			  " setting SPI config\n");
 		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
 				     /* 125 MHz / 7 ~= 20 MHz */
 				     FRF_AB_EE_SF_CLOCK_DIV, 7,
@@ -1394,7 +1426,8 @@
 	rc = -ENODEV;
 
 	if (efx_nic_fpga_ver(efx) != 0) {
-		EFX_ERR(efx, "Falcon FPGA not supported\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "Falcon FPGA not supported\n");
 		goto fail1;
 	}
 
@@ -1404,16 +1437,19 @@
 		u8 pci_rev = efx->pci_dev->revision;
 
 		if ((pci_rev == 0xff) || (pci_rev == 0)) {
-			EFX_ERR(efx, "Falcon rev A0 not supported\n");
+			netif_err(efx, probe, efx->net_dev,
+				  "Falcon rev A0 not supported\n");
 			goto fail1;
 		}
 		efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
 		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
-			EFX_ERR(efx, "Falcon rev A1 1G not supported\n");
+			netif_err(efx, probe, efx->net_dev,
+				  "Falcon rev A1 1G not supported\n");
 			goto fail1;
 		}
 		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
-			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
+			netif_err(efx, probe, efx->net_dev,
+				  "Falcon rev A1 PCI-X not supported\n");
 			goto fail1;
 		}
 
@@ -1427,7 +1463,8 @@
 			}
 		}
 		if (!nic_data->pci_dev2) {
-			EFX_ERR(efx, "failed to find secondary function\n");
+			netif_err(efx, probe, efx->net_dev,
+				  "failed to find secondary function\n");
 			rc = -ENODEV;
 			goto fail2;
 		}
@@ -1436,7 +1473,7 @@
 	/* Now we can reset the NIC */
 	rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
 	if (rc) {
-		EFX_ERR(efx, "failed to reset NIC\n");
+		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
 		goto fail3;
 	}
 
@@ -1446,9 +1483,11 @@
 		goto fail4;
 	BUG_ON(efx->irq_status.dma_addr & 0x0f);
 
-	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
-		(u64)efx->irq_status.dma_addr,
-		efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
+	netif_dbg(efx, probe, efx->net_dev,
+		  "INT_KER at %llx (virt %p phys %llx)\n",
+		  (u64)efx->irq_status.dma_addr,
+		  efx->irq_status.addr,
+		  (u64)virt_to_phys(efx->irq_status.addr));
 
 	falcon_probe_spi_devices(efx);
 
@@ -1472,7 +1511,8 @@
 
 	rc = falcon_board(efx)->type->init(efx);
 	if (rc) {
-		EFX_ERR(efx, "failed to initialise board\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "failed to initialise board\n");
 		goto fail6;
 	}
 
@@ -1542,6 +1582,13 @@
 		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
 		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
 		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+
+		/* Enable hash insertion. This is broken for the
+		 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
+		 * IPv4 hashes. */
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
 	}
 	/* Always enable XOFF signal from RX FIFO.  We enable
 	 * or disable transmission of pause frames at the MAC. */
@@ -1615,8 +1662,12 @@
 
 	falcon_init_rx_cfg(efx);
 
-	/* Set destination of both TX and RX Flush events */
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		/* Set hash key for IPv4 */
+		memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+		efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+		/* Set destination of both TX and RX Flush events */
 		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
 		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
 	}
@@ -1821,6 +1872,7 @@
 	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
 	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
 	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+	.rx_buffer_hash_size = 0x10,
 	.rx_buffer_padding = 0,
 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
 	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
@@ -1828,7 +1880,7 @@
 				   * channels */
 	.tx_dc_base = 0x130000,
 	.rx_dc_base = 0x100000,
-	.offload_features = NETIF_F_IP_CSUM,
+	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH,
 	.reset_world_flags = ETH_RESET_IRQ,
 };
 
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index c7a933a..3d950c2 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -106,12 +106,17 @@
 	alarms1 &= mask;
 	alarms2 &= mask >> 8;
 	if (alarms1 || alarms2) {
-		EFX_ERR(efx,
-			"LM87 detected a hardware failure (status %02x:%02x)"
-			"%s%s\n",
-			alarms1, alarms2,
-			(alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
-			(alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
+		netif_err(efx, hw, efx->net_dev,
+			  "LM87 detected a hardware failure (status %02x:%02x)"
+			  "%s%s%s\n",
+			  alarms1, alarms2,
+			  (alarms1 & LM87_ALARM_TEMP_INT) ?
+			  "; board is overheating" : "",
+			  (alarms1 & LM87_ALARM_TEMP_EXT1) ?
+			  "; controller is overheating" : "",
+			  (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1)
+			   || alarms2) ?
+			  "; electrical fault" : "");
 		return -ERANGE;
 	}
 
@@ -243,7 +248,7 @@
 		       (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
 		       (0 << P0_EN_1V0X_LBN));
 	if (rc != out) {
-		EFX_INFO(efx, "power-cycling PHY\n");
+		netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n");
 		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
 		if (rc)
 			goto fail_on;
@@ -269,7 +274,8 @@
 		if (rc)
 			goto fail_on;
 
-		EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
+		netif_info(efx, hw, efx->net_dev,
+			   "waiting for DSP boot (attempt %d)...\n", i);
 
 		/* In flash config mode, DSP does not turn on AFE, so
 		 * just wait 1 second.
@@ -291,7 +297,7 @@
 		}
 	}
 
-	EFX_INFO(efx, "timed out waiting for DSP boot\n");
+	netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n");
 	rc = -ETIMEDOUT;
 fail_on:
 	sfe4001_poweroff(efx);
@@ -377,7 +383,7 @@
 {
 	struct falcon_board *board = falcon_board(efx);
 
-	EFX_INFO(efx, "%s\n", __func__);
+	netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
 
 	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
 	sfe4001_poweroff(efx);
@@ -461,7 +467,7 @@
 	if (rc)
 		goto fail_on;
 
-	EFX_INFO(efx, "PHY is powered on\n");
+	netif_info(efx, hw, efx->net_dev, "PHY is powered on\n");
 	return 0;
 
 fail_on:
@@ -493,7 +499,7 @@
 
 static void sfn4111t_fini(struct efx_nic *efx)
 {
-	EFX_INFO(efx, "%s\n", __func__);
+	netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
 
 	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
 	i2c_unregister_device(falcon_board(efx)->hwmon_client);
@@ -742,13 +748,14 @@
 			board->type = &board_types[i];
 
 	if (board->type) {
-		EFX_INFO(efx, "board is %s rev %c%d\n",
+		netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
 			 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
 			 ? board->type->ref_model : board->type->gen_type,
 			 'A' + board->major, board->minor);
 		return 0;
 	} else {
-		EFX_ERR(efx, "unknown board type %d\n", type_id);
+		netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
+			  type_id);
 		return -ENODEV;
 	}
 }
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index c84a2ce..bae656d 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -81,7 +81,8 @@
 		}
 		udelay(10);
 	}
-	EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
+	netif_err(efx, hw, efx->net_dev,
+		  "timed out waiting for XAUI/XGXS reset\n");
 	return -ETIMEDOUT;
 }
 
@@ -256,7 +257,7 @@
 	falcon_stop_nic_stats(efx);
 
 	while (!mac_up && tries) {
-		EFX_LOG(efx, "bashing xaui\n");
+		netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
 		falcon_reset_xaui(efx);
 		udelay(200);
 
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index b89177c..85a99fe 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -78,8 +78,9 @@
 {
 	unsigned long flags __attribute__ ((unused));
 
-	EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
-		    EFX_OWORD_VAL(*value));
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
+		   EFX_OWORD_VAL(*value));
 
 	spin_lock_irqsave(&efx->biu_lock, flags);
 #ifdef EFX_USE_QWORD_IO
@@ -105,8 +106,9 @@
 	unsigned int addr = index * sizeof(*value);
 	unsigned long flags __attribute__ ((unused));
 
-	EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n",
-		    addr, EFX_QWORD_VAL(*value));
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing SRAM address %x with " EFX_QWORD_FMT "\n",
+		   addr, EFX_QWORD_VAL(*value));
 
 	spin_lock_irqsave(&efx->biu_lock, flags);
 #ifdef EFX_USE_QWORD_IO
@@ -129,8 +131,9 @@
 static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
 			      unsigned int reg)
 {
-	EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
-		    reg, EFX_DWORD_VAL(*value));
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing partial register %x with "EFX_DWORD_FMT"\n",
+		   reg, EFX_DWORD_VAL(*value));
 
 	/* No lock required */
 	_efx_writed(efx, value->u32[0], reg);
@@ -155,8 +158,9 @@
 	value->u32[3] = _efx_readd(efx, reg + 12);
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 
-	EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
-		    EFX_OWORD_VAL(*value));
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "read from register %x, got " EFX_OWORD_FMT "\n", reg,
+		   EFX_OWORD_VAL(*value));
 }
 
 /* Read an 8-byte SRAM entry through supplied mapping,
@@ -177,8 +181,9 @@
 #endif
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 
-	EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
-		    addr, EFX_QWORD_VAL(*value));
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
+		   addr, EFX_QWORD_VAL(*value));
 }
 
 /* Read dword from register that allows partial writes (sic) */
@@ -186,8 +191,9 @@
 				unsigned int reg)
 {
 	value->u32[0] = _efx_readd(efx, reg);
-	EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
-		    reg, EFX_DWORD_VAL(*value));
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "read from register %x, got "EFX_DWORD_FMT"\n",
+		   reg, EFX_DWORD_VAL(*value));
 }
 
 /* Write to a register forming part of a table */
@@ -211,6 +217,13 @@
 	efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
 }
 
+/* Read from a dword register forming part of a table */
+static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
+				   unsigned int reg, unsigned int index)
+{
+	efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
+}
+
 /* Page-mapped register block size */
 #define EFX_PAGE_BLOCK_SIZE 0x2000
 
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 93cc3c1..3912b8f 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -168,11 +168,12 @@
 	error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
 
 	if (error && mcdi->resplen == 0) {
-		EFX_ERR(efx, "MC rebooted\n");
+		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
 		rc = EIO;
 	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
-		EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
-			respseq, mcdi->seqno);
+		netif_err(efx, hw, efx->net_dev,
+			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+			  respseq, mcdi->seqno);
 		rc = EIO;
 	} else if (error) {
 		efx_readd(efx, &reg, pdu + 4);
@@ -303,8 +304,9 @@
 			/* The request has been cancelled */
 			--mcdi->credits;
 		else
-			EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx "
-				"seq 0x%x\n", seqno, mcdi->seqno);
+			netif_err(efx, hw, efx->net_dev,
+				  "MC response mismatch tx seq 0x%x rx "
+				  "seq 0x%x\n", seqno, mcdi->seqno);
 	} else {
 		mcdi->resprc = errno;
 		mcdi->resplen = datalen;
@@ -352,8 +354,9 @@
 		++mcdi->credits;
 		spin_unlock_bh(&mcdi->iface_lock);
 
-		EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n",
-			cmd, (int)inlen, mcdi->mode);
+		netif_err(efx, hw, efx->net_dev,
+			  "MC command 0x%x inlen %d mode %d timed out\n",
+			  cmd, (int)inlen, mcdi->mode);
 	} else {
 		size_t resplen;
 
@@ -374,11 +377,13 @@
 		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
 			; /* Don't reset if MC_CMD_REBOOT returns EIO */
 		else if (rc == -EIO || rc == -EINTR) {
-			EFX_ERR(efx, "MC fatal error %d\n", -rc);
+			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+				  -rc);
 			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
 		} else
-			EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n",
-				cmd, (int)inlen, -rc);
+			netif_err(efx, hw, efx->net_dev,
+				  "MC command 0x%x inlen %d failed rc=%d\n",
+				  cmd, (int)inlen, -rc);
 	}
 
 	efx_mcdi_release(mcdi);
@@ -534,8 +539,9 @@
 	EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
 	state_txt = sensor_status_names[state];
 
-	EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n",
-		monitor, name, state_txt, value);
+	netif_err(efx, hw, efx->net_dev,
+		  "Sensor %d (%s) reports condition '%s' for raw value %d\n",
+		  monitor, name, state_txt, value);
 }
 
 /* Called from  falcon_process_eventq for MCDI events */
@@ -548,12 +554,13 @@
 
 	switch (code) {
 	case MCDI_EVENT_CODE_BADSSERT:
-		EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data);
+		netif_err(efx, hw, efx->net_dev,
+			  "MC watchdog or assertion failure at 0x%x\n", data);
 		efx_mcdi_ev_death(efx, EINTR);
 		break;
 
 	case MCDI_EVENT_CODE_PMNOTICE:
-		EFX_INFO(efx, "MCDI PM event.\n");
+		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
 		break;
 
 	case MCDI_EVENT_CODE_CMDDONE:
@@ -570,10 +577,11 @@
 		efx_mcdi_sensor_event(efx, event);
 		break;
 	case MCDI_EVENT_CODE_SCHEDERR:
-		EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data);
+		netif_info(efx, hw, efx->net_dev,
+			   "MC Scheduler error address=0x%x\n", data);
 		break;
 	case MCDI_EVENT_CODE_REBOOT:
-		EFX_INFO(efx, "MC Reboot\n");
+		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
 		efx_mcdi_ev_death(efx, EIO);
 		break;
 	case MCDI_EVENT_CODE_MAC_STATS_DMA:
@@ -581,7 +589,8 @@
 		break;
 
 	default:
-		EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code);
+		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
+			  code);
 	}
 }
 
@@ -627,7 +636,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -657,7 +666,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -695,7 +704,8 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
+		  __func__, rc, (int)outlen);
 
 	return rc;
 }
@@ -724,7 +734,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -749,8 +759,8 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n",
-		__func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+		  __func__, rc);
 	return rc;
 }
 
@@ -781,7 +791,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -802,7 +812,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -827,7 +837,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -853,7 +863,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -877,7 +887,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -898,7 +908,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -948,9 +958,10 @@
 	return 0;
 
 fail2:
-	EFX_ERR(efx, "%s: failed type=%u\n", __func__, type);
+	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
+		  __func__, type);
 fail1:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -994,14 +1005,15 @@
 		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
 		? "watchdog reset"
 		: "unknown assertion";
-	EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
-		MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
-		MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
+	netif_err(efx, hw, efx->net_dev,
+		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
+		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
 
 	/* Print out the registers */
 	ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
 	for (index = 1; index < 32; index++) {
-		EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
+		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
 			MCDI_DWORD2(outbuf, ofst));
 		ofst += sizeof(efx_dword_t);
 	}
@@ -1050,14 +1062,16 @@
 	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
 			  NULL, 0, NULL);
 	if (rc)
-		EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+			  __func__, rc);
 }
 
 int efx_mcdi_reset_port(struct efx_nic *efx)
 {
 	int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL);
 	if (rc)
-		EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+			  __func__, rc);
 	return rc;
 }
 
@@ -1075,7 +1089,7 @@
 		return 0;
 	if (rc == 0)
 		rc = -EIO;
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -1108,7 +1122,7 @@
 
 fail:
 	*id_out = -1;
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 
 }
@@ -1143,7 +1157,7 @@
 
 fail:
 	*id_out = -1;
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -1163,7 +1177,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -1179,7 +1193,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 3918263..f88f4bf 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -69,8 +69,8 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n",
-		__func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+		  __func__, rc);
 	return rc;
 }
 
@@ -110,8 +110,8 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: %s failed rc=%d\n",
-		__func__, enable ? "enable" : "disable", rc);
+	netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
+		  __func__, enable ? "enable" : "disable", rc);
 	return rc;
 }
 
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 6032c0e..0121e71 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -20,7 +20,7 @@
 #include "nic.h"
 #include "selftest.h"
 
-struct efx_mcdi_phy_cfg {
+struct efx_mcdi_phy_data {
 	u32 flags;
 	u32 type;
 	u32 supported_cap;
@@ -35,7 +35,7 @@
 };
 
 static int
-efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
+efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
 {
 	u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
 	size_t outlen;
@@ -71,7 +71,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -97,7 +97,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -122,7 +122,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -150,7 +150,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -178,7 +178,7 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -259,7 +259,7 @@
 
 static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	enum efx_phy_mode mode, supported;
 	u32 flags;
 
@@ -307,7 +307,7 @@
 
 static int efx_mcdi_phy_probe(struct efx_nic *efx)
 {
-	struct efx_mcdi_phy_cfg *phy_data;
+	struct efx_mcdi_phy_data *phy_data;
 	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
 	u32 caps;
 	int rc;
@@ -395,6 +395,7 @@
 	efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
 	if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
 		efx->wanted_fc |= EFX_FC_AUTO;
+	efx_link_set_wanted_fc(efx, efx->wanted_fc);
 
 	return 0;
 
@@ -405,7 +406,7 @@
 
 int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 caps = (efx->link_advertising ?
 		    ethtool_to_mcdi_cap(efx->link_advertising) :
 		    phy_cfg->forced_cap);
@@ -446,7 +447,7 @@
  */
 void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 rmtadv;
 
 	/* The link partner capabilities are only relevant if the
@@ -465,8 +466,8 @@
 		rmtadv |=  ADVERTISED_Asym_Pause;
 
 	if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
-		EFX_ERR(efx, "warning: link partner doesn't support "
-			"pause frames");
+		netif_err(efx, link, efx->net_dev,
+			  "warning: link partner doesn't support pause frames");
 }
 
 static bool efx_mcdi_phy_poll(struct efx_nic *efx)
@@ -482,7 +483,8 @@
 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
 			  outbuf, sizeof(outbuf), NULL);
 	if (rc) {
-		EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+			  __func__, rc);
 		efx->link_state.up = false;
 	} else {
 		efx_mcdi_phy_decode_link(
@@ -505,7 +507,7 @@
 
 static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
 	int rc;
 
@@ -525,7 +527,8 @@
 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
 			  outbuf, sizeof(outbuf), NULL);
 	if (rc) {
-		EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+			  __func__, rc);
 		return;
 	}
 	ecmd->lp_advertising =
@@ -535,7 +538,7 @@
 
 static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 caps;
 	int rc;
 
@@ -674,7 +677,7 @@
 static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
 				  unsigned flags)
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 mode;
 	int rc;
 
@@ -712,7 +715,7 @@
 
 const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 
 	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
 		if (index == 0)
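
The flow-control handling in the mcdi_phy.c hunks above (the new efx_link_set_wanted_fc() call in efx_mcdi_phy_probe() and the warning in efx_mcdi_phy_check_fcntl()) comes down to one rule: generating PAUSE frames is only useful if the link partner will act on them. A tiny illustration of the warning condition, phrased with the generic ethtool ADVERTISED_* bits; the helper name and its bool parameter are illustrative, not driver API.

#include <linux/ethtool.h>
#include <linux/types.h>

/* Returns true in exactly the case the driver warns about above: we
 * want to transmit PAUSE frames (EFX_FC_TX) but the partner advertises
 * asymmetric pause only, so it will not throttle its transmitter in
 * response to anything we send. */
static bool partner_ignores_tx_pause(bool want_tx_pause, u32 rmtadv)
{
        /* rmtadv holds only the partner's pause bits (ADVERTISED_Pause,
         * ADVERTISED_Asym_Pause), as built in the function above. */
        return want_tx_pause && rmtadv == ADVERTISED_Asym_Pause;
}

Asym_Pause on its own means the partner may originate PAUSE frames but does not promise to honour received ones, hence the "link partner doesn't support pause frames" warning.
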
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 0548fcb..eeaf0bd 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -63,7 +63,8 @@
 		/* Read MMD STATUS2 to check it is responding. */
 		status = efx_mdio_read(efx, mmd, MDIO_STAT2);
 		if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
-			EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd);
+			netif_err(efx, hw, efx->net_dev,
+				  "PHY MMD %d not responding.\n", mmd);
 			return -EIO;
 		}
 	}
@@ -72,12 +73,14 @@
 	status = efx_mdio_read(efx, mmd, MDIO_STAT1);
 	if (status & MDIO_STAT1_FAULT) {
 		if (fault_fatal) {
-			EFX_ERR(efx, "PHY MMD %d reporting fatal"
-				" fault: status %x\n", mmd, status);
+			netif_err(efx, hw, efx->net_dev,
+				  "PHY MMD %d reporting fatal"
+				  " fault: status %x\n", mmd, status);
 			return -EIO;
 		} else {
-			EFX_LOG(efx, "PHY MMD %d reporting status"
-				" %x (expected)\n", mmd, status);
+			netif_dbg(efx, hw, efx->net_dev,
+				  "PHY MMD %d reporting status"
+				  " %x (expected)\n", mmd, status);
 		}
 	}
 	return 0;
@@ -103,8 +106,9 @@
 			if (mask & 1) {
 				stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
 				if (stat < 0) {
-					EFX_ERR(efx, "failed to read status of"
-						" MMD %d\n", mmd);
+					netif_err(efx, hw, efx->net_dev,
+						  "failed to read status of"
+						  " MMD %d\n", mmd);
 					return -EIO;
 				}
 				if (stat & MDIO_CTRL1_RESET)
@@ -119,8 +123,9 @@
 		msleep(spintime);
 	}
 	if (in_reset != 0) {
-		EFX_ERR(efx, "not all MMDs came out of reset in time."
-			" MMDs still in reset: %x\n", in_reset);
+		netif_err(efx, hw, efx->net_dev,
+			  "not all MMDs came out of reset in time."
+			  " MMDs still in reset: %x\n", in_reset);
 		rc = -ETIMEDOUT;
 	}
 	return rc;
@@ -142,16 +147,18 @@
 	devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1);
 	devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2);
 	if (devs1 < 0 || devs2 < 0) {
-		EFX_ERR(efx, "failed to read devices present\n");
+		netif_err(efx, hw, efx->net_dev,
+			  "failed to read devices present\n");
 		return -EIO;
 	}
 	devices = devs1 | (devs2 << 16);
 	if ((devices & mmd_mask) != mmd_mask) {
-		EFX_ERR(efx, "required MMDs not present: got %x, "
-			"wanted %x\n", devices, mmd_mask);
+		netif_err(efx, hw, efx->net_dev,
+			  "required MMDs not present: got %x, wanted %x\n",
+			  devices, mmd_mask);
 		return -ENODEV;
 	}
-	EFX_TRACE(efx, "Devices present: %x\n", devices);
+	netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices);
 
 	/* Check all required MMDs are responding and happy. */
 	while (mmd_mask) {
@@ -219,7 +226,7 @@
 {
 	int stat = efx_mdio_read(efx, mmd, MDIO_STAT1);
 
-	EFX_TRACE(efx, "Setting low power mode for MMD %d to %d\n",
+	netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n",
 		  mmd, lpower);
 
 	if (stat & MDIO_STAT1_LPOWERABLE) {
@@ -349,8 +356,8 @@
 
 	if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
 	    (physid2 == 0x0000) || (physid2 == 0xffff)) {
-		EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
-			efx->mdio.prtad);
+		netif_err(efx, hw, efx->net_dev,
+			  "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
 		rc = -EINVAL;
 	} else {
 		rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
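
For reference, the MMD presence test that the converted messages above report on works by reading the Clause 45 "devices in package" registers from one known-good MMD and comparing the combined 32-bit mask against the set of MMDs the PHY is expected to expose. A self-contained sketch of that check; mdio_read here is a stand-in for efx_mdio_read(), and check_mmds_present is not a real driver function.

#include <linux/errno.h>
#include <linux/mdio.h>
#include <linux/types.h>

static int check_mmds_present(int (*mdio_read)(void *ctx, int mmd, u16 reg),
                              void *ctx, int probe_mmd, u32 required_mask)
{
        int devs1 = mdio_read(ctx, probe_mmd, MDIO_DEVS1);
        int devs2 = mdio_read(ctx, probe_mmd, MDIO_DEVS2);
        u32 devices;

        if (devs1 < 0 || devs2 < 0)
                return -EIO;

        /* Clause 45 "devices in package": low word in DEVS1, high in DEVS2 */
        devices = (u32)devs1 | ((u32)devs2 << 16);

        /* Every MMD bit we require must be reported as present. */
        return (devices & required_mask) == required_mask ? 0 : -ENODEV;
}
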
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index f89e719..75791d3 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -51,7 +51,8 @@
 
 	sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
 	if (!sync)
-		EFX_LOG(efx, "XGXS lane status: %x\n", lane_status);
+		netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n",
+			  lane_status);
 	return sync;
 }
 
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index f3ac7f3..02e54b4 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 
-#define EFX_DRIVER_NAME "sfc_mtd"
 #include "net_driver.h"
 #include "spi.h"
 #include "efx.h"
@@ -71,8 +70,10 @@
 
 /* SPI utilities */
 
-static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
+static int
+efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
 {
+	struct efx_mtd *efx_mtd = part->mtd.priv;
 	const struct efx_spi_device *spi = efx_mtd->spi;
 	struct efx_nic *efx = efx_mtd->efx;
 	u8 status;
@@ -92,7 +93,7 @@
 		if (signal_pending(current))
 			return -EINTR;
 	}
-	EFX_ERR(efx, "timed out waiting for %s\n", efx_mtd->name);
+	pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
 	return -ETIMEDOUT;
 }
 
@@ -131,8 +132,10 @@
 	return 0;
 }
 
-static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
+static int
+efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
 {
+	struct efx_mtd *efx_mtd = part->mtd.priv;
 	const struct efx_spi_device *spi = efx_mtd->spi;
 	struct efx_nic *efx = efx_mtd->efx;
 	unsigned pos, block_len;
@@ -156,7 +159,7 @@
 			    NULL, 0);
 	if (rc)
 		return rc;
-	rc = efx_spi_slow_wait(efx_mtd, false);
+	rc = efx_spi_slow_wait(part, false);
 
 	/* Verify the entire region has been wiped */
 	memset(empty, 0xff, sizeof(empty));
@@ -198,13 +201,14 @@
 
 static void efx_mtd_sync(struct mtd_info *mtd)
 {
+	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
 	struct efx_mtd *efx_mtd = mtd->priv;
-	struct efx_nic *efx = efx_mtd->efx;
 	int rc;
 
 	rc = efx_mtd->ops->sync(mtd);
 	if (rc)
-		EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc);
+		pr_err("%s: %s sync failed (%d)\n",
+		       part->name, efx_mtd->name, rc);
 }
 
 static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
@@ -338,7 +342,7 @@
 	rc = mutex_lock_interruptible(&efx->spi_lock);
 	if (rc)
 		return rc;
-	rc = efx_spi_erase(efx_mtd, part->offset + start, len);
+	rc = efx_spi_erase(part, part->offset + start, len);
 	mutex_unlock(&efx->spi_lock);
 	return rc;
 }
@@ -363,12 +367,13 @@
 
 static int falcon_mtd_sync(struct mtd_info *mtd)
 {
+	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
 	struct efx_mtd *efx_mtd = mtd->priv;
 	struct efx_nic *efx = efx_mtd->efx;
 	int rc;
 
 	mutex_lock(&efx->spi_lock);
-	rc = efx_spi_slow_wait(efx_mtd, true);
+	rc = efx_spi_slow_wait(part, true);
 	mutex_unlock(&efx->spi_lock);
 	return rc;
 }
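
The mtd.c changes above pass the struct efx_mtd_partition down instead of the struct efx_mtd, so that the timeout and sync error messages can be prefixed with part->name now that EFX_ERR() and its device-name prefix are gone. The to_efx_mtd_partition() helper used in efx_mtd_sync() and falcon_mtd_sync() is not shown in this excerpt; it is presumably a container_of() wrapper over the embedded struct mtd_info, along these lines (the struct layout here is an assumption, reduced to the members referenced above).

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

/* Assumed partition layout: the mtd_info is embedded in the partition,
 * and mtd.priv points back at the owning struct efx_mtd (as the
 * "part->mtd.priv" dereferences above imply). */
struct efx_mtd_partition {
        struct mtd_info mtd;
        loff_t offset;                  /* used as part->offset above */
        char name[32];                  /* assumed storage for part->name */
};

/* Plausible definition of the to_efx_mtd_partition() accessor. */
static inline struct efx_mtd_partition *
to_efx_mtd_partition(struct mtd_info *mtd)
{
        return container_of(mtd, struct efx_mtd_partition, mtd);
}
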
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 4762c91..bab836c 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -13,11 +13,16 @@
 #ifndef EFX_NET_DRIVER_H
 #define EFX_NET_DRIVER_H
 
+#if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG)
+#define DEBUG
+#endif
+
 #include <linux/version.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/timer.h>
 #include <linux/mdio.h>
 #include <linux/list.h>
 #include <linux/pci.h>
@@ -34,9 +39,7 @@
  * Build definitions
  *
  **************************************************************************/
-#ifndef EFX_DRIVER_NAME
-#define EFX_DRIVER_NAME	"sfc"
-#endif
+
 #define EFX_DRIVER_VERSION	"3.0"
 
 #ifdef EFX_ENABLE_DEBUG
@@ -47,35 +50,6 @@
 #define EFX_WARN_ON_PARANOID(x) do {} while (0)
 #endif
 
-/* Un-rate-limited logging */
-#define EFX_ERR(efx, fmt, args...) \
-dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
-
-#define EFX_INFO(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
-
-#ifdef EFX_ENABLE_DEBUG
-#define EFX_LOG(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
-#else
-#define EFX_LOG(efx, fmt, args...) \
-dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
-#endif
-
-#define EFX_TRACE(efx, fmt, args...) do {} while (0)
-
-#define EFX_REGDUMP(efx, fmt, args...) do {} while (0)
-
-/* Rate-limited logging */
-#define EFX_ERR_RL(efx, fmt, args...) \
-do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0)
-
-#define EFX_INFO_RL(efx, fmt, args...) \
-do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
-
-#define EFX_LOG_RL(efx, fmt, args...) \
-do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
-
 /**************************************************************************
  *
  * Efx data structures
@@ -221,7 +195,6 @@
  *	If both this and skb are %NULL, the buffer slot is currently free.
  * @data: Pointer to ethernet header
  * @len: Buffer length, in bytes.
- * @unmap_addr: DMA address to unmap
  */
 struct efx_rx_buffer {
 	dma_addr_t dma_addr;
@@ -229,7 +202,24 @@
 	struct page *page;
 	char *data;
 	unsigned int len;
-	dma_addr_t unmap_addr;
+};
+
+/**
+ * struct efx_rx_page_state - Page-based rx buffer state
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
+ *
+ * @refcnt: Number of struct efx_rx_buffer's referencing this page.
+ *	When refcnt falls to zero, the page is unmapped for dma
+ * @dma_addr: The dma address of this page.
+ */
+struct efx_rx_page_state {
+	unsigned refcnt;
+	dma_addr_t dma_addr;
+
+	unsigned int __pad[0] ____cacheline_aligned;
 };
 
 /**
@@ -242,10 +232,6 @@
  * @added_count: Number of buffers added to the receive queue.
  * @notified_count: Number of buffers given to NIC (<= @added_count).
  * @removed_count: Number of buffers removed from the receive queue.
- * @add_lock: Receive queue descriptor add spin lock.
- *	This lock must be held in order to add buffers to the RX
- *	descriptor ring (rxd and buffer) and to update added_count (but
- *	not removed_count).
  * @max_fill: RX descriptor maximum fill level (<= ring size)
  * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
  *	(<= @max_fill)
@@ -259,12 +245,7 @@
  *	overflow was observed.  It should never be set.
  * @alloc_page_count: RX allocation strategy counter.
  * @alloc_skb_count: RX allocation strategy counter.
- * @work: Descriptor push work thread
- * @buf_page: Page for next RX buffer.
- *	We can use a single page for multiple RX buffers. This tracks
- *	the remaining space in the allocation.
- * @buf_dma_addr: Page's DMA address.
- * @buf_data: Page's host address.
+ * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
  * @flushed: Use when handling queue flushing
  */
 struct efx_rx_queue {
@@ -277,7 +258,6 @@
 	int added_count;
 	int notified_count;
 	int removed_count;
-	spinlock_t add_lock;
 	unsigned int max_fill;
 	unsigned int fast_fill_trigger;
 	unsigned int fast_fill_limit;
@@ -285,12 +265,9 @@
 	unsigned int min_overfill;
 	unsigned int alloc_page_count;
 	unsigned int alloc_skb_count;
-	struct delayed_work work;
+	struct timer_list slow_fill;
 	unsigned int slow_fill_count;
 
-	struct page *buf_page;
-	dma_addr_t buf_dma_addr;
-	char *buf_data;
 	enum efx_flush_state flushed;
 };
 
@@ -336,7 +313,7 @@
  * @eventq: Event queue buffer
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
- * @eventq_magic: Event queue magic value for driver-generated test events
+ * @magic_count: Event queue test event count
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -367,7 +344,7 @@
 	struct efx_special_buffer eventq;
 	unsigned int eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
-	unsigned int eventq_magic;
+	unsigned int magic_count;
 
 	unsigned int irq_count;
 	unsigned int irq_mod_score;
@@ -645,6 +622,7 @@
  * struct efx_nic - an Efx NIC
  * @name: Device name (net device name or bus id before net device registered)
  * @pci_dev: The PCI device
+ * @port_num: Index of this host port within the controller
  * @type: Controller type attributes
  * @legacy_irq: IRQ number
  * @workqueue: Workqueue for port reconfigures and the HW monitor.
@@ -658,6 +636,7 @@
  * @interrupt_mode: Interrupt mode
  * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
  * @irq_rx_moderation: IRQ moderation time for RX event queues
+ * @msg_enable: Log message enable flags
  * @state: Device state flag. Serialised by the rtnl_lock.
  * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
  * @tx_queue: TX DMA queues
@@ -669,6 +648,7 @@
  * @n_tx_channels: Number of channels used for TX
  * @rx_buffer_len: RX buffer length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_indir_table: Indirection table for RSS
  * @int_error_count: Number of internal errors seen recently
  * @int_error_expire: Time at which error count will be expired
  * @irq_status: Interrupt status buffer
@@ -728,6 +708,7 @@
 struct efx_nic {
 	char name[IFNAMSIZ];
 	struct pci_dev *pci_dev;
+	unsigned port_num;
 	const struct efx_nic_type *type;
 	int legacy_irq;
 	struct workqueue_struct *workqueue;
@@ -740,6 +721,7 @@
 	enum efx_int_mode interrupt_mode;
 	bool irq_rx_adaptive;
 	unsigned int irq_rx_moderation;
+	u32 msg_enable;
 
 	enum nic_state state;
 	enum reset_type reset_pending;
@@ -754,6 +736,8 @@
 	unsigned n_tx_channels;
 	unsigned int rx_buffer_len;
 	unsigned int rx_buffer_order;
+	u8 rx_hash_key[40];
+	u32 rx_indir_table[128];
 
 	unsigned int_error_count;
 	unsigned long int_error_expire;
@@ -866,7 +850,8 @@
  * @evq_ptr_tbl_base: Event queue pointer table base address
  * @evq_rptr_tbl_base: Event queue read-pointer table base address
  * @max_dma_mask: Maximum possible DMA mask
- * @rx_buffer_padding: Padding added to each RX buffer
+ * @rx_buffer_hash_size: Size of hash at start of RX buffer
+ * @rx_buffer_padding: Size of padding at end of RX buffer
  * @max_interrupt_mode: Highest capability interrupt mode supported
  *	from &enum efx_init_mode.
  * @phys_addr_channels: Number of channels with physically addressed
@@ -910,6 +895,7 @@
 	unsigned int evq_ptr_tbl_base;
 	unsigned int evq_rptr_tbl_base;
 	u64 max_dma_mask;
+	unsigned int rx_buffer_hash_size;
 	unsigned int rx_buffer_padding;
 	unsigned int max_interrupt_mode;
 	unsigned int phys_addr_channels;
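
Two of the net_driver.h changes above are related: the RX queue's delayed_work refill has become a plain timer (@slow_fill), and its only job, per the new kernel-doc, is to defer efx_nic_generate_fill_event(). A sketch of how such a timer is typically wired up with the 2.6.3x timer API; the example_* names and the 100 ms retry period are assumptions, and the actual wiring lives in rx.c outside this excerpt.

#include <linux/jiffies.h>
#include <linux/timer.h>

#include "net_driver.h"
#include "nic.h"

/* Illustrative handler for the new rx_queue->slow_fill timer: post a
 * "fill" magic event so the refill runs from the channel's NAPI
 * context (see efx_handle_generated_event() in nic.c). */
static void example_rx_slow_fill(unsigned long context)
{
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

        efx_nic_generate_fill_event(rx_queue->channel);
}

static void example_init_slow_fill(struct efx_rx_queue *rx_queue)
{
        setup_timer(&rx_queue->slow_fill, example_rx_slow_fill,
                    (unsigned long)rx_queue);
}

static void example_defer_refill(struct efx_rx_queue *rx_queue)
{
        /* Retry later if atomic buffer allocation failed; 100 ms is an
         * assumed period, not taken from this patch. */
        mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
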
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 5d3aaec..f595d92 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -79,6 +79,14 @@
 /* Depth of RX flush request fifo */
 #define EFX_RX_FLUSH_COUNT 4
 
+/* Generated event code for efx_generate_test_event() */
+#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
+	(0x00010100 + (_channel)->channel)
+
+/* Generated event code for efx_generate_fill_event() */
+#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
+	(0x00010200 + (_channel)->channel)
+
 /**************************************************************************
  *
  * Solarstorm hardware access
@@ -171,9 +179,10 @@
 	return 0;
 
 fail:
-	EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
-		" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
-		EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+	netif_err(efx, hw, efx->net_dev,
+		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
 	return -EIO;
 }
 
@@ -206,8 +215,9 @@
 	for (i = 0; i < buffer->entries; i++) {
 		index = buffer->index + i;
 		dma_addr = buffer->dma_addr + (i * 4096);
-		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
-			index, (unsigned long long)dma_addr);
+		netif_dbg(efx, probe, efx->net_dev,
+			  "mapping special buffer %d at %llx\n",
+			  index, (unsigned long long)dma_addr);
 		EFX_POPULATE_QWORD_3(buf_desc,
 				     FRF_AZ_BUF_ADR_REGION, 0,
 				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
@@ -227,8 +237,8 @@
 	if (!buffer->entries)
 		return;
 
-	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
-		buffer->index, buffer->index + buffer->entries - 1);
+	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+		  buffer->index, buffer->index + buffer->entries - 1);
 
 	EFX_POPULATE_OWORD_4(buf_tbl_upd,
 			     FRF_AZ_BUF_UPD_CMD, 0,
@@ -268,11 +278,12 @@
 	buffer->index = efx->next_buffer_table;
 	efx->next_buffer_table += buffer->entries;
 
-	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
-		"(virt %p phys %llx)\n", buffer->index,
-		buffer->index + buffer->entries - 1,
-		(u64)buffer->dma_addr, len,
-		buffer->addr, (u64)virt_to_phys(buffer->addr));
+	netif_dbg(efx, probe, efx->net_dev,
+		  "allocating special buffers %d-%d at %llx+%x "
+		  "(virt %p phys %llx)\n", buffer->index,
+		  buffer->index + buffer->entries - 1,
+		  (u64)buffer->dma_addr, len,
+		  buffer->addr, (u64)virt_to_phys(buffer->addr));
 
 	return 0;
 }
@@ -283,11 +294,12 @@
 	if (!buffer->addr)
 		return;
 
-	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
-		"(virt %p phys %llx)\n", buffer->index,
-		buffer->index + buffer->entries - 1,
-		(u64)buffer->dma_addr, buffer->len,
-		buffer->addr, (u64)virt_to_phys(buffer->addr));
+	netif_dbg(efx, hw, efx->net_dev,
+		  "deallocating special buffers %d-%d at %llx+%x "
+		  "(virt %p phys %llx)\n", buffer->index,
+		  buffer->index + buffer->entries - 1,
+		  (u64)buffer->dma_addr, buffer->len,
+		  buffer->addr, (u64)virt_to_phys(buffer->addr));
 
 	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
 			    buffer->dma_addr);
@@ -547,9 +559,10 @@
 	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
 	bool iscsi_digest_en = is_b0;
 
-	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
-		rx_queue->queue, rx_queue->rxd.index,
-		rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+	netif_dbg(efx, hw, efx->net_dev,
+		  "RX queue %d ring in special buffers %d-%d\n",
+		  rx_queue->queue, rx_queue->rxd.index,
+		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 
 	rx_queue->flushed = FLUSH_NONE;
 
@@ -686,9 +699,10 @@
 		   EFX_WORKAROUND_10727(efx)) {
 		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
 	} else {
-		EFX_ERR(efx, "channel %d unexpected TX event "
-			EFX_QWORD_FMT"\n", channel->channel,
-			EFX_QWORD_VAL(*event));
+		netif_err(efx, tx_err, efx->net_dev,
+			  "channel %d unexpected TX event "
+			  EFX_QWORD_FMT"\n", channel->channel,
+			  EFX_QWORD_VAL(*event));
 	}
 
 	return tx_packets;
@@ -751,20 +765,21 @@
 	 * to a FIFO overflow.
 	 */
 #ifdef EFX_ENABLE_DEBUG
-	if (rx_ev_other_err) {
-		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
-			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
-			    rx_queue->queue, EFX_QWORD_VAL(*event),
-			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
-			    rx_ev_ip_hdr_chksum_err ?
-			    " [IP_HDR_CHKSUM_ERR]" : "",
-			    rx_ev_tcp_udp_chksum_err ?
-			    " [TCP_UDP_CHKSUM_ERR]" : "",
-			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
-			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
-			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
-			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
-			    rx_ev_pause_frm ? " [PAUSE]" : "");
+	if (rx_ev_other_err && net_ratelimit()) {
+		netif_dbg(efx, rx_err, efx->net_dev,
+			  " RX queue %d unexpected RX event "
+			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+			  rx_queue->queue, EFX_QWORD_VAL(*event),
+			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+			  rx_ev_ip_hdr_chksum_err ?
+			  " [IP_HDR_CHKSUM_ERR]" : "",
+			  rx_ev_tcp_udp_chksum_err ?
+			  " [TCP_UDP_CHKSUM_ERR]" : "",
+			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+			  rx_ev_pause_frm ? " [PAUSE]" : "");
 	}
 #endif
 }
@@ -778,8 +793,9 @@
 
 	expected = rx_queue->removed_count & EFX_RXQ_MASK;
 	dropped = (index - expected) & EFX_RXQ_MASK;
-	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
-		dropped, index, expected);
+	netif_info(efx, rx_err, efx->net_dev,
+		   "dropped %d events (index=%d expected=%d)\n",
+		   dropped, index, expected);
 
 	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
 			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
@@ -850,6 +866,26 @@
 		      checksummed, discard);
 }
 
+static void
+efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
+{
+	struct efx_nic *efx = channel->efx;
+	unsigned code;
+
+	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
+		++channel->magic_count;
+	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
+		/* The queue must be empty, so we won't receive any rx
+		 * events, so efx_process_channel() won't refill the
+		 * queue. Refill it here */
+		efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+	else
+		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+			  "generated event "EFX_QWORD_FMT"\n",
+			  channel->channel, EFX_QWORD_VAL(*event));
+}
+
 /* Global events are basically PHY events */
 static void
 efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
@@ -873,8 +909,9 @@
 	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
 	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
 	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
-		EFX_ERR(efx, "channel %d seen global RX_RESET "
-			"event. Resetting.\n", channel->channel);
+		netif_err(efx, rx_err, efx->net_dev,
+			  "channel %d seen global RX_RESET event. Resetting.\n",
+			  channel->channel);
 
 		atomic_inc(&efx->rx_reset);
 		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
@@ -883,9 +920,10 @@
 	}
 
 	if (!handled)
-		EFX_ERR(efx, "channel %d unknown global event "
-			EFX_QWORD_FMT "\n", channel->channel,
-			EFX_QWORD_VAL(*event));
+		netif_err(efx, hw, efx->net_dev,
+			  "channel %d unknown global event "
+			  EFX_QWORD_FMT "\n", channel->channel,
+			  EFX_QWORD_VAL(*event));
 }
 
 static void
@@ -900,31 +938,35 @@
 
 	switch (ev_sub_code) {
 	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
-		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
-			  channel->channel, ev_sub_data);
+		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+			   channel->channel, ev_sub_data);
 		break;
 	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
-		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
-			  channel->channel, ev_sub_data);
+		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+			   channel->channel, ev_sub_data);
 		break;
 	case FSE_AZ_EVQ_INIT_DONE_EV:
-		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
-			channel->channel, ev_sub_data);
+		netif_dbg(efx, hw, efx->net_dev,
+			  "channel %d EVQ %d initialised\n",
+			  channel->channel, ev_sub_data);
 		break;
 	case FSE_AZ_SRM_UPD_DONE_EV:
-		EFX_TRACE(efx, "channel %d SRAM update done\n",
-			  channel->channel);
+		netif_vdbg(efx, hw, efx->net_dev,
+			   "channel %d SRAM update done\n", channel->channel);
 		break;
 	case FSE_AZ_WAKE_UP_EV:
-		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
-			  channel->channel, ev_sub_data);
+		netif_vdbg(efx, hw, efx->net_dev,
+			   "channel %d RXQ %d wakeup event\n",
+			   channel->channel, ev_sub_data);
 		break;
 	case FSE_AZ_TIMER_EV:
-		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
-			  channel->channel, ev_sub_data);
+		netif_vdbg(efx, hw, efx->net_dev,
+			   "channel %d RX queue %d timer expired\n",
+			   channel->channel, ev_sub_data);
 		break;
 	case FSE_AA_RX_RECOVER_EV:
-		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
+		netif_err(efx, rx_err, efx->net_dev,
+			  "channel %d seen DRIVER RX_RESET event. "
 			"Resetting.\n", channel->channel);
 		atomic_inc(&efx->rx_reset);
 		efx_schedule_reset(efx,
@@ -933,19 +975,22 @@
 				   RESET_TYPE_DISABLE);
 		break;
 	case FSE_BZ_RX_DSC_ERROR_EV:
-		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
-			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+		netif_err(efx, rx_err, efx->net_dev,
+			  "RX DMA Q %d reports descriptor fetch error."
+			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
 		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
 		break;
 	case FSE_BZ_TX_DSC_ERROR_EV:
-		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
-			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+		netif_err(efx, tx_err, efx->net_dev,
+			  "TX DMA Q %d reports descriptor fetch error."
+			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
 		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
 		break;
 	default:
-		EFX_TRACE(efx, "channel %d unknown driver event code %d "
-			  "data %04x\n", channel->channel, ev_sub_code,
-			  ev_sub_data);
+		netif_vdbg(efx, hw, efx->net_dev,
+			   "channel %d unknown driver event code %d "
+			   "data %04x\n", channel->channel, ev_sub_code,
+			   ev_sub_data);
 		break;
 	}
 }
@@ -968,8 +1013,9 @@
 			/* End of events */
 			break;
 
-		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
-			  channel->channel, EFX_QWORD_VAL(event));
+		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+			   "channel %d event is "EFX_QWORD_FMT"\n",
+			   channel->channel, EFX_QWORD_VAL(event));
 
 		/* Clear this event by marking it all ones */
 		EFX_SET_QWORD(*p_event);
@@ -993,11 +1039,7 @@
 			}
 			break;
 		case FSE_AZ_EV_CODE_DRV_GEN_EV:
-			channel->eventq_magic = EFX_QWORD_FIELD(
-				event, FSF_AZ_DRV_GEN_EV_MAGIC);
-			EFX_LOG(channel->efx, "channel %d received generated "
-				"event "EFX_QWORD_FMT"\n", channel->channel,
-				EFX_QWORD_VAL(event));
+			efx_handle_generated_event(channel, &event);
 			break;
 		case FSE_AZ_EV_CODE_GLOBAL_EV:
 			efx_handle_global_event(channel, &event);
@@ -1009,9 +1051,10 @@
 			efx_mcdi_process_event(channel, &event);
 			break;
 		default:
-			EFX_ERR(channel->efx, "channel %d unknown event type %d"
-				" (data " EFX_QWORD_FMT ")\n", channel->channel,
-				ev_code, EFX_QWORD_VAL(event));
+			netif_err(channel->efx, hw, channel->efx->net_dev,
+				  "channel %d unknown event type %d (data "
+				  EFX_QWORD_FMT ")\n", channel->channel,
+				  ev_code, EFX_QWORD_VAL(event));
 		}
 	}
 
@@ -1036,9 +1079,10 @@
 	efx_oword_t reg;
 	struct efx_nic *efx = channel->efx;
 
-	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
-		channel->channel, channel->eventq.index,
-		channel->eventq.index + channel->eventq.entries - 1);
+	netif_dbg(efx, hw, efx->net_dev,
+		  "channel %d event queue in special buffers %d-%d\n",
+		  channel->channel, channel->eventq.index,
+		  channel->eventq.index + channel->eventq.entries - 1);
 
 	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
 		EFX_POPULATE_OWORD_3(reg,
@@ -1088,12 +1132,20 @@
 }
 
 
-/* Generates a test event on the event queue.  A subsequent call to
- * process_eventq() should pick up the event and place the value of
- * "magic" into channel->eventq_magic;
- */
-void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic)
+void efx_nic_generate_test_event(struct efx_channel *channel)
 {
+	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
+	efx_qword_t test_event;
+
+	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+			     FSE_AZ_EV_CODE_DRV_GEN_EV,
+			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+	efx_generate_event(channel, &test_event);
+}
+
+void efx_nic_generate_fill_event(struct efx_channel *channel)
+{
+	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
 	efx_qword_t test_event;
 
 	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
@@ -1208,20 +1260,19 @@
 	 * leading to a reset, or fake up success anyway */
 	efx_for_each_tx_queue(tx_queue, efx) {
 		if (tx_queue->flushed != FLUSH_DONE)
-			EFX_ERR(efx, "tx queue %d flush command timed out\n",
-				tx_queue->queue);
+			netif_err(efx, hw, efx->net_dev,
+				  "tx queue %d flush command timed out\n",
+				  tx_queue->queue);
 		tx_queue->flushed = FLUSH_DONE;
 	}
 	efx_for_each_rx_queue(rx_queue, efx) {
 		if (rx_queue->flushed != FLUSH_DONE)
-			EFX_ERR(efx, "rx queue %d flush command timed out\n",
-				rx_queue->queue);
+			netif_err(efx, hw, efx->net_dev,
+				  "rx queue %d flush command timed out\n",
+				  rx_queue->queue);
 		rx_queue->flushed = FLUSH_DONE;
 	}
 
-	if (EFX_WORKAROUND_7803(efx))
-		return 0;
-
 	return -ETIMEDOUT;
 }
 
@@ -1290,10 +1341,10 @@
 	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
 	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
 
-	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
-		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
-		EFX_OWORD_VAL(fatal_intr),
-		error ? "disabling bus mastering" : "no recognised error");
+	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
+		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+		  EFX_OWORD_VAL(fatal_intr),
+		  error ? "disabling bus mastering" : "no recognised error");
 
 	/* If this is a memory parity error dump which blocks are offending */
 	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
@@ -1301,8 +1352,9 @@
 	if (mem_perr) {
 		efx_oword_t reg;
 		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
-		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
-			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
+		netif_err(efx, hw, efx->net_dev,
+			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
+			  EFX_OWORD_VAL(reg));
 	}
 
 	/* Disable both devices */
@@ -1319,11 +1371,13 @@
 			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
 	}
 	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
-		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
+		netif_err(efx, hw, efx->net_dev,
+			  "SYSTEM ERROR - reset scheduled\n");
 		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
 	} else {
-		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
-			"NIC will be disabled\n");
+		netif_err(efx, hw, efx->net_dev,
+			  "SYSTEM ERROR - max number of errors seen."
+			  "NIC will be disabled\n");
 		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
 	}
 
@@ -1386,8 +1440,9 @@
 
 	if (result == IRQ_HANDLED) {
 		efx->last_irq_cpu = raw_smp_processor_id();
-		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
-			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+		netif_vdbg(efx, intr, efx->net_dev,
+			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
 	}
 
 	return result;
@@ -1408,8 +1463,9 @@
 	int syserr;
 
 	efx->last_irq_cpu = raw_smp_processor_id();
-	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
-		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+	netif_vdbg(efx, intr, efx->net_dev,
+		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
 
 	/* Check to see if we have a serious error condition */
 	if (channel->channel == efx->fatal_irq_level) {
@@ -1428,22 +1484,21 @@
 /* Setup RSS indirection table.
  * This maps from the hash value of the packet to RXQ
  */
-static void efx_setup_rss_indir_table(struct efx_nic *efx)
+void efx_nic_push_rx_indir_table(struct efx_nic *efx)
 {
-	int i = 0;
-	unsigned long offset;
+	size_t i = 0;
 	efx_dword_t dword;
 
 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
 		return;
 
-	for (offset = FR_BZ_RX_INDIRECTION_TBL;
-	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
-	     offset += 0x10) {
+	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+		     FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
 		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
-				     i % efx->n_rx_channels);
-		efx_writed(efx, &dword, offset);
-		i++;
+				     efx->rx_indir_table[i]);
+		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
 	}
 }
 
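
The hunk above replaces the hard-coded round-robin write loop with a push of the new efx->rx_indir_table[] array, so the table contents can be changed by other code (for example an ethtool interface) without touching the register-programming loop. The default contents that reproduce the old behaviour are simply the removed i % n_rx_channels spread; the function below illustrates that default fill and is not code from this patch (the real initialisation sits in efx.c, outside this excerpt).

#include <linux/kernel.h>

#include "net_driver.h"
#include "nic.h"

/* Illustrative default for the new RSS indirection table: the same
 * round-robin spread over the RX channels that the removed loop used
 * to program directly into FR_BZ_RX_INDIRECTION_TBL. */
static void example_set_default_rx_indir_table(struct efx_nic *efx)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
                efx->rx_indir_table[i] = i % efx->n_rx_channels;

        efx_nic_push_rx_indir_table(efx);
}
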
@@ -1465,8 +1520,9 @@
 		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
 				 efx->name, efx);
 		if (rc) {
-			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
-				efx->pci_dev->irq);
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to hook legacy IRQ %d\n",
+				  efx->pci_dev->irq);
 			goto fail1;
 		}
 		return 0;
@@ -1478,7 +1534,8 @@
 				 IRQF_PROBE_SHARED, /* Not shared */
 				 channel->name, channel);
 		if (rc) {
-			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to hook IRQ %d\n", channel->irq);
 			goto fail2;
 		}
 	}
@@ -1576,7 +1633,7 @@
 	EFX_INVERT_OWORD(temp);
 	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
 
-	efx_setup_rss_indir_table(efx);
+	efx_nic_push_rx_indir_table(efx);
 
 	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
 	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
@@ -1598,3 +1655,269 @@
 		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
 }
+
+/* Register dump */
+
+#define REGISTER_REVISION_A	1
+#define REGISTER_REVISION_B	2
+#define REGISTER_REVISION_C	3
+#define REGISTER_REVISION_Z	3	/* latest revision */
+
+struct efx_nic_reg {
+	u32 offset:24;
+	u32 min_revision:2, max_revision:2;
+};
+
+#define REGISTER(name, min_rev, max_rev) {				\
+	FR_ ## min_rev ## max_rev ## _ ## name,				\
+	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
+}
+#define REGISTER_AA(name) REGISTER(name, A, A)
+#define REGISTER_AB(name) REGISTER(name, A, B)
+#define REGISTER_AZ(name) REGISTER(name, A, Z)
+#define REGISTER_BB(name) REGISTER(name, B, B)
+#define REGISTER_BZ(name) REGISTER(name, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, C, Z)
+
+static const struct efx_nic_reg efx_nic_regs[] = {
+	REGISTER_AZ(ADR_REGION),
+	REGISTER_AZ(INT_EN_KER),
+	REGISTER_BZ(INT_EN_CHAR),
+	REGISTER_AZ(INT_ADR_KER),
+	REGISTER_BZ(INT_ADR_CHAR),
+	/* INT_ACK_KER is WO */
+	/* INT_ISR0 is RC */
+	REGISTER_AZ(HW_INIT),
+	REGISTER_CZ(USR_EV_CFG),
+	REGISTER_AB(EE_SPI_HCMD),
+	REGISTER_AB(EE_SPI_HADR),
+	REGISTER_AB(EE_SPI_HDATA),
+	REGISTER_AB(EE_BASE_PAGE),
+	REGISTER_AB(EE_VPD_CFG0),
+	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
+	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
+	/* PCIE_CORE_INDIRECT is indirect */
+	REGISTER_AB(NIC_STAT),
+	REGISTER_AB(GPIO_CTL),
+	REGISTER_AB(GLB_CTL),
+	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
+	REGISTER_BZ(DP_CTRL),
+	REGISTER_AZ(MEM_STAT),
+	REGISTER_AZ(CS_DEBUG),
+	REGISTER_AZ(ALTERA_BUILD),
+	REGISTER_AZ(CSR_SPARE),
+	REGISTER_AB(PCIE_SD_CTL0123),
+	REGISTER_AB(PCIE_SD_CTL45),
+	REGISTER_AB(PCIE_PCS_CTL_STAT),
+	/* DEBUG_DATA_OUT is not used */
+	/* DRV_EV is WO */
+	REGISTER_AZ(EVQ_CTL),
+	REGISTER_AZ(EVQ_CNT1),
+	REGISTER_AZ(EVQ_CNT2),
+	REGISTER_AZ(BUF_TBL_CFG),
+	REGISTER_AZ(SRM_RX_DC_CFG),
+	REGISTER_AZ(SRM_TX_DC_CFG),
+	REGISTER_AZ(SRM_CFG),
+	/* BUF_TBL_UPD is WO */
+	REGISTER_AZ(SRM_UPD_EVQ),
+	REGISTER_AZ(SRAM_PARITY),
+	REGISTER_AZ(RX_CFG),
+	REGISTER_BZ(RX_FILTER_CTL),
+	/* RX_FLUSH_DESCQ is WO */
+	REGISTER_AZ(RX_DC_CFG),
+	REGISTER_AZ(RX_DC_PF_WM),
+	REGISTER_BZ(RX_RSS_TKEY),
+	/* RX_NODESC_DROP is RC */
+	REGISTER_AA(RX_SELF_RST),
+	/* RX_DEBUG, RX_PUSH_DROP are not used */
+	REGISTER_CZ(RX_RSS_IPV6_REG1),
+	REGISTER_CZ(RX_RSS_IPV6_REG2),
+	REGISTER_CZ(RX_RSS_IPV6_REG3),
+	/* TX_FLUSH_DESCQ is WO */
+	REGISTER_AZ(TX_DC_CFG),
+	REGISTER_AA(TX_CHKSM_CFG),
+	REGISTER_AZ(TX_CFG),
+	/* TX_PUSH_DROP is not used */
+	REGISTER_AZ(TX_RESERVED),
+	REGISTER_BZ(TX_PACE),
+	/* TX_PACE_DROP_QID is RC */
+	REGISTER_BB(TX_VLAN),
+	REGISTER_BZ(TX_IPFIL_PORTEN),
+	REGISTER_AB(MD_TXD),
+	REGISTER_AB(MD_RXD),
+	REGISTER_AB(MD_CS),
+	REGISTER_AB(MD_PHY_ADR),
+	REGISTER_AB(MD_ID),
+	/* MD_STAT is RC */
+	REGISTER_AB(MAC_STAT_DMA),
+	REGISTER_AB(MAC_CTRL),
+	REGISTER_BB(GEN_MODE),
+	REGISTER_AB(MAC_MC_HASH_REG0),
+	REGISTER_AB(MAC_MC_HASH_REG1),
+	REGISTER_AB(GM_CFG1),
+	REGISTER_AB(GM_CFG2),
+	/* GM_IPG and GM_HD are not used */
+	REGISTER_AB(GM_MAX_FLEN),
+	/* GM_TEST is not used */
+	REGISTER_AB(GM_ADR1),
+	REGISTER_AB(GM_ADR2),
+	REGISTER_AB(GMF_CFG0),
+	REGISTER_AB(GMF_CFG1),
+	REGISTER_AB(GMF_CFG2),
+	REGISTER_AB(GMF_CFG3),
+	REGISTER_AB(GMF_CFG4),
+	REGISTER_AB(GMF_CFG5),
+	REGISTER_BB(TX_SRC_MAC_CTL),
+	REGISTER_AB(XM_ADR_LO),
+	REGISTER_AB(XM_ADR_HI),
+	REGISTER_AB(XM_GLB_CFG),
+	REGISTER_AB(XM_TX_CFG),
+	REGISTER_AB(XM_RX_CFG),
+	REGISTER_AB(XM_MGT_INT_MASK),
+	REGISTER_AB(XM_FC),
+	REGISTER_AB(XM_PAUSE_TIME),
+	REGISTER_AB(XM_TX_PARAM),
+	REGISTER_AB(XM_RX_PARAM),
+	/* XM_MGT_INT_MSK (note no 'A') is RC */
+	REGISTER_AB(XX_PWR_RST),
+	REGISTER_AB(XX_SD_CTL),
+	REGISTER_AB(XX_TXDRV_CTL),
+	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
+	/* XX_CORE_STAT is partly RC */
+};
+
+struct efx_nic_reg_table {
+	u32 offset:24;
+	u32 min_revision:2, max_revision:2;
+	u32 step:6, rows:21;
+};
+
+#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
+	offset,								\
+	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
+	step, rows							\
+}
+#define REGISTER_TABLE(name, min_rev, max_rev) 				\
+	REGISTER_TABLE_DIMENSIONS(					\
+		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
+		min_rev, max_rev,					\
+		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
+		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
+#define REGISTER_TABLE_BB_CZ(name)					\
+	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
+				  FR_BZ_ ## name ## _STEP,		\
+				  FR_BB_ ## name ## _ROWS),		\
+	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
+				  FR_BZ_ ## name ## _STEP,		\
+				  FR_CZ_ ## name ## _ROWS)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
+
+static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
+	/* DRIVER is not used */
+	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
+	REGISTER_TABLE_BB(TX_IPFIL_TBL),
+	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
+	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
+	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
+	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
+	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
+	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
+	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
+	/* The register buffer is allocated with slab, so we can't
+	 * reasonably read all of the buffer table (up to 8MB!).
+	 * However this driver will only use a few entries.  Reading
+	 * 1K entries allows for some expansion of queue count and
+	 * size before we need to change the version. */
+	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
+				  A, A, 8, 1024),
+	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
+				  B, Z, 8, 1024),
+	/* RX_FILTER_TBL{0,1} is huge and not used by this driver */
+	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
+	REGISTER_TABLE_BB_CZ(TIMER_TBL),
+	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
+	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
+	/* TX_FILTER_TBL0 is huge and not used by this driver */
+	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
+	REGISTER_TABLE_CZ(MC_TREG_SMEM),
+	/* MSIX_PBA_TABLE is not mapped */
+	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
+};
+
+size_t efx_nic_get_regs_len(struct efx_nic *efx)
+{
+	const struct efx_nic_reg *reg;
+	const struct efx_nic_reg_table *table;
+	size_t len = 0;
+
+	for (reg = efx_nic_regs;
+	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+	     reg++)
+		if (efx->type->revision >= reg->min_revision &&
+		    efx->type->revision <= reg->max_revision)
+			len += sizeof(efx_oword_t);
+
+	for (table = efx_nic_reg_tables;
+	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+	     table++)
+		if (efx->type->revision >= table->min_revision &&
+		    efx->type->revision <= table->max_revision)
+			len += table->rows * min_t(size_t, table->step, 16);
+
+	return len;
+}
+
+void efx_nic_get_regs(struct efx_nic *efx, void *buf)
+{
+	const struct efx_nic_reg *reg;
+	const struct efx_nic_reg_table *table;
+
+	for (reg = efx_nic_regs;
+	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+	     reg++) {
+		if (efx->type->revision >= reg->min_revision &&
+		    efx->type->revision <= reg->max_revision) {
+			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
+			buf += sizeof(efx_oword_t);
+		}
+	}
+
+	for (table = efx_nic_reg_tables;
+	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+	     table++) {
+		size_t size, i;
+
+		if (!(efx->type->revision >= table->min_revision &&
+		      efx->type->revision <= table->max_revision))
+			continue;
+
+		size = min_t(size_t, table->step, 16);
+
+		for (i = 0; i < table->rows; i++) {
+			switch (table->step) {
+			case 4: /* 32-bit register or SRAM */
+				efx_readd_table(efx, buf, table->offset, i);
+				break;
+			case 8: /* 64-bit SRAM */
+				efx_sram_readq(efx,
+					       efx->membase + table->offset,
+					       buf, i);
+				break;
+			case 16: /* 128-bit register */
+				efx_reado_table(efx, buf, table->offset, i);
+				break;
+			case 32: /* 128-bit register, interleaved */
+				efx_reado_table(efx, buf, table->offset, 2 * i);
+				break;
+			default:
+				WARN_ON(1);
+				return;
+			}
+			buf += size;
+		}
+	}
+}
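
The register dump added above (efx_nic_get_regs_len()/efx_nic_get_regs()) is sized and laid out for an ethtool register dump. A sketch of how it would typically be exposed through ethtool_ops; the example_ethtool_* wrapper names are assumptions, and setting regs->version from the NIC revision is just one plausible choice, as the real glue lives in ethtool.c outside this excerpt.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include "net_driver.h"
#include "nic.h"

static int example_ethtool_get_regs_len(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        return efx_nic_get_regs_len(efx);
}

static void example_ethtool_get_regs(struct net_device *net_dev,
                                     struct ethtool_regs *regs, void *buf)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        regs->version = efx->type->revision;
        efx_nic_get_regs(efx, buf);
}

These would then be hooked up as .get_regs_len and .get_regs in the driver's struct ethtool_ops, making the dump visible via "ethtool -d".
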
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index bbc2c0c..0438dc9 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -142,7 +142,6 @@
 	u32 fw_build;
 	struct efx_mcdi_iface mcdi;
 	int wol_filter_id;
-	u8 ipv6_rss_key[40];
 };
 
 extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -190,8 +189,8 @@
 /* Interrupts and test events */
 extern int efx_nic_init_interrupt(struct efx_nic *efx);
 extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_generate_test_event(struct efx_channel *channel,
-					unsigned int magic);
+extern void efx_nic_generate_test_event(struct efx_channel *channel);
+extern void efx_nic_generate_fill_event(struct efx_channel *channel);
 extern void efx_nic_generate_interrupt(struct efx_nic *efx);
 extern void efx_nic_disable_interrupts(struct efx_nic *efx);
 extern void efx_nic_fini_interrupt(struct efx_nic *efx);
@@ -208,6 +207,7 @@
 extern void falcon_setup_xaui(struct efx_nic *efx);
 extern int falcon_reset_xaui(struct efx_nic *efx);
 extern void efx_nic_init_common(struct efx_nic *efx);
+extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
 
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 			 unsigned int len);
@@ -222,6 +222,9 @@
 				  const struct efx_nic_register_test *regs,
 				  size_t n_regs);
 
+extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
+extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+
 /**************************************************************************
  *
  * Falcon MAC stats
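
The two generator declarations above rely on the disjoint magic-code ranges defined in nic.c (0x00010100 + channel for test events, 0x00010200 + channel for fill events), which is what lets the single DRV_GEN_EV handler tell a self-test event from a deferred-refill request. A standalone illustration of that decode, independent of the driver types; the function itself is purely illustrative.

/* Encodings taken from EFX_CHANNEL_MAGIC_TEST/_FILL in nic.c above. */
enum {
        EXAMPLE_MAGIC_TEST_BASE = 0x00010100,
        EXAMPLE_MAGIC_FILL_BASE = 0x00010200,
};

static int example_classify_magic(unsigned int magic, unsigned int channel)
{
        if (magic == EXAMPLE_MAGIC_TEST_BASE + channel)
                return 1;       /* test event: bump channel->magic_count */
        if (magic == EXAMPLE_MAGIC_FILL_BASE + channel)
                return 2;       /* fill event: refill the channel's RX queue */
        return 0;               /* unexpected driver-generated event */
}
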
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index e077bef..68813d1 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -91,9 +91,10 @@
 		if (time_after(jiffies, timeout)) {
 			/* Some cables have EEPROMs that conflict with the
 			 * PHY's on-board EEPROM so it cannot load firmware */
-			EFX_ERR(efx, "If an SFP+ direct attach cable is"
-				" connected, please check that it complies"
-				" with the SFP+ specification\n");
+			netif_err(efx, hw, efx->net_dev,
+				  "If an SFP+ direct attach cable is"
+				  " connected, please check that it complies"
+				  " with the SFP+ specification\n");
 			return -ETIMEDOUT;
 		}
 		msleep(QT2025C_HEARTB_WAIT);
@@ -145,7 +146,8 @@
 		/* Bug 17689: occasionally heartbeat starts but firmware status
 		 * code never progresses beyond 0x00.  Try again, once, after
 		 * restarting execution of the firmware image. */
-		EFX_LOG(efx, "bashing QT2025C microcontroller\n");
+		netif_dbg(efx, hw, efx->net_dev,
+			  "bashing QT2025C microcontroller\n");
 		qt2025c_restart_firmware(efx);
 		rc = qt2025c_wait_heartbeat(efx);
 		if (rc != 0)
@@ -165,11 +167,12 @@
 	for (i = 0; i < sizeof(firmware_id); i++)
 		firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
 					       PCS_FW_PRODUCT_CODE_1 + i);
-	EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
-		 (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
-		 firmware_id[3] >> 4, firmware_id[3] & 0xf,
-		 firmware_id[4], firmware_id[5],
-		 firmware_id[6], firmware_id[7], firmware_id[8]);
+	netif_info(efx, probe, efx->net_dev,
+		   "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
+		   (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
+		   firmware_id[3] >> 4, firmware_id[3] & 0xf,
+		   firmware_id[4], firmware_id[5],
+		   firmware_id[6], firmware_id[7], firmware_id[8]);
 	phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
 				 ((firmware_id[3] & 0x0f) << 16) |
 				 (firmware_id[4] << 8) | firmware_id[5];
@@ -198,7 +201,7 @@
 	}
 
 	if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
-		EFX_LOG(efx, "bashing QT2025C PMA/PMD\n");
+		netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
 		efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
 				  MDIO_PMA_CTRL1_LOOPBACK, true);
 		msleep(100);
@@ -231,7 +234,8 @@
 	reg = efx_mdio_read(efx, 1, 0xc319);
 	if ((reg & 0x0038) == phy_op_mode)
 		return 0;
-	EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode);
+	netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
+		  phy_op_mode);
 
 	/* This sequence replicates the register writes configured in the boot
 	 * EEPROM (including the differences between board revisions), except
@@ -287,8 +291,9 @@
 	/* Wait for the microcontroller to be ready again */
 	rc = qt2025c_wait_reset(efx);
 	if (rc < 0) {
-		EFX_ERR(efx, "PHY microcontroller reset during mode switch "
-				"timed out\n");
+		netif_err(efx, hw, efx->net_dev,
+			  "PHY microcontroller reset during mode switch "
+			  "timed out\n");
 		return rc;
 	}
 
@@ -324,7 +329,7 @@
 	return 0;
 
  fail:
-	EFX_ERR(efx, "PHY reset timed out\n");
+	netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n");
 	return rc;
 }
 
@@ -353,14 +358,15 @@
 
 	rc = qt202x_reset_phy(efx);
 	if (rc) {
-		EFX_ERR(efx, "PHY init failed\n");
+		netif_err(efx, probe, efx->net_dev, "PHY init failed\n");
 		return rc;
 	}
 
 	devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
-	EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
-		 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
-		 efx_mdio_id_rev(devid));
+	netif_info(efx, probe, efx->net_dev,
+		   "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
+		   devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
+		   efx_mdio_id_rev(devid));
 
 	if (efx->phy_type == PHY_TYPE_QT2025C)
 		qt2025c_firmware_id(efx);
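
The firmware_ver value assembled above packs four version components into one u32: the two nibbles of firmware_id[3] land in bits 27:24 and 19:16, and firmware_id[4] and firmware_id[5] fill bits 15:8 and 7:0, which are exactly the four numbers the netif_info() line prints as "v%d.%d.%d.%d". A small helper that inverts that packing, for illustration only (not driver code).

#include <linux/types.h>

static void example_unpack_qt2025c_fw_ver(u32 fw_ver,
                                           u8 *a, u8 *b, u8 *c, u8 *d)
{
        *a = (fw_ver >> 24) & 0xf;      /* firmware_id[3] >> 4 */
        *b = (fw_ver >> 16) & 0xf;      /* firmware_id[3] & 0xf */
        *c = (fw_ver >> 8) & 0xff;      /* firmware_id[4] */
        *d = fw_ver & 0xff;             /* firmware_id[5] */
}
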
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index e308818..799c461 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -25,6 +25,9 @@
 /* Number of RX descriptors pushed at once. */
 #define EFX_RX_BATCH  8
 
+/* Maximum size of a buffer sharing a page */
+#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+
 /* Size of buffer allocated for skb header area. */
 #define EFX_SKB_HEADERS  64u
 
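
EFX_RX_HALF_PAGE above is the key constant for the page-sharing scheme in the rest of this file: a page is split between two RX buffers only when each buffer's reserved length (which already includes the EFX_PAGE_IP_ALIGN headroom) fits in half a page after the struct efx_rx_page_state header stored at the start of the page. The sibling buffer then lives exactly PAGE_SIZE/2 further on, which is why efx_resurrect_rx_buffer() below can locate it by XORing the address with PAGE_SIZE >> 1. A trivial restatement of the sharing test; the helper is illustrative, not driver code.

#include <linux/types.h>

#include "net_driver.h"

/* Illustrative restatement of the EFX_RX_HALF_PAGE comparison used in
 * efx_init_rx_buffers_page() and efx_recycle_rx_buffer() below. */
static bool example_page_can_be_shared(const struct efx_nic *efx)
{
        size_t half_page = (PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state);

        return efx->rx_buffer_len <= half_page;
}
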
@@ -98,155 +101,151 @@
 	return PAGE_SIZE << efx->rx_buffer_order;
 }
 
+static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
+	return __le32_to_cpup((const __le32 *)(buf->data - 4));
+#else
+	const u8 *data = (const u8 *)(buf->data - 4);
+	return ((u32)data[0]       |
+		(u32)data[1] << 8  |
+		(u32)data[2] << 16 |
+		(u32)data[3] << 24);
+#endif
+}
 
 /**
- * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
+ * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
  *
  * @rx_queue:		Efx RX queue
- * @rx_buf:		RX buffer structure to populate
  *
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information.  Return a negative error code or 0 on success.
+ * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
+ * struct efx_rx_buffer for each one. Return a negative error code or 0
+ * on success. May fail having only inserted fewer than EFX_RX_BATCH
+ * buffers.
  */
-static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
-				  struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	struct net_device *net_dev = efx->net_dev;
+	struct efx_rx_buffer *rx_buf;
 	int skb_len = efx->rx_buffer_len;
+	unsigned index, count;
 
-	rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
-	if (unlikely(!rx_buf->skb))
-		return -ENOMEM;
+	for (count = 0; count < EFX_RX_BATCH; ++count) {
+		index = rx_queue->added_count & EFX_RXQ_MASK;
+		rx_buf = efx_rx_buffer(rx_queue, index);
 
-	/* Adjust the SKB for padding and checksum */
-	skb_reserve(rx_buf->skb, NET_IP_ALIGN);
-	rx_buf->len = skb_len - NET_IP_ALIGN;
-	rx_buf->data = (char *)rx_buf->skb->data;
-	rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+		rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
+		if (unlikely(!rx_buf->skb))
+			return -ENOMEM;
+		rx_buf->page = NULL;
 
-	rx_buf->dma_addr = pci_map_single(efx->pci_dev,
-					  rx_buf->data, rx_buf->len,
-					  PCI_DMA_FROMDEVICE);
+		/* Adjust the SKB for padding and checksum */
+		skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+		rx_buf->len = skb_len - NET_IP_ALIGN;
+		rx_buf->data = (char *)rx_buf->skb->data;
+		rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
-		dev_kfree_skb_any(rx_buf->skb);
-		rx_buf->skb = NULL;
-		return -EIO;
+		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+						  rx_buf->data, rx_buf->len,
+						  PCI_DMA_FROMDEVICE);
+		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
+						   rx_buf->dma_addr))) {
+			dev_kfree_skb_any(rx_buf->skb);
+			rx_buf->skb = NULL;
+			return -EIO;
+		}
+
+		++rx_queue->added_count;
+		++rx_queue->alloc_skb_count;
 	}
 
 	return 0;
 }
 
 /**
- * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
+ * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
  *
  * @rx_queue:		Efx RX queue
- * @rx_buf:		RX buffer structure to populate
  *
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information.  Return a negative error code or 0 on success.
+ * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
+ * and populates struct efx_rx_buffers for each one. Return a negative error
+ * code or 0 on success. If a single page can be split between two buffers,
+ * then the page will either be inserted fully, or not at all.
  */
-static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
-				   struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	int bytes, space, offset;
+	struct efx_rx_buffer *rx_buf;
+	struct page *page;
+	void *page_addr;
+	struct efx_rx_page_state *state;
+	dma_addr_t dma_addr;
+	unsigned index, count;
 
-	bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+	/* We can split a page between two buffers */
+	BUILD_BUG_ON(EFX_RX_BATCH & 1);
 
-	/* If there is space left in the previously allocated page,
-	 * then use it. Otherwise allocate a new one */
-	rx_buf->page = rx_queue->buf_page;
-	if (rx_buf->page == NULL) {
-		dma_addr_t dma_addr;
-
-		rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
-					   efx->rx_buffer_order);
-		if (unlikely(rx_buf->page == NULL))
+	for (count = 0; count < EFX_RX_BATCH; ++count) {
+		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+				   efx->rx_buffer_order);
+		if (unlikely(page == NULL))
 			return -ENOMEM;
-
-		dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
-					0, efx_rx_buf_size(efx),
+		dma_addr = pci_map_page(efx->pci_dev, page, 0,
+					efx_rx_buf_size(efx),
 					PCI_DMA_FROMDEVICE);
-
 		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
-			__free_pages(rx_buf->page, efx->rx_buffer_order);
-			rx_buf->page = NULL;
+			__free_pages(page, efx->rx_buffer_order);
 			return -EIO;
 		}
+		page_addr = page_address(page);
+		state = page_addr;
+		state->refcnt = 0;
+		state->dma_addr = dma_addr;
 
-		rx_queue->buf_page = rx_buf->page;
-		rx_queue->buf_dma_addr = dma_addr;
-		rx_queue->buf_data = (page_address(rx_buf->page) +
-				      EFX_PAGE_IP_ALIGN);
-	}
+		page_addr += sizeof(struct efx_rx_page_state);
+		dma_addr += sizeof(struct efx_rx_page_state);
 
-	rx_buf->len = bytes;
-	rx_buf->data = rx_queue->buf_data;
-	offset = efx_rx_buf_offset(rx_buf);
-	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
+	split:
+		index = rx_queue->added_count & EFX_RXQ_MASK;
+		rx_buf = efx_rx_buffer(rx_queue, index);
+		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+		rx_buf->skb = NULL;
+		rx_buf->page = page;
+		rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
+		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+		++rx_queue->added_count;
+		++rx_queue->alloc_page_count;
+		++state->refcnt;
 
-	/* Try to pack multiple buffers per page */
-	if (efx->rx_buffer_order == 0) {
-		/* The next buffer starts on the next 512 byte boundary */
-		rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
-		offset += ((bytes + 0x1ff) & ~0x1ff);
-
-		space = efx_rx_buf_size(efx) - offset;
-		if (space >= bytes) {
-			/* Refs dropped on kernel releasing each skb */
-			get_page(rx_queue->buf_page);
-			goto out;
+		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
+			/* Use the second half of the page */
+			get_page(page);
+			dma_addr += (PAGE_SIZE >> 1);
+			page_addr += (PAGE_SIZE >> 1);
+			++count;
+			goto split;
 		}
 	}
 
-	/* This is the final RX buffer for this page, so mark it for
-	 * unmapping */
-	rx_queue->buf_page = NULL;
-	rx_buf->unmap_addr = rx_queue->buf_dma_addr;
-
- out:
 	return 0;
 }
 
-/* This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information.
- */
-static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
-			      struct efx_rx_buffer *new_rx_buf)
-{
-	int rc = 0;
-
-	if (rx_queue->channel->rx_alloc_push_pages) {
-		new_rx_buf->skb = NULL;
-		rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
-		rx_queue->alloc_page_count++;
-	} else {
-		new_rx_buf->page = NULL;
-		rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
-		rx_queue->alloc_skb_count++;
-	}
-
-	if (unlikely(rc < 0))
-		EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
-			   rx_queue->queue, rc);
-	return rc;
-}
-
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				struct efx_rx_buffer *rx_buf)
 {
 	if (rx_buf->page) {
+		struct efx_rx_page_state *state;
+
 		EFX_BUG_ON_PARANOID(rx_buf->skb);
-		if (rx_buf->unmap_addr) {
-			pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
+
+		state = page_address(rx_buf->page);
+		if (--state->refcnt == 0) {
+			pci_unmap_page(efx->pci_dev,
+				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       PCI_DMA_FROMDEVICE);
-			rx_buf->unmap_addr = 0;
 		}
 	} else if (likely(rx_buf->skb)) {
 		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
@@ -273,31 +272,84 @@
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
+/* Attempt to resurrect the other receive buffer that used to share this page,
+ * which had previously been passed up to the kernel and freed. */
+static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
+				    struct efx_rx_buffer *rx_buf)
+{
+	struct efx_rx_page_state *state = page_address(rx_buf->page);
+	struct efx_rx_buffer *new_buf;
+	unsigned fill_level, index;
+
+	/* +1 because efx_rx_packet() incremented removed_count. +1 because
+	 * we'd like to insert an additional descriptor whilst leaving
+	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
+	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
+	if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
+		/* We could place "state" on a list, and drain the list in
+		 * efx_fast_push_rx_descriptors(). For now, this will do. */
+		return;
+	}
+
+	++state->refcnt;
+	get_page(rx_buf->page);
+
+	index = rx_queue->added_count & EFX_RXQ_MASK;
+	new_buf = efx_rx_buffer(rx_queue, index);
+	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
+	new_buf->skb = NULL;
+	new_buf->page = rx_buf->page;
+	new_buf->data = (void *)
+		((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
+	new_buf->len = rx_buf->len;
+	++rx_queue->added_count;
+}
+
+/* Recycle the given rx buffer directly back into the rx_queue. There is
+ * always room to add this buffer, because we've just popped a buffer. */
+static void efx_recycle_rx_buffer(struct efx_channel *channel,
+				  struct efx_rx_buffer *rx_buf)
+{
+	struct efx_nic *efx = channel->efx;
+	struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
+	struct efx_rx_buffer *new_buf;
+	unsigned index;
+
+	if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+	    page_count(rx_buf->page) == 1)
+		efx_resurrect_rx_buffer(rx_queue, rx_buf);
+
+	index = rx_queue->added_count & EFX_RXQ_MASK;
+	new_buf = efx_rx_buffer(rx_queue, index);
+
+	memcpy(new_buf, rx_buf, sizeof(*new_buf));
+	rx_buf->page = NULL;
+	rx_buf->skb = NULL;
+	++rx_queue->added_count;
+}
+
 /**
  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
  * @rx_queue:		RX descriptor queue
- * @retry:              Recheck the fill level
  * This will aim to fill the RX descriptor queue up to
  * @rx_queue->@fast_fill_limit. If there is insufficient atomic
- * memory to do so, the caller should retry.
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
  */
-static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
-					  int retry)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 {
-	struct efx_rx_buffer *rx_buf;
-	unsigned fill_level, index;
-	int i, space, rc = 0;
+	struct efx_channel *channel = rx_queue->channel;
+	unsigned fill_level;
+	int space, rc = 0;
 
-	/* Calculate current fill level.  Do this outside the lock,
-	 * because most of the time we'll end up not wanting to do the
-	 * fill anyway.
-	 */
+	/* Calculate current fill level, and exit if we don't need to fill */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
 	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
-
-	/* Don't fill if we don't need to */
 	if (fill_level >= rx_queue->fast_fill_trigger)
-		return 0;
+		goto out;
 
 	/* Record minimum fill level */
 	if (unlikely(fill_level < rx_queue->min_fill)) {
@@ -305,99 +357,47 @@
 			rx_queue->min_fill = fill_level;
 	}
 
-	/* Acquire RX add lock.  If this lock is contended, then a fast
-	 * fill must already be in progress (e.g. in the refill
-	 * tasklet), so we don't need to do anything
-	 */
-	if (!spin_trylock_bh(&rx_queue->add_lock))
-		return -1;
-
- retry:
-	/* Recalculate current fill level now that we have the lock */
-	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 	space = rx_queue->fast_fill_limit - fill_level;
 	if (space < EFX_RX_BATCH)
-		goto out_unlock;
+		goto out;
 
-	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
-		  " level %d to level %d using %s allocation\n",
-		  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
-		  rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");
+	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+		   "RX queue %d fast-filling descriptor ring from"
+		   " level %d to level %d using %s allocation\n",
+		   rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
+		   channel->rx_alloc_push_pages ? "page" : "skb");
 
 	do {
-		for (i = 0; i < EFX_RX_BATCH; ++i) {
-			index = rx_queue->added_count & EFX_RXQ_MASK;
-			rx_buf = efx_rx_buffer(rx_queue, index);
-			rc = efx_init_rx_buffer(rx_queue, rx_buf);
-			if (unlikely(rc))
-				goto out;
-			++rx_queue->added_count;
+		if (channel->rx_alloc_push_pages)
+			rc = efx_init_rx_buffers_page(rx_queue);
+		else
+			rc = efx_init_rx_buffers_skb(rx_queue);
+		if (unlikely(rc)) {
+			/* Ensure that we don't leave the rx queue empty */
+			if (rx_queue->added_count == rx_queue->removed_count)
+				efx_schedule_slow_fill(rx_queue);
+			goto out;
 		}
 	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
 
-	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
-		  "to level %d\n", rx_queue->queue,
-		  rx_queue->added_count - rx_queue->removed_count);
+	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+		   "RX queue %d fast-filled descriptor ring "
+		   "to level %d\n", rx_queue->queue,
+		   rx_queue->added_count - rx_queue->removed_count);
 
  out:
-	/* Send write pointer to card. */
-	efx_nic_notify_rx_desc(rx_queue);
-
-	/* If the fast fill is running inside from the refill tasklet, then
-	 * for SMP systems it may be running on a different CPU to
-	 * RX event processing, which means that the fill level may now be
-	 * out of date. */
-	if (unlikely(retry && (rc == 0)))
-		goto retry;
-
- out_unlock:
-	spin_unlock_bh(&rx_queue->add_lock);
-
-	return rc;
+	if (rx_queue->notified_count != rx_queue->added_count)
+		efx_nic_notify_rx_desc(rx_queue);
 }
 
-/**
- * efx_fast_push_rx_descriptors - push new RX descriptors quickly
- * @rx_queue:		RX descriptor queue
- *
- * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@fast_fill_limit.  If there is insufficient memory to do so,
- * it will schedule a work item to immediately continue the fast fill
- */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
+void efx_rx_slow_fill(unsigned long context)
 {
-	int rc;
+	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
+	struct efx_channel *channel = rx_queue->channel;
 
-	rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
-	if (unlikely(rc)) {
-		/* Schedule the work item to run immediately. The hope is
-		 * that work is immediately pending to free some memory
-		 * (e.g. an RX event or TX completion)
-		 */
-		efx_schedule_slow_fill(rx_queue, 0);
-	}
-}
-
-void efx_rx_work(struct work_struct *data)
-{
-	struct efx_rx_queue *rx_queue;
-	int rc;
-
-	rx_queue = container_of(data, struct efx_rx_queue, work.work);
-
-	if (unlikely(!rx_queue->channel->enabled))
-		return;
-
-	EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
-		  "%d\n", rx_queue->queue, raw_smp_processor_id());
-
+	/* Post an event to cause NAPI to run and refill the queue */
+	efx_nic_generate_fill_event(channel);
 	++rx_queue->slow_fill_count;
-	/* Push new RX descriptors, allowing at least 1 jiffy for
-	 * the kernel to free some more memory. */
-	rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
-	if (rc)
-		efx_schedule_slow_fill(rx_queue, 1);
 }
 
 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
@@ -417,10 +417,12 @@
 	*discard = true;
 
 	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
-		EFX_ERR_RL(efx, " RX queue %d seriously overlength "
-			   "RX event (0x%x > 0x%x+0x%x). Leaking\n",
-			   rx_queue->queue, len, max_len,
-			   efx->type->rx_buffer_padding);
+		if (net_ratelimit())
+			netif_err(efx, rx_err, efx->net_dev,
+				  " RX queue %d seriously overlength "
+				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
+				  rx_queue->queue, len, max_len,
+				  efx->type->rx_buffer_padding);
 		/* If this buffer was skb-allocated, then the meta
 		 * data at the end of the skb will be trashed. So
 		 * we have no choice but to leak the fragment.
@@ -428,8 +430,11 @@
 		*leak_packet = (rx_buf->skb != NULL);
 		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
 	} else {
-		EFX_ERR_RL(efx, " RX queue %d overlength RX event "
-			   "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
+		if (net_ratelimit())
+			netif_err(efx, rx_err, efx->net_dev,
+				  " RX queue %d overlength RX event "
+				  "(0x%x > 0x%x)\n",
+				  rx_queue->queue, len, max_len);
 	}
 
 	rx_queue->channel->n_rx_overlength++;
@@ -449,6 +454,7 @@
 
 	/* Pass the skb/page into the LRO engine */
 	if (rx_buf->page) {
+		struct efx_nic *efx = channel->efx;
 		struct page *page = rx_buf->page;
 		struct sk_buff *skb;
 
@@ -461,6 +467,9 @@
 			return;
 		}
 
+		if (efx->net_dev->features & NETIF_F_RXHASH)
+			skb->rxhash = efx_rx_buf_hash(rx_buf);
+
 		skb_shinfo(skb)->frags[0].page = page;
 		skb_shinfo(skb)->frags[0].page_offset =
 			efx_rx_buf_offset(rx_buf);
@@ -498,6 +507,7 @@
 		   unsigned int len, bool checksummed, bool discard)
 {
 	struct efx_nic *efx = rx_queue->efx;
+	struct efx_channel *channel = rx_queue->channel;
 	struct efx_rx_buffer *rx_buf;
 	bool leak_packet = false;
 
@@ -516,21 +526,23 @@
 	efx_rx_packet__check_len(rx_queue, rx_buf, len,
 				 &discard, &leak_packet);
 
-	EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
-		  rx_queue->queue, index,
-		  (unsigned long long)rx_buf->dma_addr, len,
-		  (checksummed ? " [SUMMED]" : ""),
-		  (discard ? " [DISCARD]" : ""));
+	netif_vdbg(efx, rx_status, efx->net_dev,
+		   "RX queue %d received id %x at %llx+%x %s%s\n",
+		   rx_queue->queue, index,
+		   (unsigned long long)rx_buf->dma_addr, len,
+		   (checksummed ? " [SUMMED]" : ""),
+		   (discard ? " [DISCARD]" : ""));
 
 	/* Discard packet, if instructed to do so */
 	if (unlikely(discard)) {
 		if (unlikely(leak_packet))
-			rx_queue->channel->n_skbuff_leaks++;
+			channel->n_skbuff_leaks++;
 		else
-			/* We haven't called efx_unmap_rx_buffer yet,
-			 * so fini the entire rx_buffer here */
-			efx_fini_rx_buffer(rx_queue, rx_buf);
-		return;
+			efx_recycle_rx_buffer(channel, rx_buf);
+
+		/* Don't hold off the previous receive */
+		rx_buf = NULL;
+		goto out;
 	}
 
 	/* Release card resources - assumes all RX buffers consumed in-order
@@ -547,6 +559,7 @@
 	 * prefetched into cache.
 	 */
 	rx_buf->len = len;
+out:
 	if (rx_queue->channel->rx_pkt)
 		__efx_rx_packet(rx_queue->channel,
 				rx_queue->channel->rx_pkt,
@@ -562,6 +575,9 @@
 	struct efx_nic *efx = channel->efx;
 	struct sk_buff *skb;
 
+	rx_buf->data += efx->type->rx_buffer_hash_size;
+	rx_buf->len -= efx->type->rx_buffer_hash_size;
+
 	/* If we're in loopback test, then pass the packet directly to the
 	 * loopback layer, and free the rx_buf here
 	 */
@@ -574,8 +590,12 @@
 	if (rx_buf->skb) {
 		prefetch(skb_shinfo(rx_buf->skb));
 
+		skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
 		skb_put(rx_buf->skb, rx_buf->len);
 
+		if (efx->net_dev->features & NETIF_F_RXHASH)
+			rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);
+
 		/* Move past the ethernet header. rx_buf->data still points
 		 * at the ethernet header */
 		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
@@ -633,7 +653,8 @@
 	unsigned int rxq_size;
 	int rc;
 
-	EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
+	netif_dbg(efx, probe, efx->net_dev,
+		  "creating RX queue %d\n", rx_queue->queue);
 
 	/* Allocate RX buffers */
 	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
@@ -653,7 +674,8 @@
 {
 	unsigned int max_fill, trigger, limit;
 
-	EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
+	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+		  "initialising RX queue %d\n", rx_queue->queue);
 
 	/* Initialise ptr fields */
 	rx_queue->added_count = 0;
@@ -680,8 +702,10 @@
 	int i;
 	struct efx_rx_buffer *rx_buf;
 
-	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
+	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+		  "shutting down RX queue %d\n", rx_queue->queue);
 
+	del_timer_sync(&rx_queue->slow_fill);
 	efx_nic_fini_rx(rx_queue);
 
 	/* Release RX buffers NB start at index 0 not current HW ptr */
@@ -691,21 +715,12 @@
 			efx_fini_rx_buffer(rx_queue, rx_buf);
 		}
 	}
-
-	/* For a page that is part-way through splitting into RX buffers */
-	if (rx_queue->buf_page != NULL) {
-		pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
-			       efx_rx_buf_size(rx_queue->efx),
-			       PCI_DMA_FROMDEVICE);
-		__free_pages(rx_queue->buf_page,
-			     rx_queue->efx->rx_buffer_order);
-		rx_queue->buf_page = NULL;
-	}
 }
 
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 {
-	EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
+	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+		  "destroying RX queue %d\n", rx_queue->queue);
 
 	efx_nic_remove_rx(rx_queue);
 
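The rx.c rework above splits each DMA-mapped page into two receive buffers, keeping a small efx_rx_page_state refcount at the start of the page; the sibling buffer in the other half of the page is then found by XOR-ing the buffer address with half the page size, as efx_resurrect_rx_buffer() does. A minimal user-space sketch of that address arithmetic (PAGE_SIZE, the state struct and the DMA address below are stand-ins for the sketch, not the driver's definitions):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u			/* assumed page size for this sketch */

struct rx_page_state {			/* stand-in for struct efx_rx_page_state */
	unsigned refcnt;
	uint64_t dma_addr;
};

int main(void)
{
	/* Each half of the page holds one buffer, after the state header. */
	uint64_t page_dma = 0x10000000u;	/* arbitrary, page-aligned example */
	uint64_t buf0 = page_dma + sizeof(struct rx_page_state);
	uint64_t buf1 = buf0 + (PAGE_SIZE >> 1);

	/* XOR-ing with half the page size flips between the two halves,
	 * which is how the driver locates the buffer to resurrect. */
	assert((buf0 ^ (PAGE_SIZE >> 1)) == buf1);
	assert((buf1 ^ (PAGE_SIZE >> 1)) == buf0);

	printf("buffer 0 at %#llx, buffer 1 at %#llx\n",
	       (unsigned long long)buf0, (unsigned long long)buf1);
	return 0;
}
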
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 371e86c..85f015f 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -38,7 +38,7 @@
 	struct udphdr udp;
 	__be16 iteration;
 	const char msg[64];
-} __attribute__ ((packed));
+} __packed;
 
 /* Loopback test source MAC address */
 static const unsigned char payload_source[ETH_ALEN] = {
@@ -123,7 +123,7 @@
 {
 	struct efx_channel *channel;
 
-	EFX_LOG(efx, "testing interrupts\n");
+	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
 	tests->interrupt = -1;
 
 	/* Reset interrupt flag */
@@ -142,16 +142,17 @@
 	efx_nic_generate_interrupt(efx);
 
 	/* Wait for arrival of test interrupt. */
-	EFX_LOG(efx, "waiting for test interrupt\n");
+	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
 	schedule_timeout_uninterruptible(HZ / 10);
 	if (efx->last_irq_cpu >= 0)
 		goto success;
 
-	EFX_ERR(efx, "timed out waiting for interrupt\n");
+	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
 	return -ETIMEDOUT;
 
  success:
-	EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx),
+	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
+		  INT_MODE(efx),
 		efx->last_irq_cpu);
 	tests->interrupt = 1;
 	return 0;
@@ -161,23 +162,18 @@
 static int efx_test_eventq_irq(struct efx_channel *channel,
 			       struct efx_self_tests *tests)
 {
-	unsigned int magic, count;
-
-	/* Channel specific code, limited to 20 bits */
-	magic = (0x00010150 + channel->channel);
-	EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
-		channel->channel, magic);
+	struct efx_nic *efx = channel->efx;
+	unsigned int magic_count, count;
 
 	tests->eventq_dma[channel->channel] = -1;
 	tests->eventq_int[channel->channel] = -1;
 	tests->eventq_poll[channel->channel] = -1;
 
-	/* Reset flag and zero magic word */
+	magic_count = channel->magic_count;
 	channel->efx->last_irq_cpu = -1;
-	channel->eventq_magic = 0;
 	smp_wmb();
 
-	efx_nic_generate_test_event(channel, magic);
+	efx_nic_generate_test_event(channel);
 
 	/* Wait for arrival of interrupt */
 	count = 0;
@@ -187,33 +183,36 @@
 		if (channel->work_pending)
 			efx_process_channel_now(channel);
 
-		if (channel->eventq_magic == magic)
+		if (channel->magic_count != magic_count)
 			goto eventq_ok;
 	} while (++count < 2);
 
-	EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n",
-		channel->channel);
+	netif_err(efx, drv, efx->net_dev,
+		  "channel %d timed out waiting for event queue\n",
+		  channel->channel);
 
 	/* See if interrupt arrived */
 	if (channel->efx->last_irq_cpu >= 0) {
-		EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d "
-			"during event queue test\n", channel->channel,
-			raw_smp_processor_id());
+		netif_err(efx, drv, efx->net_dev,
+			  "channel %d saw interrupt on CPU%d "
+			  "during event queue test\n", channel->channel,
+			  raw_smp_processor_id());
 		tests->eventq_int[channel->channel] = 1;
 	}
 
 	/* Check to see if event was received even if interrupt wasn't */
 	efx_process_channel_now(channel);
-	if (channel->eventq_magic == magic) {
-		EFX_ERR(channel->efx, "channel %d event was generated, but "
-			"failed to trigger an interrupt\n", channel->channel);
+	if (channel->magic_count != magic_count) {
+		netif_err(efx, drv, efx->net_dev,
+			  "channel %d event was generated, but "
+			  "failed to trigger an interrupt\n", channel->channel);
 		tests->eventq_dma[channel->channel] = 1;
 	}
 
 	return -ETIMEDOUT;
  eventq_ok:
-	EFX_LOG(channel->efx, "channel %d event queue passed\n",
-		channel->channel);
+	netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
+		  channel->channel);
 	tests->eventq_dma[channel->channel] = 1;
 	tests->eventq_int[channel->channel] = 1;
 	tests->eventq_poll[channel->channel] = 1;
@@ -266,51 +265,57 @@
 
 	/* Check that header exists */
 	if (pkt_len < sizeof(received->header)) {
-		EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
-			"test\n", pkt_len, LOOPBACK_MODE(efx));
+		netif_err(efx, drv, efx->net_dev,
+			  "saw runt RX packet (length %d) in %s loopback "
+			  "test\n", pkt_len, LOOPBACK_MODE(efx));
 		goto err;
 	}
 
 	/* Check that the ethernet header exists */
 	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
-		EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n",
-			LOOPBACK_MODE(efx));
+		netif_err(efx, drv, efx->net_dev,
+			  "saw non-loopback RX packet in %s loopback test\n",
+			  LOOPBACK_MODE(efx));
 		goto err;
 	}
 
 	/* Check packet length */
 	if (pkt_len != sizeof(*payload)) {
-		EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in "
-			"%s loopback test\n", pkt_len, (int)sizeof(*payload),
-			LOOPBACK_MODE(efx));
+		netif_err(efx, drv, efx->net_dev,
+			  "saw incorrect RX packet length %d (wanted %d) in "
+			  "%s loopback test\n", pkt_len, (int)sizeof(*payload),
+			  LOOPBACK_MODE(efx));
 		goto err;
 	}
 
 	/* Check that IP header matches */
 	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
-		EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n",
-			LOOPBACK_MODE(efx));
+		netif_err(efx, drv, efx->net_dev,
+			  "saw corrupted IP header in %s loopback test\n",
+			  LOOPBACK_MODE(efx));
 		goto err;
 	}
 
 	/* Check that msg and padding matches */
 	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
-		EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n",
-			LOOPBACK_MODE(efx));
+		netif_err(efx, drv, efx->net_dev,
+			  "saw corrupted RX packet in %s loopback test\n",
+			  LOOPBACK_MODE(efx));
 		goto err;
 	}
 
 	/* Check that iteration matches */
 	if (received->iteration != payload->iteration) {
-		EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in "
-			"%s loopback test\n", ntohs(received->iteration),
-			ntohs(payload->iteration), LOOPBACK_MODE(efx));
+		netif_err(efx, drv, efx->net_dev,
+			  "saw RX packet from iteration %d (wanted %d) in "
+			  "%s loopback test\n", ntohs(received->iteration),
+			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
 		goto err;
 	}
 
 	/* Increase correct RX count */
-	EFX_TRACE(efx, "got loopback RX in %s loopback test\n",
-		  LOOPBACK_MODE(efx));
+	netif_vdbg(efx, drv, efx->net_dev,
+		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
 
 	atomic_inc(&state->rx_good);
 	return;
@@ -318,10 +323,10 @@
  err:
 #ifdef EFX_ENABLE_DEBUG
 	if (atomic_read(&state->rx_bad) == 0) {
-		EFX_ERR(efx, "received packet:\n");
+		netif_err(efx, drv, efx->net_dev, "received packet:\n");
 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
 			       buf_ptr, pkt_len, 0);
-		EFX_ERR(efx, "expected packet:\n");
+		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
 			       &state->payload, sizeof(state->payload), 0);
 	}
@@ -402,9 +407,11 @@
 			netif_tx_unlock_bh(efx->net_dev);
 
 		if (rc != NETDEV_TX_OK) {
-			EFX_ERR(efx, "TX queue %d could not transmit packet %d "
-				"of %d in %s loopback test\n", tx_queue->queue,
-				i + 1, state->packet_count, LOOPBACK_MODE(efx));
+			netif_err(efx, drv, efx->net_dev,
+				  "TX queue %d could not transmit packet %d of "
+				  "%d in %s loopback test\n", tx_queue->queue,
+				  i + 1, state->packet_count,
+				  LOOPBACK_MODE(efx));
 
 			/* Defer cleaning up the other skbs for the caller */
 			kfree_skb(skb);
@@ -460,20 +467,22 @@
 		/* Don't free the skbs; they will be picked up on TX
 		 * overflow or channel teardown.
 		 */
-		EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d "
-			"TX completion events in %s loopback test\n",
-			tx_queue->queue, tx_done, state->packet_count,
-			LOOPBACK_MODE(efx));
+		netif_err(efx, drv, efx->net_dev,
+			  "TX queue %d saw only %d out of an expected %d "
+			  "TX completion events in %s loopback test\n",
+			  tx_queue->queue, tx_done, state->packet_count,
+			  LOOPBACK_MODE(efx));
 		rc = -ETIMEDOUT;
 		/* Allow to fall through so we see the RX errors as well */
 	}
 
 	/* We may always be up to a flush away from our desired packet total */
 	if (rx_good != state->packet_count) {
-		EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d "
-			"received packets in %s loopback test\n",
-			tx_queue->queue, rx_good, state->packet_count,
-			LOOPBACK_MODE(efx));
+		netif_dbg(efx, drv, efx->net_dev,
+			  "TX queue %d saw only %d out of an expected %d "
+			  "received packets in %s loopback test\n",
+			  tx_queue->queue, rx_good, state->packet_count,
+			  LOOPBACK_MODE(efx));
 		rc = -ETIMEDOUT;
 		/* Fall through */
 	}
@@ -505,9 +514,10 @@
 			return -ENOMEM;
 		state->flush = false;
 
-		EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
-			"packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
-			state->packet_count);
+		netif_dbg(efx, drv, efx->net_dev,
+			  "TX queue %d testing %s loopback with %d packets\n",
+			  tx_queue->queue, LOOPBACK_MODE(efx),
+			  state->packet_count);
 
 		efx_iterate_state(efx);
 		begin_rc = efx_begin_loopback(tx_queue);
@@ -531,9 +541,10 @@
 		}
 	}
 
-	EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length "
-		"of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
-		state->packet_count);
+	netif_dbg(efx, drv, efx->net_dev,
+		  "TX queue %d passed %s loopback test with a burst length "
+		  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+		  state->packet_count);
 
 	return 0;
 }
@@ -545,7 +556,7 @@
 static int efx_wait_for_link(struct efx_nic *efx)
 {
 	struct efx_link_state *link_state = &efx->link_state;
-	int count;
+	int count, link_up_count = 0;
 	bool link_up;
 
 	for (count = 0; count < 40; count++) {
@@ -567,8 +578,12 @@
 			link_up = !efx->mac_op->check_fault(efx);
 		mutex_unlock(&efx->mac_lock);
 
-		if (link_up)
-			return 0;
+		if (link_up) {
+			if (++link_up_count == 2)
+				return 0;
+		} else {
+			link_up_count = 0;
+		}
 	}
 
 	return -ETIMEDOUT;
@@ -604,15 +619,17 @@
 		rc = __efx_reconfigure_port(efx);
 		mutex_unlock(&efx->mac_lock);
 		if (rc) {
-			EFX_ERR(efx, "unable to move into %s loopback\n",
-				LOOPBACK_MODE(efx));
+			netif_err(efx, drv, efx->net_dev,
+				  "unable to move into %s loopback\n",
+				  LOOPBACK_MODE(efx));
 			goto out;
 		}
 
 		rc = efx_wait_for_link(efx);
 		if (rc) {
-			EFX_ERR(efx, "loopback %s never came up\n",
-				LOOPBACK_MODE(efx));
+			netif_err(efx, drv, efx->net_dev,
+				  "loopback %s never came up\n",
+				  LOOPBACK_MODE(efx));
 			goto out;
 		}
 
@@ -720,7 +737,8 @@
 		rc_reset = rc;
 
 	if (rc_reset) {
-		EFX_ERR(efx, "Unable to recover from chip test\n");
+		netif_err(efx, drv, efx->net_dev,
+			  "Unable to recover from chip test\n");
 		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
 		return rc_reset;
 	}
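
The efx_wait_for_link() change above stops trusting a single "link up" poll and requires two consecutive successful polls before the self-test proceeds. A small sketch of that debounce pattern; poll_link() here is a stand-in for the driver's MAC fault check, not a real API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the hardware poll; reports "up" from the third poll onwards. */
static bool poll_link(int attempt)
{
	return attempt >= 3;
}

/* Return 0 once the link has been seen up twice in a row, -1 on timeout. */
static int wait_for_link(int max_polls)
{
	int link_up_count = 0;

	for (int i = 0; i < max_polls; i++) {
		if (poll_link(i)) {
			if (++link_up_count == 2)
				return 0;
		} else {
			link_up_count = 0;	/* any flap restarts the count */
		}
	}
	return -1;
}

int main(void)
{
	printf("link %s\n", wait_for_link(40) == 0 ? "up" : "timed out");
	return 0;
}
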
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index f2b1e61..3fab030 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -118,10 +118,11 @@
 				  MC_CMD_MAC_NSTATS * sizeof(u64));
 	if (rc)
 		return rc;
-	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
-		(u64)efx->stats_buffer.dma_addr,
-		efx->stats_buffer.addr,
-		(u64)virt_to_phys(efx->stats_buffer.addr));
+	netif_dbg(efx, probe, efx->net_dev,
+		  "stats buffer at %llx (virt %p phys %llx)\n",
+		  (u64)efx->stats_buffer.dma_addr,
+		  efx->stats_buffer.addr,
+		  (u64)virt_to_phys(efx->stats_buffer.addr));
 
 	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
 
@@ -216,7 +217,8 @@
 	efx->nic_data = nic_data;
 
 	if (efx_nic_fpga_ver(efx) != 0) {
-		EFX_ERR(efx, "Siena FPGA not supported\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "Siena FPGA not supported\n");
 		rc = -ENODEV;
 		goto fail1;
 	}
@@ -233,8 +235,8 @@
 
 	rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
 	if (rc) {
-		EFX_ERR(efx, "Failed to read MCPU firmware version - "
-			"rc %d\n", rc);
+		netif_err(efx, probe, efx->net_dev,
+			  "Failed to read MCPU firmware version - rc %d\n", rc);
 		goto fail1; /* MCPU absent? */
 	}
 
@@ -242,17 +244,19 @@
 	 * filter settings. We must do this before we reset the NIC */
 	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
 	if (rc) {
-		EFX_ERR(efx, "Unable to register driver with MCPU\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "Unable to register driver with MCPU\n");
 		goto fail2;
 	}
 	if (already_attached)
 		/* Not a fatal error */
-		EFX_ERR(efx, "Host already registered with MCPU\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "Host already registered with MCPU\n");
 
 	/* Now we can reset the NIC */
 	rc = siena_reset_hw(efx, RESET_TYPE_ALL);
 	if (rc) {
-		EFX_ERR(efx, "failed to reset NIC\n");
+		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
 		goto fail3;
 	}
 
@@ -264,24 +268,23 @@
 		goto fail4;
 	BUG_ON(efx->irq_status.dma_addr & 0x0f);
 
-	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
-		(unsigned long long)efx->irq_status.dma_addr,
-		efx->irq_status.addr,
-		(unsigned long long)virt_to_phys(efx->irq_status.addr));
+	netif_dbg(efx, probe, efx->net_dev,
+		  "INT_KER at %llx (virt %p phys %llx)\n",
+		  (unsigned long long)efx->irq_status.dma_addr,
+		  efx->irq_status.addr,
+		  (unsigned long long)virt_to_phys(efx->irq_status.addr));
 
 	/* Read in the non-volatile configuration */
 	rc = siena_probe_nvconfig(efx);
 	if (rc == -EINVAL) {
-		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
+		netif_err(efx, probe, efx->net_dev,
+			  "NVRAM is invalid therefore using defaults\n");
 		efx->phy_type = PHY_TYPE_NONE;
 		efx->mdio.prtad = MDIO_PRTAD_NONE;
 	} else if (rc) {
 		goto fail5;
 	}
 
-	get_random_bytes(&nic_data->ipv6_rss_key,
-			 sizeof(nic_data->ipv6_rss_key));
-
 	return 0;
 
 fail5:
@@ -301,7 +304,6 @@
  */
 static int siena_init_nic(struct efx_nic *efx)
 {
-	struct siena_nic_data *nic_data = efx->nic_data;
 	efx_oword_t temp;
 	int rc;
 
@@ -326,25 +328,36 @@
 	efx_reado(efx, &temp, FR_AZ_RX_CFG);
 	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
 	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
+	/* Enable hash insertion. This is broken for the 'Falcon' hash
+	 * if IPv6 hashing is also enabled, so also select Toeplitz
+	 * TCP/IPv4 and IPv4 hashes. */
+	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
+	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
 	efx_writeo(efx, &temp, FR_AZ_RX_CFG);
 
+	/* Set hash key for IPv4 */
+	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
 	/* Enable IPv6 RSS */
-	BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) !=
+	BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
 		     2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
 		     FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
-	memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp));
+	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
 	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
-	memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp));
+	memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
 	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
 	EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
 			     FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
-	memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp),
+	memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
 	       FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
 	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
 
 	if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
 		/* No MCDI operation has been defined to set thresholds */
-		EFX_ERR(efx, "ignoring RX flow control thresholds\n");
+		netif_err(efx, hw, efx->net_dev,
+			  "ignoring RX flow control thresholds\n");
 
 	/* Enable event logging */
 	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -565,7 +578,8 @@
 
 	return 0;
  fail:
-	EFX_ERR(efx, "%s failed: type=%d rc=%d\n", __func__, type, rc);
+	netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
+		  __func__, type, rc);
 	return rc;
 }
 
@@ -628,6 +642,7 @@
 	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
 	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
 	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+	.rx_buffer_hash_size = 0x10,
 	.rx_buffer_padding = 0,
 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
 	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
@@ -635,6 +650,7 @@
 				   * channels */
 	.tx_dc_base = 0x88000,
 	.rx_dc_base = 0x68000,
-	.offload_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM,
+	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			     NETIF_F_RXHASH),
 	.reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
 };
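
siena.c now programs a single rx_hash_key into both the IPv4 and IPv6 RSS registers and selects the Toeplitz hash, which the RX path later reports through skb->rxhash. For reference, a minimal sketch of the usual Toeplitz computation (following the standard RSS formulation; the key bytes and input tuple below are illustrative, not taken from the hardware spec):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toeplitz hash: for every set bit of the input, XOR in the 32-bit window of
 * the key starting at that bit position.  Requires key_len >= data_len + 4. */
static uint32_t toeplitz_hash(const uint8_t *key, const uint8_t *data, size_t len)
{
	uint32_t hash = 0;
	uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
			  ((uint32_t)key[2] << 8) | key[3];

	for (size_t i = 0; i < len; i++) {
		for (int bit = 0; bit < 8; bit++) {
			if (data[i] & (0x80 >> bit))
				hash ^= window;
			/* Slide the window one bit further into the key. */
			window <<= 1;
			if (key[i + 4] & (0x80 >> bit))
				window |= 1;
		}
	}
	return hash;
}

int main(void)
{
	uint8_t key[40];
	uint8_t tuple[12];	/* e.g. src IP, dst IP, src port, dst port */

	memset(key, 0x6d, sizeof(key));		/* illustrative key bytes */
	memset(tuple, 0x01, sizeof(tuple));	/* illustrative 4-tuple */

	printf("hash = %#x\n", (unsigned)toeplitz_hash(key, tuple, sizeof(tuple)));
	return 0;
}
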
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f21efe7..6791be9 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -228,7 +228,8 @@
 		boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS,
 					  PCS_BOOT_STATUS_REG);
 		if (boot_stat >= 0) {
-			EFX_LOG(efx, "PHY boot status = %#x\n", boot_stat);
+			netif_dbg(efx, hw, efx->net_dev,
+				  "PHY boot status = %#x\n", boot_stat);
 			switch (boot_stat &
 				((1 << PCS_BOOT_FATAL_ERROR_LBN) |
 				 (3 << PCS_BOOT_PROGRESS_LBN) |
@@ -463,10 +464,11 @@
 			reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
 		} else {
 			reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
-			EFX_ERR(efx, "appears to be plugged into a port"
-				" that is not 10GBASE-T capable. The PHY"
-				" supports 10GBASE-T ONLY, so no link can"
-				" be established\n");
+			netif_err(efx, link, efx->net_dev,
+				  "appears to be plugged into a port"
+				  " that is not 10GBASE-T capable. The PHY"
+				  " supports 10GBASE-T ONLY, so no link can"
+				  " be established\n");
 		}
 		efx_mdio_write(efx, MDIO_MMD_PMAPMD,
 			       PMA_PMD_LED_OVERR_REG, reg);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 6bb12a8..c6942da 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -42,7 +42,7 @@
 		return;
 
 	spin_lock_bh(&channel->tx_stop_lock);
-	EFX_TRACE(efx, "stop TX queue\n");
+	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
 
 	atomic_inc(&channel->tx_stop_count);
 	netif_tx_stop_queue(
@@ -64,7 +64,7 @@
 	local_bh_disable();
 	if (atomic_dec_and_lock(&channel->tx_stop_count,
 				&channel->tx_stop_lock)) {
-		EFX_TRACE(efx, "waking TX queue\n");
+		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
 		netif_tx_wake_queue(
 			netdev_get_tx_queue(
 				efx->net_dev,
@@ -94,8 +94,9 @@
 	if (buffer->skb) {
 		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
 		buffer->skb = NULL;
-		EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
-			  "complete\n", tx_queue->queue, read_ptr);
+		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+			   "TX queue %d transmission id %x complete\n",
+			   tx_queue->queue, tx_queue->read_count);
 	}
 }
 
@@ -300,9 +301,10 @@
 	return NETDEV_TX_OK;
 
  pci_err:
-	EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d "
-		   "fragments for DMA\n", tx_queue->queue, skb->len,
-		   skb_shinfo(skb)->nr_frags + 1);
+	netif_err(efx, tx_err, efx->net_dev,
+		  " TX queue %d could not map skb with %d bytes %d "
+		  "fragments for DMA\n", tx_queue->queue, skb->len,
+		  skb_shinfo(skb)->nr_frags + 1);
 
 	/* Mark the packet as transmitted, and free the SKB ourselves */
 	dev_kfree_skb_any(skb);
@@ -354,9 +356,9 @@
 	while (read_ptr != stop_index) {
 		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
 		if (unlikely(buffer->len == 0)) {
-			EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
-				"completion id %x\n", tx_queue->queue,
-				read_ptr);
+			netif_err(efx, tx_err, efx->net_dev,
+				  "TX queue %d spurious TX completion id %x\n",
+				  tx_queue->queue, read_ptr);
 			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
 			return;
 		}
@@ -431,7 +433,8 @@
 	unsigned int txq_size;
 	int i, rc;
 
-	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
+	netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n",
+		  tx_queue->queue);
 
 	/* Allocate software ring */
 	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
@@ -456,7 +459,8 @@
 
 void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 {
-	EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
+	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+		  "initialising TX queue %d\n", tx_queue->queue);
 
 	tx_queue->insert_count = 0;
 	tx_queue->write_count = 0;
@@ -488,7 +492,8 @@
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
-	EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
+	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+		  "shutting down TX queue %d\n", tx_queue->queue);
 
 	/* Flush TX queue, remove descriptor ring */
 	efx_nic_fini_tx(tx_queue);
@@ -507,7 +512,8 @@
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
-	EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
+	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+		  "destroying TX queue %d\n", tx_queue->queue);
 	efx_nic_remove_tx(tx_queue);
 
 	kfree(tx_queue->buffer);
@@ -639,8 +645,8 @@
 
 	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
 	if (base_kva == NULL) {
-		EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
-			" headers\n");
+		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
+			  "Unable to allocate page for TSO headers\n");
 		return -ENOMEM;
 	}
 
@@ -1124,7 +1130,8 @@
 	return NETDEV_TX_OK;
 
  mem_err:
-	EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
+	netif_err(efx, tx_err, efx->net_dev,
+		  "Out of memory for TSO headers, or PCI mapping error\n");
 	dev_kfree_skb_any(skb);
 	goto unwind;
 
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 518f7fc..782e45a 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -54,7 +54,7 @@
 /* Increase filter depth to avoid RX_RESET */
 #define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
 /* Flushes may never complete */
-#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A
+#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
 /* Leak overlength packets rather than free */
 #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
 
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 501a55f..7ac814d 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -88,6 +88,55 @@
 	.rpadir		= 1,
 	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
 };
+#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
+#define SH_ETH_RESET_DEFAULT	1
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u32 ioaddr = ndev->base_addr;
+
+	if (mdp->duplex) /* Full */
+		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+	else		/* Half */
+		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u32 ioaddr = ndev->base_addr;
+
+	switch (mdp->speed) {
+	case 10: /* 10BASE */
+		ctrl_outl(0, ioaddr + RTRATE);
+		break;
+	case 100:/* 100BASE */
+		ctrl_outl(1, ioaddr + RTRATE);
+		break;
+	default:
+		break;
+	}
+}
+
+/* SH7757 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+	.set_duplex		= sh_eth_set_duplex,
+	.set_rate		= sh_eth_set_rate,
+
+	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+	.rmcr_value	= 0x00000001,
+
+	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+	.apr		= 1,
+	.mpr		= 1,
+	.tpauser	= 1,
+	.hw_swap	= 1,
+	.no_ade		= 1,
+};
 
 #elif defined(CONFIG_CPU_SUBTYPE_SH7763)
 #define SH_ETH_HAS_TSU	1
@@ -1023,7 +1072,9 @@
 	pm_runtime_get_sync(&mdp->pdev->dev);
 
 	ret = request_irq(ndev->irq, sh_eth_interrupt,
-#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764)
+#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+    defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+    defined(CONFIG_CPU_SUBTYPE_SH7757)
 				IRQF_SHARED,
 #else
 				0,
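
The SH7757 support follows sh_eth's existing pattern: each SoC supplies a sh_eth_cpu_data descriptor whose set_duplex/set_rate hooks and register values are chosen at compile time by CONFIG_CPU_SUBTYPE_*. A compressed sketch of that per-SoC ops pattern (names abbreviated; not the driver's real structures):

#include <stdio.h>

/* Per-SoC descriptor: feature flags plus optional hooks, as in sh_eth_cpu_data. */
struct cpu_data {
	void (*set_rate)(int speed);
	int apr, mpr, tpauser, hw_swap;
};

static void soc_a_set_rate(int speed)
{
	/* A real driver would write a rate register here. */
	printf("SoC A: rate register <- %d\n", speed == 100 ? 1 : 0);
}

/* Normally selected with #if defined(CONFIG_CPU_SUBTYPE_xxx); fixed here. */
static const struct cpu_data cd = {
	.set_rate = soc_a_set_rate,
	.apr = 1, .mpr = 1, .tpauser = 1, .hw_swap = 1,
};

static void adjust_link(int speed)
{
	if (cd.set_rate)		/* the hook is optional per SoC */
		cd.set_rate(speed);
}

int main(void)
{
	adjust_link(100);
	return 0;
}
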
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7985165..c762c6a 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4188,17 +4188,13 @@
 static int sky2_set_flags(struct net_device *dev, u32 data)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
+	u32 supported =
+		(sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH;
+	int rc;
 
-	if (data & ~ETH_FLAG_RXHASH)
-		return -EOPNOTSUPP;
-
-	if (data & ETH_FLAG_RXHASH) {
-		if (sky2->hw->flags & SKY2_HW_RSS_BROKEN)
-			return -EINVAL;
-
-		dev->features |= NETIF_F_RXHASH;
-	} else
-		dev->features &= ~NETIF_F_RXHASH;
+	rc = ethtool_op_set_flags(dev, data, supported);
+	if (rc)
+		return rc;
 
 	rx_set_rss(dev);
 
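sky2_set_flags() now delegates to ethtool_op_set_flags(), passing only the flags this particular chip supports (none at all when RSS is broken). Roughly, such a helper rejects any requested flag outside the supported mask and otherwise mirrors the request into dev->features; the sketch below is a hedged approximation of that contract, not the kernel's exact implementation:

#include <errno.h>
#include <stdio.h>

#define FLAG_RXHASH (1u << 0)	/* stand-in for ETH_FLAG_RXHASH/NETIF_F_RXHASH */

struct fake_dev {
	unsigned int features;
};

/* Approximation of the validate-then-apply behaviour. */
static int set_flags(struct fake_dev *dev, unsigned int data, unsigned int supported)
{
	if (data & ~supported)
		return -EINVAL;	/* caller asked for something the HW can't do */

	dev->features = (dev->features & ~supported) | (data & supported);
	return 0;
}

int main(void)
{
	struct fake_dev dev = { .features = 0 };
	unsigned int supported = FLAG_RXHASH;	/* would be 0 if RSS were broken */

	printf("enable rxhash: %d\n", set_flags(&dev, FLAG_RXHASH, supported));
	printf("unknown flag:  %d\n", set_flags(&dev, 1u << 5, supported));
	return 0;
}
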
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 084eff2..61891a6 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2161,21 +2161,21 @@
 	__le16	length;	/* also vlan tag or checksum start */
 	u8	ctrl;
 	u8	opcode;
-} __attribute((packed));
+} __packed;
 
 struct sky2_rx_le {
 	__le32	addr;
 	__le16	length;
 	u8	ctrl;
 	u8	opcode;
-} __attribute((packed));
+} __packed;
 
 struct sky2_status_le {
 	__le32	status;	/* also checksum */
 	__le16	length;	/* also vlan tag */
 	u8	css;
 	u8	opcode;
-} __attribute((packed));
+} __packed;
 
 struct tx_ring_info {
 	struct sk_buff	*skb;
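
The sky2.h hunks replace the open-coded attribute with the kernel's __packed macro; both tell the compiler not to insert padding between members, which these hardware descriptor layouts rely on. A standalone illustration of what packing changes (the struct below is an example, not sky2's descriptor):

#include <stdint.h>
#include <stdio.h>

/* Natural layout: the compiler pads 'ctrl' so 'addr' stays 4-byte aligned. */
struct padded {
	uint8_t  ctrl;
	uint32_t addr;
};

/* Packed layout: members are laid out back to back, as the hardware expects. */
struct packed_desc {
	uint8_t  ctrl;
	uint32_t addr;
} __attribute__((packed));

int main(void)
{
	printf("padded: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct padded), sizeof(struct packed_desc));
	/* Typically prints "padded: 8 bytes, packed: 5 bytes". */
	return 0;
}
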
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 74b7ae7..a42b687 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -562,7 +562,6 @@
 	unsigned int tx_done;
 	struct napi_struct napi;
 	struct net_device *dev;
-	struct net_device_stats stats;
 	struct pci_dev *pci_dev;
 #ifdef VLAN_SUPPORT
 	struct vlan_group *vlgrp;
@@ -1174,7 +1173,7 @@
 	/* Trigger an immediate transmit demand. */
 
 	dev->trans_start = jiffies; /* prevent tx timeout */
-	np->stats.tx_errors++;
+	dev->stats.tx_errors++;
 	netif_wake_queue(dev);
 }
 
@@ -1265,7 +1264,7 @@
 			}
 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
 				status |= TxCalTCP;
-				np->stats.tx_compressed++;
+				dev->stats.tx_compressed++;
 			}
 			status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
 
@@ -1374,7 +1373,7 @@
 				printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
 				       dev->name, np->dirty_tx, np->tx_done, tx_status);
 			if ((tx_status & 0xe0000000) == 0xa0000000) {
-				np->stats.tx_packets++;
+				dev->stats.tx_packets++;
 			} else if ((tx_status & 0xe0000000) == 0x80000000) {
 				u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
 				struct sk_buff *skb = np->tx_info[entry].skb;
@@ -1462,9 +1461,9 @@
 			/* There was an error. */
 			if (debug > 2)
 				printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
-			np->stats.rx_errors++;
+			dev->stats.rx_errors++;
 			if (desc_status & RxFIFOErr)
-				np->stats.rx_fifo_errors++;
+				dev->stats.rx_fifo_errors++;
 			goto next_rx;
 		}
 
@@ -1515,7 +1514,7 @@
 #endif
 		if (le16_to_cpu(desc->status2) & 0x0100) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			np->stats.rx_compressed++;
+			dev->stats.rx_compressed++;
 		}
 		/*
 		 * This feature doesn't seem to be working, at least
@@ -1547,7 +1546,7 @@
 		} else
 #endif /* VLAN_SUPPORT */
 			netif_receive_skb(skb);
-		np->stats.rx_packets++;
+		dev->stats.rx_packets++;
 
 	next_rx:
 		np->cur_rx++;
@@ -1717,12 +1716,12 @@
 			printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
 	}
 	if (intr_status & IntrRxGFPDead) {
-		np->stats.rx_fifo_errors++;
-		np->stats.rx_errors++;
+		dev->stats.rx_fifo_errors++;
+		dev->stats.rx_errors++;
 	}
 	if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
-		np->stats.tx_fifo_errors++;
-		np->stats.tx_errors++;
+		dev->stats.tx_fifo_errors++;
+		dev->stats.tx_errors++;
 	}
 	if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
 		printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
@@ -1736,24 +1735,24 @@
 	void __iomem *ioaddr = np->base;
 
 	/* This adapter architecture needs no SMP locks. */
-	np->stats.tx_bytes = readl(ioaddr + 0x57010);
-	np->stats.rx_bytes = readl(ioaddr + 0x57044);
-	np->stats.tx_packets = readl(ioaddr + 0x57000);
-	np->stats.tx_aborted_errors =
+	dev->stats.tx_bytes = readl(ioaddr + 0x57010);
+	dev->stats.rx_bytes = readl(ioaddr + 0x57044);
+	dev->stats.tx_packets = readl(ioaddr + 0x57000);
+	dev->stats.tx_aborted_errors =
 		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
-	np->stats.tx_window_errors = readl(ioaddr + 0x57018);
-	np->stats.collisions =
+	dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
+	dev->stats.collisions =
 		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
 
 	/* The chip only need report frame silently dropped. */
-	np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
+	dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
 	writew(0, ioaddr + RxDMAStatus);
-	np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
-	np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
-	np->stats.rx_length_errors = readl(ioaddr + 0x57058);
-	np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
+	dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
+	dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
+	dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
+	dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
 
-	return &np->stats;
+	return &dev->stats;
 }
 
 
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 1513123..b6ae53b 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -142,7 +142,6 @@
 
 struct priv
 {
-	struct net_device_stats stats;
 	unsigned long base;
 	char *memtop;
 	long int lock;
@@ -788,10 +787,10 @@
 						skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
 						skb->protocol=eth_type_trans(skb,dev);
 						netif_rx(skb);
-						p->stats.rx_packets++;
+						dev->stats.rx_packets++;
 					}
 					else
-						p->stats.rx_dropped++;
+						dev->stats.rx_dropped++;
 				}
 				else
 				{
@@ -812,13 +811,13 @@
 					totlen += rstat & RBD_MASK;
 					rbd->status = 0;
 					printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
-					p->stats.rx_dropped++;
+					dev->stats.rx_dropped++;
 			 }
 		}
 		else /* frame !(ok), only with 'save-bad-frames' */
 		{
 			printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
-			p->stats.rx_errors++;
+			dev->stats.rx_errors++;
 		}
 		p->rfd_top->stat_high = 0;
 		p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
@@ -885,7 +884,7 @@
 {
 	struct priv *p = netdev_priv(dev);
 
-	p->stats.rx_errors++;
+	dev->stats.rx_errors++;
 
 	WAIT_4_SCB_CMD();		/* wait for the last cmd, WAIT_4_FULLSTAT?? */
 	p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
@@ -918,29 +917,29 @@
 
 	if(status & STAT_OK)
 	{
-		p->stats.tx_packets++;
-		p->stats.collisions += (status & TCMD_MAXCOLLMASK);
+		dev->stats.tx_packets++;
+		dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
 	}
 	else
 	{
-		p->stats.tx_errors++;
+		dev->stats.tx_errors++;
 		if(status & TCMD_LATECOLL) {
 			printk("%s: late collision detected.\n",dev->name);
-			p->stats.collisions++;
+			dev->stats.collisions++;
 		}
 		else if(status & TCMD_NOCARRIER) {
-			p->stats.tx_carrier_errors++;
+			dev->stats.tx_carrier_errors++;
 			printk("%s: no carrier detected.\n",dev->name);
 		}
 		else if(status & TCMD_LOSTCTS)
 			printk("%s: loss of CTS detected.\n",dev->name);
 		else if(status & TCMD_UNDERRUN) {
-			p->stats.tx_fifo_errors++;
+			dev->stats.tx_fifo_errors++;
 			printk("%s: DMA underrun detected.\n",dev->name);
 		}
 		else if(status & TCMD_MAXCOLL) {
 			printk("%s: Max. collisions exceeded.\n",dev->name);
-			p->stats.collisions += 16;
+			dev->stats.collisions += 16;
 		}
 	}
 
@@ -1129,12 +1128,12 @@
 	ovrn = swab16(p->scb->ovrn_errs);
 	p->scb->ovrn_errs = 0;
 
-	p->stats.rx_crc_errors += crc;
-	p->stats.rx_fifo_errors += ovrn;
-	p->stats.rx_frame_errors += aln;
-	p->stats.rx_dropped += rsc;
+	dev->stats.rx_crc_errors += crc;
+	dev->stats.rx_fifo_errors += ovrn;
+	dev->stats.rx_frame_errors += aln;
+	dev->stats.rx_dropped += rsc;
 
-	return &p->stats;
+	return &dev->stats;
 }
 
 /********************************************************
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index cff98d0..67e3b71 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -334,7 +334,7 @@
 	u32 va_lo;
 	u32 va_hi;
 	struct pbl pbl[0];	/* Fragments */
-} __attribute__ ((packed));
+} __packed;
 
 /* Register region size */
 #define BDX_REGS_SIZE	  0x1000
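
tehuti.h's descriptor ends with "struct pbl pbl[0]", a zero-length (flexible) array: the fragment list is stored inline, immediately after the fixed header, in a single allocation sized for however many fragments the packet needs. A small sketch of that allocation pattern using the C99 flexible-array spelling (the structs below are stand-ins, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct frag {			/* stand-in for struct pbl */
	uint32_t pa_lo, pa_hi, len;
};

struct desc {			/* fixed header followed by inline fragments */
	uint16_t length;
	uint16_t nr_frags;
	struct frag frags[];	/* C99 flexible array member ([0] in the driver) */
};

int main(void)
{
	unsigned int nr_frags = 3;
	struct desc *d = malloc(sizeof(*d) + nr_frags * sizeof(d->frags[0]));

	if (!d)
		return 1;
	d->nr_frags = nr_frags;
	for (unsigned int i = 0; i < nr_frags; i++)
		d->frags[i].len = 64 * (i + 1);

	printf("descriptor with %u inline fragments\n", d->nr_frags);
	free(d);
	return 0;
}
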
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 573054a..289cdc5 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
 #include "tg3.h"
 
 #define DRV_MODULE_NAME		"tg3"
-#define DRV_MODULE_VERSION	"3.110"
-#define DRV_MODULE_RELDATE	"April 9, 2010"
+#define DRV_MODULE_VERSION	"3.111"
+#define DRV_MODULE_RELDATE	"June 5, 2010"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -145,8 +145,6 @@
 #define TG3_RX_JMB_BUFF_RING_SIZE \
 	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
 
-#define TG3_RSS_MIN_NUM_MSIX_VECS	2
-
 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
  * that are at least dword aligned when used in PCIX mode.  The driver
  * works around this bug by double copying the packet.  This workaround
@@ -272,6 +270,7 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -585,18 +584,23 @@
 static void tg3_ape_lock_init(struct tg3 *tp)
 {
 	int i;
+	u32 regbase;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+		regbase = TG3_APE_LOCK_GRANT;
+	else
+		regbase = TG3_APE_PER_LOCK_GRANT;
 
 	/* Make sure the driver hasn't any stale locks. */
 	for (i = 0; i < 8; i++)
-		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
-				APE_LOCK_GRANT_DRIVER);
+		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
 }
 
 static int tg3_ape_lock(struct tg3 *tp, int locknum)
 {
 	int i, off;
 	int ret = 0;
-	u32 status;
+	u32 status, req, gnt;
 
 	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
 		return 0;
@@ -609,13 +613,21 @@
 		return -EINVAL;
 	}
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+		req = TG3_APE_LOCK_REQ;
+		gnt = TG3_APE_LOCK_GRANT;
+	} else {
+		req = TG3_APE_PER_LOCK_REQ;
+		gnt = TG3_APE_PER_LOCK_GRANT;
+	}
+
 	off = 4 * locknum;
 
-	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
+	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
 
 	/* Wait for up to 1 millisecond to acquire lock. */
 	for (i = 0; i < 100; i++) {
-		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
+		status = tg3_ape_read32(tp, gnt + off);
 		if (status == APE_LOCK_GRANT_DRIVER)
 			break;
 		udelay(10);
@@ -623,7 +635,7 @@
 
 	if (status != APE_LOCK_GRANT_DRIVER) {
 		/* Revoke the lock request. */
-		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
+		tg3_ape_write32(tp, gnt + off,
 				APE_LOCK_GRANT_DRIVER);
 
 		ret = -EBUSY;
@@ -634,7 +646,7 @@
 
 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
 {
-	int off;
+	u32 gnt;
 
 	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
 		return;
@@ -647,8 +659,12 @@
 		return;
 	}
 
-	off = 4 * locknum;
-	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+		gnt = TG3_APE_LOCK_GRANT;
+	else
+		gnt = TG3_APE_PER_LOCK_GRANT;
+
+	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
 }
 
 static void tg3_disable_ints(struct tg3 *tp)
@@ -1069,14 +1085,11 @@
 	u32 reg;
 	struct phy_device *phydev;
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
-		u32 funcnum, is_serdes;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+		u32 is_serdes;
 
-		funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
-		if (funcnum)
-			tp->phy_addr = 2;
-		else
-			tp->phy_addr = 1;
+		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
 
 		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
@@ -1589,7 +1602,8 @@
 	u32 reg;
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
-		(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
 	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
 		return;
 
@@ -1964,7 +1978,8 @@
 		}
 	}
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
 	    (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
 		return 0;
 
@@ -2049,6 +2064,7 @@
 
 	/* The GPIOs do something completely different on 57765. */
 	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 		return;
 
@@ -4191,6 +4207,8 @@
 					current_duplex = DUPLEX_FULL;
 				else
 					current_duplex = DUPLEX_HALF;
+			} else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+				/* Link is up via parallel detect */
 			} else {
 				current_link_up = 0;
 			}
@@ -6212,6 +6230,8 @@
 	for (j = 0; j < tp->irq_cnt; j++) {
 		struct tg3_napi *tnapi = &tp->napi[j];
 
+		tg3_rx_prodring_free(tp, &tp->prodring[j]);
+
 		if (!tnapi->tx_buffers)
 			continue;
 
@@ -6247,8 +6267,6 @@
 
 			dev_kfree_skb_any(skb);
 		}
-
-		tg3_rx_prodring_free(tp, &tp->prodring[j]);
 	}
 }
 
@@ -6782,7 +6800,8 @@
 	/* Allow reads and writes to the APE register and memory space. */
 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
-		       PCISTATE_ALLOW_APE_SHMEM_WR;
+		       PCISTATE_ALLOW_APE_SHMEM_WR |
+		       PCISTATE_ALLOW_APE_PSPACE_WR;
 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
 
 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
@@ -7069,6 +7088,7 @@
 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
 		val = tr32(0x7c00);
 
@@ -7504,7 +7524,8 @@
 
 
 	/* Disable all receive return rings but the first. */
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
 	else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
@@ -7720,7 +7741,8 @@
 		 */
 		val = tr32(TG3PCI_PCISTATE);
 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
-		       PCISTATE_ALLOW_APE_SHMEM_WR;
+		       PCISTATE_ALLOW_APE_SHMEM_WR |
+		       PCISTATE_ALLOW_APE_PSPACE_WR;
 		tw32(TG3PCI_PCISTATE, val);
 	}
 
@@ -7741,6 +7763,7 @@
 		return err;
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
 		val = tr32(TG3PCI_DMA_RW_CTRL) &
 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
@@ -7869,7 +7892,8 @@
 	     ((u64) tpr->rx_std_mapping >> 32));
 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
 		     NIC_SRAM_RX_BUFFER_DESC);
 
@@ -7894,7 +7918,8 @@
 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 			     (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
 			     BDINFO_FLAGS_USE_EXT_RECV);
-			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+			if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
+			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
 		} else {
@@ -7903,6 +7928,7 @@
 		}
 
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 			val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
 			      (TG3_RX_STD_DMA_SZ << 2);
@@ -7921,6 +7947,7 @@
 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
 		tw32(STD_REPLENISH_LWM, 32);
 		tw32(JMB_REPLENISH_LWM, 16);
@@ -7956,7 +7983,8 @@
 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
 		      RDMAC_MODE_LNGREAD_ENAB);
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8195,6 +8223,9 @@
 	}
 
 	tp->tx_mode = TX_MODE_ENABLE;
+	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
 	tw32_f(MAC_TX_MODE, tp->tx_mode);
 	udelay(100);
 
@@ -8206,7 +8237,7 @@
 		for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
 			int idx = i % sizeof(val);
 
-			ent[idx] = i % (tp->irq_cnt - 1);
+			ent[idx] = (i % (tp->irq_cnt - 1)) + 1;
 			if (idx == sizeof(val) - 1) {
 				tw32(reg, val);
 				reg += 4;
@@ -8511,8 +8542,10 @@
 				}
 				tg3_setup_phy(tp, 0);
 			}
-		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+		} else if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
+			   !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 			tg3_serdes_parallel_detect(tp);
+		}
 
 		tp->timer_counter = tp->timer_multiplier;
 	}
@@ -8606,6 +8639,7 @@
 	 * observable way to know whether the interrupt was delivered.
 	 */
 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
 	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
@@ -8650,6 +8684,7 @@
 	if (intr_ok) {
 		/* Reenable MSI one shot mode. */
 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
 		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
@@ -8775,9 +8810,9 @@
 	}
 
 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
-	if (rc != 0) {
-		if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
-			return false;
+	if (rc < 0) {
+		return false;
+	} else if (rc != 0) {
 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
 			return false;
 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
@@ -8785,16 +8820,19 @@
 		tp->irq_cnt = rc;
 	}
 
-	tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
-
 	for (i = 0; i < tp->irq_max; i++)
 		tp->napi[i].irq_vec = msix_ent[i].vector;
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
-		tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
-		tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
-	} else
-		tp->dev->real_num_tx_queues = 1;
+	tp->dev->real_num_tx_queues = 1;
+	if (tp->irq_cnt > 1) {
+		tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+			tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
+			tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
+		}
+	}
 
 	return true;
 }
@@ -8943,6 +8981,7 @@
 		}
 
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
 		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
 		    (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
@@ -10554,7 +10593,8 @@
 	int err = 0;
 	int i;
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 		mem_tbl = mem_tbl_5717;
 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 		mem_tbl = mem_tbl_57765;
@@ -11634,7 +11674,8 @@
 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 			tg3_get_57780_nvram_info(tp);
-		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 			tg3_get_5717_nvram_info(tp);
 		else
 			tg3_get_nvram_info(tp);
@@ -12070,11 +12111,10 @@
 
 		tp->phy_id = eeprom_phy_id;
 		if (eeprom_phy_serdes) {
-			if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
-			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
-				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
-			else
+			if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+			else
+				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
 		}
 
 		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
@@ -12804,7 +12844,8 @@
 
 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
-		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
 			pci_read_config_dword(tp->pdev,
 					      TG3PCI_GEN2_PRODID_ASICREV,
 					      &prod_id_asic_rev);
@@ -12970,6 +13011,7 @@
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 		tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
 
@@ -12999,6 +13041,7 @@
 
 	/* Determine TSO capabilities */
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
 	else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
@@ -13036,6 +13079,7 @@
 		}
 
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
 			tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
 			tp->irq_max = TG3_IRQ_MAX_VECS;
@@ -13043,6 +13087,7 @@
 	}
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 		tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
 	else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
@@ -13051,6 +13096,7 @@
 	}
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 		tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
 
@@ -13242,7 +13288,8 @@
 		 * APE register and memory space.
 		 */
 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
-				 PCISTATE_ALLOW_APE_SHMEM_WR;
+				 PCISTATE_ALLOW_APE_SHMEM_WR |
+				 PCISTATE_ALLOW_APE_PSPACE_WR;
 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
 				       pci_state_reg);
 	}
@@ -13252,6 +13299,7 @@
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
 
@@ -13332,6 +13380,7 @@
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
@@ -13580,9 +13629,12 @@
 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
 		else
 			tg3_nvram_unlock(tp);
-	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
-		if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+		if (PCI_FUNC(tp->pdev->devfn) & 1)
 			mac_offset = 0xcc;
+		if (PCI_FUNC(tp->pdev->devfn) > 1)
+			mac_offset += 0x18c;
 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 		mac_offset = 0x10;
 
@@ -13668,6 +13720,7 @@
 #endif
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
 		goto out;
@@ -13880,6 +13933,7 @@
 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 		goto out;
 
@@ -14079,6 +14133,7 @@
 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
 {
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
 		tp->bufmgr_config.mbuf_read_dma_low_water =
 			DEFAULT_MB_RDMA_LOW_WATER_5705;
@@ -14156,6 +14211,7 @@
 	case TG3_PHY_ID_BCM5718C:	return "5718C";
 	case TG3_PHY_ID_BCM5718S:	return "5718S";
 	case TG3_PHY_ID_BCM57765:	return "57765";
+	case TG3_PHY_ID_BCM5719C:	return "5719C";
 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
 	case 0:			return "serdes";
 	default:		return "unknown";
@@ -14404,7 +14460,8 @@
 	}
 
 	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
-	    tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+	    tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
 		dev->netdev_ops = &tg3_netdev_ops;
 	else
 		dev->netdev_ops = &tg3_netdev_ops_dma_bug;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ce9c491..6b6af76 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -53,6 +53,7 @@
 #define  TG3PCI_DEVICE_TIGON3_57765	 0x16b4
 #define  TG3PCI_DEVICE_TIGON3_57791	 0x16b2
 #define  TG3PCI_DEVICE_TIGON3_57795	 0x16b6
+#define  TG3PCI_DEVICE_TIGON3_5719	 0x1657
 /* 0x04 --> 0x2c unused */
 #define TG3PCI_SUBVENDOR_ID_BROADCOM		PCI_VENDOR_ID_BROADCOM
 #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6	0x1644
@@ -160,6 +161,7 @@
 #define   ASIC_REV_57780		 0x57780
 #define   ASIC_REV_5717			 0x5717
 #define   ASIC_REV_57765		 0x57785
+#define   ASIC_REV_5719			 0x5719
 #define  GET_CHIP_REV(CHIP_REV_ID)	((CHIP_REV_ID) >> 8)
 #define   CHIPREV_5700_AX		 0x70
 #define   CHIPREV_5700_BX		 0x71
@@ -231,6 +233,7 @@
 #define  PCISTATE_RETRY_SAME_DMA	 0x00002000
 #define  PCISTATE_ALLOW_APE_CTLSPC_WR	 0x00010000
 #define  PCISTATE_ALLOW_APE_SHMEM_WR	 0x00020000
+#define  PCISTATE_ALLOW_APE_PSPACE_WR	 0x00040000
 #define TG3PCI_CLOCK_CTRL		0x00000074
 #define  CLOCK_CTRL_CORECLK_DISABLE	 0x00000200
 #define  CLOCK_CTRL_RXCLK_DISABLE	 0x00000400
@@ -468,6 +471,7 @@
 #define  TX_MODE_FLOW_CTRL_ENABLE	 0x00000010
 #define  TX_MODE_BIG_BCKOFF_ENABLE	 0x00000020
 #define  TX_MODE_LONG_PAUSE_ENABLE	 0x00000040
+#define  TX_MODE_MBUF_LOCKUP_FIX	 0x00000100
 #define MAC_TX_STATUS			0x00000460
 #define  TX_STATUS_XOFFED		 0x00000001
 #define  TX_STATUS_SENT_XOFF		 0x00000002
@@ -1071,10 +1075,8 @@
 #define TG3_CPMU_HST_ACC		0x0000361c
 #define  CPMU_HST_ACC_MACCLK_MASK	 0x001f0000
 #define  CPMU_HST_ACC_MACCLK_6_25	 0x00130000
-/* 0x3620 --> 0x362c unused */
+/* 0x3620 --> 0x3630 unused */
 
-#define TG3_CPMU_STATUS			0x0000362c
-#define  TG3_CPMU_STATUS_PCIE_FUNC	 0x20000000
 #define TG3_CPMU_CLCK_STAT		0x00003630
 #define  CPMU_CLCK_STAT_MAC_CLCK_MASK	 0x001f0000
 #define  CPMU_CLCK_STAT_MAC_CLCK_62_5	 0x00000000
@@ -2209,6 +2211,11 @@
 #define  APE_EVENT_STATUS_STATE_SUSPEND	 0x00040000
 #define  APE_EVENT_STATUS_EVENT_PENDING	 0x80000000
 
+#define TG3_APE_PER_LOCK_REQ		0x8400
+#define  APE_LOCK_PER_REQ_DRIVER	 0x00001000
+#define TG3_APE_PER_LOCK_GRANT		0x8420
+#define  APE_PER_LOCK_GRANT_DRIVER	 0x00001000
+
 /* APE convenience enumerations. */
 #define TG3_APE_LOCK_GRC                1
 #define TG3_APE_LOCK_MEM                4
@@ -2942,6 +2949,7 @@
 #define TG3_PHY_ID_BCM5718C		0x5c0d8a00
 #define TG3_PHY_ID_BCM5718S		0xbc050ff0
 #define TG3_PHY_ID_BCM57765		0x5c0d8a40
+#define TG3_PHY_ID_BCM5719C		0x5c0d8a20
 #define TG3_PHY_ID_BCM5906		0xdc00ac40
 #define TG3_PHY_ID_BCM8002		0x60010140
 #define TG3_PHY_ID_INVALID		0xffffffff
@@ -2965,7 +2973,8 @@
 	 (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \
 	 (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
 	 (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
-	 (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM8002)
+	 (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
+	 (X) == TG3_PHY_ID_BCM8002)
 
 	u32				led_ctrl;
 	u32				phy_otp;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index c0e7000..9609626 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -262,13 +262,13 @@
 	u16			csr13;
 	u16			csr14;
 	u16			csr15;
-} __attribute__((packed));
+} __packed;
 
 struct de_srom_info_leaf {
 	u16			default_media;
 	u8			n_blocks;
 	u8			unused;
-} __attribute__((packed));
+} __packed;
 
 struct de_desc {
 	__le32			opts1;
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 6002e65..3031ed9 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -120,8 +120,8 @@
 			  0x00, 0x06  /* ttm bit map */
 			};
 
-		tp->mtable = (struct mediatable *)
-			kmalloc(sizeof(struct mediatable) + sizeof(struct medialeaf), GFP_KERNEL);
+		tp->mtable = kmalloc(sizeof(struct mediatable) +
+				     sizeof(struct medialeaf), GFP_KERNEL);
 
 		if (tp->mtable == NULL)
 			return; /* Horrible, impossible failure. */
@@ -227,9 +227,9 @@
 		        return;
 		}
 
-		mtable = (struct mediatable *)
-			kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf),
-					GFP_KERNEL);
+		mtable = kmalloc(sizeof(struct mediatable) +
+				 count * sizeof(struct medialeaf),
+				 GFP_KERNEL);
 		if (mtable == NULL)
 			return;				/* Horrible, impossible failure. */
 		last_mediatable = tp->mtable = mtable;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 0afa2d4..e525875 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -20,6 +20,7 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/netdevice.h>
+#include <linux/ethtool.h>
 #include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
@@ -51,22 +52,23 @@
 
 
 enum tbl_flag {
-	HAS_MII			= 0x0001,
-	HAS_MEDIA_TABLE		= 0x0002,
-	CSR12_IN_SROM		= 0x0004,
-	ALWAYS_CHECK_MII	= 0x0008,
-	HAS_ACPI		= 0x0010,
-	MC_HASH_ONLY		= 0x0020, /* Hash-only multicast filter. */
-	HAS_PNICNWAY		= 0x0080,
-	HAS_NWAY		= 0x0040, /* Uses internal NWay xcvr. */
-	HAS_INTR_MITIGATION	= 0x0100,
-	IS_ASIX			= 0x0200,
-	HAS_8023X		= 0x0400,
-	COMET_MAC_ADDR		= 0x0800,
-	HAS_PCI_MWI		= 0x1000,
-	HAS_PHY_IRQ		= 0x2000,
-	HAS_SWAPPED_SEEPROM	= 0x4000,
-	NEEDS_FAKE_MEDIA_TABLE	= 0x8000,
+	HAS_MII			= 0x00001,
+	HAS_MEDIA_TABLE		= 0x00002,
+	CSR12_IN_SROM		= 0x00004,
+	ALWAYS_CHECK_MII	= 0x00008,
+	HAS_ACPI		= 0x00010,
+	MC_HASH_ONLY		= 0x00020, /* Hash-only multicast filter. */
+	HAS_PNICNWAY		= 0x00080,
+	HAS_NWAY		= 0x00040, /* Uses internal NWay xcvr. */
+	HAS_INTR_MITIGATION	= 0x00100,
+	IS_ASIX			= 0x00200,
+	HAS_8023X		= 0x00400,
+	COMET_MAC_ADDR		= 0x00800,
+	HAS_PCI_MWI		= 0x01000,
+	HAS_PHY_IRQ		= 0x02000,
+	HAS_SWAPPED_SEEPROM	= 0x04000,
+	NEEDS_FAKE_MEDIA_TABLE	= 0x08000,
+	COMET_PM		= 0x10000,
 };
 
 
@@ -120,6 +122,11 @@
 	CSR13 = 0x68,
 	CSR14 = 0x70,
 	CSR15 = 0x78,
+	CSR18 = 0x88,
+	CSR19 = 0x8c,
+	CSR20 = 0x90,
+	CSR27 = 0xAC,
+	CSR28 = 0xB0,
 };
 
 /* register offset and bits for CFDD PCI config reg */
@@ -289,6 +296,30 @@
 	csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd),
 };
 
+enum tulip_comet_csr13_bits {
+	/* The LINKOFFE and LINKONE bits work in conjunction with LSCE,
+	 * i.e. they determine which link status transition triggers a
+	 * wake-up when LSCE is enabled */
+	comet_csr13_linkoffe = (1 << 17),
+	comet_csr13_linkone = (1 << 16),
+	comet_csr13_wfre = (1 << 10),
+	comet_csr13_mpre = (1 << 9),
+	comet_csr13_lsce = (1 << 8),
+	comet_csr13_wfr = (1 << 2),
+	comet_csr13_mpr = (1 << 1),
+	comet_csr13_lsc = (1 << 0),
+};
+
+enum tulip_comet_csr18_bits {
+	comet_csr18_pmes_sticky = (1 << 24),
+	comet_csr18_pm_mode = (1 << 19),
+	comet_csr18_apm_mode = (1 << 18),
+	comet_csr18_d3a = (1 << 7),
+};
+
+enum tulip_comet_csr20_bits {
+	comet_csr20_pmes = (1 << 15),
+};
 
 /* Keep the ring sizes a power of two for efficiency.
    Making the Tx ring too large decreases the effectiveness of channel
@@ -411,6 +442,7 @@
 	unsigned int csr6;	/* Current CSR6 control settings. */
 	unsigned char eeprom[EEPROM_SIZE];	/* Serial EEPROM contents. */
 	void (*link_change) (struct net_device * dev, int csr5);
+	struct ethtool_wolinfo wolinfo;		/* WOL settings */
 	u16 sym_advertise, mii_advertise; /* NWay capabilities advertised.  */
 	u16 lpar;		/* 21143 Link partner ability. */
 	u16 advertising[4];
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 254643e..03e96b9 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -30,7 +30,6 @@
 #include <linux/etherdevice.h>
 #include <linux/delay.h>
 #include <linux/mii.h>
-#include <linux/ethtool.h>
 #include <linux/crc32.h>
 #include <asm/unaligned.h>
 #include <asm/uaccess.h>
@@ -272,6 +271,7 @@
 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void set_rx_mode(struct net_device *dev);
+static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void poll_tulip(struct net_device *dev);
 #endif
@@ -309,6 +309,11 @@
 	/* Wake the chip from sleep/snooze mode. */
 	tulip_set_power_state (tp, 0, 0);
 
+	/* Disable all WOL events */
+	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
+	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
+	tulip_set_wolopts(tp->pdev, 0);
+
 	/* On some chip revs we must set the MII/SYM port before the reset!? */
 	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
 		iowrite32(0x00040000, ioaddr + CSR6);
@@ -345,8 +350,8 @@
 		} else if (tp->flags & COMET_MAC_ADDR) {
 			iowrite32(addr_low,  ioaddr + 0xA4);
 			iowrite32(addr_high, ioaddr + 0xA8);
-			iowrite32(0, ioaddr + 0xAC);
-			iowrite32(0, ioaddr + 0xB0);
+			iowrite32(0, ioaddr + CSR27);
+			iowrite32(0, ioaddr + CSR28);
 		}
 	} else {
 		/* This is set_rx_mode(), but without starting the transmitter. */
@@ -876,8 +881,32 @@
 	strcpy(info->bus_info, pci_name(np->pdev));
 }
 
+static int tulip_ethtool_set_wol(struct net_device *dev,
+				 struct ethtool_wolinfo *wolinfo)
+{
+	struct tulip_private *tp = netdev_priv(dev);
+
+	if (wolinfo->wolopts & (~tp->wolinfo.supported))
+		return -EOPNOTSUPP;
+
+	tp->wolinfo.wolopts = wolinfo->wolopts;
+	device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
+	return 0;
+}
+
+static void tulip_ethtool_get_wol(struct net_device *dev,
+				  struct ethtool_wolinfo *wolinfo)
+{
+	struct tulip_private *tp = netdev_priv(dev);
+
+	wolinfo->supported = tp->wolinfo.supported;
+	wolinfo->wolopts = tp->wolinfo.wolopts;
+}
+
 static const struct ethtool_ops ops = {
-	.get_drvinfo = tulip_get_drvinfo
+	.get_drvinfo = tulip_get_drvinfo,
+	.set_wol     = tulip_ethtool_set_wol,
+	.get_wol     = tulip_ethtool_get_wol,
 };
 
 /* Provide ioctl() calls to examine the MII xcvr state. */
@@ -1093,8 +1125,8 @@
 				iowrite32(3, ioaddr + CSR13);
 				iowrite32(mc_filter[1], ioaddr + CSR14);
 			} else if (tp->flags & COMET_MAC_ADDR) {
-				iowrite32(mc_filter[0], ioaddr + 0xAC);
-				iowrite32(mc_filter[1], ioaddr + 0xB0);
+				iowrite32(mc_filter[0], ioaddr + CSR27);
+				iowrite32(mc_filter[1], ioaddr + CSR28);
 			}
 			tp->mc_filter[0] = mc_filter[0];
 			tp->mc_filter[1] = mc_filter[1];
@@ -1381,6 +1413,13 @@
 		return i;
 	}
 
+	/* The chip will fail to enter a low-power state later unless
+	 * first explicitly commanded into D0 */
+	if (pci_set_power_state(pdev, PCI_D0)) {
+		printk(KERN_NOTICE PFX
+			"Failed to set power state to D0\n");
+	}
+
 	irq = pdev->irq;
 
 	/* alloc_etherdev ensures aligned and zeroed private structures */
@@ -1427,6 +1466,19 @@
 
 	tp->chip_id = chip_idx;
 	tp->flags = tulip_tbl[chip_idx].flags;
+
+	tp->wolinfo.supported = 0;
+	tp->wolinfo.wolopts = 0;
+	/* COMET: Enable power management only for AN983B */
+	if (chip_idx == COMET) {
+		u32 sig;
+		pci_read_config_dword(pdev, 0x80, &sig);
+		if (sig == 0x09811317) {
+			tp->flags |= COMET_PM;
+			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
+			printk(KERN_INFO "tulip_init_one: Enabled WOL support for AN983B\n");
+		}
+	}
 	tp->pdev = pdev;
 	tp->base_addr = ioaddr;
 	tp->revision = pdev->revision;
@@ -1759,11 +1811,43 @@
 }
 
 
+/* set the registers according to the given wolopts */
+static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tulip_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->base_addr;
+
+	if (tp->flags & COMET_PM) {
+		unsigned int tmp;
+
+		tmp = ioread32(ioaddr + CSR18);
+		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode |
+			 comet_csr18_d3a);
+		tmp |= comet_csr18_pm_mode;
+		iowrite32(tmp, ioaddr + CSR18);
+
+		/* Set the Wake-up Control/Status Register to the given WOL options */
+		tmp = ioread32(ioaddr + CSR13);
+		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone |
+			 comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
+		if (wolopts & WAKE_MAGIC)
+			tmp |= comet_csr13_mpre;
+		if (wolopts & WAKE_PHY)
+			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
+		/* Clear the event flags */
+		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
+		iowrite32(tmp, ioaddr + CSR13);
+	}
+}
+
 #ifdef CONFIG_PM
 
 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
 {
+	pci_power_t pstate;
 	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tulip_private *tp = netdev_priv(dev);
 
 	if (!dev)
 		return -EINVAL;
@@ -1779,7 +1863,16 @@
 save_state:
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	pstate = pci_choose_state(pdev, state);
+	if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
+		int rc;
+
+		tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
+		rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
+		if (rc)
+			printk(KERN_ERR "tulip: pci_enable_wake failed (%d)\n", rc);
+	}
+	pci_set_power_state(pdev, pstate);
 
 	return 0;
 }
@@ -1788,7 +1881,10 @@
 static int tulip_resume(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tulip_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->base_addr;
 	int retval;
+	unsigned int tmp;
 
 	if (!dev)
 		return -EINVAL;
@@ -1809,6 +1905,18 @@
 		return retval;
 	}
 
+	if (tp->flags & COMET_PM) {
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+
+		/* Clear the PMES flag */
+		tmp = ioread32(ioaddr + CSR20);
+		tmp |= comet_csr20_pmes;
+		iowrite32(tmp, ioaddr + CSR20);
+
+		/* Disable all wake-up events */
+		tulip_set_wolopts(pdev, 0);
+	}
 	netif_device_attach(dev);
 
 	if (netif_running(dev))
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 22bde49..2e50077 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -255,7 +255,7 @@
 	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
 	u32				zeroWord;
 	struct tx_desc			txHi[TXHI_ENTRIES];
-} __attribute__ ((packed));
+} __packed;
 
 struct rxbuff_ent {
 	struct sk_buff *skb;
diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h
index 673fd51..88187fc 100644
--- a/drivers/net/typhoon.h
+++ b/drivers/net/typhoon.h
@@ -77,7 +77,7 @@
 	volatile __le32 cmdCleared;
 	volatile __le32 respReady;
 	volatile __le32 rxHiReady;
-} __attribute__ ((packed));
+} __packed;
 
 /* The host<->Typhoon interface
  * Our means of communicating where things are
@@ -125,7 +125,7 @@
 	__le32 rxHiAddr;
 	__le32 rxHiAddrHi;
 	__le32 rxHiSize;
-} __attribute__ ((packed));
+} __packed;
 
 /* The Typhoon transmit/fragment descriptor
  *
@@ -187,7 +187,7 @@
 #define TYPHOON_TX_PF_VLAN_MASK		cpu_to_le32(0x0ffff000)
 #define TYPHOON_TX_PF_INTERNAL		cpu_to_le32(0xf0000000)
 #define TYPHOON_TX_PF_VLAN_TAG_SHIFT	12
-} __attribute__ ((packed));
+} __packed;
 
 /* The TCP Segmentation offload option descriptor
  *
@@ -208,7 +208,7 @@
 	__le32 respAddrLo;
 	__le32 bytesTx;
 	__le32 status;
-} __attribute__ ((packed));
+} __packed;
 
 /* The IPSEC Offload descriptor
  *
@@ -227,7 +227,7 @@
 	__le32 sa1;
 	__le32 sa2;
 	__le32 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /* The Typhoon receive descriptor (Updated by NIC)
  *
@@ -284,7 +284,7 @@
 #define TYPHOON_RX_UNKNOWN_SA		cpu_to_le16(0x0100)
 #define TYPHOON_RX_ESP_FORMAT_ERR	cpu_to_le16(0x0200)
 	__be32 vlanTag;
-} __attribute__ ((packed));
+} __packed;
 
 /* The Typhoon free buffer descriptor, used to give a buffer to the NIC
  *
@@ -301,7 +301,7 @@
 	__le32 physAddrHi;
 	u32 virtAddr;
 	u32 virtAddrHi;
-} __attribute__ ((packed));
+} __packed;
 
 /* The Typhoon command descriptor, used for commands and responses
  *
@@ -347,7 +347,7 @@
 	__le16 parm1;
 	__le32 parm2;
 	__le32 parm3;
-} __attribute__ ((packed));
+} __packed;
 
 /* The Typhoon response descriptor, see command descriptor for details
  */
@@ -359,7 +359,7 @@
 	__le16 parm1;
 	__le32 parm2;
 	__le32 parm3;
-} __attribute__ ((packed));
+} __packed;
 
 #define INIT_COMMAND_NO_RESPONSE(x, command)				\
 	do { struct cmd_desc *_ptr = (x);				\
@@ -427,7 +427,7 @@
 #define TYPHOON_LINK_HALF_DUPLEX	cpu_to_le32(0x00000000)
 	__le32 unused2;
 	__le32 unused3;
-} __attribute__ ((packed));
+} __packed;
 
 /* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1)
  */
@@ -488,7 +488,7 @@
 	u32 index;
 	u32 unused;
 	u32 unused2;
-} __attribute__ ((packed));
+} __packed;
 
 /* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx))
  * This is all for IPv4.
@@ -518,14 +518,14 @@
 	__le32 numSections;
 	__le32 startAddr;
 	__le32 hmacDigest[5];
-} __attribute__ ((packed));
+} __packed;
 
 struct typhoon_section_header {
 	__le32 len;
 	u16 checksum;
 	u16 reserved;
 	__le32 startAddr;
-} __attribute__ ((packed));
+} __packed;
 
 /* The Typhoon Register offsets
  */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 807470e..dc32a62 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3704,6 +3704,19 @@
 	return PHY_INTERFACE_MODE_MII;
 }
 
+static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	if (!ugeth->phydev)
+		return -ENODEV;
+
+	return phy_mii_ioctl(ugeth->phydev, if_mii(rq), cmd);
+}
+
 static const struct net_device_ops ucc_geth_netdev_ops = {
 	.ndo_open		= ucc_geth_open,
 	.ndo_stop		= ucc_geth_close,
@@ -3713,6 +3726,7 @@
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_set_multicast_list	= ucc_geth_set_multi,
 	.ndo_tx_timeout		= ucc_geth_timeout,
+	.ndo_do_ioctl		= ucc_geth_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ucc_netpoll,
 #endif
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index ef1fbeb..05a9558 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -106,7 +106,7 @@
 	u32 scar;		/* Statistics carry register */
 	u32 scam;		/* Statistics caryy mask register */
 	u8 res5[0x200 - 0x1c4];
-} __attribute__ ((packed));
+} __packed;
 
 /* UCC GETH TEMODR Register */
 #define TEMODER_TX_RMON_STATISTICS_ENABLE       0x0100	/* enable Tx statistics
@@ -420,11 +420,11 @@
 
 struct ucc_geth_thread_data_tx {
 	u8 res0[104];
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_thread_data_rx {
 	u8 res0[40];
-} __attribute__ ((packed));
+} __packed;
 
 /* Send Queue Queue-Descriptor */
 struct ucc_geth_send_queue_qd {
@@ -432,19 +432,19 @@
 	u8 res0[0x8];
 	u32 last_bd_completed_address;/* initialize to last entry in BD ring */
 	u8 res1[0x30];
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_send_queue_mem_region {
 	struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES];
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_thread_tx_pram {
 	u8 res0[64];
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_thread_rx_pram {
 	u8 res0[128];
-} __attribute__ ((packed));
+} __packed;
 
 #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING        64
 #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8      64
@@ -484,7 +484,7 @@
 				      /**< weight factor for queues   */
 	u32 minw;		/* temporary variable handled by QE */
 	u8 res1[0x70 - 0x64];
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_tx_firmware_statistics_pram {
 	u32 sicoltx;		/* single collision */
@@ -506,7 +506,7 @@
 				   and 1518 octets */
 	u32 txpktsjumbo;	/* total packets (including bad) between 1024
 				   and MAXLength octets */
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_rx_firmware_statistics_pram {
 	u32 frrxfcser;		/* frames with crc error */
@@ -540,7 +540,7 @@
 				   replaced */
 	u32 insertvlan;		/* total frames that had their VLAN tag
 				   inserted */
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_rx_interrupt_coalescing_entry {
 	u32 interruptcoalescingmaxvalue;	/* interrupt coalescing max
@@ -548,23 +548,23 @@
 	u32 interruptcoalescingcounter;	/* interrupt coalescing counter,
 					   initialize to
 					   interruptcoalescingmaxvalue */
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_rx_interrupt_coalescing_table {
 	struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES];
 				       /**< interrupt coalescing entry */
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_rx_prefetched_bds {
 	struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS];	/* prefetched bd */
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_rx_bd_queues_entry {
 	u32 bdbaseptr;		/* BD base pointer */
 	u32 bdptr;		/* BD pointer */
 	u32 externalbdbaseptr;	/* external BD base pointer */
 	u32 externalbdptr;	/* external BD pointer */
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_tx_global_pram {
 	u16 temoder;
@@ -580,13 +580,13 @@
 	u32 tqptr;		/* a base pointer to the Tx Queues Memory
 				   Region */
 	u8 res2[0x80 - 0x74];
-} __attribute__ ((packed));
+} __packed;
 
 /* structure representing Extended Filtering Global Parameters in PRAM */
 struct ucc_geth_exf_global_pram {
 	u32 l2pcdptr;		/* individual address filter, high */
 	u8 res0[0x10 - 0x04];
-} __attribute__ ((packed));
+} __packed;
 
 struct ucc_geth_rx_global_pram {
 	u32 remoder;		/* ethernet mode reg. */
@@ -620,7 +620,7 @@
 	u32 exfGlobalParam;	/* base address for extended filtering global
 				   parameters */
 	u8 res6[0x100 - 0xC4];	/* Initialize to zero */
-} __attribute__ ((packed));
+} __packed;
 
 #define GRACEFUL_STOP_ACKNOWLEDGE_RX            0x01
 
@@ -639,7 +639,7 @@
 	u32 txglobal;		/* tx global */
 	u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX];	/* tx threads */
 	u8 res3[0x1];
-} __attribute__ ((packed));
+} __packed;
 
 #define ENET_INIT_PARAM_RGF_SHIFT               (32 - 4)
 #define ENET_INIT_PARAM_TGF_SHIFT               (32 - 8)
@@ -661,7 +661,7 @@
 	u16 h;			/* address (MSB) */
 	u16 m;			/* address */
 	u16 l;			/* address (LSB) */
-} __attribute__ ((packed));
+} __packed;
 
 /* structure representing 82xx Address Filtering PRAM */
 struct ucc_geth_82xx_address_filtering_pram {
@@ -672,7 +672,7 @@
 	struct ucc_geth_82xx_enet_address __iomem taddr;
 	struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS];
 	u8 res0[0x40 - 0x38];
-} __attribute__ ((packed));
+} __packed;
 
 /* GETH Tx firmware statistics structure, used when calling
    UCC_GETH_GetStatistics. */
@@ -696,7 +696,7 @@
 				   and 1518 octets */
 	u32 txpktsjumbo;	/* total packets (including bad) between 1024
 				   and MAXLength octets */
-} __attribute__ ((packed));
+} __packed;
 
 /* GETH Rx firmware statistics structure, used when calling
    UCC_GETH_GetStatistics. */
@@ -732,7 +732,7 @@
 				   replaced */
 	u32 insertvlan;		/* total frames that had their VLAN tag
 				   inserted */
-} __attribute__ ((packed));
+} __packed;
 
 /* GETH hardware statistics structure, used when calling
    UCC_GETH_GetStatistics. */
@@ -781,7 +781,7 @@
 	u32 rbca;		/* Total number of frames received successfully
 				   that had destination address equal to the
 				   broadcast address */
-} __attribute__ ((packed));
+} __packed;
 
 /* UCC GETH Tx errors returned via TxConf callback */
 #define TX_ERRORS_DEF      0x0200
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 9516f38..aea4645 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -179,7 +179,7 @@
 	__le16 res2;
 	u8 status;
 	__le16 res3;
-} __attribute__ ((packed));
+} __packed;
 
 static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
 			    u16 size, void *data)
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index dc94445..109751b 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -97,8 +97,9 @@
 	struct sk_buff *skb = req->context;
 	struct net_device *dev = skb->dev;
 	struct usbpn_dev *pnd = netdev_priv(dev);
+	int status = req->status;
 
-	switch (req->status) {
+	switch (status) {
 	case 0:
 		dev->stats.tx_bytes += skb->len;
 		break;
@@ -109,7 +110,7 @@
 		dev->stats.tx_aborted_errors++;
 	default:
 		dev->stats.tx_errors++;
-		dev_dbg(&dev->dev, "TX error (%d)\n", req->status);
+		dev_dbg(&dev->dev, "TX error (%d)\n", status);
 	}
 	dev->stats.tx_packets++;
 
@@ -150,8 +151,9 @@
 	struct page *page = virt_to_page(req->transfer_buffer);
 	struct sk_buff *skb;
 	unsigned long flags;
+	int status = req->status;
 
-	switch (req->status) {
+	switch (status) {
 	case 0:
 		spin_lock_irqsave(&pnd->rx_lock, flags);
 		skb = pnd->rx_skb;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 4dd2351..39422f7 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -211,7 +211,7 @@
 	u16 wIndex;
 	u16 wLength;
 	u16 UART_state_bitmap;
-} __attribute__((packed));
+} __packed;
 
 struct hso_tiocmget {
 	struct mutex mutex;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 197c352..08e7b6a 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -193,7 +193,7 @@
 	case 0:
 		break;
 	default:
-		err("%s: urb status: %d", __func__, urb->status);
+		err("%s: urb status: %d", __func__, status);
 		return;
 	}
 
@@ -222,16 +222,17 @@
 static void ipheth_sndbulk_callback(struct urb *urb)
 {
 	struct ipheth_device *dev;
+	int status = urb->status;
 
 	dev = urb->context;
 	if (dev == NULL)
 		return;
 
-	if (urb->status != 0 &&
-	    urb->status != -ENOENT &&
-	    urb->status != -ECONNRESET &&
-	    urb->status != -ESHUTDOWN)
-		err("%s: urb status: %d", __func__, urb->status);
+	if (status != 0 &&
+	    status != -ENOENT &&
+	    status != -ECONNRESET &&
+	    status != -ESHUTDOWN)
+		err("%s: urb status: %d", __func__, status);
 
 	dev_kfree_skb_irq(dev->tx_skb);
 	netif_wake_queue(dev->net);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index d6078b8..2b7b39c 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -207,7 +207,7 @@
 	__le16 segment_size;
 	__u16 max_multicast_filters;
 	__u8 reserved3;
-} __attribute__ ((packed));
+} __packed;
 
 /****************************************************************
  *     kaweth_device
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 961a8ed..ba72a72 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -64,13 +64,13 @@
 	// all else is optional, and must start with:
 	// __le16	vendorId;	// from usb-if
 	// __le16	productId;
-} __attribute__((__packed__));
+} __packed;
 
 #define	PAD_BYTE	((unsigned char)0xAC)
 
 struct nc_trailer {
 	__le16	packet_id;
-} __attribute__((__packed__));
+} __packed;
 
 // packets may use FLAG_FRAMING_NC and optional pad
 #define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 974d17f..6710f09 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -21,11 +21,11 @@
  *			behaves. Pegasus II support added since this version.
  *			TODO: suppressing HCD warnings spewage on disconnect.
  *		v0.4.13	Ethernet address is now set at probe(), not at open()
- *			time as this seems to break dhcpd. 
+ *			time as this seems to break dhcpd.
  *		v0.5.0	branch to 2.5.x kernels
  *		v0.5.1	ethtool support added
  *		v0.5.5	rx socket buffers are in a pool and the their allocation
- * 			is out of the interrupt routine.
+ *			is out of the interrupt routine.
  */
 
 #include <linux/sched.h>
@@ -55,9 +55,9 @@
 #define	BMSR_MEDIA	(BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
 			BMSR_100FULL | BMSR_ANEGCAPABLE)
 
-static int loopback = 0;
-static int mii_mode = 0;
-static char *devid=NULL;
+static int loopback;
+static int mii_mode;
+static char *devid;
 
 static struct usb_eth_dev usb_dev_id[] = {
 #define	PEGASUS_DEV(pn, vid, pid, flags)	\
@@ -102,8 +102,8 @@
 
 /* use ethtool to change the level for any given device */
 static int msg_level = -1;
-module_param (msg_level, int, 0);
-MODULE_PARM_DESC (msg_level, "Override default message level");
+module_param(msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Override default message level");
 
 MODULE_DEVICE_TABLE(usb, pegasus_ids);
 static const struct net_device_ops pegasus_netdev_ops;
@@ -141,7 +141,7 @@
 	wake_up(&pegasus->ctrl_wait);
 }
 
-static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
+static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
 			 void *data)
 {
 	int ret;
@@ -196,7 +196,7 @@
 	return ret;
 }
 
-static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
+static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
 			 void *data)
 {
 	int ret;
@@ -248,7 +248,7 @@
 	return ret;
 }
 
-static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
+static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
 {
 	int ret;
 	char *tmp;
@@ -299,7 +299,7 @@
 	return ret;
 }
 
-static int update_eth_regs_async(pegasus_t * pegasus)
+static int update_eth_regs_async(pegasus_t *pegasus)
 {
 	int ret;
 
@@ -326,7 +326,7 @@
 }
 
 /* Returns 0 on success, error on failure */
-static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
+static int read_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
 {
 	int i;
 	__u8 data[4] = { phy, 0, 0, indx };
@@ -334,7 +334,7 @@
 	int ret;
 
 	set_register(pegasus, PhyCtrl, 0);
-	set_registers(pegasus, PhyAddr, sizeof (data), data);
+	set_registers(pegasus, PhyAddr, sizeof(data), data);
 	set_register(pegasus, PhyCtrl, (indx | PHY_READ));
 	for (i = 0; i < REG_TIMEOUT; i++) {
 		ret = get_registers(pegasus, PhyCtrl, 1, data);
@@ -366,7 +366,7 @@
 	return (int)res;
 }
 
-static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd)
+static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 regd)
 {
 	int i;
 	__u8 data[4] = { phy, 0, 0, indx };
@@ -402,7 +402,7 @@
 	write_mii_word(pegasus, phy_id, loc, val);
 }
 
-static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata)
+static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 {
 	int i;
 	__u8 tmp;
@@ -433,7 +433,7 @@
 }
 
 #ifdef	PEGASUS_WRITE_EEPROM
-static inline void enable_eprom_write(pegasus_t * pegasus)
+static inline void enable_eprom_write(pegasus_t *pegasus)
 {
 	__u8 tmp;
 	int ret;
@@ -442,7 +442,7 @@
 	set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE);
 }
 
-static inline void disable_eprom_write(pegasus_t * pegasus)
+static inline void disable_eprom_write(pegasus_t *pegasus)
 {
 	__u8 tmp;
 	int ret;
@@ -452,7 +452,7 @@
 	set_register(pegasus, EthCtrl2, tmp & ~EPROM_WR_ENABLE);
 }
 
-static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
+static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
 {
 	int i;
 	__u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
@@ -484,7 +484,7 @@
 }
 #endif				/* PEGASUS_WRITE_EEPROM */
 
-static inline void get_node_id(pegasus_t * pegasus, __u8 * id)
+static inline void get_node_id(pegasus_t *pegasus, __u8 *id)
 {
 	int i;
 	__u16 w16;
@@ -495,7 +495,7 @@
 	}
 }
 
-static void set_ethernet_addr(pegasus_t * pegasus)
+static void set_ethernet_addr(pegasus_t *pegasus)
 {
 	__u8 node_id[6];
 
@@ -503,12 +503,12 @@
 		get_registers(pegasus, 0x10, sizeof(node_id), node_id);
 	} else {
 		get_node_id(pegasus, node_id);
-		set_registers(pegasus, EthID, sizeof (node_id), node_id);
+		set_registers(pegasus, EthID, sizeof(node_id), node_id);
 	}
-	memcpy(pegasus->net->dev_addr, node_id, sizeof (node_id));
+	memcpy(pegasus->net->dev_addr, node_id, sizeof(node_id));
 }
 
-static inline int reset_mac(pegasus_t * pegasus)
+static inline int reset_mac(pegasus_t *pegasus)
 {
 	__u8 data = 0x8;
 	int i;
@@ -563,7 +563,7 @@
 		data[1] = 0;
 	data[2] = (loopback & 1) ? 0x09 : 0x01;
 
-	memcpy(pegasus->eth_regs, data, sizeof (data));
+	memcpy(pegasus->eth_regs, data, sizeof(data));
 	ret = set_registers(pegasus, EthCtrl0, 3, data);
 
 	if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS ||
@@ -577,7 +577,7 @@
 	return ret;
 }
 
-static void fill_skb_pool(pegasus_t * pegasus)
+static void fill_skb_pool(pegasus_t *pegasus)
 {
 	int i;
 
@@ -595,7 +595,7 @@
 	}
 }
 
-static void free_skb_pool(pegasus_t * pegasus)
+static void free_skb_pool(pegasus_t *pegasus)
 {
 	int i;
 
@@ -667,11 +667,11 @@
 		netif_dbg(pegasus, rx_err, net,
 			  "RX packet error %x\n", rx_status);
 		pegasus->stats.rx_errors++;
-		if (rx_status & 0x06)	// long or runt
+		if (rx_status & 0x06)	/* long or runt	*/
 			pegasus->stats.rx_length_errors++;
 		if (rx_status & 0x08)
 			pegasus->stats.rx_crc_errors++;
-		if (rx_status & 0x10)	// extra bits
+		if (rx_status & 0x10)	/* extra bits	*/
 			pegasus->stats.rx_frame_errors++;
 		goto goon;
 	}
@@ -748,9 +748,8 @@
 	if (pegasus->flags & PEGASUS_RX_URB_FAIL)
 		if (pegasus->rx_skb)
 			goto try_again;
-	if (pegasus->rx_skb == NULL) {
+	if (pegasus->rx_skb == NULL)
 		pegasus->rx_skb = pull_skb(pegasus);
-	}
 	if (pegasus->rx_skb == NULL) {
 		netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n");
 		tasklet_schedule(&pegasus->rx_tl);
@@ -835,7 +834,7 @@
 	}
 
 	if (urb->actual_length >= 6) {
-		u8	* d = urb->transfer_buffer;
+		u8 *d = urb->transfer_buffer;
 
 		/* byte 0 == tx_status1, reg 2B */
 		if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL
@@ -918,14 +917,14 @@
 	return &((pegasus_t *) netdev_priv(dev))->stats;
 }
 
-static inline void disable_net_traffic(pegasus_t * pegasus)
+static inline void disable_net_traffic(pegasus_t *pegasus)
 {
 	__le16 tmp = cpu_to_le16(0);
 
 	set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
 }
 
-static inline void get_interrupt_interval(pegasus_t * pegasus)
+static inline void get_interrupt_interval(pegasus_t *pegasus)
 {
 	u16 data;
 	u8 interval;
@@ -961,7 +960,7 @@
 		netif_carrier_off(net);
 }
 
-static void free_all_urbs(pegasus_t * pegasus)
+static void free_all_urbs(pegasus_t *pegasus)
 {
 	usb_free_urb(pegasus->intr_urb);
 	usb_free_urb(pegasus->tx_urb);
@@ -969,7 +968,7 @@
 	usb_free_urb(pegasus->ctrl_urb);
 }
 
-static void unlink_all_urbs(pegasus_t * pegasus)
+static void unlink_all_urbs(pegasus_t *pegasus)
 {
 	usb_kill_urb(pegasus->intr_urb);
 	usb_kill_urb(pegasus->tx_urb);
@@ -977,12 +976,11 @@
 	usb_kill_urb(pegasus->ctrl_urb);
 }
 
-static int alloc_urbs(pegasus_t * pegasus)
+static int alloc_urbs(pegasus_t *pegasus)
 {
 	pegasus->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!pegasus->ctrl_urb) {
+	if (!pegasus->ctrl_urb)
 		return 0;
-	}
 	pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!pegasus->rx_urb) {
 		usb_free_urb(pegasus->ctrl_urb);
@@ -1019,7 +1017,7 @@
 		return -ENOMEM;
 
 	res = set_registers(pegasus, EthID, 6, net->dev_addr);
-	
+
 	usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
 			  usb_rcvbulkpipe(pegasus->usb, 1),
 			  pegasus->rx_skb->data, PEGASUS_MTU + 8,
@@ -1033,7 +1031,7 @@
 
 	usb_fill_int_urb(pegasus->intr_urb, pegasus->usb,
 			 usb_rcvintpipe(pegasus->usb, 3),
-			 pegasus->intr_buff, sizeof (pegasus->intr_buff),
+			 pegasus->intr_buff, sizeof(pegasus->intr_buff),
 			 intr_callback, pegasus, pegasus->intr_interval);
 	if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) {
 		if (res == -ENODEV)
@@ -1076,9 +1074,9 @@
 				struct ethtool_drvinfo *info)
 {
 	pegasus_t *pegasus = netdev_priv(dev);
-	strncpy(info->driver, driver_name, sizeof (info->driver) - 1);
-	strncpy(info->version, DRIVER_VERSION, sizeof (info->version) - 1);
-	usb_make_path(pegasus->usb, info->bus_info, sizeof (info->bus_info));
+	strncpy(info->driver, driver_name, sizeof(info->driver) - 1);
+	strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
+	usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
 }
 
 /* also handles three patterns of some kind in hardware */
@@ -1098,7 +1096,7 @@
 {
 	pegasus_t	*pegasus = netdev_priv(dev);
 	u8		reg78 = 0x04;
-	
+
 	if (wol->wolopts & ~WOL_SUPPORTED)
 		return -EINVAL;
 
@@ -1118,7 +1116,7 @@
 static inline void pegasus_reset_wol(struct net_device *dev)
 {
 	struct ethtool_wolinfo wol;
-	
+
 	memset(&wol, 0, sizeof wol);
 	(void) pegasus_set_wol(dev, &wol);
 }
@@ -1178,7 +1176,7 @@
 
 static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
 {
-	__u16 *data = (__u16 *) & rq->ifr_ifru;
+	__u16 *data = (__u16 *) &rq->ifr_ifru;
 	pegasus_t *pegasus = netdev_priv(net);
 	int res;
 
@@ -1223,7 +1221,7 @@
 	ctrl_callback(pegasus->ctrl_urb);
 }
 
-static __u8 mii_phy_probe(pegasus_t * pegasus)
+static __u8 mii_phy_probe(pegasus_t *pegasus)
 {
 	int i;
 	__u16 tmp;
@@ -1239,10 +1237,10 @@
 	return 0xff;
 }
 
-static inline void setup_pegasus_II(pegasus_t * pegasus)
+static inline void setup_pegasus_II(pegasus_t *pegasus)
 {
 	__u8 data = 0xa5;
-	
+
 	set_register(pegasus, Reg1d, 0);
 	set_register(pegasus, Reg7b, 1);
 	mdelay(100);
@@ -1254,16 +1252,15 @@
 	set_register(pegasus, 0x83, data);
 	get_registers(pegasus, 0x83, 1, &data);
 
-	if (data == 0xa5) {
+	if (data == 0xa5)
 		pegasus->chip = 0x8513;
-	} else {
+	else
 		pegasus->chip = 0;
-	}
 
 	set_register(pegasus, 0x80, 0xc0);
 	set_register(pegasus, 0x83, 0xff);
 	set_register(pegasus, 0x84, 0x01);
-	
+
 	if (pegasus->features & HAS_HOME_PNA && mii_mode)
 		set_register(pegasus, Reg81, 6);
 	else
@@ -1272,7 +1269,7 @@
 
 
 static int pegasus_count;
-static struct workqueue_struct *pegasus_workqueue = NULL;
+static struct workqueue_struct *pegasus_workqueue;
 #define CARRIER_CHECK_DELAY (2 * HZ)
 
 static void check_carrier(struct work_struct *work)
@@ -1367,7 +1364,7 @@
 	pegasus->mii.phy_id_mask = 0x1f;
 	pegasus->mii.reg_num_mask = 0x1f;
 	spin_lock_init(&pegasus->rx_pool_lock);
-	pegasus->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
+	pegasus->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
 				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
 
 	pegasus->features = usb_dev_id[dev_index].private;
@@ -1442,11 +1439,11 @@
 	pegasus_dec_workqueue();
 }
 
-static int pegasus_suspend (struct usb_interface *intf, pm_message_t message)
+static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
 {
 	struct pegasus *pegasus = usb_get_intfdata(intf);
-	
-	netif_device_detach (pegasus->net);
+
+	netif_device_detach(pegasus->net);
 	cancel_delayed_work(&pegasus->carrier_check);
 	if (netif_running(pegasus->net)) {
 		usb_kill_urb(pegasus->rx_urb);
@@ -1455,11 +1452,11 @@
 	return 0;
 }
 
-static int pegasus_resume (struct usb_interface *intf)
+static int pegasus_resume(struct usb_interface *intf)
 {
 	struct pegasus *pegasus = usb_get_intfdata(intf);
 
-	netif_device_attach (pegasus->net);
+	netif_device_attach(pegasus->net);
 	if (netif_running(pegasus->net)) {
 		pegasus->rx_urb->status = 0;
 		pegasus->rx_urb->actual_length = 0;
@@ -1498,8 +1495,8 @@
 
 static void __init parse_id(char *id)
 {
-	unsigned int vendor_id=0, device_id=0, flags=0, i=0;
-	char *token, *name=NULL;
+	unsigned int vendor_id = 0, device_id = 0, flags = 0, i = 0;
+	char *token, *name = NULL;
 
 	if ((token = strsep(&id, ":")) != NULL)
 		name = token;
@@ -1510,14 +1507,14 @@
 		device_id = simple_strtoul(token, NULL, 16);
 	flags = simple_strtoul(id, NULL, 16);
 	pr_info("%s: new device %s, vendor ID 0x%04x, device ID 0x%04x, flags: 0x%x\n",
-	        driver_name, name, vendor_id, device_id, flags);
+		driver_name, name, vendor_id, device_id, flags);
 
 	if (vendor_id > 0x10000 || vendor_id == 0)
 		return;
 	if (device_id > 0x10000 || device_id == 0)
 		return;
 
-	for (i=0; usb_dev_id[i].name; i++);
+	for (i = 0; usb_dev_id[i].name; i++);
 	usb_dev_id[i].name = name;
 	usb_dev_id[i].vendor = vendor_id;
 	usb_dev_id[i].device = device_id;
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index 29f5211..65b78b3 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -68,7 +68,7 @@
 	EpromData = 0x21,	/* 0x21 low, 0x22 high byte */
 	EpromCtrl = 0x23,
 	PhyAddr = 0x25,
-	PhyData = 0x26, 	/* 0x26 low, 0x27 high byte */
+	PhyData = 0x26,		/* 0x26 low, 0x27 high byte */
 	PhyCtrl = 0x28,
 	UsbStst = 0x2a,
 	EthTxStat0 = 0x2b,
@@ -154,162 +154,162 @@
 
 #else	/* PEGASUS_DEV */
 
-PEGASUS_DEV( "3Com USB Ethernet 3C460B", VENDOR_3COM, 0x4601,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "ATEN USB Ethernet UC-110T", VENDOR_ATEN, 0x2007,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x110c,
-		DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA )
-PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4104,
-		DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4004,
-		DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4007,
-		DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4102,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4002,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400b,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400c,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0xabc1,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x200c,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Accton USB 10/100 Ethernet Adapter", VENDOR_ACCTON, 0x1046,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "SpeedStream USB 10/100 Ethernet", VENDOR_ACCTON, 0x5046,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Philips USB 10/100 Ethernet", VENDOR_ACCTON, 0xb004,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "ADMtek ADM8511 \"Pegasus II\" USB Ethernet",
+PEGASUS_DEV("3Com USB Ethernet 3C460B", VENDOR_3COM, 0x4601,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("ATEN USB Ethernet UC-110T", VENDOR_ATEN, 0x2007,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x110c,
+		DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA)
+PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4104,
+		DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4004,
+		DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4007,
+		DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4102,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4002,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400b,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400c,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0xabc1,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x200c,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Accton USB 10/100 Ethernet Adapter", VENDOR_ACCTON, 0x1046,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("SpeedStream USB 10/100 Ethernet", VENDOR_ACCTON, 0x5046,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Philips USB 10/100 Ethernet", VENDOR_ACCTON, 0xb004,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("ADMtek ADM8511 \"Pegasus II\" USB Ethernet",
 		VENDOR_ADMTEK, 0x8511,
-		DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA )
-PEGASUS_DEV( "ADMtek ADM8513 \"Pegasus II\" USB Ethernet",
+		DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA)
+PEGASUS_DEV("ADMtek ADM8513 \"Pegasus II\" USB Ethernet",
 		VENDOR_ADMTEK, 0x8513,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "ADMtek ADM8515 \"Pegasus II\" USB-2.0 Ethernet",
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("ADMtek ADM8515 \"Pegasus II\" USB-2.0 Ethernet",
 		VENDOR_ADMTEK, 0x8515,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "ADMtek AN986 \"Pegasus\" USB Ethernet (evaluation board)",
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("ADMtek AN986 \"Pegasus\" USB Ethernet (evaluation board)",
 		VENDOR_ADMTEK, 0x0986,
-		DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "AN986A USB MAC", VENDOR_ADMTEK, 1986,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
+		DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("AN986A USB MAC", VENDOR_ADMTEK, 1986,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
 /*
  * Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors
  * with the same product IDs by checking the device class too.
  */
-PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Belkin F5U122 10/100 USB Ethernet", VENDOR_BELKIN, 0x0122,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987,
-		DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "iPAQ Networking 10/100 USB", VENDOR_COMPAQ, 0x8511,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Billionton USBE-100", VENDOR_BILLIONTON, 0x8511,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Corega FEther USB-TX", VENDOR_COREGA, 0x0004,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Corega FEther USB-TXS", VENDOR_COREGA, 0x000d,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4001,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4002,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4102,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x400b,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x200c,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "D-Link DSB-650TX(PNA)", VENDOR_DLINK, 0x4003,
-		DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "D-Link DSB-650", VENDOR_DLINK, 0xabc1,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "GOLDPFEIL USB Adapter", VENDOR_ELCON, 0x0002,
-		DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA )
-PEGASUS_DEV( "ELECOM USB Ethernet LD-USB20", VENDOR_ELECOM,  0x4010,
-		DEFAULT_GPIO_RESET  | PEGASUS_II )
-PEGASUS_DEV( "EasiDock Ethernet", VENDOR_MOBILITY, 0x0304,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Elsa Micolink USB2Ethernet", VENDOR_ELSA, 0x3000,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "GIGABYTE GN-BR402W Wireless Router", VENDOR_GIGABYTE, 0x8002,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Hawking UF100 10/100 Ethernet", VENDOR_HAWKING, 0x400c,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "HP hn210c Ethernet USB", VENDOR_HP, 0x811c,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
+PEGASUS_DEV_CLASS("Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Belkin F5U122 10/100 USB Ethernet", VENDOR_BELKIN, 0x0122,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
 		DEFAULT_GPIO_RESET)
-PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "LANEED USB Ethernet LD-USBL/TX", VENDOR_LANEED, 0x4005,
+PEGASUS_DEV("Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987,
+		DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("iPAQ Networking 10/100 USB", VENDOR_COMPAQ, 0x8511,
 		DEFAULT_GPIO_RESET | PEGASUS_II)
-PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x400b,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "LANEED USB Ethernet LD-USB/T", VENDOR_LANEED, 0xabc1,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x200c,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Linksys USB10TX", VENDOR_LINKSYS, 0x2202,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Linksys USB100TX", VENDOR_LINKSYS, 0x2203,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Linksys USB100TX", VENDOR_LINKSYS, 0x2204,
-		DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "Linksys USB10T Ethernet Adapter", VENDOR_LINKSYS, 0x2206,
+PEGASUS_DEV("Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Billionton USBE-100", VENDOR_BILLIONTON, 0x8511,
 		DEFAULT_GPIO_RESET | PEGASUS_II)
-PEGASUS_DEV( "Linksys USBVPN1", VENDOR_LINKSYS2, 0x08b4,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Linksys USB USB100TX", VENDOR_LINKSYS, 0x400b,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Linksys USB10TX", VENDOR_LINKSYS, 0x200c,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0001,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0005,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "MELCO/BUFFALO LUA2-TX", VENDOR_MELCO, 0x0009,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Microsoft MN-110", VENDOR_MICROSOFT, 0x007a,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "NETGEAR FA101", VENDOR_NETGEAR, 0x1020,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "OCT Inc.", VENDOR_OCT, 0x0109,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "OCT USB TO Ethernet", VENDOR_OCT, 0x0901,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "smartNIC 2 PnP Adapter", VENDOR_SMARTBRIDGES, 0x0003,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "SMC 202 USB Ethernet", VENDOR_SMC, 0x0200,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "SMC 2206 USB Ethernet", VENDOR_SMC, 0x0201,
+PEGASUS_DEV("Corega FEther USB-TX", VENDOR_COREGA, 0x0004,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Corega FEther USB-TXS", VENDOR_COREGA, 0x000d,
 		DEFAULT_GPIO_RESET | PEGASUS_II)
-PEGASUS_DEV( "SOHOware NUB100 Ethernet", VENDOR_SOHOWARE, 0x9100,
-		DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "SOHOware NUB110 Ethernet", VENDOR_SOHOWARE, 0x9110,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "SpeedStream USB 10/100 Ethernet", VENDOR_SIEMENS, 0x1001,
-		DEFAULT_GPIO_RESET | PEGASUS_II )
+PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4001,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4002,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4102,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x400b,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x200c,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("D-Link DSB-650TX(PNA)", VENDOR_DLINK, 0x4003,
+		DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("D-Link DSB-650", VENDOR_DLINK, 0xabc1,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("GOLDPFEIL USB Adapter", VENDOR_ELCON, 0x0002,
+		DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA)
+PEGASUS_DEV("ELECOM USB Ethernet LD-USB20", VENDOR_ELECOM,  0x4010,
+		DEFAULT_GPIO_RESET  | PEGASUS_II)
+PEGASUS_DEV("EasiDock Ethernet", VENDOR_MOBILITY, 0x0304,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Elsa Micolink USB2Ethernet", VENDOR_ELSA, 0x3000,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("GIGABYTE GN-BR402W Wireless Router", VENDOR_GIGABYTE, 0x8002,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Hawking UF100 10/100 Ethernet", VENDOR_HAWKING, 0x400c,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("HP hn210c Ethernet USB", VENDOR_HP, 0x811c,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("LANEED USB Ethernet LD-USBL/TX", VENDOR_LANEED, 0x4005,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x400b,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("LANEED USB Ethernet LD-USB/T", VENDOR_LANEED, 0xabc1,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x200c,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Linksys USB10TX", VENDOR_LINKSYS, 0x2202,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Linksys USB100TX", VENDOR_LINKSYS, 0x2203,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Linksys USB100TX", VENDOR_LINKSYS, 0x2204,
+		DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("Linksys USB10T Ethernet Adapter", VENDOR_LINKSYS, 0x2206,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Linksys USBVPN1", VENDOR_LINKSYS2, 0x08b4,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Linksys USB USB100TX", VENDOR_LINKSYS, 0x400b,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Linksys USB10TX", VENDOR_LINKSYS, 0x200c,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0001,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0005,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("MELCO/BUFFALO LUA2-TX", VENDOR_MELCO, 0x0009,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Microsoft MN-110", VENDOR_MICROSOFT, 0x007a,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("NETGEAR FA101", VENDOR_NETGEAR, 0x1020,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("OCT Inc.", VENDOR_OCT, 0x0109,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("OCT USB TO Ethernet", VENDOR_OCT, 0x0901,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("smartNIC 2 PnP Adapter", VENDOR_SMARTBRIDGES, 0x0003,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("SMC 202 USB Ethernet", VENDOR_SMC, 0x0200,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("SMC 2206 USB Ethernet", VENDOR_SMC, 0x0201,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("SOHOware NUB100 Ethernet", VENDOR_SOHOWARE, 0x9100,
+		DEFAULT_GPIO_RESET)
+PEGASUS_DEV("SOHOware NUB110 Ethernet", VENDOR_SOHOWARE, 0x9110,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("SpeedStream USB 10/100 Ethernet", VENDOR_SIEMENS, 0x1001,
+		DEFAULT_GPIO_RESET | PEGASUS_II)
 
 
 #endif	/* PEGASUS_DEV */
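The PEGASUS_DEV()/PEGASUS_DEV_CLASS() entries above form an X-macro style table: the header only lists the devices, and the including .c file is expected to define the macro differently on each inclusion to generate both the USB ID table and the per-device name/flag data. As a rough, hypothetical illustration of that idiom (all names and values below are invented for the sketch and are not the pegasus driver's actual macros), a self-contained userspace version looks like this:

/* Hypothetical sketch of the X-macro idiom used by device tables like the
 * one above: one list of DEV() entries, expanded twice with different
 * DEV() definitions. Build with: cc -std=c99 xmacro.c */
#include <stdio.h>

#define DEVICE_LIST \
	DEV("Example NIC A", 0x1234, 0x0001) \
	DEV("Example NIC B", 0x1234, 0x0002)

/* Expansion 1: a table of numeric IDs */
struct dev_id { unsigned short vendor, product; };
#define DEV(name, v, p) { (v), (p) },
static const struct dev_id ids[] = { DEVICE_LIST };
#undef DEV

/* Expansion 2: a parallel table of human-readable names */
#define DEV(name, v, p) name,
static const char *names[] = { DEVICE_LIST };
#undef DEV

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("%04x:%04x %s\n", ids[i].vendor, ids[i].product,
		       names[i]);
	return 0;
}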
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index f1942d6..ee85c8b 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -165,7 +165,7 @@
 	u8 gw_addr_len; /* NW-supplied GW address len */
 	u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */
 	u8 reserved[8];
-} __attribute__ ((packed));
+} __packed;
 
 #define SIERRA_NET_LSI_COMMON_LEN      4
 #define SIERRA_NET_LSI_UMTS_LEN        (sizeof(struct lsi_umts))
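The __attribute__((packed)) to __packed conversions throughout this series rely on the kernel's __packed shorthand, which the compiler headers define as __attribute__((packed)). As a minimal userspace sketch of what the attribute buys for wire-format structures such as lsi_umts (the struct layout and field names below are invented; only the macro definition mirrors the kernel's):

/* Minimal sketch of struct packing; the local __packed define stands in
 * for the kernel's shorthand macro. Build with: cc packed.c */
#include <stdio.h>
#include <stdint.h>

#ifndef __packed
#define __packed __attribute__((packed))	/* mirrors the kernel macro */
#endif

struct padded {
	uint8_t  flag;
	uint32_t value;	/* compiler may insert 3 bytes of padding before this */
};

struct wire {
	uint8_t  flag;
	uint32_t value;	/* no padding: matches the on-the-wire layout */
} __packed;

int main(void)
{
	printf("padded: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct padded), sizeof(struct wire));
	return 0;
}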
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 81c76ad..7eab407 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -643,7 +643,7 @@
 	netif_stop_queue (net);
 
 	netif_info(dev, ifdown, dev->net,
-		   "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
 		   net->stats.rx_packets, net->stats.tx_packets,
 		   net->stats.rx_errors, net->stats.tx_errors);
 
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index c381911..f7b33ae 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -193,7 +193,7 @@
 	__le32 pa_low;		/* Low 32 bit PCI address */
 	__le16 pa_high;		/* Next 16 bit PCI address (48 total) */
 	__le16 size;		/* bits 0--14 - frame size, bit 15 - enable int. */
-} __attribute__ ((__packed__));
+} __packed;
 
 /*
  *	Transmit descriptor
@@ -208,7 +208,7 @@
 	__le16 vlan;
 	u8 TCR;
 	u8 cmd;			/* bits 0--1 - TCPLS, bits 4--7 - CMDZ */
-} __attribute__ ((__packed__));
+} __packed;
 
 enum {
 	TD_QUEUE = cpu_to_le16(0x8000)
@@ -218,7 +218,7 @@
 	__le32 pa_low;
 	__le16 pa_high;
 	__le16 size;		/* bits 0--13 - size, bit 15 - queue */
-} __attribute__ ((__packed__));
+} __packed;
 
 struct tx_desc {
 	struct tdesc0 tdesc0;
@@ -1096,7 +1096,7 @@
 
 	volatile __le16 PatternCRC[8];	/* 0xB0 */
 	volatile __le32 ByteMask[4][4];	/* 0xC0 */
-} __attribute__ ((__packed__));
+} __packed;
 
 
 enum hw_mib {
@@ -1216,7 +1216,7 @@
 	u8 ar_sip[4];
 	u8 ar_tha[ETH_ALEN];
 	u8 ar_tip[4];
-} __attribute__ ((__packed__));
+} __packed;
 
 struct _magic_packet {
 	u8 dest_mac[6];
@@ -1224,7 +1224,7 @@
 	__be16 type;
 	u8 MAC[16][6];
 	u8 password[6];
-} __attribute__ ((__packed__));
+} __packed;
 
 /*
  *	Store for chip context when saving and restoring status. Not
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 3935c44..de1ba14 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -275,17 +275,16 @@
 	}
 }
 
-static u32
-vmxnet3_get_flags(struct net_device *netdev) {
-	return netdev->features;
-}
-
 static int
-vmxnet3_set_flags(struct net_device *netdev, u32 data) {
+vmxnet3_set_flags(struct net_device *netdev, u32 data)
+{
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
 	u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
 
+	if (data & ~ETH_FLAG_LRO)
+		return -EOPNOTSUPP;
+
 	if (lro_requested ^ lro_present) {
 		/* toggle the LRO feature*/
 		netdev->features ^= NETIF_F_LRO;
@@ -554,7 +553,7 @@
 	.get_tso           = ethtool_op_get_tso,
 	.set_tso           = ethtool_op_set_tso,
 	.get_strings       = vmxnet3_get_strings,
-	.get_flags	   = vmxnet3_get_flags,
+	.get_flags	   = ethtool_op_get_flags,
 	.set_flags	   = vmxnet3_set_flags,
 	.get_sset_count	   = vmxnet3_get_sset_count,
 	.get_ethtool_stats = vmxnet3_get_ethtool_stats,
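The vmxnet3 change above follows the usual ethtool set_flags pattern: reject any bit the driver does not handle (here everything except ETH_FLAG_LRO) with -EOPNOTSUPP, then toggle the corresponding netdev feature only when the requested state differs, and let get_flags fall back to the generic ethtool_op_get_flags helper. A schematic sketch of that pattern for a hypothetical driver, assuming the usual linux/netdevice.h and linux/ethtool.h context (the mydrv_* names are invented; the flag and feature constants are the real ones used above):

/* Schematic .set_flags handler; sketch only, mydrv_* names are made up. */
static int mydrv_set_flags(struct net_device *netdev, u32 data)
{
	bool lro_requested = !!(data & ETH_FLAG_LRO);
	bool lro_present   = !!(netdev->features & NETIF_F_LRO);

	/* Refuse flags this driver knows nothing about. */
	if (data & ~ETH_FLAG_LRO)
		return -EOPNOTSUPP;

	/* Only touch the features when the state actually changes. */
	if (lro_requested != lro_present) {
		netdev->features ^= NETIF_F_LRO;
		/* ... plus whatever device reconfiguration the change needs */
	}

	return 0;
}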
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index fc8b2d7..ed17865 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4023,7 +4023,7 @@
 	int high_dma = 0;
 	u64 vpath_mask = 0;
 	struct vxgedev *vdev;
-	struct vxge_config ll_config;
+	struct vxge_config *ll_config = NULL;
 	struct vxge_hw_device_config *device_config = NULL;
 	struct vxge_hw_device_attr attr;
 	int i, j, no_of_vpath = 0, max_vpath_supported = 0;
@@ -4082,17 +4082,24 @@
 		goto _exit0;
 	}
 
-	memset(&ll_config, 0, sizeof(struct vxge_config));
-	ll_config.tx_steering_type = TX_MULTIQ_STEERING;
-	ll_config.intr_type = MSI_X;
-	ll_config.napi_weight = NEW_NAPI_WEIGHT;
-	ll_config.rth_steering = RTH_STEERING;
+	ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
+	if (!ll_config) {
+		ret = -ENOMEM;
+		vxge_debug_init(VXGE_ERR,
+			"ll_config : malloc failed %s %d",
+			__FILE__, __LINE__);
+		goto _exit0;
+	}
+	ll_config->tx_steering_type = TX_MULTIQ_STEERING;
+	ll_config->intr_type = MSI_X;
+	ll_config->napi_weight = NEW_NAPI_WEIGHT;
+	ll_config->rth_steering = RTH_STEERING;
 
 	/* get the default configuration parameters */
 	vxge_hw_device_config_default_get(device_config);
 
 	/* initialize configuration parameters */
-	vxge_device_config_init(device_config, &ll_config.intr_type);
+	vxge_device_config_init(device_config, &ll_config->intr_type);
 
 	ret = pci_enable_device(pdev);
 	if (ret) {
@@ -4145,7 +4152,7 @@
 		(unsigned long long)pci_resource_start(pdev, 0));
 
 	status = vxge_hw_device_hw_info_get(attr.bar0,
-			&ll_config.device_hw_info);
+			&ll_config->device_hw_info);
 	if (status != VXGE_HW_OK) {
 		vxge_debug_init(VXGE_ERR,
 			"%s: Reading of hardware info failed."
@@ -4154,7 +4161,7 @@
 		goto _exit3;
 	}
 
-	if (ll_config.device_hw_info.fw_version.major !=
+	if (ll_config->device_hw_info.fw_version.major !=
 		VXGE_DRIVER_FW_VERSION_MAJOR) {
 		vxge_debug_init(VXGE_ERR,
 			"%s: Incorrect firmware version."
@@ -4164,7 +4171,7 @@
 		goto _exit3;
 	}
 
-	vpath_mask = ll_config.device_hw_info.vpath_mask;
+	vpath_mask = ll_config->device_hw_info.vpath_mask;
 	if (vpath_mask == 0) {
 		vxge_debug_ll_config(VXGE_TRACE,
 			"%s: No vpaths available in device", VXGE_DRIVER_NAME);
@@ -4176,10 +4183,10 @@
 		"%s:%d  Vpath mask = %llx", __func__, __LINE__,
 		(unsigned long long)vpath_mask);
 
-	function_mode = ll_config.device_hw_info.function_mode;
-	host_type = ll_config.device_hw_info.host_type;
+	function_mode = ll_config->device_hw_info.function_mode;
+	host_type = ll_config->device_hw_info.host_type;
 	is_privileged = __vxge_hw_device_is_privilaged(host_type,
-		ll_config.device_hw_info.func_id);
+		ll_config->device_hw_info.func_id);
 
 	/* Check how many vpaths are available */
 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
@@ -4193,7 +4200,7 @@
 
 	/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
 	if (is_sriov(function_mode) && (max_config_dev > 1) &&
-		(ll_config.intr_type != INTA) &&
+		(ll_config->intr_type != INTA) &&
 		(is_privileged == VXGE_HW_OK)) {
 		ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
 			? (max_config_dev - 1) : num_vfs);
@@ -4206,7 +4213,7 @@
 	 * Configure vpaths and get driver configured number of vpaths
 	 * which is less than or equal to the maximum vpaths per function.
 	 */
-	no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config);
+	no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
 	if (!no_of_vpath) {
 		vxge_debug_ll_config(VXGE_ERR,
 			"%s: No more vpaths to configure", VXGE_DRIVER_NAME);
@@ -4241,21 +4248,21 @@
 	/* set private device info */
 	pci_set_drvdata(pdev, hldev);
 
-	ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
-	ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
-	ll_config.addr_learn_en = addr_learn_en;
-	ll_config.rth_algorithm = RTH_ALG_JENKINS;
-	ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
-	ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
-	ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
-	ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
-	ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
-	ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
-	ll_config.rth_bkt_sz = RTH_BUCKET_SIZE;
-	ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
-	ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
+	ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
+	ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
+	ll_config->addr_learn_en = addr_learn_en;
+	ll_config->rth_algorithm = RTH_ALG_JENKINS;
+	ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
+	ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
+	ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
+	ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
+	ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
+	ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
+	ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
+	ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
+	ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
 
-	if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath,
+	if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
 		&vdev)) {
 		ret = -EINVAL;
 		goto _exit4;
@@ -4286,7 +4293,7 @@
 		vdev->vpaths[j].vdev = vdev;
 		vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
 		memcpy((u8 *)vdev->vpaths[j].macaddr,
-				(u8 *)ll_config.device_hw_info.mac_addrs[i],
+				ll_config->device_hw_info.mac_addrs[i],
 				ETH_ALEN);
 
 		/* Initialize the mac address list header */
@@ -4307,18 +4314,18 @@
 
 	macaddr = (u8 *)vdev->vpaths[0].macaddr;
 
-	ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
-	ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
-	ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
+	ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
+	ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
+	ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
 
 	vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
-		vdev->ndev->name, ll_config.device_hw_info.serial_number);
+		vdev->ndev->name, ll_config->device_hw_info.serial_number);
 
 	vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
-		vdev->ndev->name, ll_config.device_hw_info.part_number);
+		vdev->ndev->name, ll_config->device_hw_info.part_number);
 
 	vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
-		vdev->ndev->name, ll_config.device_hw_info.product_desc);
+		vdev->ndev->name, ll_config->device_hw_info.product_desc);
 
 	vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
 		vdev->ndev->name, macaddr);
@@ -4328,11 +4335,11 @@
 
 	vxge_debug_init(VXGE_TRACE,
 		"%s: Firmware version : %s Date : %s", vdev->ndev->name,
-		ll_config.device_hw_info.fw_version.version,
-		ll_config.device_hw_info.fw_date.date);
+		ll_config->device_hw_info.fw_version.version,
+		ll_config->device_hw_info.fw_date.date);
 
 	if (new_device) {
-		switch (ll_config.device_hw_info.function_mode) {
+		switch (ll_config->device_hw_info.function_mode) {
 		case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
 			vxge_debug_init(VXGE_TRACE,
 			"%s: Single Function Mode Enabled", vdev->ndev->name);
@@ -4355,7 +4362,7 @@
 	vxge_print_parm(vdev, vpath_mask);
 
 	/* Store the fw version for ethttool option */
-	strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
+	strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
 	memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
 	memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
 
@@ -4394,7 +4401,7 @@
 	 * present to prevent such a failure.
 	 */
 
-	if (ll_config.device_hw_info.function_mode ==
+	if (ll_config->device_hw_info.function_mode ==
 		VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
 		if (vdev->config.intr_type == INTA)
 			vxge_hw_device_unmask_all(hldev);
@@ -4406,6 +4413,7 @@
 	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
 		vxge_hw_device_trace_level_get(hldev));
 
+	kfree(ll_config);
 	return 0;
 
 _exit5:
@@ -4423,6 +4431,7 @@
 _exit1:
 	pci_disable_device(pdev);
 _exit0:
+	kfree(ll_config);
 	kfree(device_config);
 	driver_config->config_dev_cnt--;
 	pci_set_drvdata(pdev, NULL);
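The vxge hunks above replace a large on-stack struct vxge_config with a kzalloc()'d copy, the standard cure for probe-time stack overruns: the allocation is already zeroed (so the old memset goes away) and is freed on the success path and on every error path. A condensed, hypothetical sketch of that shape, assuming linux/slab.h and linux/pci.h (the foo_* names are invented):

/* Sketch of the heap-allocated-config pattern; foo_* names are invented. */
static int foo_probe(struct pci_dev *pdev)
{
	struct foo_config *cfg;
	int ret;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);	/* zeroed, off the stack */
	if (!cfg)
		return -ENOMEM;

	cfg->intr_type = 1;				/* example defaults */

	ret = foo_hw_init(pdev, cfg);			/* placeholder helper */
	if (ret)
		goto out;				/* single exit frees cfg */

	/* ... rest of probe uses cfg ... */
out:
	kfree(cfg);		/* freed on success and on error alike */
	return ret;
}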
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index e087b9a..43b7727 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2038,16 +2038,10 @@
 
 		/* Now copy the data to the card. */
 
-		buf = kmalloc(wrthdr.size, GFP_KERNEL);
-		if (!buf)
-			return -ENOMEM;
-
-		if (copy_from_user(buf,
-				   ifr->ifr_data + sizeof (struct fstioc_write),
-				   wrthdr.size)) {
-			kfree(buf);
-			return -EFAULT;
-		}
+		buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write),
+				  wrthdr.size);
+		if (IS_ERR(buf))
+			return PTR_ERR(buf);
 
 		memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size);
 		kfree(buf);
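This farsync hunk, and the sdla one further down, swap the open-coded kmalloc() + copy_from_user() + error-unwind sequence for memdup_user(), which allocates, copies and reports failure in one call and returns an ERR_PTR on error. A minimal sketch of the calling convention (the wrapper function and do_something_with() are hypothetical stand-ins):

/* Sketch of the memdup_user() calling convention; do_something_with() is a
 * stand-in for whatever the caller actually does with the copy. */
static int copy_blob_from_user(const void __user *uptr, size_t len)
{
	void *buf;

	buf = memdup_user(uptr, len);	/* kmalloc + copy_from_user in one step */
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* typically -ENOMEM or -EFAULT */

	do_something_with(buf, len);

	kfree(buf);			/* caller still owns and frees the copy */
	return 0;
}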
diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h
index 3839662..e4f539a 100644
--- a/drivers/net/wan/hd64570.h
+++ b/drivers/net/wan/hd64570.h
@@ -153,7 +153,7 @@
 	u16 len;		/* Data Length */
 	u8 stat;		/* Status */
 	u8 unused;		/* pads to 2-byte boundary */
-}__attribute__ ((packed)) pkt_desc;
+}__packed pkt_desc;
 
 
 /* Packet Descriptor Status bits */
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index ee7083f..b38ffa1 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -36,7 +36,7 @@
 	u8 address;
 	u8 control;
 	__be16 protocol;
-}__attribute__ ((packed));
+}__packed;
 
 
 struct cisco_packet {
@@ -45,7 +45,7 @@
 	__be32 par2;
 	__be16 rel;		/* reliability */
 	__be32 time;
-}__attribute__ ((packed));
+}__packed;
 #define	CISCO_PACKET_LEN	18
 #define	CISCO_BIG_PACKET_LEN	20
 
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 0e52993..0edb535 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -112,7 +112,7 @@
 	unsigned de:	1;
 	unsigned ea2:	1;
 #endif
-}__attribute__ ((packed)) fr_hdr;
+}__packed fr_hdr;
 
 
 typedef struct pvc_device_struct {
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 43ae6f4..f4125da 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -330,7 +330,7 @@
 {
 	short dlci;
 	char  flags;
-} __attribute__((packed));
+} __packed;
 
 struct _frad_stat 
 {
@@ -1211,14 +1211,9 @@
 	}
 	else
 	{
-		temp = kmalloc(mem.len, GFP_KERNEL);
-		if (!temp)
-			return(-ENOMEM);
-		if(copy_from_user(temp, mem.data, mem.len))
-		{
-			kfree(temp);
-			return -EFAULT;
-		}
+		temp = memdup_user(mem.data, mem.len);
+		if (IS_ERR(temp))
+			return PTR_ERR(temp);
 		sdla_write(dev, mem.addr, temp, mem.len);
 		kfree(temp);
 	}
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index d86e8f3..2f725d0 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -848,7 +848,7 @@
 	struct i2400m_l3l4_hdr hdr;
 	struct i2400m_tlv_hdr tlv;
 	__le32 val;
-} __attribute__((packed));
+} __packed;
 
 
 /*
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 1149135..8b55a5b 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -651,7 +651,7 @@
 	struct {
 		struct i2400m_bootrom_header cmd;
 		u8 cmd_payload[chunk_len];
-	} __attribute__((packed)) *buf;
+	} __packed *buf;
 	struct i2400m_bootrom_header ack;
 
 	d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
@@ -794,7 +794,7 @@
 	struct {
 		struct i2400m_bootrom_header cmd;
 		u8 cmd_pl[0];
-	} __attribute__((packed)) *cmd_buf;
+	} __packed *cmd_buf;
 	size_t signature_block_offset, signature_block_size;
 
 	d_fnstart(3, dev, "offset %zu\n", offset);
@@ -1029,7 +1029,7 @@
 	struct {
 		struct i2400m_bootrom_header ack;
 		u8 ack_pl[16];
-	} __attribute__((packed)) ack_buf;
+	} __packed ack_buf;
 
 	d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
 	cmd = i2400m->bm_cmd_buf;
@@ -1115,7 +1115,7 @@
 	struct {
 		struct i2400m_bootrom_header cmd;
 		struct i2400m_bcf_hdr cmd_pl;
-	} __attribute__((packed)) *cmd_buf;
+	} __packed *cmd_buf;
 	struct i2400m_bootrom_header ack;
 
 	d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr);
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 035e4cf..9e02b90 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -91,7 +91,7 @@
 	struct {
 		struct i2400m_l3l4_hdr hdr;
 		struct i2400m_tlv_rf_operation sw_rf;
-	} __attribute__((packed)) *cmd;
+	} __packed *cmd;
 	char strerr[32];
 
 	d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state);
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h
index b07e4d3..bbc10b1 100644
--- a/drivers/net/wireless/adm8211.h
+++ b/drivers/net/wireless/adm8211.h
@@ -80,7 +80,7 @@
 	__le32 FEMR;		/* 0x104 */
 	__le32 FPSR;		/* 0x108 */
 	__le32 FFER;		/* 0x10C */
-} __attribute__ ((packed));
+} __packed;
 
 /* CSR0 - PAR (PCI Address Register) */
 #define ADM8211_PAR_MWIE	(1 << 24)
@@ -484,7 +484,7 @@
 	u8 entry_control;	// huh??
 	u16 reserved_1;
 	u32 reserved_2;
-} __attribute__ ((packed));
+} __packed;
 
 
 #define RX_COPY_BREAK 128
@@ -531,7 +531,7 @@
 	u8	lnags_threshold[14];	/* 0x70 */
 	__le16	checksum;		/* 0x7E */
 	u8	cis_data[0];		/* 0x80, 384 bytes */
-} __attribute__ ((packed));
+} __packed;
 
 struct adm8211_priv {
 	struct pci_dev *pdev;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 3b7ab20..6b605df 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -506,20 +506,20 @@
 	u8 mac[ETH_ALEN];
 	__le16 klen;
 	u8 key[16];
-} __attribute__ ((packed));
+} __packed;
 
 /* These structures are from the Aironet's PC4500 Developers Manual */
 typedef struct Ssid Ssid;
 struct Ssid {
 	__le16 len;
 	u8 ssid[32];
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct SsidRid SsidRid;
 struct SsidRid {
 	__le16 len;
 	Ssid ssids[3];
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct ModulationRid ModulationRid;
 struct ModulationRid {
@@ -528,7 +528,7 @@
 #define MOD_DEFAULT cpu_to_le16(0)
 #define MOD_CCK cpu_to_le16(1)
 #define MOD_MOK cpu_to_le16(2)
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct ConfigRid ConfigRid;
 struct ConfigRid {
@@ -652,7 +652,7 @@
 #define MAGIC_STAY_IN_CAM (1<<10)
 	u8 magicControl;
 	__le16 autoWake;
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct StatusRid StatusRid;
 struct StatusRid {
@@ -711,20 +711,20 @@
 #define STAT_LEAPFAILED 91
 #define STAT_LEAPTIMEDOUT 92
 #define STAT_LEAPCOMPLETE 93
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct StatsRid StatsRid;
 struct StatsRid {
 	__le16 len;
 	__le16 spacer;
 	__le32 vals[100];
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct APListRid APListRid;
 struct APListRid {
 	__le16 len;
 	u8 ap[4][ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct CapabilityRid CapabilityRid;
 struct CapabilityRid {
@@ -754,7 +754,7 @@
 	__le16 bootBlockVer;
 	__le16 requiredHard;
 	__le16 extSoftCap;
-} __attribute__ ((packed));
+} __packed;
 
 /* Only present on firmware >= 5.30.17 */
 typedef struct BSSListRidExtra BSSListRidExtra;
@@ -762,7 +762,7 @@
   __le16 unknown[4];
   u8 fixed[12]; /* WLAN management frame */
   u8 iep[624];
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct BSSListRid BSSListRid;
 struct BSSListRid {
@@ -796,7 +796,7 @@
 
   /* Only present on firmware >= 5.30.17 */
   BSSListRidExtra extra;
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct {
   BSSListRid bss;
@@ -807,13 +807,13 @@
 struct tdsRssiEntry {
   u8 rssipct;
   u8 rssidBm;
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct tdsRssiRid tdsRssiRid;
 struct tdsRssiRid {
   u16 len;
   tdsRssiEntry x[256];
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct MICRid MICRid;
 struct MICRid {
@@ -823,7 +823,7 @@
 	u8  multicast[16];
 	__le16 unicastValid;
 	u8  unicast[16];
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct MICBuffer MICBuffer;
 struct MICBuffer {
@@ -841,7 +841,7 @@
 	} u;
 	__be32 mic;
 	__be32 seq;
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct {
 	u8 da[ETH_ALEN];
@@ -996,7 +996,7 @@
 	u8 rate;
 	u8 freq;
 	__le16 tmp[4];
-} __attribute__ ((packed));
+} __packed;
 
 typedef struct {
 	unsigned int  ctl: 15;
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 8a2d4af..cd8caea 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -7,6 +7,7 @@
  * Copyright (c) 2004 Balint Seeber <n0_5p4m_p13453@hotmail.com>
  * Copyright (c) 2007 Guido Guenther <agx@sigxcpu.org>
  * Copyright (c) 2007 Kalle Valo <kalle.valo@iki.fi>
+ * Copyright (c) 2010 Sebastian Smolorz <sesmo@gmx.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -305,7 +306,7 @@
 	unsigned char poll_timeout[3];
 	unsigned char state;
 	unsigned char string;
-} __attribute__((packed));
+} __packed;
 
 static inline int at76_is_intersil(enum board_type board)
 {
@@ -1649,6 +1650,58 @@
 		return NULL;
 }
 
+static int at76_join(struct at76_priv *priv)
+{
+	struct at76_req_join join;
+	int ret;
+
+	memset(&join, 0, sizeof(struct at76_req_join));
+	memcpy(join.essid, priv->essid, priv->essid_size);
+	join.essid_size = priv->essid_size;
+	memcpy(join.bssid, priv->bssid, ETH_ALEN);
+	join.bss_type = INFRASTRUCTURE_MODE;
+	join.channel = priv->channel;
+	join.timeout = cpu_to_le16(2000);
+
+	at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__);
+	ret = at76_set_card_command(priv->udev, CMD_JOIN, &join,
+				    sizeof(struct at76_req_join));
+
+	if (ret < 0) {
+		printk(KERN_ERR "%s: at76_set_card_command failed: %d\n",
+		       wiphy_name(priv->hw->wiphy), ret);
+		return 0;
+	}
+
+	ret = at76_wait_completion(priv, CMD_JOIN);
+	at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret);
+	if (ret != CMD_STATUS_COMPLETE) {
+		printk(KERN_ERR "%s: at76_wait_completion failed: %d\n",
+		       wiphy_name(priv->hw->wiphy), ret);
+		return 0;
+	}
+
+	at76_set_pm_mode(priv);
+
+	return 0;
+}
+
+static void at76_work_join_bssid(struct work_struct *work)
+{
+	struct at76_priv *priv = container_of(work, struct at76_priv,
+					      work_join_bssid);
+
+	if (priv->device_unplugged)
+		return;
+
+	mutex_lock(&priv->mtx);
+
+	if (is_valid_ether_addr(priv->bssid))
+		at76_join(priv);
+
+	mutex_unlock(&priv->mtx);
+}
+
 static void at76_mac80211_tx_callback(struct urb *urb)
 {
 	struct at76_priv *priv = urb->context;
@@ -1686,6 +1739,7 @@
 	struct at76_priv *priv = hw->priv;
 	struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
 	int padding, submit_len, ret;
 
 	at76_dbg(DBG_MAC80211, "%s()", __func__);
@@ -1696,6 +1750,21 @@
 		return NETDEV_TX_BUSY;
 	}
 
+	/* The following code lines are important when the device is going to
+	 * authenticate with a new bssid. The driver must send CMD_JOIN before
+	 * an authentication frame is transmitted. For this to succeed, the
+	 * correct bssid of the AP must be known. As mac80211 does not inform
+	 * drivers about the bssid prior to the authentication process, the
+	 * following workaround is necessary: if the TX frame is an
+	 * authentication frame, extract the bssid and send the CMD_JOIN. */
+	if (mgmt->frame_control & cpu_to_le16(IEEE80211_STYPE_AUTH)) {
+		if (compare_ether_addr(priv->bssid, mgmt->bssid)) {
+			memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
+			ieee80211_queue_work(hw, &priv->work_join_bssid);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
 	ieee80211_stop_queues(hw);
 
 	at76_ledtrig_tx_activity();	/* tell ledtrigger we send a packet */
@@ -1770,6 +1839,7 @@
 	at76_dbg(DBG_MAC80211, "%s()", __func__);
 
 	cancel_delayed_work(&priv->dwork_hw_scan);
+	cancel_work_sync(&priv->work_join_bssid);
 	cancel_work_sync(&priv->work_set_promisc);
 
 	mutex_lock(&priv->mtx);
@@ -1818,42 +1888,6 @@
 	at76_dbg(DBG_MAC80211, "%s()", __func__);
 }
 
-static int at76_join(struct at76_priv *priv)
-{
-	struct at76_req_join join;
-	int ret;
-
-	memset(&join, 0, sizeof(struct at76_req_join));
-	memcpy(join.essid, priv->essid, priv->essid_size);
-	join.essid_size = priv->essid_size;
-	memcpy(join.bssid, priv->bssid, ETH_ALEN);
-	join.bss_type = INFRASTRUCTURE_MODE;
-	join.channel = priv->channel;
-	join.timeout = cpu_to_le16(2000);
-
-	at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__);
-	ret = at76_set_card_command(priv->udev, CMD_JOIN, &join,
-				    sizeof(struct at76_req_join));
-
-	if (ret < 0) {
-		printk(KERN_ERR "%s: at76_set_card_command failed: %d\n",
-		       wiphy_name(priv->hw->wiphy), ret);
-		return 0;
-	}
-
-	ret = at76_wait_completion(priv, CMD_JOIN);
-	at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret);
-	if (ret != CMD_STATUS_COMPLETE) {
-		printk(KERN_ERR "%s: at76_wait_completion failed: %d\n",
-		       wiphy_name(priv->hw->wiphy), ret);
-		return 0;
-	}
-
-	at76_set_pm_mode(priv);
-
-	return 0;
-}
-
 static void at76_dwork_hw_scan(struct work_struct *work)
 {
 	struct at76_priv *priv = container_of(work, struct at76_priv,
@@ -2107,6 +2141,7 @@
 	mutex_init(&priv->mtx);
 	INIT_WORK(&priv->work_set_promisc, at76_work_set_promisc);
 	INIT_WORK(&priv->work_submit_rx, at76_work_submit_rx);
+	INIT_WORK(&priv->work_join_bssid, at76_work_join_bssid);
 	INIT_DELAYED_WORK(&priv->dwork_hw_scan, at76_dwork_hw_scan);
 
 	tasklet_init(&priv->rx_tasklet, at76_rx_tasklet, 0);
@@ -2508,5 +2543,6 @@
 MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>");
 MODULE_AUTHOR("Guido Guenther <agx@sigxcpu.org>");
 MODULE_AUTHOR("Kalle Valo <kalle.valo@iki.fi>");
+MODULE_AUTHOR("Sebastian Smolorz <sesmo@gmx.net>");
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index 1ec5ccf..4a37447 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -99,7 +99,7 @@
 	u8 reserved2[14];
 	u8 cr15_values[14];
 	u8 reserved3[3];
-} __attribute__((packed));
+} __packed;
 
 struct hwcfg_rfmd {
 	u8 cr20_values[14];
@@ -111,7 +111,7 @@
 	u8 low_power_values[14];
 	u8 normal_power_values[14];
 	u8 reserved1[3];
-} __attribute__((packed));
+} __packed;
 
 struct hwcfg_intersil {
 	u8 mac_addr[ETH_ALEN];
@@ -120,7 +120,7 @@
 	u8 pidvid[4];
 	u8 regulatory_domain;
 	u8 reserved[1];
-} __attribute__((packed));
+} __packed;
 
 union at76_hwcfg {
 	struct hwcfg_intersil i;
@@ -149,14 +149,14 @@
 	u8 ssid_len;
 	u8 short_preamble;
 	__le16 beacon_period;
-} __attribute__((packed));
+} __packed;
 
 struct at76_command {
 	u8 cmd;
 	u8 reserved;
 	__le16 size;
 	u8 data[0];
-} __attribute__((packed));
+} __packed;
 
 /* Length of Atmel-specific Rx header before 802.11 frame */
 #define AT76_RX_HDRLEN offsetof(struct at76_rx_buffer, packet)
@@ -171,7 +171,7 @@
 	u8 noise_level;
 	__le32 rx_time;
 	u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
-} __attribute__((packed));
+} __packed;
 
 /* Length of Atmel-specific Tx header before 802.11 frame */
 #define AT76_TX_HDRLEN offsetof(struct at76_tx_buffer, packet)
@@ -182,7 +182,7 @@
 	u8 padding;
 	u8 reserved[4];
 	u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
-} __attribute__((packed));
+} __packed;
 
 /* defines for scan_type below */
 #define SCAN_TYPE_ACTIVE	0
@@ -198,7 +198,7 @@
 	__le16 max_channel_time;
 	u8 essid_size;
 	u8 international_scan;
-} __attribute__((packed));
+} __packed;
 
 struct at76_req_ibss {
 	u8 bssid[ETH_ALEN];
@@ -207,7 +207,7 @@
 	u8 channel;
 	u8 essid_size;
 	u8 reserved[3];
-} __attribute__((packed));
+} __packed;
 
 struct at76_req_join {
 	u8 bssid[ETH_ALEN];
@@ -217,7 +217,7 @@
 	__le16 timeout;
 	u8 essid_size;
 	u8 reserved;
-} __attribute__((packed));
+} __packed;
 
 struct set_mib_buffer {
 	u8 type;
@@ -229,7 +229,7 @@
 		__le16 word;
 		u8 addr[ETH_ALEN];
 	} data;
-} __attribute__((packed));
+} __packed;
 
 struct mib_local {
 	u16 reserved0;
@@ -241,14 +241,14 @@
 	u16 reserved2;
 	u8 preamble_type;
 	u16 reserved3;
-} __attribute__((packed));
+} __packed;
 
 struct mib_mac_addr {
 	u8 mac_addr[ETH_ALEN];
 	u8 res[2];		/* ??? */
 	u8 group_addr[4][ETH_ALEN];
 	u8 group_addr_status[4];
-} __attribute__((packed));
+} __packed;
 
 struct mib_mac {
 	__le32 max_tx_msdu_lifetime;
@@ -269,7 +269,7 @@
 	u8 desired_bssid[ETH_ALEN];
 	u8 desired_bsstype;	/* ad-hoc or infrastructure */
 	u8 reserved2;
-} __attribute__((packed));
+} __packed;
 
 struct mib_mac_mgmt {
 	__le16 beacon_period;
@@ -292,7 +292,7 @@
 	u8 multi_domain_capability_enabled;
 	u8 country_string[3];
 	u8 reserved[3];
-} __attribute__((packed));
+} __packed;
 
 struct mib_mac_wep {
 	u8 privacy_invoked;	/* 0 disable encr., 1 enable encr */
@@ -303,7 +303,7 @@
 	__le32 wep_excluded_count;
 	u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN];
 	u8 encryption_level;	/* 1 for 40bit, 2 for 104bit encryption */
-} __attribute__((packed));
+} __packed;
 
 struct mib_phy {
 	__le32 ed_threshold;
@@ -320,19 +320,19 @@
 	u8 current_cca_mode;
 	u8 phy_type;
 	u8 current_reg_domain;
-} __attribute__((packed));
+} __packed;
 
 struct mib_fw_version {
 	u8 major;
 	u8 minor;
 	u8 patch;
 	u8 build;
-} __attribute__((packed));
+} __packed;
 
 struct mib_mdomain {
 	u8 tx_powerlevel[14];
 	u8 channel_list[14];	/* 0 for invalid channels */
-} __attribute__((packed));
+} __packed;
 
 struct at76_fw_header {
 	__le32 crc;		/* CRC32 of the whole image */
@@ -346,7 +346,7 @@
 	__le32 int_fw_len;	/* internal firmware image length */
 	__le32 ext_fw_offset;	/* external firmware image offset */
 	__le32 ext_fw_len;	/* external firmware image length */
-} __attribute__((packed));
+} __packed;
 
 /* a description of a regulatory domain and the allowed channels */
 struct reg_domain {
@@ -387,6 +387,7 @@
 	/* work queues */
 	struct work_struct work_set_promisc;
 	struct work_struct work_submit_rx;
+	struct work_struct work_join_bssid;
 	struct delayed_work dwork_hw_scan;
 
 	struct tasklet_struct rx_tasklet;
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index cc09595..2242a14 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -13,5 +13,6 @@
 ath5k-y				+= led.o
 ath5k-y				+= rfkill.o
 ath5k-y				+= ani.o
+ath5k-y				+= sysfs.o
 ath5k-$(CONFIG_ATH5K_DEBUG)	+= debug.o
 obj-$(CONFIG_ATH5K)		+= ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index f2311ab..26dbe65 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -74,8 +74,8 @@
 	const s8 fr[] = { -78, -80 };
 #endif
 	if (level < 0 || level >= ARRAY_SIZE(sz)) {
-		ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
-			"level out of range %d", level);
+		ATH5K_ERR(ah->ah_sc, "noise immuniy level %d out of range",
+			  level);
 		return;
 	}
 
@@ -106,8 +106,8 @@
 
 	if (level < 0 || level >= ARRAY_SIZE(val) ||
 	    level > ah->ah_sc->ani_state.max_spur_level) {
-		ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
-			"level out of range %d", level);
+		ATH5K_ERR(ah->ah_sc, "spur immunity level %d out of range",
+			  level);
 		return;
 	}
 
@@ -130,8 +130,7 @@
 	const int val[] = { 0, 4, 8 };
 
 	if (level < 0 || level >= ARRAY_SIZE(val)) {
-		ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
-			"level out of range %d", level);
+		ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level);
 		return;
 	}
 
@@ -481,14 +480,15 @@
 	struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
 	int listen, ofdm_high, ofdm_low, cck_high, cck_low;
 
-	if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
-		return;
-
 	/* get listen time since last call and add it to the counter because we
-	 * might not have restarted the "ani period" last time */
+	 * might not have restarted the "ani period" last time.
+	 * Always do this so the busy time is also calculated in manual mode. */
 	listen = ath5k_hw_ani_get_listen_time(ah, as);
 	as->listen_time += listen;
 
+	if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
+		return;
+
 	ath5k_ani_save_and_clear_phy_errors(ah, as);
 
 	ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 2785946..ea6362a 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -204,6 +204,7 @@
 #define AR5K_TUNE_TPC_TXPOWER			false
 #define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL    10000   /* 10 sec */
 #define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI	1000	/* 1 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF	60000	/* 60 sec */
 
 #define AR5K_INIT_CARR_SENSE_EN			1
 
@@ -565,7 +566,7 @@
 )
 
 /*
- * DMA size definitions (2^n+2)
+ * DMA size definitions (2^(n+2))
  */
 enum ath5k_dmasize {
 	AR5K_DMASIZE_4B	= 0,
@@ -1118,6 +1119,7 @@
 	/* Calibration timestamp */
 	unsigned long		ah_cal_next_full;
 	unsigned long		ah_cal_next_ani;
+	unsigned long		ah_cal_next_nf;
 
 	/* Calibration mask */
 	u8			ah_cal_mask;
@@ -1125,15 +1127,10 @@
 	/*
 	 * Function pointers
 	 */
-	int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
-				u32 size, unsigned int flags);
 	int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
 		unsigned int, unsigned int, int, enum ath5k_pkt_type,
 		unsigned int, unsigned int, unsigned int, unsigned int,
 		unsigned int, unsigned int, unsigned int, unsigned int);
-	int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
-		unsigned int, unsigned int, unsigned int, unsigned int,
-		unsigned int, unsigned int);
 	int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
 		struct ath5k_tx_status *);
 	int (*ah_proc_rx_desc)(struct ath5k_hw *, struct ath5k_desc *,
@@ -1148,6 +1145,9 @@
 int ath5k_hw_attach(struct ath5k_softc *sc);
 void ath5k_hw_detach(struct ath5k_hw *ah);
 
+int ath5k_sysfs_register(struct ath5k_softc *sc);
+void ath5k_sysfs_unregister(struct ath5k_softc *sc);
+
 /* LED functions */
 int ath5k_init_leds(struct ath5k_softc *sc);
 void ath5k_led_enable(struct ath5k_softc *sc);
@@ -1231,6 +1231,11 @@
 
 /* Hardware Descriptor Functions */
 int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
+int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+			   u32 size, unsigned int flags);
+int ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+	unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
+	u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3);
 
 /* GPIO Functions */
 void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
@@ -1270,6 +1275,7 @@
 void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
 int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
 			   struct ieee80211_channel *channel);
+void ath5k_hw_update_noise_floor(struct ath5k_hw *ah);
 /* Spur mitigation */
 bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
 				  struct ieee80211_channel *channel);
@@ -1280,6 +1286,7 @@
 int ath5k_hw_phy_disable(struct ath5k_hw *ah);
 /* Antenna control */
 void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
+void ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode);
 /* TX power setup */
 int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 		     u8 ee_mode, u8 txpower);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 31c0080..b32e28c 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -352,8 +352,6 @@
  */
 void ath5k_hw_detach(struct ath5k_hw *ah)
 {
-	ATH5K_TRACE(ah->ah_sc);
-
 	__set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
 
 	if (ah->ah_rf_banks != NULL)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 648972d..20328bd 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -311,7 +311,8 @@
 static int 	ath5k_txbuf_setup(struct ath5k_softc *sc,
 				struct ath5k_buf *bf,
 				struct ath5k_txq *txq, int padsize);
-static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
+
+static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
 				struct ath5k_buf *bf)
 {
 	BUG_ON(!bf);
@@ -321,9 +322,11 @@
 			PCI_DMA_TODEVICE);
 	dev_kfree_skb_any(bf->skb);
 	bf->skb = NULL;
+	bf->skbaddr = 0;
+	bf->desc->ds_data = 0;
 }
 
-static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
+static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
 				struct ath5k_buf *bf)
 {
 	struct ath5k_hw *ah = sc->ah;
@@ -336,6 +339,8 @@
 			PCI_DMA_FROMDEVICE);
 	dev_kfree_skb_any(bf->skb);
 	bf->skb = NULL;
+	bf->skbaddr = 0;
+	bf->desc->ds_data = 0;
 }
 
 
@@ -352,7 +357,6 @@
 static int 	ath5k_rx_start(struct ath5k_softc *sc);
 static void 	ath5k_rx_stop(struct ath5k_softc *sc);
 static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc,
-					struct ath5k_desc *ds,
 					struct sk_buff *skb,
 					struct ath5k_rx_status *rs);
 static void 	ath5k_tasklet_rx(unsigned long data);
@@ -578,7 +582,7 @@
 	spin_lock_init(&sc->block);
 
 	/* Set private data */
-	pci_set_drvdata(pdev, hw);
+	pci_set_drvdata(pdev, sc);
 
 	/* Setup interrupt handler */
 	ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
@@ -694,25 +698,23 @@
 static void __devexit
 ath5k_pci_remove(struct pci_dev *pdev)
 {
-	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-	struct ath5k_softc *sc = hw->priv;
+	struct ath5k_softc *sc = pci_get_drvdata(pdev);
 
 	ath5k_debug_finish_device(sc);
-	ath5k_detach(pdev, hw);
+	ath5k_detach(pdev, sc->hw);
 	ath5k_hw_detach(sc->ah);
 	kfree(sc->ah);
 	free_irq(pdev->irq, sc);
 	pci_iounmap(pdev, sc->iobase);
 	pci_release_region(pdev, 0);
 	pci_disable_device(pdev);
-	ieee80211_free_hw(hw);
+	ieee80211_free_hw(sc->hw);
 }
 
 #ifdef CONFIG_PM_SLEEP
 static int ath5k_pci_suspend(struct device *dev)
 {
-	struct ieee80211_hw *hw = pci_get_drvdata(to_pci_dev(dev));
-	struct ath5k_softc *sc = hw->priv;
+	struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
 
 	ath5k_led_off(sc);
 	return 0;
@@ -721,8 +723,7 @@
 static int ath5k_pci_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-	struct ath5k_softc *sc = hw->priv;
+	struct ath5k_softc *sc = pci_get_drvdata(pdev);
 
 	/*
 	 * Suspend/Resume resets the PCI configuration space, so we have to
@@ -768,7 +769,8 @@
 	 * return false w/o doing anything.  MAC's that do
 	 * support it will return true w/o doing anything.
 	 */
-	ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+
 	if (ret < 0)
 		goto err;
 	if (ret > 0)
@@ -864,6 +866,8 @@
 
 	ath5k_init_leds(sc);
 
+	ath5k_sysfs_register(sc);
+
 	return 0;
 err_queues:
 	ath5k_txq_release(sc);
@@ -899,6 +903,7 @@
 	ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
 	ath5k_unregister_leds(sc);
 
+	ath5k_sysfs_unregister(sc);
 	/*
 	 * NB: can't reclaim these until after ieee80211_ifdetach
 	 * returns because we'll get called back to reclaim node
@@ -1111,8 +1116,9 @@
 static int
 ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
 {
-	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n",
-		sc->curchan->center_freq, chan->center_freq);
+	ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+		  "channel set, resetting (%u -> %u MHz)\n",
+		  sc->curchan->center_freq, chan->center_freq);
 
 	/*
 	 * To switch channels clear any pending DMA operations;
@@ -1228,21 +1234,23 @@
 	 * not get overrun under high load (as can happen with a
 	 * 5212 when ANI processing enables PHY error frames).
 	 *
-	 * To insure the last descriptor is self-linked we create
+	 * To ensure the last descriptor is self-linked we create
 	 * each descriptor as self-linked and add it to the end.  As
 	 * each additional descriptor is added the previous self-linked
-	 * entry is ``fixed'' naturally.  This should be safe even
+	 * entry is "fixed" naturally.  This should be safe even
 	 * if DMA is happening.  When processing RX interrupts we
 	 * never remove/process the last, self-linked, entry on the
-	 * descriptor list.  This insures the hardware always has
+	 * descriptor list.  This ensures the hardware always has
 	 * someplace to write a new frame.
 	 */
 	ds = bf->desc;
 	ds->ds_link = bf->daddr;	/* link to self */
 	ds->ds_data = bf->skbaddr;
-	ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
-	if (ret)
+	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
+	if (ret) {
+		ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
 		return ret;
+	}
 
 	if (sc->rxlink != NULL)
 		*sc->rxlink = bf->daddr;
@@ -1347,7 +1355,7 @@
 		mrr_tries[i] = info->control.rates[i + 1].count;
 	}
 
-	ah->ah_setup_mrr_tx_desc(ah, ds,
+	ath5k_hw_setup_mrr_tx_desc(ah, ds,
 		mrr_rate[0], mrr_tries[0],
 		mrr_rate[1], mrr_tries[1],
 		mrr_rate[2], mrr_tries[2]);
@@ -1443,17 +1451,20 @@
 {
 	struct ath5k_buf *bf;
 
-	ath5k_txbuf_free(sc, sc->bbuf);
+	ath5k_txbuf_free_skb(sc, sc->bbuf);
 	list_for_each_entry(bf, &sc->txbuf, list)
-		ath5k_txbuf_free(sc, bf);
+		ath5k_txbuf_free_skb(sc, bf);
 	list_for_each_entry(bf, &sc->rxbuf, list)
-		ath5k_rxbuf_free(sc, bf);
+		ath5k_rxbuf_free_skb(sc, bf);
 
 	/* Free memory associated with all descriptors */
 	pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
+	sc->desc = NULL;
+	sc->desc_daddr = 0;
 
 	kfree(sc->bufptr);
 	sc->bufptr = NULL;
+	sc->bbuf = NULL;
 }
 
 
@@ -1602,7 +1613,7 @@
 	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
 		ath5k_debug_printtxbuf(sc, bf);
 
-		ath5k_txbuf_free(sc, bf);
+		ath5k_txbuf_free_skb(sc, bf);
 
 		spin_lock_bh(&sc->txbuflock);
 		list_move_tail(&bf->list, &sc->txbuf);
@@ -1721,8 +1732,8 @@
 }
 
 static unsigned int
-ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
-		struct sk_buff *skb, struct ath5k_rx_status *rs)
+ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
+		   struct ath5k_rx_status *rs)
 {
 	struct ath5k_hw *ah = sc->ah;
 	struct ath_common *common = ath5k_hw_common(ah);
@@ -1889,9 +1900,138 @@
 }
 
 static void
-ath5k_tasklet_rx(unsigned long data)
+ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
+		    struct ath5k_rx_status *rs)
 {
 	struct ieee80211_rx_status *rxs;
+
+	/* The MAC header is padded to have 32-bit boundary if the
+	 * packet payload is non-zero. The general calculation for
+	 * padsize would take into account odd header lengths:
+	 * padsize = (4 - hdrlen % 4) % 4; However, since only
+	 * even-length headers are used, padding can only be 0 or 2
+	 * bytes and we can optimize this a bit. In addition, we must
+	 * not try to remove padding from short control frames that do
+	 * not have payload. */
+	ath5k_remove_padding(skb);
+
+	rxs = IEEE80211_SKB_RXCB(skb);
+
+	rxs->flag = 0;
+	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
+		rxs->flag |= RX_FLAG_MMIC_ERROR;
+
+	/*
+	 * always extend the mac timestamp, since this information is
+	 * also needed for proper IBSS merging.
+	 *
+	 * XXX: it might be too late to do it here, since rs_tstamp is
+	 * 15bit only. that means TSF extension has to be done within
+	 * 32768usec (about 32ms). it might be necessary to move this to
+	 * the interrupt handler, like it is done in madwifi.
+	 *
+	 * Unfortunately we don't know when the hardware takes the rx
+	 * timestamp (beginning of phy frame, data frame, end of rx?).
+	 * The only thing we know is that it is hardware specific...
+	 * On AR5213 it seems the rx timestamp is at the end of the
+	 * frame, but I'm not sure.
+	 *
+	 * NOTE: mac80211 defines mactime at the beginning of the first
+	 * data symbol. Since we don't have any time references it's
+	 * impossible to comply with that. This affects IBSS merge only
+	 * right now, so it's not too bad...
+	 */
+	rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
+	rxs->flag |= RX_FLAG_TSFT;
+
+	rxs->freq = sc->curchan->center_freq;
+	rxs->band = sc->curband->band;
+
+	rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
+
+	rxs->antenna = rs->rs_antenna;
+
+	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
+		sc->stats.antenna_rx[rs->rs_antenna]++;
+	else
+		sc->stats.antenna_rx[0]++; /* invalid */
+
+	rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
+	rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
+
+	if (rxs->rate_idx >= 0 && rs->rs_rate ==
+	    sc->curband->bitrates[rxs->rate_idx].hw_value_short)
+		rxs->flag |= RX_FLAG_SHORTPRE;
+
+	ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
+
+	ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
+
+	/* check beacons in IBSS mode */
+	if (sc->opmode == NL80211_IFTYPE_ADHOC)
+		ath5k_check_ibss_tsf(sc, skb, rxs);
+
+	ieee80211_rx(sc->hw, skb);
+}
+
+/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
+ *
+ * Check if we want to further process this frame or not. Also update
+ * statistics. Return true if we want this frame, false if not.
+ */
+static bool
+ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
+{
+	sc->stats.rx_all_count++;
+
+	if (unlikely(rs->rs_status)) {
+		if (rs->rs_status & AR5K_RXERR_CRC)
+			sc->stats.rxerr_crc++;
+		if (rs->rs_status & AR5K_RXERR_FIFO)
+			sc->stats.rxerr_fifo++;
+		if (rs->rs_status & AR5K_RXERR_PHY) {
+			sc->stats.rxerr_phy++;
+			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
+				sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
+			return false;
+		}
+		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
+			/*
+			 * Decrypt error.  If the error occurred
+			 * because there was no hardware key, then
+			 * let the frame through so the upper layers
+			 * can process it.  This is necessary for 5210
+			 * parts which have no way to setup a ``clear''
+			 * key cache entry.
+			 *
+			 * XXX do key cache faulting
+			 */
+			sc->stats.rxerr_decrypt++;
+			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
+			    !(rs->rs_status & AR5K_RXERR_CRC))
+				return true;
+		}
+		if (rs->rs_status & AR5K_RXERR_MIC) {
+			sc->stats.rxerr_mic++;
+			return true;
+		}
+
+		/* let crypto-error packets fall through in MNTR */
+		if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
+		    sc->opmode != NL80211_IFTYPE_MONITOR)
+			return false;
+	}
+
+	if (unlikely(rs->rs_more)) {
+		sc->stats.rxerr_jumbo++;
+		return false;
+	}
+	return true;
+}
+
+static void
+ath5k_tasklet_rx(unsigned long data)
+{
 	struct ath5k_rx_status rs = {};
 	struct sk_buff *skb, *next_skb;
 	dma_addr_t next_skb_addr;
@@ -1901,7 +2041,6 @@
 	struct ath5k_buf *bf;
 	struct ath5k_desc *ds;
 	int ret;
-	int rx_flag;
 
 	spin_lock(&sc->rxbuflock);
 	if (list_empty(&sc->rxbuf)) {
@@ -1909,8 +2048,6 @@
 		goto unlock;
 	}
 	do {
-		rx_flag = 0;
-
 		bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
 		BUG_ON(bf->skb == NULL);
 		skb = bf->skb;
@@ -1926,137 +2063,30 @@
 		else if (unlikely(ret)) {
 			ATH5K_ERR(sc, "error in processing rx descriptor\n");
 			sc->stats.rxerr_proc++;
-			spin_unlock(&sc->rxbuflock);
-			return;
+			break;
 		}
 
-		sc->stats.rx_all_count++;
+		if (ath5k_receive_frame_ok(sc, &rs)) {
+			next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
 
-		if (unlikely(rs.rs_status)) {
-			if (rs.rs_status & AR5K_RXERR_CRC)
-				sc->stats.rxerr_crc++;
-			if (rs.rs_status & AR5K_RXERR_FIFO)
-				sc->stats.rxerr_fifo++;
-			if (rs.rs_status & AR5K_RXERR_PHY) {
-				sc->stats.rxerr_phy++;
-				if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
-					sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
+			/*
+			 * If we can't replace bf->skb with a new skb under
+			 * memory pressure, just skip this packet
+			 */
+			if (!next_skb)
 				goto next;
-			}
-			if (rs.rs_status & AR5K_RXERR_DECRYPT) {
-				/*
-				 * Decrypt error.  If the error occurred
-				 * because there was no hardware key, then
-				 * let the frame through so the upper layers
-				 * can process it.  This is necessary for 5210
-				 * parts which have no way to setup a ``clear''
-				 * key cache entry.
-				 *
-				 * XXX do key cache faulting
-				 */
-				sc->stats.rxerr_decrypt++;
-				if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
-				    !(rs.rs_status & AR5K_RXERR_CRC))
-					goto accept;
-			}
-			if (rs.rs_status & AR5K_RXERR_MIC) {
-				rx_flag |= RX_FLAG_MMIC_ERROR;
-				sc->stats.rxerr_mic++;
-				goto accept;
-			}
 
-			/* let crypto-error packets fall through in MNTR */
-			if ((rs.rs_status &
-				~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
-					sc->opmode != NL80211_IFTYPE_MONITOR)
-				goto next;
+			pci_unmap_single(sc->pdev, bf->skbaddr,
+					 common->rx_bufsize,
+					 PCI_DMA_FROMDEVICE);
+
+			skb_put(skb, rs.rs_datalen);
+
+			ath5k_receive_frame(sc, skb, &rs);
+
+			bf->skb = next_skb;
+			bf->skbaddr = next_skb_addr;
 		}
-
-		if (unlikely(rs.rs_more)) {
-			sc->stats.rxerr_jumbo++;
-			goto next;
-
-		}
-accept:
-		next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
-
-		/*
-		 * If we can't replace bf->skb with a new skb under memory
-		 * pressure, just skip this packet
-		 */
-		if (!next_skb)
-			goto next;
-
-		pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
-				PCI_DMA_FROMDEVICE);
-		skb_put(skb, rs.rs_datalen);
-
-		/* The MAC header is padded to have 32-bit boundary if the
-		 * packet payload is non-zero. The general calculation for
-		 * padsize would take into account odd header lengths:
-		 * padsize = (4 - hdrlen % 4) % 4; However, since only
-		 * even-length headers are used, padding can only be 0 or 2
-		 * bytes and we can optimize this a bit. In addition, we must
-		 * not try to remove padding from short control frames that do
-		 * not have payload. */
-		ath5k_remove_padding(skb);
-
-		rxs = IEEE80211_SKB_RXCB(skb);
-
-		/*
-		 * always extend the mac timestamp, since this information is
-		 * also needed for proper IBSS merging.
-		 *
-		 * XXX: it might be too late to do it here, since rs_tstamp is
-		 * 15bit only. that means TSF extension has to be done within
-		 * 32768usec (about 32ms). it might be necessary to move this to
-		 * the interrupt handler, like it is done in madwifi.
-		 *
-		 * Unfortunately we don't know when the hardware takes the rx
-		 * timestamp (beginning of phy frame, data frame, end of rx?).
-		 * The only thing we know is that it is hardware specific...
-		 * On AR5213 it seems the rx timestamp is at the end of the
-		 * frame, but i'm not sure.
-		 *
-		 * NOTE: mac80211 defines mactime at the beginning of the first
-		 * data symbol. Since we don't have any time references it's
-		 * impossible to comply to that. This affects IBSS merge only
-		 * right now, so it's not too bad...
-		 */
-		rxs->mactime = ath5k_extend_tsf(sc->ah, rs.rs_tstamp);
-		rxs->flag = rx_flag | RX_FLAG_TSFT;
-
-		rxs->freq = sc->curchan->center_freq;
-		rxs->band = sc->curband->band;
-
-		rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
-
-		rxs->antenna = rs.rs_antenna;
-
-		if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
-			sc->stats.antenna_rx[rs.rs_antenna]++;
-		else
-			sc->stats.antenna_rx[0]++; /* invalid */
-
-		rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
-		rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
-
-		if (rxs->rate_idx >= 0 && rs.rs_rate ==
-		    sc->curband->bitrates[rxs->rate_idx].hw_value_short)
-			rxs->flag |= RX_FLAG_SHORTPRE;
-
-		ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
-
-		ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
-
-		/* check beacons in IBSS mode */
-		if (sc->opmode == NL80211_IFTYPE_ADHOC)
-			ath5k_check_ibss_tsf(sc, skb, rxs);
-
-		ieee80211_rx(sc->hw, skb);
-
-		bf->skb = next_skb;
-		bf->skbaddr = next_skb_addr;
 next:
 		list_move_tail(&bf->list, &sc->rxbuf);
 	} while (ath5k_rxbuf_setup(sc, bf) == 0);
@@ -2065,8 +2095,6 @@
 }
 
 
-
-
 /*************\
 * TX Handling *
 \*************/
@@ -2298,6 +2326,8 @@
 			ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
 				"stuck beacon time (%u missed)\n",
 				sc->bmisscount);
+			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+				  "stuck beacon, resetting\n");
 			tasklet_schedule(&sc->restq);
 		}
 		return;
@@ -2647,7 +2677,7 @@
 		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
 				"putting device to sleep\n");
 	}
-	ath5k_txbuf_free(sc, sc->bbuf);
+	ath5k_txbuf_free_skb(sc, sc->bbuf);
 
 	mmiowb();
 	mutex_unlock(&sc->lock);
@@ -2705,6 +2735,8 @@
 			 * Fatal errors are unrecoverable.
 			 * Typically these are caused by DMA errors.
 			 */
+			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+				  "fatal int, resetting\n");
 			tasklet_schedule(&sc->restq);
 		} else if (unlikely(status & AR5K_INT_RXORN)) {
 			/*
@@ -2717,8 +2749,11 @@
 			 * this guess is copied from the HAL.
 			 */
 			sc->stats.rxorn_intr++;
-			if (ah->ah_mac_srev < AR5K_SREV_AR5212)
+			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
+				ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+					  "rx overrun, resetting\n");
 				tasklet_schedule(&sc->restq);
+			}
 			else
 				tasklet_schedule(&sc->rxtq);
 		} else {
@@ -2785,10 +2820,6 @@
 	/* Only full calibration for now */
 	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
 
-	/* Stop queues so that calibration
-	 * doesn't interfere with tx */
-	ieee80211_stop_queues(sc->hw);
-
 	ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
 		ieee80211_frequency_to_channel(sc->curchan->center_freq),
 		sc->curchan->hw_value);
@@ -2806,8 +2837,16 @@
 			ieee80211_frequency_to_channel(
 				sc->curchan->center_freq));
 
-	/* Wake queues */
-	ieee80211_wake_queues(sc->hw);
+	/* Noise floor calibration interrupts the rx/tx path while I/Q
+	 * calibration doesn't. We stop the queues so that it doesn't interfere
+	 * with TX, and we run it less often than I/Q calibration */
+	if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
+		ah->ah_cal_next_nf = jiffies +
+			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
+		ieee80211_stop_queues(sc->hw);
+		ath5k_hw_update_noise_floor(ah);
+		ieee80211_wake_queues(sc->hw);
+	}
 
 	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
 }
@@ -2926,6 +2965,10 @@
 
 	ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
 
+	ah->ah_cal_next_full = jiffies;
+	ah->ah_cal_next_ani = jiffies;
+	ah->ah_cal_next_nf = jiffies;
+
 	/*
 	 * Change channels and update the h/w rate map if we're switching;
 	 * e.g. 11a to 11b/g.
@@ -3360,7 +3403,7 @@
 
 	ath5k_debug_dump_skb(sc, skb, "BC  ", 1);
 
-	ath5k_txbuf_free(sc, sc->bbuf);
+	ath5k_txbuf_free_skb(sc, sc->bbuf);
 	sc->bbuf->skb = skb;
 	ret = ath5k_beacon_setup(sc, sc->bbuf);
 	if (ret)
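
A minimal, self-contained sketch of the jiffies-deadline pattern the calibration
tasklet above now uses for the noise floor calibration: the expensive step only
runs once its deadline has passed, and the deadline is then pushed forward. The
my_* names and the interval value are placeholders for illustration, not ath5k
symbols.

#include <linux/jiffies.h>

#define MY_NF_INTERVAL_MSECS	60000		/* assumed interval, sketch only */

static unsigned long my_next_nf;		/* next deadline, in jiffies */

static void my_expensive_calibration(void)	/* stand-in for the NF calibration */
{
}

static void my_periodic_tasklet(void)
{
	/* time_is_before_eq_jiffies(x) is true once jiffies has reached x */
	if (time_is_before_eq_jiffies(my_next_nf)) {
		/* push the deadline forward, then do the slow work */
		my_next_nf = jiffies + msecs_to_jiffies(MY_NF_INTERVAL_MSECS);
		my_expensive_calibration();
	}
}
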
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 74f0071..beae519 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -34,7 +34,6 @@
 {
 	u16 ee_header;
 
-	ATH5K_TRACE(ah->ah_sc);
 	/* Capabilities stored in the EEPROM */
 	ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
 
@@ -123,8 +122,6 @@
 		enum ath5k_capability_type cap_type,
 		u32 capability, u32 *result)
 {
-	ATH5K_TRACE(ah->ah_sc);
-
 	switch (cap_type) {
 	case AR5K_CAP_NUM_TXQUEUES:
 		if (result) {
@@ -173,8 +170,6 @@
 int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
 		u16 assoc_id)
 {
-	ATH5K_TRACE(ah->ah_sc);
-
 	if (ah->ah_version == AR5K_AR5210) {
 		AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
 			AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
@@ -186,8 +181,6 @@
 
 int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
 {
-	ATH5K_TRACE(ah->ah_sc);
-
 	if (ah->ah_version == AR5K_AR5210) {
 		AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
 			AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 6fb5c5f..8c63886 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -278,6 +278,7 @@
 				 size_t count, loff_t *ppos)
 {
 	struct ath5k_softc *sc = file->private_data;
+	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
 	tasklet_schedule(&sc->restq);
 	return count;
 }
@@ -307,7 +308,6 @@
 	{ ATH5K_DEBUG_DUMP_RX,	"dumprx",	"print received skb content" },
 	{ ATH5K_DEBUG_DUMP_TX,	"dumptx",	"print transmit skb content" },
 	{ ATH5K_DEBUG_DUMPBANDS, "dumpbands",	"dump bands" },
-	{ ATH5K_DEBUG_TRACE,	"trace",	"trace function calls" },
 	{ ATH5K_DEBUG_ANI,	"ani",		"adaptive noise immunity" },
 	{ ATH5K_DEBUG_ANY,	"all",		"show all debug levels" },
 };
@@ -426,6 +426,13 @@
 		"AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
 		(v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
 
+	v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
+	len += snprintf(buf+len, sizeof(buf)-len,
+			"\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
+	v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
+	len += snprintf(buf+len, sizeof(buf)-len,
+			"AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);
+
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -729,6 +736,66 @@
 };
 
 
+/* debugfs: queues etc */
+
+static ssize_t read_file_queue(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct ath5k_softc *sc = file->private_data;
+	char buf[700];
+	unsigned int len = 0;
+
+	struct ath5k_txq *txq;
+	struct ath5k_buf *bf, *bf0;
+	int i, n = 0;
+
+	len += snprintf(buf+len, sizeof(buf)-len,
+			"available txbuffers: %d\n", sc->txbuf_len);
+
+	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
+		txq = &sc->txqs[i];
+
+		len += snprintf(buf+len, sizeof(buf)-len,
+			"%02d: %ssetup\n", i, txq->setup ? "" : "not ");
+
+		if (!txq->setup)
+			continue;
+
+		list_for_each_entry_safe(bf, bf0, &txq->q, list)
+			n++;
+		len += snprintf(buf+len, sizeof(buf)-len, "  len: %d\n", n);
+	}
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_queue(struct file *file,
+				 const char __user *userbuf,
+				 size_t count, loff_t *ppos)
+{
+	struct ath5k_softc *sc = file->private_data;
+	char buf[20];
+
+	if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+		return -EFAULT;
+
+	if (strncmp(buf, "start", 5) == 0)
+		ieee80211_wake_queues(sc->hw);
+	else if (strncmp(buf, "stop", 4) == 0)
+		ieee80211_stop_queues(sc->hw);
+
+	return count;
+}
+
+
+static const struct file_operations fops_queue = {
+	.read = read_file_queue,
+	.write = write_file_queue,
+	.open = ath5k_debugfs_open,
+	.owner = THIS_MODULE,
+};
+
+
 /* init */
 
 void
@@ -772,6 +839,11 @@
 				S_IWUSR | S_IRUSR,
 				sc->debug.debugfs_phydir, sc,
 				&fops_ani);
+
+	sc->debug.debugfs_queue = debugfs_create_file("queue",
+				S_IWUSR | S_IRUSR,
+				sc->debug.debugfs_phydir, sc,
+				&fops_queue);
 }
 
 void
@@ -790,6 +862,7 @@
 	debugfs_remove(sc->debug.debugfs_antenna);
 	debugfs_remove(sc->debug.debugfs_frameerrors);
 	debugfs_remove(sc->debug.debugfs_ani);
+	debugfs_remove(sc->debug.debugfs_queue);
 	debugfs_remove(sc->debug.debugfs_phydir);
 }
 
@@ -852,7 +925,7 @@
 		ds, (unsigned long long)bf->daddr,
 		ds->ds_link, ds->ds_data,
 		rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1,
-		rd->u.rx_stat.rx_status_0, rd->u.rx_stat.rx_status_0,
+		rd->rx_stat.rx_status_0, rd->rx_stat.rx_status_1,
 		!done ? ' ' : (rs->rs_status == 0) ? '*' : '!');
 }
 
@@ -867,7 +940,7 @@
 	if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET)))
 		return;
 
-	printk(KERN_DEBUG "rx queue %x, link %p\n",
+	printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
 		ath5k_hw_get_rxdp(ah), sc->rxlink);
 
 	spin_lock_bh(&sc->rxbuflock);
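
The new debugfs "queue" entry added above can be exercised from userspace to
dump the TX buffer/queue state and to stop or wake the mac80211 queues. A
hedged sketch follows; the debugfs path is an assumption and depends on the
debugfs mount point and the phy name.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed location; adjust for your debugfs mount and phy name */
	const char *path = "/sys/kernel/debug/ath5k/phy0/queue";
	char buf[700];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, buf, sizeof(buf) - 1);	/* available txbuffers, per-queue lengths */
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}

	if (write(fd, "stop", 4) < 0)		/* triggers ieee80211_stop_queues() */
		perror("write stop");
	if (write(fd, "start", 5) < 0)		/* triggers ieee80211_wake_queues() */
		perror("write start");

	close(fd);
	return 0;
}
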
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index ddd5b3a..606ae94 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -77,6 +77,7 @@
 	struct dentry		*debugfs_antenna;
 	struct dentry		*debugfs_frameerrors;
 	struct dentry		*debugfs_ani;
+	struct dentry		*debugfs_queue;
 };
 
 /**
@@ -115,18 +116,12 @@
 	ATH5K_DEBUG_DUMP_RX	= 0x00000100,
 	ATH5K_DEBUG_DUMP_TX	= 0x00000200,
 	ATH5K_DEBUG_DUMPBANDS	= 0x00000400,
-	ATH5K_DEBUG_TRACE	= 0x00001000,
 	ATH5K_DEBUG_ANI		= 0x00002000,
 	ATH5K_DEBUG_ANY		= 0xffffffff
 };
 
 #ifdef CONFIG_ATH5K_DEBUG
 
-#define ATH5K_TRACE(_sc) do { \
-	if (unlikely((_sc)->debug.level & ATH5K_DEBUG_TRACE)) \
-		printk(KERN_DEBUG "ath5k trace %s:%d\n", __func__, __LINE__); \
-	} while (0)
-
 #define ATH5K_DBG(_sc, _m, _fmt, ...) do { \
 	if (unlikely((_sc)->debug.level & (_m) && net_ratelimit())) \
 		ATH5K_PRINTK(_sc, KERN_DEBUG, "(%s:%d): " _fmt, \
@@ -168,8 +163,6 @@
 
 #include <linux/compiler.h>
 
-#define ATH5K_TRACE(_sc) typecheck(struct ath5k_softc *, (_sc))
-
 static inline void __attribute__ ((format (printf, 3, 4)))
 ATH5K_DBG(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) {}
 
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 7d7b646..4324438 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -91,14 +91,13 @@
 	tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
 
 	/*
-	 * Verify and set header length
-	 * XXX: I only found that on 5210 code, does it work on 5211 ?
+	 * Verify and set header length (only 5210)
 	 */
 	if (ah->ah_version == AR5K_AR5210) {
-		if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN)
+		if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210)
 			return -EINVAL;
 		tx_ctl->tx_control_0 |=
-			AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
+			AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210);
 	}
 
 	/*Differences between 5210-5211*/
@@ -110,11 +109,11 @@
 		case AR5K_PKT_TYPE_PIFS:
 			frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
 		default:
-			frame_type = type /*<< 2 ?*/;
+			frame_type = type;
 		}
 
 		tx_ctl->tx_control_0 |=
-		AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) |
+		AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210) |
 		AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
 
 	} else {
@@ -123,21 +122,30 @@
 			AR5K_REG_SM(antenna_mode,
 				AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
 		tx_ctl->tx_control_1 |=
-			AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE);
+			AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211);
 	}
+
 #define _TX_FLAGS(_c, _flag)					\
 	if (flags & AR5K_TXDESC_##_flag) {			\
 		tx_ctl->tx_control_##_c |=			\
 			AR5K_2W_TX_DESC_CTL##_c##_##_flag;	\
 	}
-
+#define _TX_FLAGS_5211(_c, _flag)					\
+	if (flags & AR5K_TXDESC_##_flag) {				\
+		tx_ctl->tx_control_##_c |=				\
+			AR5K_2W_TX_DESC_CTL##_c##_##_flag##_5211;	\
+	}
 	_TX_FLAGS(0, CLRDMASK);
-	_TX_FLAGS(0, VEOL);
 	_TX_FLAGS(0, INTREQ);
 	_TX_FLAGS(0, RTSENA);
-	_TX_FLAGS(1, NOACK);
+
+	if (ah->ah_version == AR5K_AR5211) {
+		_TX_FLAGS_5211(0, VEOL);
+		_TX_FLAGS_5211(1, NOACK);
+	}
 
 #undef _TX_FLAGS
+#undef _TX_FLAGS_5211
 
 	/*
 	 * WEP crap
@@ -147,7 +155,7 @@
 			AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
 		tx_ctl->tx_control_1 |=
 			AR5K_REG_SM(key_index,
-			AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
+			AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX);
 	}
 
 	/*
@@ -156,7 +164,7 @@
 	if ((ah->ah_version == AR5K_AR5210) &&
 			(flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
 		tx_ctl->tx_control_1 |= rtscts_duration &
-				AR5K_2W_TX_DESC_CTL1_RTS_DURATION;
+				AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210;
 
 	return 0;
 }
@@ -176,7 +184,6 @@
 	struct ath5k_hw_4w_tx_ctl *tx_ctl;
 	unsigned int frame_len;
 
-	ATH5K_TRACE(ah->ah_sc);
 	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
 
 	/*
@@ -256,7 +263,7 @@
 	if (key_index != AR5K_TXKEYIX_INVALID) {
 		tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
 		tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
-				AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
+				AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX);
 	}
 
 	/*
@@ -278,13 +285,17 @@
 /*
  * Initialize a 4-word multi rate retry tx control descriptor on 5212
  */
-static int
+int
 ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
 	unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
 	u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
 {
 	struct ath5k_hw_4w_tx_ctl *tx_ctl;
 
+	/* no mrr support for cards older than 5212 */
+	if (ah->ah_version < AR5K_AR5212)
+		return 0;
+
 	/*
 	 * Rates can be 0 as long as the retry count is 0 too.
 	 * A zero rate and nonzero retry count will put the HW into a mode where
@@ -324,15 +335,6 @@
 	return 0;
 }
 
-/* no mrr support for cards older than 5212 */
-static int
-ath5k_hw_setup_no_mrr(struct ath5k_hw *ah, struct ath5k_desc *desc,
-	unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
-	u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
-{
-	return 0;
-}
-
 /*
  * Proccess the tx status descriptor on 5210/5211
  */
@@ -342,8 +344,6 @@
 	struct ath5k_hw_2w_tx_ctl *tx_ctl;
 	struct ath5k_hw_tx_status *tx_status;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
 	tx_status = &desc->ud.ds_tx5210.tx_stat;
 
@@ -396,8 +396,6 @@
 	struct ath5k_hw_4w_tx_ctl *tx_ctl;
 	struct ath5k_hw_tx_status *tx_status;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
 	tx_status = &desc->ud.ds_tx5212.tx_stat;
 
@@ -419,11 +417,11 @@
 	ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
 		AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
 	ts->ts_antenna = (tx_status->tx_status_1 &
-		AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1;
+		AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1;
 	ts->ts_status = 0;
 
 	ts->ts_final_idx = AR5K_REG_MS(tx_status->tx_status_1,
-			AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX);
+			AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212);
 
 	/* The longretry counter has the number of un-acked retries
 	 * for the final rate. To get the total number of retries
@@ -485,12 +483,11 @@
 /*
  * Initialize an rx control descriptor
  */
-static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-			u32 size, unsigned int flags)
+int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+			   u32 size, unsigned int flags)
 {
 	struct ath5k_hw_rx_ctl *rx_ctl;
 
-	ATH5K_TRACE(ah->ah_sc);
 	rx_ctl = &desc->ud.ds_rx.rx_ctl;
 
 	/*
@@ -502,10 +499,11 @@
 	*/
 	memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
 
+	if (unlikely(size & ~AR5K_DESC_RX_CTL1_BUF_LEN))
+		return -EINVAL;
+
 	/* Setup descriptor */
 	rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
-	if (unlikely(rx_ctl->rx_control_1 != size))
-		return -EINVAL;
 
 	if (flags & AR5K_RXDESC_INTREQ)
 		rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
@@ -521,13 +519,15 @@
 {
 	struct ath5k_hw_rx_status *rx_status;
 
-	rx_status = &desc->ud.ds_rx.u.rx_stat;
+	rx_status = &desc->ud.ds_rx.rx_stat;
 
 	/* No frame received / not ready */
 	if (unlikely(!(rx_status->rx_status_1 &
-	AR5K_5210_RX_DESC_STATUS1_DONE)))
+			AR5K_5210_RX_DESC_STATUS1_DONE)))
 		return -EINPROGRESS;
 
+	memset(rs, 0, sizeof(struct ath5k_rx_status));
+
 	/*
 	 * Frame receive status
 	 */
@@ -537,15 +537,23 @@
 		AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
 	rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
 		AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
-	rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
-		AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA);
 	rs->rs_more = !!(rx_status->rx_status_0 &
 		AR5K_5210_RX_DESC_STATUS0_MORE);
-	/* TODO: this timestamp is 13 bit, later on we assume 15 bit */
+	/* TODO: this timestamp is 13 bits wide, but later on we assume 15 bits!
+	 * Also, the HAL code for 5210 says the timestamp is bits [10..22] of
+	 * the TSF and extends the timestamp here to 15 bits.
+	 * We need to check this on 5210...
+	 */
 	rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
 		AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
-	rs->rs_status = 0;
-	rs->rs_phyerr = 0;
+
+	if (ah->ah_version == AR5K_AR5211)
+		rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
+				AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211);
+	else
+		rs->rs_antenna = (rx_status->rx_status_0 &
+				AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210)
+				? 2 : 1;
 
 	/*
 	 * Key table status
@@ -560,19 +568,21 @@
 	 * Receive/descriptor errors
 	 */
 	if (!(rx_status->rx_status_1 &
-	AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
+			AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
 		if (rx_status->rx_status_1 &
 				AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
 			rs->rs_status |= AR5K_RXERR_CRC;
 
-		if (rx_status->rx_status_1 &
-				AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN)
+		/* only on 5210 */
+		if ((ah->ah_version == AR5K_AR5210) &&
+		    (rx_status->rx_status_1 &
+				AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210))
 			rs->rs_status |= AR5K_RXERR_FIFO;
 
 		if (rx_status->rx_status_1 &
 				AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
 			rs->rs_status |= AR5K_RXERR_PHY;
-			rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1,
+			rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
 				AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
 		}
 
@@ -588,22 +598,20 @@
  * Proccess the rx status descriptor on 5212
  */
 static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
-		struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+					struct ath5k_desc *desc,
+					struct ath5k_rx_status *rs)
 {
 	struct ath5k_hw_rx_status *rx_status;
-	struct ath5k_hw_rx_error *rx_err;
 
-	ATH5K_TRACE(ah->ah_sc);
-	rx_status = &desc->ud.ds_rx.u.rx_stat;
-
-	/* Overlay on error */
-	rx_err = &desc->ud.ds_rx.u.rx_err;
+	rx_status = &desc->ud.ds_rx.rx_stat;
 
 	/* No frame received / not ready */
 	if (unlikely(!(rx_status->rx_status_1 &
-	AR5K_5212_RX_DESC_STATUS1_DONE)))
+				AR5K_5212_RX_DESC_STATUS1_DONE)))
 		return -EINPROGRESS;
 
+	memset(rs, 0, sizeof(struct ath5k_rx_status));
+
 	/*
 	 * Frame receive status
 	 */
@@ -619,15 +627,13 @@
 		AR5K_5212_RX_DESC_STATUS0_MORE);
 	rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
 		AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
-	rs->rs_status = 0;
-	rs->rs_phyerr = 0;
 
 	/*
 	 * Key table status
 	 */
 	if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
 		rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
-				AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
+					   AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
 	else
 		rs->rs_keyix = AR5K_RXKEYIX_INVALID;
 
@@ -635,7 +641,7 @@
 	 * Receive/descriptor errors
 	 */
 	if (!(rx_status->rx_status_1 &
-	AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
+	    AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
 		if (rx_status->rx_status_1 &
 				AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
 			rs->rs_status |= AR5K_RXERR_CRC;
@@ -643,9 +649,10 @@
 		if (rx_status->rx_status_1 &
 				AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
 			rs->rs_status |= AR5K_RXERR_PHY;
-			rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
-					   AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
-			ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
+			rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
+				AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE);
+			if (!ah->ah_capabilities.cap_has_phyerr_counters)
+				ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
 		}
 
 		if (rx_status->rx_status_1 &
@@ -656,7 +663,6 @@
 				AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
 			rs->rs_status |= AR5K_RXERR_MIC;
 	}
-
 	return 0;
 }
 
@@ -665,29 +671,15 @@
  */
 int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
 {
-
-	if (ah->ah_version != AR5K_AR5210 &&
-		ah->ah_version != AR5K_AR5211 &&
-		ah->ah_version != AR5K_AR5212)
-			return -ENOTSUPP;
-
 	if (ah->ah_version == AR5K_AR5212) {
-		ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
 		ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
-		ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc;
 		ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
-	} else {
-		ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
-		ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
-		ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_no_mrr;
-		ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
-	}
-
-	if (ah->ah_version == AR5K_AR5212)
 		ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
-	else if (ah->ah_version <= AR5K_AR5211)
+	} else if (ah->ah_version <= AR5K_AR5211) {
+		ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
+		ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
 		ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
-
+	} else
+		return -ENOTSUPP;
 	return 0;
 }
-
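
The renames above (HEADER_LEN_5210, FRAME_TYPE_5211, ENC_KEY_IDX, ...) rely on
the driver's convention that every multi-bit field has a mask macro plus a
companion shift macro with an _S suffix, which AR5K_REG_SM()/AR5K_REG_MS()
appear to pack and extract via token pasting. A small standalone sketch of that
convention, using placeholder MY_/FIELD_ names rather than the driver's macros:

#include <assert.h>
#include <stdint.h>

#define MY_RX_DATA_LEN		0x00000fff	/* mask only, field starts at bit 0 */
#define MY_RX_RATE		0x000f8000	/* mask */
#define MY_RX_RATE_S		15		/* shift, always <mask name>_S */

#define FIELD_SET(val, field)	(((uint32_t)(val) << field##_S) & (field))
#define FIELD_GET(reg, field)	(((reg) & (field)) >> field##_S)

int main(void)
{
	uint32_t status0 = 0;

	status0 |= 1200 & MY_RX_DATA_LEN;	/* low field needs no shift */
	status0 |= FIELD_SET(0x0b, MY_RX_RATE);	/* pack the rate bits */

	assert(FIELD_GET(status0, MY_RX_RATE) == 0x0b);
	assert((status0 & MY_RX_DATA_LEN) == 1200);
	return 0;
}
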
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 64538fb..b2adb2a 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -17,28 +17,24 @@
  */
 
 /*
- * Internal RX/TX descriptor structures
- * (rX: reserved fields possibily used by future versions of the ar5k chipset)
+ * RX/TX descriptor structures
  */
 
 /*
- * common hardware RX control descriptor
+ * Common hardware RX control descriptor
  */
 struct ath5k_hw_rx_ctl {
 	u32	rx_control_0; /* RX control word 0 */
 	u32	rx_control_1; /* RX control word 1 */
 } __packed;
 
-/* RX control word 0 field/sflags */
-#define AR5K_DESC_RX_CTL0			0x00000000
-
 /* RX control word 1 fields/flags */
-#define AR5K_DESC_RX_CTL1_BUF_LEN		0x00000fff
-#define AR5K_DESC_RX_CTL1_INTREQ		0x00002000
+#define AR5K_DESC_RX_CTL1_BUF_LEN		0x00000fff /* data buffer length */
+#define AR5K_DESC_RX_CTL1_INTREQ		0x00002000 /* RX interrupt request */
 
 /*
- * common hardware RX status descriptor
- * 5210/11 and 5212 differ only in the flags defined below
+ * Common hardware RX status descriptor
+ * 5210, 5211 and 5212 differ only in the fields and flags defined below
  */
 struct ath5k_hw_rx_status {
 	u32	rx_status_0; /* RX status word 0 */
@@ -47,81 +43,69 @@
 
 /* 5210/5211 */
 /* RX status word 0 fields/flags */
-#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN		0x00000fff
-#define AR5K_5210_RX_DESC_STATUS0_MORE			0x00001000
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE		0x00078000
+#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN		0x00000fff /* RX data length */
+#define AR5K_5210_RX_DESC_STATUS0_MORE			0x00001000 /* more desc for this frame */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210	0x00004000 /* [5210] receive on ant 1 */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE		0x00078000 /* reception rate */
 #define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S	15
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL	0x07f80000
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL	0x07f80000 /* rssi */
 #define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S	19
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA	0x38000000
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S	27
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211	0x38000000 /* [5211] receive antenna */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211_S	27
 
 /* RX status word 1 fields/flags */
-#define AR5K_5210_RX_DESC_STATUS1_DONE			0x00000001
-#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK	0x00000002
-#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR		0x00000004
-#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN		0x00000008
-#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR	0x00000010
-#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR		0x000000e0
+#define AR5K_5210_RX_DESC_STATUS1_DONE			0x00000001 /* descriptor complete */
+#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK	0x00000002 /* reception success */
+#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR		0x00000004 /* CRC error */
+#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210	0x00000008 /* [5210] FIFO overrun */
+#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR	0x00000010 /* decryption CRC failure */
+#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR		0x000000e0 /* PHY error */
 #define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S		5
-#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID	0x00000100
-#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX		0x00007e00
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID	0x00000100 /* key index valid */
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX		0x00007e00 /* decryption key index */
 #define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S		9
-#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP	0x0fff8000
+#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP	0x0fff8000 /* 13 bits of the TSF */
 #define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S	15
-#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS	0x10000000
+#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS	0x10000000 /* key cache miss */
 
 /* 5212 */
 /* RX status word 0 fields/flags */
-#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN		0x00000fff
-#define AR5K_5212_RX_DESC_STATUS0_MORE			0x00001000
-#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR	0x00002000
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE		0x000f8000
+#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN		0x00000fff /* RX data length */
+#define AR5K_5212_RX_DESC_STATUS0_MORE			0x00001000 /* more desc for this frame */
+#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR	0x00002000 /* decompression CRC error */
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE		0x000f8000 /* reception rate */
 #define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S	15
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL	0x0ff00000
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL	0x0ff00000 /* rssi */
 #define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S	20
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA	0xf0000000
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA	0xf0000000 /* receive antenna */
 #define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S	28
 
 /* RX status word 1 fields/flags */
-#define AR5K_5212_RX_DESC_STATUS1_DONE			0x00000001
-#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK	0x00000002
-#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR		0x00000004
-#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR	0x00000008
-#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR		0x00000010
-#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR		0x00000020
-#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID	0x00000100
-#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX		0x0000fe00
+#define AR5K_5212_RX_DESC_STATUS1_DONE			0x00000001 /* descriptor complete */
+#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK	0x00000002 /* frame reception success */
+#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR		0x00000004 /* CRC error */
+#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR	0x00000008 /* decryption CRC failure */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR		0x00000010 /* PHY error */
+#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR		0x00000020 /* MIC decrypt error */
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID	0x00000100 /* key index valid */
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX		0x0000fe00 /* decryption key index */
 #define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S		9
-#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP	0x7fff0000
+#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP	0x7fff0000 /* first 15 bits of the TSF */
 #define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S	16
-#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS	0x80000000
-
-/*
- * common hardware RX error descriptor
- */
-struct ath5k_hw_rx_error {
-	u32	rx_error_0; /* RX status word 0 */
-	u32	rx_error_1; /* RX status word 1 */
-} __packed;
-
-/* RX error word 0 fields/flags */
-#define AR5K_RX_DESC_ERROR0			0x00000000
-
-/* RX error word 1 fields/flags */
-#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE	0x0000ff00
-#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S	8
+#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS	0x80000000 /* key cache miss */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE	0x0000ff00 /* PHY error code, overlays the key index and valid fields */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE_S	8
 
 /**
  * enum ath5k_phy_error_code - PHY Error codes
  */
 enum ath5k_phy_error_code {
-	AR5K_RX_PHY_ERROR_UNDERRUN		= 0,	/* Transmit underrun */
+	AR5K_RX_PHY_ERROR_UNDERRUN		= 0,	/* Transmit underrun, [5210] No error */
 	AR5K_RX_PHY_ERROR_TIMING		= 1,	/* Timing error */
 	AR5K_RX_PHY_ERROR_PARITY		= 2,	/* Illegal parity */
 	AR5K_RX_PHY_ERROR_RATE			= 3,	/* Illegal rate */
 	AR5K_RX_PHY_ERROR_LENGTH		= 4,	/* Illegal length */
-	AR5K_RX_PHY_ERROR_RADAR			= 5,	/* Radar detect */
+	AR5K_RX_PHY_ERROR_RADAR			= 5,	/* Radar detect, [5210] 64 QAM rate */
 	AR5K_RX_PHY_ERROR_SERVICE		= 6,	/* Illegal service */
 	AR5K_RX_PHY_ERROR_TOR			= 7,	/* Transmit override receive */
 	/* these are specific to the 5212 */
@@ -148,112 +132,111 @@
 } __packed;
 
 /* TX control word 0 fields/flags */
-#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN		0x00000fff
-#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN		0x0003f000 /*[5210 ?]*/
-#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S	12
-#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE		0x003c0000
+#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN		0x00000fff /* frame length */
+#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210	0x0003f000 /* [5210] header length */
+#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210_S	12
+#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE		0x003c0000 /* tx rate */
 #define AR5K_2W_TX_DESC_CTL0_XMIT_RATE_S	18
-#define AR5K_2W_TX_DESC_CTL0_RTSENA		0x00400000
-#define AR5K_2W_TX_DESC_CTL0_CLRDMASK		0x01000000
-#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET	0x00800000 /*[5210]*/
-#define AR5K_2W_TX_DESC_CTL0_VEOL		0x00800000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE		0x1c000000 /*[5210]*/
-#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S	26
-#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210	0x02000000
-#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211	0x1e000000
-
+#define AR5K_2W_TX_DESC_CTL0_RTSENA		0x00400000 /* RTS/CTS enable */
+#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET_5210	0x00800000 /* [5210] long packet */
+#define AR5K_2W_TX_DESC_CTL0_VEOL_5211		0x00800000 /* [5211] virtual end-of-list */
+#define AR5K_2W_TX_DESC_CTL0_CLRDMASK		0x01000000 /* clear destination mask */
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210	0x02000000 /* [5210] antenna selection */
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211	0x1e000000 /* [5211] antenna selection */
 #define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT			\
 		(ah->ah_version == AR5K_AR5210 ?		\
 		AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 :	\
 		AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211)
-
 #define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S	25
-#define AR5K_2W_TX_DESC_CTL0_INTREQ		0x20000000
-#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID	0x40000000
+#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210	0x1c000000 /* [5210] frame type */
+#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210_S	26
+#define AR5K_2W_TX_DESC_CTL0_INTREQ		0x20000000 /* TX interrupt request */
+#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID	0x40000000 /* key is valid */
 
 /* TX control word 1 fields/flags */
-#define AR5K_2W_TX_DESC_CTL1_BUF_LEN		0x00000fff
-#define AR5K_2W_TX_DESC_CTL1_MORE		0x00001000
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210	0x0007e000
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211	0x000fe000
-
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX				\
+#define AR5K_2W_TX_DESC_CTL1_BUF_LEN		0x00000fff /* data buffer length */
+#define AR5K_2W_TX_DESC_CTL1_MORE		0x00001000 /* more desc for this frame */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210	0x0007e000 /* [5210] key table index */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211	0x000fe000 /* [5211] key table index */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX				\
 			(ah->ah_version == AR5K_AR5210 ?		\
-			AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 :	\
-			AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211)
-
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S	13
-#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE		0x00700000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S	20
-#define AR5K_2W_TX_DESC_CTL1_NOACK		0x00800000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION	0xfff80000 /*[5210 ?]*/
+			AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 :		\
+			AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211)
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_S	13
+#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211	0x00700000 /* [5211] frame type */
+#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211_S	20
+#define AR5K_2W_TX_DESC_CTL1_NOACK_5211		0x00800000 /* [5211] no ACK */
+#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210	0xfff80000 /* [5210] lower 13 bits of duration */
 
 /* Frame types */
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL   0x00
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM     0x04
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL   0x08
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 0x0c
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS     0x10
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL	0
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM	1
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL	2
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY	3
+#define AR5K_AR5211_TX_DESC_FRAME_TYPE_BEACON	3
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS	4
+#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP	4
 
 /*
  * 5212 hardware 4-word TX control descriptor
  */
 struct ath5k_hw_4w_tx_ctl {
 	u32	tx_control_0; /* TX control word 0 */
-
-#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN		0x00000fff
-#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER		0x003f0000
-#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S	16
-#define AR5K_4W_TX_DESC_CTL0_RTSENA		0x00400000
-#define AR5K_4W_TX_DESC_CTL0_VEOL		0x00800000
-#define AR5K_4W_TX_DESC_CTL0_CLRDMASK		0x01000000
-#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT	0x1e000000
-#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S	25
-#define AR5K_4W_TX_DESC_CTL0_INTREQ		0x20000000
-#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID	0x40000000
-#define AR5K_4W_TX_DESC_CTL0_CTSENA		0x80000000
-
 	u32	tx_control_1; /* TX control word 1 */
+	u32	tx_control_2; /* TX control word 2 */
+	u32	tx_control_3; /* TX control word 3 */
+} __packed;
 
-#define AR5K_4W_TX_DESC_CTL1_BUF_LEN		0x00000fff
-#define AR5K_4W_TX_DESC_CTL1_MORE		0x00001000
-#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX	0x000fe000
-#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S	13
-#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE		0x00f00000
+/* TX control word 0 fields/flags */
+#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN		0x00000fff /* frame length */
+#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER		0x003f0000 /* transmit power */
+#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S	16
+#define AR5K_4W_TX_DESC_CTL0_RTSENA		0x00400000 /* RTS/CTS enable */
+#define AR5K_4W_TX_DESC_CTL0_VEOL		0x00800000 /* virtual end-of-list */
+#define AR5K_4W_TX_DESC_CTL0_CLRDMASK		0x01000000 /* clear destination mask */
+#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT	0x1e000000 /* TX antenna selection */
+#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S	25
+#define AR5K_4W_TX_DESC_CTL0_INTREQ		0x20000000 /* TX interrupt request */
+#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID	0x40000000 /* destination index valid */
+#define AR5K_4W_TX_DESC_CTL0_CTSENA		0x80000000 /* precede frame with CTS */
+
+/* TX control word 1 fields/flags */
+#define AR5K_4W_TX_DESC_CTL1_BUF_LEN		0x00000fff /* data buffer length */
+#define AR5K_4W_TX_DESC_CTL1_MORE		0x00001000 /* more desc for this frame */
+#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX	0x000fe000 /* destination table index */
+#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX_S	13
+#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE		0x00f00000 /* frame type */
 #define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE_S	20
-#define AR5K_4W_TX_DESC_CTL1_NOACK		0x01000000
-#define AR5K_4W_TX_DESC_CTL1_COMP_PROC		0x06000000
+#define AR5K_4W_TX_DESC_CTL1_NOACK		0x01000000 /* no ACK */
+#define AR5K_4W_TX_DESC_CTL1_COMP_PROC		0x06000000 /* compression processing */
 #define AR5K_4W_TX_DESC_CTL1_COMP_PROC_S	25
-#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN	0x18000000
+#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN	0x18000000 /* length of frame IV */
 #define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN_S	27
-#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN	0x60000000
+#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN	0x60000000 /* length of frame ICV */
 #define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN_S	29
 
-	u32	tx_control_2; /* TX control word 2 */
+/* TX control word 2 fields/flags */
+#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION	0x00007fff /* RTS/CTS duration */
+#define AR5K_4W_TX_DESC_CTL2_DURATION_UPD_EN	0x00008000 /* frame duration update */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0	0x000f0000 /* series 0 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S	16
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1	0x00f00000 /* series 1 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S	20
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2	0x0f000000 /* series 2 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S	24
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3	0xf0000000 /* series 3 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S	28
 
-#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION		0x00007fff
-#define AR5K_4W_TX_DESC_CTL2_DURATION_UPDATE_ENABLE	0x00008000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0		0x000f0000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S		16
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1		0x00f00000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S		20
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2		0x0f000000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S		24
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3		0xf0000000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S		28
-
-	u32	tx_control_3; /* TX control word 3 */
-
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0		0x0000001f
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1		0x000003e0
+/* TX control word 3 fields/flags */
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0		0x0000001f /* series 0 tx rate */
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1		0x000003e0 /* series 1 tx rate */
 #define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1_S	5
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2		0x00007c00
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2		0x00007c00 /* series 2 tx rate */
 #define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2_S	10
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3		0x000f8000
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3		0x000f8000 /* series 3 tx rate */
 #define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3_S	15
-#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE	0x01f00000
+#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE	0x01f00000 /* RTS or CTS rate */
 #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S	20
-} __packed;
 
 /*
  * Common TX status descriptor
@@ -264,37 +247,34 @@
 } __packed;
 
 /* TX status word 0 fields/flags */
-#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK	0x00000001
-#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES	0x00000002
-#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN	0x00000004
-#define AR5K_DESC_TX_STATUS0_FILTERED		0x00000008
-/*???
-#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT	0x000000f0
-#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT_S	4
-*/
-#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT	0x000000f0
+#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK	0x00000001 /* TX success */
+#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES	0x00000002 /* excessive retries */
+#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN	0x00000004 /* FIFO underrun */
+#define AR5K_DESC_TX_STATUS0_FILTERED		0x00000008 /* TX filter indication */
+/* According to the HAL sources the spec has short/long retry counts reversed;
+ * we keep them reversed here as well, matching the HAL, for 5210 and 5211.
+ * For 5212 these fields are defined as RTS_FAIL_COUNT and DATA_FAIL_COUNT,
+ * but are used respectively as SHORT and LONG retry count in the code later,
+ * which is consistent with the definitions here... TODO: check */
+#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT	0x000000f0 /* short retry count */
 #define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT_S	4
-/*???
-#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT	0x00000f00
-#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT_S	8
-*/
-#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT	0x00000f00
+#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT	0x00000f00 /* long retry count */
 #define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT_S	8
-#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT	0x0000f000
-#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT_S	12
-#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP	0xffff0000
+#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5211	0x0000f000 /* [5211+] virtual collision count */
+#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5211_S	12
+#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP	0xffff0000 /* TX timestamp */
 #define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S	16
 
 /* TX status word 1 fields/flags */
-#define AR5K_DESC_TX_STATUS1_DONE		0x00000001
-#define AR5K_DESC_TX_STATUS1_SEQ_NUM		0x00001ffe
+#define AR5K_DESC_TX_STATUS1_DONE		0x00000001 /* descriptor complete */
+#define AR5K_DESC_TX_STATUS1_SEQ_NUM		0x00001ffe /* TX sequence number */
 #define AR5K_DESC_TX_STATUS1_SEQ_NUM_S		1
-#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH	0x001fe000
+#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH	0x001fe000 /* signal strength of ACK */
 #define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH_S	13
-#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX	0x00600000
-#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S	21
-#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS	0x00800000
-#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA	0x01000000
+#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212	0x00600000 /* [5212] final TX attempt series ix */
+#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212_S	21
+#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212	0x00800000 /* [5212] compression status */
+#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212	0x01000000 /* [5212] transmit antenna */
 
 /*
  * 5210/5211 hardware TX descriptor
@@ -313,18 +293,15 @@
 } __packed;
 
 /*
- * common hardware RX descriptor
+ * Common hardware RX descriptor
  */
 struct ath5k_hw_all_rx_desc {
-	struct ath5k_hw_rx_ctl			rx_ctl;
-	union {
-		struct ath5k_hw_rx_status	rx_stat;
-		struct ath5k_hw_rx_error	rx_err;
-	} u;
+	struct ath5k_hw_rx_ctl		rx_ctl;
+	struct ath5k_hw_rx_status	rx_stat;
 } __packed;
 
 /*
- * Atheros hardware descriptor
+ * Atheros hardware DMA descriptor
  * This is read and written to by the hardware
  */
 struct ath5k_desc {
@@ -346,4 +323,3 @@
 #define AR5K_TXDESC_CTSENA	0x0008
 #define AR5K_TXDESC_INTREQ	0x0010
 #define AR5K_TXDESC_VEOL	0x0020	/*[5211+]*/
-
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 941b511..484f318 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -48,7 +48,6 @@
  */
 void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
 	ath5k_hw_reg_read(ah, AR5K_CR);
 }
@@ -62,7 +61,6 @@
 {
 	unsigned int i;
 
-	ATH5K_TRACE(ah->ah_sc);
 	ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
 
 	/*
@@ -96,8 +94,6 @@
  */
 void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
 {
-	ATH5K_TRACE(ah->ah_sc);
-
 	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
 }
 
@@ -125,7 +121,6 @@
 {
 	u32 tx_queue;
 
-	ATH5K_TRACE(ah->ah_sc);
 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
 
 	/* Return if queue is declared inactive */
@@ -186,7 +181,6 @@
 	unsigned int i = 40;
 	u32 tx_queue, pending;
 
-	ATH5K_TRACE(ah->ah_sc);
 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
 
 	/* Return if queue is declared inactive */
@@ -297,7 +291,6 @@
 {
 	u16 tx_reg;
 
-	ATH5K_TRACE(ah->ah_sc);
 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
 
 	/*
@@ -340,7 +333,6 @@
 {
 	u16 tx_reg;
 
-	ATH5K_TRACE(ah->ah_sc);
 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
 
 	/*
@@ -400,8 +392,6 @@
 	u32 trigger_level, imr;
 	int ret = -EIO;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	/*
 	 * Disable interrupts by setting the mask
 	 */
@@ -451,7 +441,6 @@
  */
 bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
 }
 
@@ -475,8 +464,6 @@
 {
 	u32 data;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	/*
 	 * Read interrupt status from the Interrupt Status register
 	 * on 5210
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index ed02636..ae316fe 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -35,7 +35,6 @@
 {
 	u32 status, timeout;
 
-	ATH5K_TRACE(ah->ah_sc);
 	/*
 	 * Initialize EEPROM access
 	 */
@@ -715,7 +714,7 @@
 
 		/* Only one curve for RF5111
 		 * find out which one and place
-		 * in in pd_curves.
+		 * in pd_curves.
 		 * Note: ee_x_gain is reversed here */
 		for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) {
 
diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
index 64a27e7..bc90503 100644
--- a/drivers/net/wireless/ath/ath5k/gpio.c
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
@@ -34,8 +34,6 @@
 	/*5210 has different led mode handling*/
 	u32 led_5210;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	/*Reset led status*/
 	if (ah->ah_version != AR5K_AR5210)
 		AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
@@ -82,7 +80,6 @@
  */
 int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	if (gpio >= AR5K_NUM_GPIO)
 		return -EINVAL;
 
@@ -98,7 +95,6 @@
  */
 int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	if (gpio >= AR5K_NUM_GPIO)
 		return -EINVAL;
 
@@ -114,7 +110,6 @@
  */
 u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	if (gpio >= AR5K_NUM_GPIO)
 		return 0xffffffff;
 
@@ -129,7 +124,6 @@
 int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
 {
 	u32 data;
-	ATH5K_TRACE(ah->ah_sc);
 
 	if (gpio >= AR5K_NUM_GPIO)
 		return -EINVAL;
@@ -153,7 +147,6 @@
 {
 	u32 data;
 
-	ATH5K_TRACE(ah->ah_sc);
 	if (gpio >= AR5K_NUM_GPIO)
 		return;
 
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 5212e27..86fdb6d 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -59,8 +59,6 @@
 
 	beacon_reg = 0;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	switch (op_mode) {
 	case NL80211_IFTYPE_ADHOC:
 		pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
@@ -173,7 +171,6 @@
  */
 static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
 			<= timeout)
 		return -EINVAL;
@@ -192,7 +189,6 @@
  */
 static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
 			<= timeout)
 		return -EINVAL;
@@ -297,7 +293,6 @@
 	u32 low_id, high_id;
 	u32 pcu_reg;
 
-	ATH5K_TRACE(ah->ah_sc);
 	/* Set new station ID */
 	memcpy(common->macaddr, mac, ETH_ALEN);
 
@@ -357,7 +352,6 @@
 void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
 {
 	struct ath_common *common = ath5k_hw_common(ah);
-	ATH5K_TRACE(ah->ah_sc);
 
 	/* Cache bssid mask so that we can restore it
 	 * on reset */
@@ -382,7 +376,6 @@
  */
 void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
 }
 
@@ -397,7 +390,6 @@
  */
 void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
 }
 
@@ -406,8 +398,6 @@
  */
 void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
 {
-	ATH5K_TRACE(ah->ah_sc);
-	/* Set the multicat filter */
 	ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
 	ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
 }
@@ -427,7 +417,6 @@
 {
 	u32 data, filter = 0;
 
-	ATH5K_TRACE(ah->ah_sc);
 	filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
 
 	/*Radar detection for 5212*/
@@ -457,8 +446,6 @@
 {
 	u32 data = 0;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	/* Set PHY error filter register on 5212*/
 	if (ah->ah_version == AR5K_AR5212) {
 		if (filter & AR5K_RX_FILTER_RADARERR)
@@ -533,8 +520,6 @@
 
 	WARN_ON( i == ATH5K_MAX_TSF_READ );
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	return (((u64)tsf_upper1 << 32) | tsf_lower);
 }
 
@@ -548,8 +533,6 @@
  */
 void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
 {
-	ATH5K_TRACE(ah->ah_sc);
-
 	ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
 	ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
 }
@@ -565,8 +548,6 @@
 {
 	u32 val;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF;
 
 	/*
@@ -586,7 +567,6 @@
 {
 	u32 timer1, timer2, timer3;
 
-	ATH5K_TRACE(ah->ah_sc);
 	/*
 	 * Set the additional timers by mode
 	 */
@@ -674,7 +654,6 @@
 	unsigned int i, type;
 	u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
 
-	ATH5K_TRACE(ah->ah_sc);
 	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
 
 	type = ath5k_hw_reg_read(ah, AR5K_KEYTABLE_TYPE(entry));
@@ -749,8 +728,6 @@
 	bool is_tkip;
 	const u8 *key_ptr;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	is_tkip = (key->alg == ALG_TKIP);
 
 	/*
@@ -836,7 +813,6 @@
 {
 	u32 low_id, high_id;
 
-	ATH5K_TRACE(ah->ah_sc);
 	 /* Invalid entry (key table overflow) */
 	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
 
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 492cbb1..6284c38 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -378,8 +378,6 @@
 	u32 data, type;
 	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	if (ah->ah_rf_banks == NULL ||
 	ah->ah_gain.g_state == AR5K_RFGAIN_INACTIVE)
 		return AR5K_RFGAIN_INACTIVE;
@@ -1167,7 +1165,7 @@
  * The median of the values in the history is then loaded into the
  * hardware for its own use for RSSI and CCA measurements.
  */
-static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
 {
 	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
 	u32 val;
@@ -1248,7 +1246,6 @@
 /*
  * Perform a PHY calibration on RF5110
  * -Fix BPSK/QAM Constellation (I/Q correction)
- * -Calculate Noise Floor
  */
 static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
 		struct ieee80211_channel *channel)
@@ -1335,8 +1332,6 @@
 		return ret;
 	}
 
-	ath5k_hw_update_noise_floor(ah);
-
 	/*
 	 * Re-enable RX/TX and beacons
 	 */
@@ -1348,22 +1343,20 @@
 }
 
 /*
- * Perform a PHY calibration on RF5111/5112 and newer chips
+ * Perform I/Q calibration on RF5111/5112 and newer chips
  */
-static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
-		struct ieee80211_channel *channel)
+static int
+ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
 {
 	u32 i_pwr, q_pwr;
 	s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
 	int i;
-	ATH5K_TRACE(ah->ah_sc);
 
 	if (!ah->ah_calibration ||
 		ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
-		goto done;
+		return 0;
 
 	/* Calibration has finished, get the results and re-run */
-
 	/* work around empty results which can apparently happen on 5212 */
 	for (i = 0; i <= 10; i++) {
 		iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
@@ -1384,7 +1377,7 @@
 
 	/* protect against divide by 0 and loss of sign bits */
 	if (i_coffd == 0 || q_coffd < 2)
-		goto done;
+		return -1;
 
 	i_coff = (-iq_corr) / i_coffd;
 	i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
@@ -1410,17 +1403,6 @@
 			AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
 	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN);
 
-done:
-
-	/* TODO: Separate noise floor calibration from I/Q calibration
-	 * since noise floor calibration interrupts rx path while I/Q
-	 * calibration doesn't. We don't need to run noise floor calibration
-	 * as often as I/Q calibration.*/
-	ath5k_hw_update_noise_floor(ah);
-
-	/* Initiate a gain_F calibration */
-	ath5k_hw_request_rfgain_probe(ah);
-
 	return 0;
 }
 
@@ -1434,8 +1416,10 @@
 
 	if (ah->ah_radio == AR5K_RF5110)
 		ret = ath5k_hw_rf5110_calibrate(ah, channel);
-	else
-		ret = ath5k_hw_rf511x_calibrate(ah, channel);
+	else {
+		ret = ath5k_hw_rf511x_iq_calibrate(ah);
+		ath5k_hw_request_rfgain_probe(ah);
+	}
 
 	return ret;
 }
@@ -1693,7 +1677,6 @@
 
 int ath5k_hw_phy_disable(struct ath5k_hw *ah)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	/*Just a try M.F.*/
 	ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
 
@@ -1709,8 +1692,6 @@
 	u32 srev;
 	u16 ret;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	/*
 	 * Set the radio chip access register
 	 */
@@ -1755,8 +1736,6 @@
 static void /*TODO:Boundary check*/
 ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
 {
-	ATH5K_TRACE(ah->ah_sc);
-
 	if (ah->ah_version != AR5K_AR5210)
 		ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
 }
@@ -1789,19 +1768,50 @@
 
 	if (enable) {
 		AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
-				AR5K_PHY_RESTART_DIV_GC, 0xc);
+				AR5K_PHY_RESTART_DIV_GC, 4);
 
 		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
 					AR5K_PHY_FAST_ANT_DIV_EN);
 	} else {
 		AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
-				AR5K_PHY_RESTART_DIV_GC, 0x8);
+				AR5K_PHY_RESTART_DIV_GC, 0);
 
 		AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
 					AR5K_PHY_FAST_ANT_DIV_EN);
 	}
 }
 
+void
+ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
+{
+	u8 ant0, ant1;
+
+	/*
+	 * In case a fixed antenna was set as default
+	 * use the same switch table twice.
+	 */
+	if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A)
+		ant0 = ant1 = AR5K_ANT_SWTABLE_A;
+	else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B)
+		ant0 = ant1 = AR5K_ANT_SWTABLE_B;
+	else {
+		ant0 = AR5K_ANT_SWTABLE_A;
+		ant1 = AR5K_ANT_SWTABLE_B;
+	}
+
+	/* Set antenna idle switch table */
+	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL,
+			AR5K_PHY_ANT_CTL_SWTABLE_IDLE,
+			(ah->ah_ant_ctl[ee_mode][AR5K_ANT_CTL] |
+			AR5K_PHY_ANT_CTL_TXRX_EN));
+
+	/* Set antenna switch tables */
+	ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant0],
+		AR5K_PHY_ANT_SWITCH_TABLE_0);
+	ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant1],
+		AR5K_PHY_ANT_SWITCH_TABLE_1);
+}
+
 /*
  * Set antenna operating mode
  */
@@ -1823,8 +1833,6 @@
 
 	def_ant = ah->ah_def_ant;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	switch (channel->hw_value & CHANNEL_MODES) {
 	case CHANNEL_A:
 	case CHANNEL_T:
@@ -1923,6 +1931,7 @@
 	if (sta_id1)
 		AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, sta_id1);
 
+	ath5k_hw_set_antenna_switch(ah, ee_mode);
 	/* Note: set diversity before default antenna
 	 * because it won't work correctly */
 	ath5k_hw_set_fast_div(ah, ee_mode, fast_div);
@@ -2988,7 +2997,6 @@
 	u8 type;
 	int ret;
 
-	ATH5K_TRACE(ah->ah_sc);
 	if (txpower > AR5K_TUNE_MAX_TXPOWER) {
 		ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
 		return -EINVAL;
@@ -3084,8 +3092,6 @@
 	struct ieee80211_channel *channel = ah->ah_current_channel;
 	u8 ee_mode;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	switch (channel->hw_value & CHANNEL_MODES) {
 	case CHANNEL_A:
 	case CHANNEL_T:
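
The I/Q calibration hunk above clamps the computed correction into a signed
6-bit range before writing it to the PHY. A hedged sketch of that
clamp-and-mask step; the MY_* field layout is a placeholder, not the real
AR5K_PHY_IQ register layout.

#include <linux/kernel.h>
#include <linux/types.h>

#define MY_IQ_I_COFF	0x0000003f	/* assumed signed 6-bit field at bit 0 */
#define MY_IQ_I_COFF_S	0

static u32 my_pack_i_coff(s32 iq_corr, s32 i_coffd)
{
	s32 i_coff;

	if (i_coffd == 0)			/* protect against divide by zero */
		return 0;

	i_coff = (-iq_corr) / i_coffd;
	i_coff = clamp(i_coff, -32, 31);	/* signed 6-bit range */

	/* two's complement value masked into the field */
	return ((u32)i_coff << MY_IQ_I_COFF_S) & MY_IQ_I_COFF;
}
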
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index f5831da..4186ff4 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -31,7 +31,6 @@
 int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
 		struct ath5k_txq_info *queue_info)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
 	return 0;
 }
@@ -42,7 +41,6 @@
 int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
 				const struct ath5k_txq_info *queue_info)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
 
 	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
@@ -69,8 +67,6 @@
 	unsigned int queue;
 	int ret;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	/*
 	 * Get queue by type
 	 */
@@ -149,7 +145,6 @@
 u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
 {
 	u32 pending;
-	ATH5K_TRACE(ah->ah_sc);
 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
 
 	/* Return if queue is declared inactive */
@@ -177,7 +172,6 @@
  */
 void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 {
-	ATH5K_TRACE(ah->ah_sc);
 	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
 		return;
 
@@ -195,7 +189,6 @@
 	u32 cw_min, cw_max, retry_lg, retry_sh;
 	struct ath5k_txq_info *tq = &ah->ah_txq[queue];
 
-	ATH5K_TRACE(ah->ah_sc);
 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
 
 	tq = &ah->ah_txq[queue];
@@ -523,8 +516,6 @@
 {
 	u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
 		return -EINVAL;
 
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 307f80e..498aa28 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -201,8 +201,6 @@
 	int ret;
 	u32 mask = val ? val : ~0U;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	/* Read-and-clear RX Descriptor Pointer*/
 	ath5k_hw_reg_read(ah, AR5K_RXDP);
 
@@ -246,7 +244,6 @@
 	unsigned int i;
 	u32 staid, data;
 
-	ATH5K_TRACE(ah->ah_sc);
 	staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
 
 	switch (mode) {
@@ -393,8 +390,6 @@
 	mode = 0;
 	clock = 0;
 
-	ATH5K_TRACE(ah->ah_sc);
-
 	/* Wakeup the device */
 	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
 	if (ret) {
@@ -734,7 +729,7 @@
 }
 
 static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
-		struct ieee80211_channel *channel, u8 *ant, u8 ee_mode)
+		struct ieee80211_channel *channel, u8 ee_mode)
 {
 	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
 	s16 cck_ofdm_pwr_delta;
@@ -768,17 +763,9 @@
 						ee->ee_cck_ofdm_gain_delta;
 	}
 
-	/* Set antenna idle switch table */
-	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL,
-			AR5K_PHY_ANT_CTL_SWTABLE_IDLE,
-			(ah->ah_ant_ctl[ee_mode][0] |
-			AR5K_PHY_ANT_CTL_TXRX_EN));
-
-	/* Set antenna switch tables */
-	ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[0]],
-		AR5K_PHY_ANT_SWITCH_TABLE_0);
-	ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[1]],
-		AR5K_PHY_ANT_SWITCH_TABLE_1);
+	/* XXX: is this necessary here? ath5k_hw_set_antenna_switch() is also
+	 * called from ath5k_hw_set_antenna_mode() */
+	ath5k_hw_set_antenna_switch(ah, ee_mode);
 
 	/* Noise floor threshold */
 	ath5k_hw_reg_write(ah,
@@ -855,7 +842,6 @@
 			AR5K_PHY_NF_THRESH62,
 			ee->ee_thr_62[ee_mode]);
 
-
 	/* False detect backoff for channels
 	 * that have spur noise. Write the new
 	 * cyclic power RSSI threshold. */
@@ -891,14 +877,11 @@
 	struct ieee80211_channel *channel, bool change_channel)
 {
 	struct ath_common *common = ath5k_hw_common(ah);
-	u32 s_seq[10], s_ant, s_led[3], staid1_flags, tsf_up, tsf_lo;
+	u32 s_seq[10], s_led[3], staid1_flags, tsf_up, tsf_lo;
 	u32 phy_tst1;
-	u8 mode, freq, ee_mode, ant[2];
+	u8 mode, freq, ee_mode;
 	int i, ret;
 
-	ATH5K_TRACE(ah->ah_sc);
-
-	s_ant = 0;
 	ee_mode = 0;
 	staid1_flags = 0;
 	tsf_up = 0;
@@ -995,9 +978,6 @@
 			}
 		}
 
-		/* Save default antenna */
-		s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
-
 		if (ah->ah_version == AR5K_AR5212) {
 			/* Restore normal 32/40MHz clock operation
 			 * to avoid register access delay on certain
@@ -1094,22 +1074,17 @@
 		/* Write OFDM timings on 5212*/
 		if (ah->ah_version == AR5K_AR5212 &&
 			channel->hw_value & CHANNEL_OFDM) {
-			struct ath5k_eeprom_info *ee =
-					&ah->ah_capabilities.cap_eeprom;
 
 			ret = ath5k_hw_write_ofdm_timings(ah, channel);
 			if (ret)
 				return ret;
 
-			/* Note: According to docs we can have a newer
-			 * EEPROM on old hardware, so we need to verify
-			 * that our hardware is new enough to have spur
-			 * mitigation registers (delta phase etc) */
-			if (ah->ah_mac_srev >= AR5K_SREV_AR5424 ||
-			(ah->ah_mac_srev >= AR5K_SREV_AR5424 &&
-			ee->ee_version >= AR5K_EEPROM_VERSION_5_3))
+			/* Spur info is available only from EEPROM versions
+			 * newer than 5.3, but the EEPROM routines will use
+			 * static values for older versions */
+			if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
 				ath5k_hw_set_spur_mitigation_filter(ah,
-								channel);
+								    channel);
 		}
 
 		/*Enable/disable 802.11b mode on 5111
@@ -1123,21 +1098,8 @@
 				    AR5K_TXCFG_B_MODE);
 		}
 
-		/*
-		 * In case a fixed antenna was set as default
-		 * use the same switch table twice.
-		 */
-		if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A)
-				ant[0] = ant[1] = AR5K_ANT_SWTABLE_A;
-		else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B)
-				ant[0] = ant[1] = AR5K_ANT_SWTABLE_B;
-		else {
-			ant[0] = AR5K_ANT_SWTABLE_A;
-			ant[1] = AR5K_ANT_SWTABLE_B;
-		}
-
 		/* Commit values from EEPROM */
-		ath5k_hw_commit_eeprom_settings(ah, channel, ant, ee_mode);
+		ath5k_hw_commit_eeprom_settings(ah, channel, ee_mode);
 
 	} else {
 		/*
@@ -1175,8 +1137,6 @@
 				ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32);
 			}
 		}
-
-		ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA);
 	}
 
 	/* Ledstate */
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
new file mode 100644
index 0000000..90757de
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -0,0 +1,116 @@
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include "base.h"
+#include "ath5k.h"
+#include "reg.h"
+
+#define SIMPLE_SHOW_STORE(name, get, set)				\
+static ssize_t ath5k_attr_show_##name(struct device *dev,		\
+			struct device_attribute *attr,			\
+			char *buf)					\
+{									\
+	struct ath5k_softc *sc = dev_get_drvdata(dev);			\
+	return snprintf(buf, PAGE_SIZE, "%d\n", get); 			\
+}									\
+									\
+static ssize_t ath5k_attr_store_##name(struct device *dev,		\
+			struct device_attribute *attr,			\
+			const char *buf, size_t count)			\
+{									\
+	struct ath5k_softc *sc = dev_get_drvdata(dev);			\
+	int val;							\
+									\
+	val = (int)simple_strtoul(buf, NULL, 10);			\
+	set(sc->ah, val);						\
+	return count;							\
+}									\
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR,				\
+		   ath5k_attr_show_##name, ath5k_attr_store_##name)
+
+#define SIMPLE_SHOW(name, get)						\
+static ssize_t ath5k_attr_show_##name(struct device *dev,		\
+			struct device_attribute *attr,			\
+			char *buf)					\
+{									\
+	struct ath5k_softc *sc = dev_get_drvdata(dev);			\
+	return snprintf(buf, PAGE_SIZE, "%d\n", get); 			\
+}									\
+static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
+
+/*** ANI ***/
+
+SIMPLE_SHOW_STORE(ani_mode, sc->ani_state.ani_mode, ath5k_ani_init);
+SIMPLE_SHOW_STORE(noise_immunity_level, sc->ani_state.noise_imm_level,
+			ath5k_ani_set_noise_immunity_level);
+SIMPLE_SHOW_STORE(spur_level, sc->ani_state.spur_level,
+			ath5k_ani_set_spur_immunity_level);
+SIMPLE_SHOW_STORE(firstep_level, sc->ani_state.firstep_level,
+			ath5k_ani_set_firstep_level);
+SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, sc->ani_state.ofdm_weak_sig,
+			ath5k_ani_set_ofdm_weak_signal_detection);
+SIMPLE_SHOW_STORE(cck_weak_signal_detection, sc->ani_state.cck_weak_sig,
+			ath5k_ani_set_cck_weak_signal_detection);
+SIMPLE_SHOW(spur_level_max, sc->ani_state.max_spur_level);
+
+static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL);
+}
+static DEVICE_ATTR(noise_immunity_level_max, S_IRUGO,
+		   ath5k_attr_show_noise_immunity_level_max, NULL);
+
+static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL);
+}
+static DEVICE_ATTR(firstep_level_max, S_IRUGO,
+		   ath5k_attr_show_firstep_level_max, NULL);
+
+static struct attribute *ath5k_sysfs_entries_ani[] = {
+	&dev_attr_ani_mode.attr,
+	&dev_attr_noise_immunity_level.attr,
+	&dev_attr_spur_level.attr,
+	&dev_attr_firstep_level.attr,
+	&dev_attr_ofdm_weak_signal_detection.attr,
+	&dev_attr_cck_weak_signal_detection.attr,
+	&dev_attr_noise_immunity_level_max.attr,
+	&dev_attr_spur_level_max.attr,
+	&dev_attr_firstep_level_max.attr,
+	NULL
+};
+
+static struct attribute_group ath5k_attribute_group_ani = {
+	.name = "ani",
+	.attrs = ath5k_sysfs_entries_ani,
+};
+
+
+/*** register / unregister ***/
+
+int
+ath5k_sysfs_register(struct ath5k_softc *sc)
+{
+	struct device *dev = &sc->pdev->dev;
+	int err;
+
+	err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani);
+	if (err) {
+		ATH5K_ERR(sc, "failed to create sysfs group\n");
+		return err;
+	}
+
+	return 0;
+}
+
+void
+ath5k_sysfs_unregister(struct ath5k_softc *sc)
+{
+	struct device *dev = &sc->pdev->dev;
+
+	sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani);
+}
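
The new sysfs.c above exposes the ath5k ANI knobs as a device attribute group named "ani". Purely as an illustration (not part of the patch), a minimal userspace sketch of reading and writing one of these attributes could look like the following; the sysfs path is an assumption and depends on the phy index and bus layout of the local machine.

/* Hedged userspace sketch: poke the "ani" group created by
 * ath5k_sysfs_register().  The path below is an assumption. */
#include <stdio.h>

int main(void)
{
	/* assumed location; adjust phy index for the local system */
	const char *path =
		"/sys/class/ieee80211/phy0/device/ani/spur_level";
	FILE *f = fopen(path, "r+");
	int level;

	if (!f) {
		perror("fopen");
		return 1;
	}

	if (fscanf(f, "%d", &level) == 1)
		printf("current spur_level: %d\n", level);

	/* the store path is parsed with simple_strtoul() and handed to
	 * ath5k_ani_set_spur_immunity_level() in the driver */
	rewind(f);
	fprintf(f, "%d\n", 2);

	fclose(f);
	return 0;
}

Valid values for spur_level range from 0 to the read-only spur_level_max attribute in the same group.
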
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index dd112be..973ae4f 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -32,7 +32,8 @@
 		mac.o \
 		ar9002_mac.o \
 		ar9003_mac.o \
-		ar9003_eeprom.o
+		ar9003_eeprom.o \
+		ar9003_paprd.o
 
 obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
 
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index ba8b20f..cc648b6 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -17,8 +17,99 @@
 #include "hw.h"
 #include "hw-ops.h"
 
-static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
-					struct ath9k_channel *chan)
+struct ani_ofdm_level_entry {
+	int spur_immunity_level;
+	int fir_step_level;
+	int ofdm_weak_signal_on;
+};
+
+/* values here are relative to the INI */
+
+/*
+ * Legend:
+ *
+ * SI: Spur immunity
+ * FS: FIR Step
+ * WS: OFDM / CCK Weak Signal detection
+ * MRC-CCK: Maximal Ratio Combining for CCK
+ */
+
+static const struct ani_ofdm_level_entry ofdm_level_table[] = {
+	/* SI  FS  WS */
+	{  0,  0,  1  }, /* lvl 0 */
+	{  1,  1,  1  }, /* lvl 1 */
+	{  2,  2,  1  }, /* lvl 2 */
+	{  3,  2,  1  }, /* lvl 3  (default) */
+	{  4,  3,  1  }, /* lvl 4 */
+	{  5,  4,  1  }, /* lvl 5 */
+	{  6,  5,  1  }, /* lvl 6 */
+	{  7,  6,  1  }, /* lvl 7 */
+	{  7,  7,  1  }, /* lvl 8 */
+	{  7,  8,  0  }  /* lvl 9 */
+};
+#define ATH9K_ANI_OFDM_NUM_LEVEL \
+	(sizeof(ofdm_level_table)/sizeof(ofdm_level_table[0]))
+#define ATH9K_ANI_OFDM_MAX_LEVEL \
+	(ATH9K_ANI_OFDM_NUM_LEVEL-1)
+#define ATH9K_ANI_OFDM_DEF_LEVEL \
+	3 /* default level - matches the INI settings */
+
+/*
+ * MRC (Maximal Ratio Combining) has always been used with multi-antenna ofdm.
+ * With OFDM for single stream you just add up all antenna inputs, you're
+ * only interested in what you get after FFT. Signal alignment is also not
+ * required for OFDM because any phase difference adds up in the frequency
+ * domain.
+ *
+ * MRC requires extra work for use with CCK. You need to align the antenna
+ * signals from the different antenna before you can add the signals together.
+ * You need alignment of signals as CCK is in time domain, so addition can cancel
+ * your signal completely if phase is 180 degrees (think of adding sine waves).
+ * You also need to remove noise before the addition and this is where ANI
+ * MRC CCK comes into play. One of the antenna inputs may be stronger but
+ * lower SNR, so just adding after alignment can be dangerous.
+ *
+ * Regardless of alignment in time, the antenna signals add constructively after
+ * FFT and improve your reception. For more information:
+ *
+ * http://en.wikipedia.org/wiki/Maximal-ratio_combining
+ */
+
+struct ani_cck_level_entry {
+	int fir_step_level;
+	int mrc_cck_on;
+};
+
+static const struct ani_cck_level_entry cck_level_table[] = {
+	/* FS  MRC-CCK  */
+	{  0,  1  }, /* lvl 0 */
+	{  1,  1  }, /* lvl 1 */
+	{  2,  1  }, /* lvl 2  (default) */
+	{  3,  1  }, /* lvl 3 */
+	{  4,  0  }, /* lvl 4 */
+	{  5,  0  }, /* lvl 5 */
+	{  6,  0  }, /* lvl 6 */
+	{  7,  0  }, /* lvl 7 (only for high rssi) */
+	{  8,  0  }  /* lvl 8 (only for high rssi) */
+};
+
+#define ATH9K_ANI_CCK_NUM_LEVEL \
+	(sizeof(cck_level_table)/sizeof(cck_level_table[0]))
+#define ATH9K_ANI_CCK_MAX_LEVEL \
+	(ATH9K_ANI_CCK_NUM_LEVEL-1)
+#define ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI \
+	(ATH9K_ANI_CCK_NUM_LEVEL-3)
+#define ATH9K_ANI_CCK_DEF_LEVEL \
+	2 /* default level - matches the INI settings */
+
+/* Private to ani.c */
+static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
+{
+	ath9k_hw_private_ops(ah)->ani_lower_immunity(ah);
+}
+
+int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
+				 struct ath9k_channel *chan)
 {
 	int i;
 
@@ -48,7 +139,7 @@
 	stats->beacons += REG_READ(ah, AR_BEACON_CNT);
 }
 
-static void ath9k_ani_restart(struct ath_hw *ah)
+static void ath9k_ani_restart_old(struct ath_hw *ah)
 {
 	struct ar5416AniState *aniState;
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -96,7 +187,42 @@
 	aniState->cckPhyErrCount = 0;
 }
 
-static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
+static void ath9k_ani_restart_new(struct ath_hw *ah)
+{
+	struct ar5416AniState *aniState;
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	if (!DO_ANI(ah))
+		return;
+
+	aniState = ah->curani;
+	aniState->listenTime = 0;
+
+	aniState->ofdmPhyErrBase = 0;
+	aniState->cckPhyErrBase = 0;
+
+	ath_print(common, ATH_DBG_ANI,
+		  "Writing ofdmbase=%08x   cckbase=%08x\n",
+		  aniState->ofdmPhyErrBase,
+		  aniState->cckPhyErrBase);
+
+	ENABLE_REGWRITE_BUFFER(ah);
+
+	REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
+	REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
+	REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
+	REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+	REGWRITE_BUFFER_FLUSH(ah);
+	DISABLE_REGWRITE_BUFFER(ah);
+
+	ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+	aniState->ofdmPhyErrCount = 0;
+	aniState->cckPhyErrCount = 0;
+}
+
+static void ath9k_hw_ani_ofdm_err_trigger_old(struct ath_hw *ah)
 {
 	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 	struct ar5416AniState *aniState;
@@ -168,7 +294,7 @@
 	}
 }
 
-static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
+static void ath9k_hw_ani_cck_err_trigger_old(struct ath_hw *ah)
 {
 	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
 	struct ar5416AniState *aniState;
@@ -206,7 +332,125 @@
 	}
 }
 
-static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
+/* Adjust the OFDM Noise Immunity Level */
+static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
+{
+	struct ar5416AniState *aniState = ah->curani;
+	struct ath_common *common = ath9k_hw_common(ah);
+	const struct ani_ofdm_level_entry *entry_ofdm;
+	const struct ani_cck_level_entry *entry_cck;
+
+	aniState->noiseFloor = BEACON_RSSI(ah);
+
+	ath_print(common, ATH_DBG_ANI,
+		  "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+		  aniState->ofdmNoiseImmunityLevel,
+		  immunityLevel, aniState->noiseFloor,
+		  aniState->rssiThrLow, aniState->rssiThrHigh);
+
+	aniState->ofdmNoiseImmunityLevel = immunityLevel;
+
+	entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
+	entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
+
+	if (aniState->spurImmunityLevel != entry_ofdm->spur_immunity_level)
+		ath9k_hw_ani_control(ah,
+				     ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
+				     entry_ofdm->spur_immunity_level);
+
+	if (aniState->firstepLevel != entry_ofdm->fir_step_level &&
+	    entry_ofdm->fir_step_level >= entry_cck->fir_step_level)
+		ath9k_hw_ani_control(ah,
+				     ATH9K_ANI_FIRSTEP_LEVEL,
+				     entry_ofdm->fir_step_level);
+
+	if ((ah->opmode != NL80211_IFTYPE_STATION &&
+	     ah->opmode != NL80211_IFTYPE_ADHOC) ||
+	    aniState->noiseFloor <= aniState->rssiThrHigh) {
+		if (aniState->ofdmWeakSigDetectOff)
+			/* force on ofdm weak sig detect */
+			ath9k_hw_ani_control(ah,
+				ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+					     true);
+		else if (aniState->ofdmWeakSigDetectOff ==
+			 entry_ofdm->ofdm_weak_signal_on)
+			ath9k_hw_ani_control(ah,
+				ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+				entry_ofdm->ofdm_weak_signal_on);
+	}
+}
+
+static void ath9k_hw_ani_ofdm_err_trigger_new(struct ath_hw *ah)
+{
+	struct ar5416AniState *aniState;
+
+	if (!DO_ANI(ah))
+		return;
+
+	aniState = ah->curani;
+
+	if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
+		ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1);
+}
+
+/*
+ * Set the ANI settings to match an CCK level.
+ */
+static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
+{
+	struct ar5416AniState *aniState = ah->curani;
+	struct ath_common *common = ath9k_hw_common(ah);
+	const struct ani_ofdm_level_entry *entry_ofdm;
+	const struct ani_cck_level_entry *entry_cck;
+
+	aniState->noiseFloor = BEACON_RSSI(ah);
+	ath_print(common, ATH_DBG_ANI,
+		  "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+		  aniState->cckNoiseImmunityLevel, immunityLevel,
+		  aniState->noiseFloor, aniState->rssiThrLow,
+		  aniState->rssiThrHigh);
+
+	if ((ah->opmode == NL80211_IFTYPE_STATION ||
+	     ah->opmode == NL80211_IFTYPE_ADHOC) &&
+	    aniState->noiseFloor <= aniState->rssiThrLow &&
+	    immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
+		immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
+
+	aniState->cckNoiseImmunityLevel = immunityLevel;
+
+	entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
+	entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
+
+	if (aniState->firstepLevel != entry_cck->fir_step_level &&
+	    entry_cck->fir_step_level >= entry_ofdm->fir_step_level)
+		ath9k_hw_ani_control(ah,
+				     ATH9K_ANI_FIRSTEP_LEVEL,
+				     entry_cck->fir_step_level);
+
+	/* Skip MRC CCK for pre AR9003 families */
+	if (!AR_SREV_9300_20_OR_LATER(ah))
+		return;
+
+	if (aniState->mrcCCKOff == entry_cck->mrc_cck_on)
+		ath9k_hw_ani_control(ah,
+				     ATH9K_ANI_MRC_CCK,
+				     entry_cck->mrc_cck_on);
+}
+
+static void ath9k_hw_ani_cck_err_trigger_new(struct ath_hw *ah)
+{
+	struct ar5416AniState *aniState;
+
+	if (!DO_ANI(ah))
+		return;
+
+	aniState = ah->curani;
+
+	if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
+		ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1);
+}
+
+static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah)
 {
 	struct ar5416AniState *aniState;
 	int32_t rssi;
@@ -259,9 +503,53 @@
 	}
 }
 
+/*
+ * only lower either the OFDM or the CCK noise immunity level per turn;
+ * we lower the other one next time
+ */
+static void ath9k_hw_ani_lower_immunity_new(struct ath_hw *ah)
+{
+	struct ar5416AniState *aniState;
+
+	aniState = ah->curani;
+
+	/* lower OFDM noise immunity */
+	if (aniState->ofdmNoiseImmunityLevel > 0 &&
+	    (aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) {
+		ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1);
+		return;
+	}
+
+	/* lower CCK noise immunity */
+	if (aniState->cckNoiseImmunityLevel > 0)
+		ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1);
+}
+
+static u8 ath9k_hw_chan_2_clockrate_mhz(struct ath_hw *ah)
+{
+	struct ath9k_channel *chan = ah->curchan;
+	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+	u8 clockrate; /* in MHz */
+
+	if (!ah->curchan) /* should really check for CCK instead */
+		clockrate = ATH9K_CLOCK_RATE_CCK;
+	else if (conf->channel->band == IEEE80211_BAND_2GHZ)
+		clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
+	else if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+		clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
+	else
+		clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
+
+	if (conf_is_ht40(conf))
+		return clockrate * 2;
+
+	return clockrate;
+}
+
 static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
 {
 	struct ar5416AniState *aniState;
+	struct ath_common *common = ath9k_hw_common(ah);
 	u32 txFrameCount, rxFrameCount, cycleCount;
 	int32_t listenTime;
 
@@ -271,15 +559,31 @@
 
 	aniState = ah->curani;
 	if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
-
 		listenTime = 0;
 		ah->stats.ast_ani_lzero++;
+		ath_print(common, ATH_DBG_ANI,
+			  "1st call: aniState->cycleCount=%d\n",
+			  aniState->cycleCount);
 	} else {
 		int32_t ccdelta = cycleCount - aniState->cycleCount;
 		int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
 		int32_t tfdelta = txFrameCount - aniState->txFrameCount;
-		listenTime = (ccdelta - rfdelta - tfdelta) / 44000;
+		int32_t clock_rate;
+
+		/*
+		 * convert HW counter values to ms using mode
+		 * specific clock rate
+		 */
+		clock_rate = ath9k_hw_chan_2_clockrate_mhz(ah) * 1000;
+
+		listenTime = (ccdelta - rfdelta - tfdelta) / clock_rate;
+
+		ath_print(common, ATH_DBG_ANI,
+			  "cyclecount=%d, rfcount=%d, "
+			  "tfcount=%d, listenTime=%d CLOCK_RATE=%d\n",
+			  ccdelta, rfdelta, tfdelta, listenTime, clock_rate);
 	}
+
 	aniState->cycleCount = cycleCount;
 	aniState->txFrameCount = txFrameCount;
 	aniState->rxFrameCount = rxFrameCount;
@@ -287,7 +591,7 @@
 	return listenTime;
 }
 
-void ath9k_ani_reset(struct ath_hw *ah)
+static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
 {
 	struct ar5416AniState *aniState;
 	struct ath9k_channel *chan = ah->curchan;
@@ -340,7 +644,7 @@
 			ah->curani->cckTrigLow =
 				ah->config.cck_trig_low;
 		}
-		ath9k_ani_restart(ah);
+		ath9k_ani_restart_old(ah);
 		return;
 	}
 
@@ -362,7 +666,7 @@
 
 	ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
 			     ~ATH9K_RX_FILTER_PHYERR);
-	ath9k_ani_restart(ah);
+	ath9k_ani_restart_old(ah);
 
 	ENABLE_REGWRITE_BUFFER(ah);
 
@@ -373,8 +677,102 @@
 	DISABLE_REGWRITE_BUFFER(ah);
 }
 
-void ath9k_hw_ani_monitor(struct ath_hw *ah,
-			  struct ath9k_channel *chan)
+/*
+ * Restore the ANI parameters in the HAL and reset the statistics.
+ * This routine should be called for every hardware reset and for
+ * every channel change.
+ */
+static void ath9k_ani_reset_new(struct ath_hw *ah, bool is_scanning)
+{
+	struct ar5416AniState *aniState = ah->curani;
+	struct ath9k_channel *chan = ah->curchan;
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	if (!DO_ANI(ah))
+		return;
+
+	BUG_ON(aniState == NULL);
+	ah->stats.ast_ani_reset++;
+
+	/* only allow a subset of functions in AP mode */
+	if (ah->opmode == NL80211_IFTYPE_AP) {
+		if (IS_CHAN_2GHZ(chan)) {
+			ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
+					    ATH9K_ANI_FIRSTEP_LEVEL);
+			if (AR_SREV_9300_20_OR_LATER(ah))
+				ah->ani_function |= ATH9K_ANI_MRC_CCK;
+		} else
+			ah->ani_function = 0;
+	}
+
+	/* always allow mode (on/off) to be controlled */
+	ah->ani_function |= ATH9K_ANI_MODE;
+
+	if (is_scanning ||
+	    (ah->opmode != NL80211_IFTYPE_STATION &&
+	     ah->opmode != NL80211_IFTYPE_ADHOC)) {
+		/*
+		 * If we're scanning or in AP mode, the defaults (ini)
+		 * should be in place. For an AP we assume the historical
+		 * levels for this channel are probably outdated so start
+		 * from defaults instead.
+		 */
+		if (aniState->ofdmNoiseImmunityLevel !=
+		    ATH9K_ANI_OFDM_DEF_LEVEL ||
+		    aniState->cckNoiseImmunityLevel !=
+		    ATH9K_ANI_CCK_DEF_LEVEL) {
+			ath_print(common, ATH_DBG_ANI,
+				  "Restore defaults: opmode %u "
+				  "chan %d Mhz/0x%x is_scanning=%d "
+				  "ofdm:%d cck:%d\n",
+				  ah->opmode,
+				  chan->channel,
+				  chan->channelFlags,
+				  is_scanning,
+				  aniState->ofdmNoiseImmunityLevel,
+				  aniState->cckNoiseImmunityLevel);
+
+			ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL);
+			ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL);
+		}
+	} else {
+		/*
+		 * restore historical levels for this channel
+		 */
+		ath_print(common, ATH_DBG_ANI,
+			  "Restore history: opmode %u "
+			  "chan %d Mhz/0x%x is_scanning=%d "
+			  "ofdm:%d cck:%d\n",
+			  ah->opmode,
+			  chan->channel,
+			  chan->channelFlags,
+			  is_scanning,
+			  aniState->ofdmNoiseImmunityLevel,
+			  aniState->cckNoiseImmunityLevel);
+
+			ath9k_hw_set_ofdm_nil(ah,
+					      aniState->ofdmNoiseImmunityLevel);
+			ath9k_hw_set_cck_nil(ah,
+					     aniState->cckNoiseImmunityLevel);
+	}
+
+	/*
+	 * enable phy counters if hw supports or if not, enable phy
+	 * interrupts (so we can count each one)
+	 */
+	ath9k_ani_restart_new(ah);
+
+	ENABLE_REGWRITE_BUFFER(ah);
+
+	REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
+	REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+	REGWRITE_BUFFER_FLUSH(ah);
+	DISABLE_REGWRITE_BUFFER(ah);
+}
+
+static void ath9k_hw_ani_monitor_old(struct ath_hw *ah,
+				     struct ath9k_channel *chan)
 {
 	struct ar5416AniState *aniState;
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -390,7 +788,7 @@
 	listenTime = ath9k_hw_ani_get_listen_time(ah);
 	if (listenTime < 0) {
 		ah->stats.ast_ani_lneg++;
-		ath9k_ani_restart(ah);
+		ath9k_ani_restart_old(ah);
 		return;
 	}
 
@@ -444,21 +842,166 @@
 		    aniState->cckPhyErrCount <= aniState->listenTime *
 		    aniState->cckTrigLow / 1000)
 			ath9k_hw_ani_lower_immunity(ah);
-		ath9k_ani_restart(ah);
+		ath9k_ani_restart_old(ah);
 	} else if (aniState->listenTime > ah->aniperiod) {
 		if (aniState->ofdmPhyErrCount > aniState->listenTime *
 		    aniState->ofdmTrigHigh / 1000) {
-			ath9k_hw_ani_ofdm_err_trigger(ah);
-			ath9k_ani_restart(ah);
+			ath9k_hw_ani_ofdm_err_trigger_old(ah);
+			ath9k_ani_restart_old(ah);
 		} else if (aniState->cckPhyErrCount >
 			   aniState->listenTime * aniState->cckTrigHigh /
 			   1000) {
-			ath9k_hw_ani_cck_err_trigger(ah);
-			ath9k_ani_restart(ah);
+			ath9k_hw_ani_cck_err_trigger_old(ah);
+			ath9k_ani_restart_old(ah);
 		}
 	}
 }
-EXPORT_SYMBOL(ath9k_hw_ani_monitor);
+
+static void ath9k_hw_ani_monitor_new(struct ath_hw *ah,
+				     struct ath9k_channel *chan)
+{
+	struct ar5416AniState *aniState;
+	struct ath_common *common = ath9k_hw_common(ah);
+	int32_t listenTime;
+	u32 phyCnt1, phyCnt2;
+	u32 ofdmPhyErrCnt, cckPhyErrCnt;
+	u32 ofdmPhyErrRate, cckPhyErrRate;
+
+	if (!DO_ANI(ah))
+		return;
+
+	aniState = ah->curani;
+	if (WARN_ON(!aniState))
+		return;
+
+	listenTime = ath9k_hw_ani_get_listen_time(ah);
+	if (listenTime <= 0) {
+		ah->stats.ast_ani_lneg++;
+		/* restart ANI period if listenTime is invalid */
+		ath_print(common, ATH_DBG_ANI,
+			  "listenTime=%d - on new ani monitor\n",
+			  listenTime);
+		ath9k_ani_restart_new(ah);
+		return;
+	}
+
+	aniState->listenTime += listenTime;
+
+	ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+	phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
+	phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
+
+	if (phyCnt1 < aniState->ofdmPhyErrBase ||
+	    phyCnt2 < aniState->cckPhyErrBase) {
+		if (phyCnt1 < aniState->ofdmPhyErrBase) {
+			ath_print(common, ATH_DBG_ANI,
+				  "phyCnt1 0x%x, resetting "
+				  "counter value to 0x%x\n",
+				  phyCnt1,
+				  aniState->ofdmPhyErrBase);
+			REG_WRITE(ah, AR_PHY_ERR_1,
+				  aniState->ofdmPhyErrBase);
+			REG_WRITE(ah, AR_PHY_ERR_MASK_1,
+				  AR_PHY_ERR_OFDM_TIMING);
+		}
+		if (phyCnt2 < aniState->cckPhyErrBase) {
+			ath_print(common, ATH_DBG_ANI,
+				  "phyCnt2 0x%x, resetting "
+				  "counter value to 0x%x\n",
+				  phyCnt2,
+				  aniState->cckPhyErrBase);
+			REG_WRITE(ah, AR_PHY_ERR_2,
+				  aniState->cckPhyErrBase);
+			REG_WRITE(ah, AR_PHY_ERR_MASK_2,
+				  AR_PHY_ERR_CCK_TIMING);
+		}
+		return;
+	}
+
+	ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
+	ah->stats.ast_ani_ofdmerrs +=
+		ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
+	aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
+
+	cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
+	ah->stats.ast_ani_cckerrs +=
+		cckPhyErrCnt - aniState->cckPhyErrCount;
+	aniState->cckPhyErrCount = cckPhyErrCnt;
+
+	ath_print(common, ATH_DBG_ANI,
+		  "Errors: OFDM=0x%08x-0x%08x=%d   "
+		  "CCK=0x%08x-0x%08x=%d\n",
+		  phyCnt1,
+		  aniState->ofdmPhyErrBase,
+		  ofdmPhyErrCnt,
+		  phyCnt2,
+		  aniState->cckPhyErrBase,
+		  cckPhyErrCnt);
+
+	ofdmPhyErrRate = aniState->ofdmPhyErrCount * 1000 /
+			 aniState->listenTime;
+	cckPhyErrRate =  aniState->cckPhyErrCount * 1000 /
+			 aniState->listenTime;
+
+	ath_print(common, ATH_DBG_ANI,
+		  "listenTime=%d OFDM:%d errs=%d/s CCK:%d "
+		  "errs=%d/s ofdm_turn=%d\n",
+		  listenTime, aniState->ofdmNoiseImmunityLevel,
+		  ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
+		  cckPhyErrRate, aniState->ofdmsTurn);
+
+	if (aniState->listenTime > 5 * ah->aniperiod) {
+		if (ofdmPhyErrRate <= aniState->ofdmTrigLow &&
+		    cckPhyErrRate <= aniState->cckTrigLow) {
+			ath_print(common, ATH_DBG_ANI,
+				  "1. listenTime=%d OFDM:%d errs=%d/s(<%d)  "
+				  "CCK:%d errs=%d/s(<%d) -> "
+				  "ath9k_hw_ani_lower_immunity()\n",
+				  aniState->listenTime,
+				  aniState->ofdmNoiseImmunityLevel,
+				  ofdmPhyErrRate,
+				  aniState->ofdmTrigLow,
+				  aniState->cckNoiseImmunityLevel,
+				  cckPhyErrRate,
+				  aniState->cckTrigLow);
+			ath9k_hw_ani_lower_immunity(ah);
+			aniState->ofdmsTurn = !aniState->ofdmsTurn;
+		}
+		ath_print(common, ATH_DBG_ANI,
+			  "1 listenTime=%d ofdm=%d/s cck=%d/s - "
+			  "calling ath9k_ani_restart_new()\n",
+			  aniState->listenTime, ofdmPhyErrRate, cckPhyErrRate);
+		ath9k_ani_restart_new(ah);
+	} else if (aniState->listenTime > ah->aniperiod) {
+		/* check to see if we need to raise immunity */
+		if (ofdmPhyErrRate > aniState->ofdmTrigHigh &&
+		    (cckPhyErrRate <= aniState->cckTrigHigh ||
+		     aniState->ofdmsTurn)) {
+			ath_print(common, ATH_DBG_ANI,
+				  "2 listenTime=%d OFDM:%d errs=%d/s(>%d) -> "
+				  "ath9k_hw_ani_ofdm_err_trigger_new()\n",
+				  aniState->listenTime,
+				  aniState->ofdmNoiseImmunityLevel,
+				  ofdmPhyErrRate,
+				  aniState->ofdmTrigHigh);
+			ath9k_hw_ani_ofdm_err_trigger_new(ah);
+			ath9k_ani_restart_new(ah);
+			aniState->ofdmsTurn = false;
+		} else if (cckPhyErrRate > aniState->cckTrigHigh) {
+			ath_print(common, ATH_DBG_ANI,
+				 "3 listenTime=%d CCK:%d errs=%d/s(>%d) -> "
+				 "ath9k_hw_ani_cck_err_trigger_new()\n",
+				 aniState->listenTime,
+				 aniState->cckNoiseImmunityLevel,
+				 cckPhyErrRate,
+				 aniState->cckTrigHigh);
+			ath9k_hw_ani_cck_err_trigger_new(ah);
+			ath9k_ani_restart_new(ah);
+			aniState->ofdmsTurn = true;
+		}
+	}
+}
 
 void ath9k_enable_mib_counters(struct ath_hw *ah)
 {
@@ -495,6 +1038,7 @@
 	REG_WRITE(ah, AR_FILT_OFDM, 0);
 	REG_WRITE(ah, AR_FILT_CCK, 0);
 }
+EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
 
 u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
 				  u32 *rxc_pcnt,
@@ -542,7 +1086,7 @@
  * any of the MIB counters overflow/trigger so don't assume we're
  * here because a PHY error counter triggered.
  */
-void ath9k_hw_procmibevent(struct ath_hw *ah)
+static void ath9k_hw_proc_mib_event_old(struct ath_hw *ah)
 {
 	u32 phyCnt1, phyCnt2;
 
@@ -555,8 +1099,15 @@
 	/* Clear the mib counters and save them in the stats */
 	ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
 
-	if (!DO_ANI(ah))
+	if (!DO_ANI(ah)) {
+		/*
+		 * We must always clear the interrupt cause by
+		 * resetting the phy error regs.
+		 */
+		REG_WRITE(ah, AR_PHY_ERR_1, 0);
+		REG_WRITE(ah, AR_PHY_ERR_2, 0);
 		return;
+	}
 
 	/* NB: these are not reset-on-read */
 	phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
@@ -584,14 +1135,51 @@
 		 * check will never be true.
 		 */
 		if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
-			ath9k_hw_ani_ofdm_err_trigger(ah);
+			ath9k_hw_ani_ofdm_err_trigger_old(ah);
 		if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
-			ath9k_hw_ani_cck_err_trigger(ah);
+			ath9k_hw_ani_cck_err_trigger_old(ah);
 		/* NB: always restart to insure the h/w counters are reset */
-		ath9k_ani_restart(ah);
+		ath9k_ani_restart_old(ah);
 	}
 }
-EXPORT_SYMBOL(ath9k_hw_procmibevent);
+
+/*
+ * Process a MIB interrupt.  We may potentially be invoked because
+ * any of the MIB counters overflow/trigger so don't assume we're
+ * here because a PHY error counter triggered.
+ */
+static void ath9k_hw_proc_mib_event_new(struct ath_hw *ah)
+{
+	u32 phyCnt1, phyCnt2;
+
+	/* Reset these counters regardless */
+	REG_WRITE(ah, AR_FILT_OFDM, 0);
+	REG_WRITE(ah, AR_FILT_CCK, 0);
+	if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
+		REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
+
+	/* Clear the mib counters and save them in the stats */
+	ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+	if (!DO_ANI(ah)) {
+		/*
+		 * We must always clear the interrupt cause by
+		 * resetting the phy error regs.
+		 */
+		REG_WRITE(ah, AR_PHY_ERR_1, 0);
+		REG_WRITE(ah, AR_PHY_ERR_2, 0);
+		return;
+	}
+
+	/* NB: these are not reset-on-read */
+	phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
+	phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
+
+	/* NB: always restart to insure the h/w counters are reset */
+	if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
+	    ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK))
+		ath9k_ani_restart_new(ah);
+}
 
 void ath9k_hw_ani_setup(struct ath_hw *ah)
 {
@@ -619,22 +1207,70 @@
 
 	memset(ah->ani, 0, sizeof(ah->ani));
 	for (i = 0; i < ARRAY_SIZE(ah->ani); i++) {
-		ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH;
-		ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW;
-		ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH;
-		ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW;
+		if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) {
+			ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
+			ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_NEW;
+
+			ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_NEW;
+			ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_NEW;
+
+			ah->ani[i].spurImmunityLevel =
+				ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
+
+			ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
+
+			ah->ani[i].ofdmPhyErrBase = 0;
+			ah->ani[i].cckPhyErrBase = 0;
+
+			if (AR_SREV_9300_20_OR_LATER(ah))
+				ah->ani[i].mrcCCKOff =
+					!ATH9K_ANI_ENABLE_MRC_CCK;
+			else
+				ah->ani[i].mrcCCKOff = true;
+
+			ah->ani[i].ofdmsTurn = true;
+		} else {
+			ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
+			ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
+
+			ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
+			ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_OLD;
+
+			ah->ani[i].spurImmunityLevel =
+				ATH9K_ANI_SPUR_IMMUNE_LVL_OLD;
+			ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_OLD;
+
+			ah->ani[i].ofdmPhyErrBase =
+				AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
+			ah->ani[i].cckPhyErrBase =
+				AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH_OLD;
+			ah->ani[i].cckWeakSigThreshold =
+				ATH9K_ANI_CCK_WEAK_SIG_THR;
+		}
+
 		ah->ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
 		ah->ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
 		ah->ani[i].ofdmWeakSigDetectOff =
 			!ATH9K_ANI_USE_OFDM_WEAK_SIG;
-		ah->ani[i].cckWeakSigThreshold =
-			ATH9K_ANI_CCK_WEAK_SIG_THR;
-		ah->ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
-		ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
-		ah->ani[i].ofdmPhyErrBase =
-			AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH;
-		ah->ani[i].cckPhyErrBase =
-			AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
+		ah->ani[i].cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+	}
+
+	/*
+	 * since we expect some ongoing maintenance on the tables, let's sanity
+	 * check here default level should not modify INI setting.
+	 */
+	if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) {
+		const struct ani_ofdm_level_entry *entry_ofdm;
+		const struct ani_cck_level_entry *entry_cck;
+
+		entry_ofdm = &ofdm_level_table[ATH9K_ANI_OFDM_DEF_LEVEL];
+		entry_cck = &cck_level_table[ATH9K_ANI_CCK_DEF_LEVEL];
+
+		ah->aniperiod = ATH9K_ANI_PERIOD_NEW;
+		ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_NEW;
+	} else {
+		ah->aniperiod = ATH9K_ANI_PERIOD_OLD;
+		ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_OLD;
 	}
 
 	ath_print(common, ATH_DBG_ANI,
@@ -653,7 +1289,34 @@
 
 	ath9k_enable_mib_counters(ah);
 
-	ah->aniperiod = ATH9K_ANI_PERIOD;
 	if (ah->config.enable_ani)
 		ah->proc_phyerr |= HAL_PROCESS_ANI;
 }
+
+void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah)
+{
+	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+	struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+	priv_ops->ani_reset = ath9k_ani_reset_old;
+	priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_old;
+
+	ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_old;
+	ops->ani_monitor = ath9k_hw_ani_monitor_old;
+
+	ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v1\n");
+}
+
+void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah)
+{
+	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+	struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+	priv_ops->ani_reset = ath9k_ani_reset_new;
+	priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_new;
+
+	ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_new;
+	ops->ani_monitor = ath9k_hw_ani_monitor_new;
+
+	ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v2\n");
+}
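
For reference, the listen-time math introduced in ath9k_hw_ani_get_listen_time() above boils down to converting hardware counter deltas into milliseconds using the mode-specific MAC clock. A minimal standalone sketch, with a hypothetical helper name and the clock rate passed in as a plain number in MHz, could look like this:

/* Illustrative only: mirror of the new listen-time conversion.  The clock
 * rate corresponds to ATH9K_CLOCK_RATE_* (MHz), doubled for HT40 as in
 * ath9k_hw_chan_2_clockrate_mhz(). */
static int ani_listen_time_ms(unsigned int cc_delta,  /* cycle count delta */
			      unsigned int rf_delta,  /* rx frame cycles   */
			      unsigned int tf_delta,  /* tx frame cycles   */
			      unsigned int clock_mhz, /* MAC clock, in MHz */
			      int ht40)
{
	/* MHz means cycles per microsecond, so MHz * 1000 is cycles per ms */
	unsigned int cycles_per_ms = clock_mhz * (ht40 ? 2 : 1) * 1000;

	/* time spent neither transmitting nor receiving, in ms; assumes the
	 * deltas are consistent (cc_delta >= rf_delta + tf_delta) */
	return (cc_delta - rf_delta - tf_delta) / cycles_per_ms;
}

For a 5 GHz OFDM channel in HT40 this works out to 80,000 idle cycles per millisecond of listen time.
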
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 3356762..f4d0a4d 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -23,23 +23,55 @@
 
 #define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
 
-#define ATH9K_ANI_OFDM_TRIG_HIGH          500
-#define ATH9K_ANI_OFDM_TRIG_LOW           200
-#define ATH9K_ANI_CCK_TRIG_HIGH           200
-#define ATH9K_ANI_CCK_TRIG_LOW            100
+/* units are errors per second */
+#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD      500
+#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW      1000
+
+/* units are errors per second */
+#define ATH9K_ANI_OFDM_TRIG_LOW_OLD       200
+#define ATH9K_ANI_OFDM_TRIG_LOW_NEW       400
+
+/* units are errors per second */
+#define ATH9K_ANI_CCK_TRIG_HIGH_OLD       200
+#define ATH9K_ANI_CCK_TRIG_HIGH_NEW       600
+
+/* units are errors per second */
+#define ATH9K_ANI_CCK_TRIG_LOW_OLD        100
+#define ATH9K_ANI_CCK_TRIG_LOW_NEW        300
+
 #define ATH9K_ANI_NOISE_IMMUNE_LVL        4
 #define ATH9K_ANI_USE_OFDM_WEAK_SIG       true
 #define ATH9K_ANI_CCK_WEAK_SIG_THR        false
-#define ATH9K_ANI_SPUR_IMMUNE_LVL         7
-#define ATH9K_ANI_FIRSTEP_LVL             0
+
+#define ATH9K_ANI_SPUR_IMMUNE_LVL_OLD     7
+#define ATH9K_ANI_SPUR_IMMUNE_LVL_NEW     3
+
+#define ATH9K_ANI_FIRSTEP_LVL_OLD         0
+#define ATH9K_ANI_FIRSTEP_LVL_NEW         2
+
 #define ATH9K_ANI_RSSI_THR_HIGH           40
 #define ATH9K_ANI_RSSI_THR_LOW            7
-#define ATH9K_ANI_PERIOD                  100
+
+#define ATH9K_ANI_PERIOD_OLD              100
+#define ATH9K_ANI_PERIOD_NEW              1000
+
+/* in ms */
+#define ATH9K_ANI_POLLINTERVAL_OLD        100
+#define ATH9K_ANI_POLLINTERVAL_NEW        1000
 
 #define HAL_NOISE_IMMUNE_MAX              4
 #define HAL_SPUR_IMMUNE_MAX               7
 #define HAL_FIRST_STEP_MAX                2
 
+#define ATH9K_SIG_FIRSTEP_SETTING_MIN     0
+#define ATH9K_SIG_FIRSTEP_SETTING_MAX     20
+#define ATH9K_SIG_SPUR_IMM_SETTING_MIN    0
+#define ATH9K_SIG_SPUR_IMM_SETTING_MAX    22
+
+#define ATH9K_ANI_ENABLE_MRC_CCK          true
+
+/* values here are relative to the INI */
+
 enum ath9k_ani_cmd {
 	ATH9K_ANI_PRESENT = 0x1,
 	ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
@@ -49,7 +81,8 @@
 	ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
 	ATH9K_ANI_MODE = 0x40,
 	ATH9K_ANI_PHYERR_RESET = 0x80,
-	ATH9K_ANI_ALL = 0xff
+	ATH9K_ANI_MRC_CCK = 0x100,
+	ATH9K_ANI_ALL = 0xfff
 };
 
 struct ath9k_mib_stats {
@@ -60,9 +93,31 @@
 	u32 beacons;
 };
 
+/* INI default values for ANI registers */
+struct ath9k_ani_default {
+	u16 m1ThreshLow;
+	u16 m2ThreshLow;
+	u16 m1Thresh;
+	u16 m2Thresh;
+	u16 m2CountThr;
+	u16 m2CountThrLow;
+	u16 m1ThreshLowExt;
+	u16 m2ThreshLowExt;
+	u16 m1ThreshExt;
+	u16 m2ThreshExt;
+	u16 firstep;
+	u16 firstepLow;
+	u16 cycpwrThr1;
+	u16 cycpwrThr1Ext;
+};
+
 struct ar5416AniState {
 	struct ath9k_channel *c;
 	u8 noiseImmunityLevel;
+	u8 ofdmNoiseImmunityLevel;
+	u8 cckNoiseImmunityLevel;
+	bool ofdmsTurn;
+	u8 mrcCCKOff;
 	u8 spurImmunityLevel;
 	u8 firstepLevel;
 	u8 ofdmWeakSigDetectOff;
@@ -85,6 +140,7 @@
 	int16_t pktRssi[2];
 	int16_t ofdmErrRssi[2];
 	int16_t cckErrRssi[2];
+	struct ath9k_ani_default iniDef;
 };
 
 struct ar5416Stats {
@@ -108,15 +164,13 @@
 };
 #define ah_mibStats stats.ast_mibstats
 
-void ath9k_ani_reset(struct ath_hw *ah);
-void ath9k_hw_ani_monitor(struct ath_hw *ah,
-			  struct ath9k_channel *chan);
 void ath9k_enable_mib_counters(struct ath_hw *ah);
 void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
 u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
 				  u32 *rxf_pcnt, u32 *txf_pcnt);
-void ath9k_hw_procmibevent(struct ath_hw *ah);
 void ath9k_hw_ani_setup(struct ath_hw *ah);
 void ath9k_hw_ani_init(struct ath_hw *ah);
+int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
+				 struct ath9k_channel *chan);
 
 #endif /* ANI_H */
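
Since the _NEW thresholds above are expressed in errors per second while the hardware counters are raw totals, the new monitor code compares a derived rate. A small sketch of that comparison, with a hypothetical helper name and the threshold written out as a plain number:

/* Illustrative only: how an errors-per-second threshold such as
 * ATH9K_ANI_OFDM_TRIG_HIGH_NEW (1000) is meant to be applied. */
static int ofdm_errs_above_trig_high(unsigned int ofdm_err_count,
				     unsigned int listen_time_ms)
{
	/* scale the raw PHY error count to errors per second */
	unsigned int errs_per_sec = ofdm_err_count * 1000 / listen_time_ms;

	return errs_per_sec > 1000; /* ATH9K_ANI_OFDM_TRIG_HIGH_NEW */
}

So 600 OFDM timing errors seen during a 500 ms listen period (1200 errors/s) would raise the OFDM noise immunity level.
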
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index b2c17c9..ee34a49 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -19,7 +19,30 @@
 #include "../regd.h"
 #include "ar9002_phy.h"
 
-/* All code below is for non single-chip solutions */
+/* All code below is for AR5008, AR9001, AR9002 */
+
+static const int firstep_table[] =
+/* level:  0   1   2   3   4   5   6   7   8  */
+	{ -4, -2,  0,  2,  4,  6,  8, 10, 12 }; /* lvl 0-8, default 2 */
+
+static const int cycpwrThr1_table[] =
+/* level:  0   1   2   3   4   5   6   7   8  */
+	{ -6, -4, -2,  0,  2,  4,  6,  8 };     /* lvl 0-7, default 3 */
+
+/*
+ * register values to turn OFDM weak signal detection OFF
+ */
+static const int m1ThreshLow_off = 127;
+static const int m2ThreshLow_off = 127;
+static const int m1Thresh_off = 127;
+static const int m2Thresh_off = 127;
+static const int m2CountThr_off =  31;
+static const int m2CountThrLow_off =  63;
+static const int m1ThreshLowExt_off = 127;
+static const int m2ThreshLowExt_off = 127;
+static const int m1ThreshExt_off = 127;
+static const int m2ThreshExt_off = 127;
+
 
 /**
  * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
@@ -742,17 +765,6 @@
 		return -EINVAL;
 	}
 
-	if (AR_SREV_9287_12_OR_LATER(ah)) {
-		/* Enable ASYNC FIFO */
-		REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
-				AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
-		REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
-		REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
-				AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
-		REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
-				AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
-	}
-
 	/*
 	 * Set correct baseband to analog shift setting to
 	 * access analog chips.
@@ -1037,8 +1049,9 @@
 	return pll;
 }
 
-static bool ar5008_hw_ani_control(struct ath_hw *ah,
-				  enum ath9k_ani_cmd cmd, int param)
+static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
+				      enum ath9k_ani_cmd cmd,
+				      int param)
 {
 	struct ar5416AniState *aniState = ah->curani;
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -1220,6 +1233,265 @@
 	return true;
 }
 
+static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
+				      enum ath9k_ani_cmd cmd,
+				      int param)
+{
+	struct ar5416AniState *aniState = ah->curani;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_channel *chan = ah->curchan;
+	s32 value, value2;
+
+	switch (cmd & ah->ani_function) {
+	case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
+		/*
+		 * on == 1 means ofdm weak signal detection is ON
+		 * on == 1 is the default, for less noise immunity
+		 *
+		 * on == 0 means ofdm weak signal detection is OFF
+		 * on == 0 means more noise immunity
+		 */
+		u32 on = param ? 1 : 0;
+		/*
+		 * make register setting for default
+		 * (weak sig detect ON) come from INI file
+		 */
+		int m1ThreshLow = on ?
+			aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
+		int m2ThreshLow = on ?
+			aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
+		int m1Thresh = on ?
+			aniState->iniDef.m1Thresh : m1Thresh_off;
+		int m2Thresh = on ?
+			aniState->iniDef.m2Thresh : m2Thresh_off;
+		int m2CountThr = on ?
+			aniState->iniDef.m2CountThr : m2CountThr_off;
+		int m2CountThrLow = on ?
+			aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
+		int m1ThreshLowExt = on ?
+			aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
+		int m2ThreshLowExt = on ?
+			aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
+		int m1ThreshExt = on ?
+			aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
+		int m2ThreshExt = on ?
+			aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
+
+		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+			      AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+			      m1ThreshLow);
+		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+			      AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+			      m2ThreshLow);
+		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+			      AR_PHY_SFCORR_M1_THRESH, m1Thresh);
+		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+			      AR_PHY_SFCORR_M2_THRESH, m2Thresh);
+		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+			      AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
+		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+			      AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+			      m2CountThrLow);
+
+		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+			      AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
+		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+			      AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
+		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+			      AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
+		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+			      AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
+
+		if (on)
+			REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+		else
+			REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+		if (!on != aniState->ofdmWeakSigDetectOff) {
+			ath_print(common, ATH_DBG_ANI,
+				  "** ch %d: ofdm weak signal: %s=>%s\n",
+				  chan->channel,
+				  !aniState->ofdmWeakSigDetectOff ?
+					"on" : "off",
+				  on ? "on" : "off");
+			if (on)
+				ah->stats.ast_ani_ofdmon++;
+			else
+				ah->stats.ast_ani_ofdmoff++;
+			aniState->ofdmWeakSigDetectOff = !on;
+		}
+		break;
+	}
+	case ATH9K_ANI_FIRSTEP_LEVEL:{
+		u32 level = param;
+
+		if (level >= ARRAY_SIZE(firstep_table)) {
+			ath_print(common, ATH_DBG_ANI,
+				  "ATH9K_ANI_FIRSTEP_LEVEL: level "
+				  "out of range (%u > %u)\n",
+				  level,
+				  (unsigned) ARRAY_SIZE(firstep_table));
+			return false;
+		}
+
+		/*
+		 * make register setting relative to default
+		 * from INI file & cap value
+		 */
+		value = firstep_table[level] -
+			firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+			aniState->iniDef.firstep;
+		if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+			value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+		if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+			value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+			      AR_PHY_FIND_SIG_FIRSTEP,
+			      value);
+		/*
+		 * we need to set first step low register too
+		 * make register setting relative to default
+		 * from INI file & cap value
+		 */
+		value2 = firstep_table[level] -
+			 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+			 aniState->iniDef.firstepLow;
+		if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+			value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+		if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+			value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+
+		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
+			      AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
+
+		if (level != aniState->firstepLevel) {
+			ath_print(common, ATH_DBG_ANI,
+				  "** ch %d: level %d=>%d[def:%d] "
+				  "firstep[level]=%d ini=%d\n",
+				  chan->channel,
+				  aniState->firstepLevel,
+				  level,
+				  ATH9K_ANI_FIRSTEP_LVL_NEW,
+				  value,
+				  aniState->iniDef.firstep);
+			ath_print(common, ATH_DBG_ANI,
+				  "** ch %d: level %d=>%d[def:%d] "
+				  "firstep_low[level]=%d ini=%d\n",
+				  chan->channel,
+				  aniState->firstepLevel,
+				  level,
+				  ATH9K_ANI_FIRSTEP_LVL_NEW,
+				  value2,
+				  aniState->iniDef.firstepLow);
+			if (level > aniState->firstepLevel)
+				ah->stats.ast_ani_stepup++;
+			else if (level < aniState->firstepLevel)
+				ah->stats.ast_ani_stepdown++;
+			aniState->firstepLevel = level;
+		}
+		break;
+	}
+	case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+		u32 level = param;
+
+		if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
+			ath_print(common, ATH_DBG_ANI,
+				  "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level "
+				  "out of range (%u > %u)\n",
+				  level,
+				  (unsigned) ARRAY_SIZE(cycpwrThr1_table));
+			return false;
+		}
+		/*
+		 * make register setting relative to default
+		 * from INI file & cap value
+		 */
+		value = cycpwrThr1_table[level] -
+			cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+			aniState->iniDef.cycpwrThr1;
+		if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+			value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+		if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+			value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+		REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+			      AR_PHY_TIMING5_CYCPWR_THR1,
+			      value);
+
+		/*
+		 * set AR_PHY_EXT_CCA for extension channel
+		 * make register setting relative to default
+		 * from INI file & cap value
+		 */
+		value2 = cycpwrThr1_table[level] -
+			 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+			 aniState->iniDef.cycpwrThr1Ext;
+		if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+			value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+		if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+			value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+		REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+			      AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
+
+		if (level != aniState->spurImmunityLevel) {
+			ath_print(common, ATH_DBG_ANI,
+				  "** ch %d: level %d=>%d[def:%d] "
+				  "cycpwrThr1[level]=%d ini=%d\n",
+				  chan->channel,
+				  aniState->spurImmunityLevel,
+				  level,
+				  ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+				  value,
+				  aniState->iniDef.cycpwrThr1);
+			ath_print(common, ATH_DBG_ANI,
+				  "** ch %d: level %d=>%d[def:%d] "
+				  "cycpwrThr1Ext[level]=%d ini=%d\n",
+				  chan->channel,
+				  aniState->spurImmunityLevel,
+				  level,
+				  ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+				  value2,
+				  aniState->iniDef.cycpwrThr1Ext);
+			if (level > aniState->spurImmunityLevel)
+				ah->stats.ast_ani_spurup++;
+			else if (level < aniState->spurImmunityLevel)
+				ah->stats.ast_ani_spurdown++;
+			aniState->spurImmunityLevel = level;
+		}
+		break;
+	}
+	case ATH9K_ANI_MRC_CCK:
+		/*
+		 * You should not see this, as AR5008, AR9001 and AR9002
+		 * do not have hardware support for MRC CCK.
+		 */
+		WARN_ON(1);
+		break;
+	case ATH9K_ANI_PRESENT:
+		break;
+	default:
+		ath_print(common, ATH_DBG_ANI,
+			  "invalid cmd %u\n", cmd);
+		return false;
+	}
+
+	ath_print(common, ATH_DBG_ANI,
+		  "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
+		  "MRCcck=%s listenTime=%d CC=%d listen=%d "
+		  "ofdmErrs=%d cckErrs=%d\n",
+		  aniState->spurImmunityLevel,
+		  !aniState->ofdmWeakSigDetectOff ? "on" : "off",
+		  aniState->firstepLevel,
+		  !aniState->mrcCCKOff ? "on" : "off",
+		  aniState->listenTime,
+		  aniState->cycleCount,
+		  aniState->listenTime,
+		  aniState->ofdmPhyErrCount,
+		  aniState->cckPhyErrCount);
+	return true;
+}
+
 static void ar5008_hw_do_getnf(struct ath_hw *ah,
 			      int16_t nfarray[NUM_NF_READINGS])
 {
@@ -1340,6 +1612,71 @@
 	DISABLE_REGWRITE_BUFFER(ah);
 }
 
+/*
+ * Initialize the ANI register values with default (ini) values.
+ * This routine is called during a (full) hardware reset after
+ * all the registers are initialised from the INI.
+ */
+static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+	struct ar5416AniState *aniState;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_channel *chan = ah->curchan;
+	struct ath9k_ani_default *iniDef;
+	int index;
+	u32 val;
+
+	index = ath9k_hw_get_ani_channel_idx(ah, chan);
+	aniState = &ah->ani[index];
+	ah->curani = aniState;
+	iniDef = &aniState->iniDef;
+
+	ath_print(common, ATH_DBG_ANI,
+		  "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+		  ah->hw_version.macVersion,
+		  ah->hw_version.macRev,
+		  ah->opmode,
+		  chan->channel,
+		  chan->channelFlags);
+
+	val = REG_READ(ah, AR_PHY_SFCORR);
+	iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
+	iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
+	iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
+
+	val = REG_READ(ah, AR_PHY_SFCORR_LOW);
+	iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
+	iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
+	iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
+
+	val = REG_READ(ah, AR_PHY_SFCORR_EXT);
+	iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
+	iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
+	iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
+	iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
+	iniDef->firstep = REG_READ_FIELD(ah,
+					 AR_PHY_FIND_SIG,
+					 AR_PHY_FIND_SIG_FIRSTEP);
+	iniDef->firstepLow = REG_READ_FIELD(ah,
+					    AR_PHY_FIND_SIG_LOW,
+					    AR_PHY_FIND_SIG_FIRSTEP_LOW);
+	iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
+					    AR_PHY_TIMING5,
+					    AR_PHY_TIMING5_CYCPWR_THR1);
+	iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
+					       AR_PHY_EXT_CCA,
+					       AR_PHY_EXT_TIMING5_CYCPWR_THR1);
+
+	/* these levels just got reset to defaults by the INI */
+	aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
+	aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
+	aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
+	aniState->mrcCCKOff = true; /* not available on pre AR9003 */
+
+	aniState->cycleCount = 0;
+}
+
+
 void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
 {
 	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1361,10 +1698,15 @@
 	priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
 	priv_ops->restore_chainmask = ar5008_restore_chainmask;
 	priv_ops->set_diversity = ar5008_set_diversity;
-	priv_ops->ani_control = ar5008_hw_ani_control;
 	priv_ops->do_getnf = ar5008_hw_do_getnf;
 	priv_ops->loadnf = ar5008_hw_loadnf;
 
+	if (modparam_force_new_ani) {
+		priv_ops->ani_control = ar5008_hw_ani_control_new;
+		priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
+	} else
+		priv_ops->ani_control = ar5008_hw_ani_control_old;
+
 	if (AR_SREV_9100(ah))
 		priv_ops->compute_pll_control = ar9100_hw_compute_pll_control;
 	else if (AR_SREV_9160_10_OR_LATER(ah))
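
The firstep and cycpwrThr1 cases in ar5008_hw_ani_control_new() above all follow the same pattern: take the table offset of the requested level relative to the default level, add it to the value cached from the INI, and clamp the result to the register's legal range. A compact sketch of that pattern, with a hypothetical helper name:

/* Illustrative only: "relative to INI" adjustment with clamping, as used
 * for AR_PHY_FIND_SIG_FIRSTEP and AR_PHY_TIMING5_CYCPWR_THR1 above. */
static int ani_value_from_level(const int *table, unsigned int level,
				unsigned int def_level, int ini_val,
				int min, int max)
{
	int value = table[level] - table[def_level] + ini_val;

	if (value < min)
		value = min;
	if (value > max)
		value = max;

	return value;
}

For example, firstep level 4 with the new default level 2 gives an offset of firstep_table[4] - firstep_table[2] = +4 on top of the INI firstep, clamped to the [ATH9K_SIG_FIRSTEP_SETTING_MIN, ATH9K_SIG_FIRSTEP_SETTING_MAX] range of [0, 20].
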
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index a8a8cdc..0317ac9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -18,6 +18,11 @@
 #include "ar5008_initvals.h"
 #include "ar9001_initvals.h"
 #include "ar9002_initvals.h"
+#include "ar9002_phy.h"
+
+int modparam_force_new_ani;
+module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
+MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");
 
 /* General hardware code for the A5008/AR9001/AR9002 hadware families */
 
@@ -436,55 +441,84 @@
 		}
 
 		udelay(1000);
+	}
 
-		/* set bit 19 to allow forcing of pcie core into L1 state */
-		REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+	if (power_off) {
+		/* clear bit 19 to disable L1 */
+		REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
 
-		/* Several PCIe massages to ensure proper behaviour */
+		val = REG_READ(ah, AR_WA);
+
+		/*
+		 * Set PCIe workaround bits
+		 * In AR9280 and AR9285, bit 14 in WA register (disable L1)
+		 * should only  be set when device enters D3 and be
+		 * cleared when device comes back to D0.
+		 */
+		if (ah->config.pcie_waen) {
+			if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
+				val |= AR_WA_D3_L1_DISABLE;
+		} else {
+			if (((AR_SREV_9285(ah) ||
+			      AR_SREV_9271(ah) ||
+			      AR_SREV_9287(ah)) &&
+			     (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
+			    (AR_SREV_9280(ah) &&
+			     (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
+				val |= AR_WA_D3_L1_DISABLE;
+			}
+		}
+
+		if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
+			/*
+			 * Disable bit 6 and 7 before entering D3 to
+			 * prevent system hang.
+			 */
+			val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
+		}
+
+		if (AR_SREV_9285E_20(ah))
+			val |= AR_WA_BIT23;
+
+		REG_WRITE(ah, AR_WA, val);
+	} else {
 		if (ah->config.pcie_waen) {
 			val = ah->config.pcie_waen;
 			if (!power_off)
 				val &= (~AR_WA_D3_L1_DISABLE);
 		} else {
-			if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+			if (AR_SREV_9285(ah) ||
+			    AR_SREV_9271(ah) ||
 			    AR_SREV_9287(ah)) {
 				val = AR9285_WA_DEFAULT;
 				if (!power_off)
 					val &= (~AR_WA_D3_L1_DISABLE);
-			} else if (AR_SREV_9280(ah)) {
+			}
+			else if (AR_SREV_9280(ah)) {
 				/*
-				 * On AR9280 chips bit 22 of 0x4004 needs to be
-				 * set otherwise card may disappear.
+				 * For AR9280 chips, bit 22 of 0x4004
+				 * needs to be set.
 				 */
 				val = AR9280_WA_DEFAULT;
 				if (!power_off)
 					val &= (~AR_WA_D3_L1_DISABLE);
-			} else
+			} else {
 				val = AR_WA_DEFAULT;
-		}
-
-		REG_WRITE(ah, AR_WA, val);
-	}
-
-	if (power_off) {
-		/*
-		 * Set PCIe workaround bits
-		 * bit 14 in WA register (disable L1) should only
-		 * be set when device enters D3 and be cleared
-		 * when device comes back to D0.
-		 */
-		if (ah->config.pcie_waen) {
-			if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
-				REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
-		} else {
-			if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
-			      AR_SREV_9287(ah)) &&
-			     (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
-			    (AR_SREV_9280(ah) &&
-			     (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
-				REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
 			}
 		}
+
+		/* WAR for ASPM system hang */
+		if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
+			val |= (AR_WA_BIT6 | AR_WA_BIT7);
+		}
+
+		if (AR_SREV_9285E_20(ah))
+			val |= AR_WA_BIT23;
+
+		REG_WRITE(ah, AR_WA, val);
+
+		/* set bit 19 to allow forcing of pcie core into L1 state */
+		REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
 	}
 }
 
@@ -536,18 +570,29 @@
 	return 0;
 }
 
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
+{
+	if (AR_SREV_9287_13_OR_LATER(ah)) {
+		REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+				AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
+		REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
+		REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+				AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+		REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+				AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+	}
+}
+
 /*
- * Enable ASYNC FIFO
- *
  * If Async FIFO is enabled, the following counters change as MAC now runs
  * at 117 Mhz instead of 88/44MHz when async FIFO is disabled.
  *
  * The values below tested for ht40 2 chain.
  * Overwrite the delay/timeouts initialized in process ini.
  */
-void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
+void ar9002_hw_update_async_fifo(struct ath_hw *ah)
 {
-	if (AR_SREV_9287_12_OR_LATER(ah)) {
+	if (AR_SREV_9287_13_OR_LATER(ah)) {
 		REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
 			  AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
 		REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
@@ -571,9 +616,9 @@
  */
 void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah)
 {
-	if (AR_SREV_9287_12_OR_LATER(ah)) {
+	if (AR_SREV_9287_13_OR_LATER(ah)) {
 		REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
-				AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+			    AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
 	}
 }
 
@@ -595,4 +640,9 @@
 
 	ar9002_hw_attach_calib_ops(ah);
 	ar9002_hw_attach_mac_ops(ah);
+
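+	/*
+	 * Attach either the new or the legacy ANI ops, depending on the
+	 * modparam_force_new_ani module parameter.
+	 */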
+	if (modparam_force_new_ani)
+		ath9k_hw_attach_ani_ops_new(ah);
+	else
+		ath9k_hw_attach_ani_ops_old(ah);
 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index dae7f33..8ab24ee 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -4492,7 +4492,7 @@
 };
 
 
-/* AR9271 initialization values automaticaly created: 06/04/09 */
+/* AR9271 initialization values automatically created: 03/31/10 */
 static const u32 ar9271Modes_9271[][6] = {
     { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
     { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -5011,7 +5011,7 @@
     { 0x0000783c, 0x72ee0a72 },
     { 0x00007840, 0xbbfffffc },
     { 0x00007844, 0x000c0db6 },
-    { 0x00007848, 0x6db61b6f },
+    { 0x00007848, 0x6db6246f },
     { 0x0000784c, 0x6d9b66db },
     { 0x00007850, 0x6d8c6dba },
     { 0x00007854, 0x00040000 },
@@ -5218,7 +5218,7 @@
     { 0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff },
     { 0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6 },
     { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
-    { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a212652, 0x0a212652, 0x0a22a652 },
+    { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a214652, 0x0a214652, 0x0a22a652 },
     { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
     { 0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063 },
     { 0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index 81bf6e5..ce8bb00 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -114,6 +114,10 @@
 #define AR_PHY_FIND_SIG_FIRPWR    0x03FC0000
 #define AR_PHY_FIND_SIG_FIRPWR_S  18
 
+#define AR_PHY_FIND_SIG_LOW           0x9840
+#define AR_PHY_FIND_SIG_FIRSTEP_LOW   0x00000FC0L
+#define AR_PHY_FIND_SIG_FIRSTEP_LOW_S 6
+
 #define AR_PHY_AGC_CTL1                  0x985C
 #define AR_PHY_AGC_CTL1_COARSE_LOW       0x00007F80
 #define AR_PHY_AGC_CTL1_COARSE_LOW_S     7
@@ -325,6 +329,9 @@
 #define AR_PHY_EXT_CCA_CYCPWR_THR1_S    9
 #define AR_PHY_EXT_CCA_THRESH62         0x007F0000
 #define AR_PHY_EXT_CCA_THRESH62_S       16
+#define AR_PHY_EXT_TIMING5_CYCPWR_THR1   0x0000FE00L
+#define AR_PHY_EXT_TIMING5_CYCPWR_THR1_S 9
+
 #define AR_PHY_EXT_MINCCA_PWR           0xFF800000
 #define AR_PHY_EXT_MINCCA_PWR_S         23
 #define AR9280_PHY_EXT_MINCCA_PWR       0x01FF0000
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
similarity index 87%
rename from drivers/net/wireless/ath/ath9k/ar9003_initvals.h
rename to drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
index db019dd..d3375fc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
@@ -14,8 +14,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#ifndef INITVALS_9003_H
-#define INITVALS_9003_H
+#ifndef INITVALS_9003_2P0_H
+#define INITVALS_9003_2P0_H
 
 /* AR9003 2.0 */
 
@@ -835,71 +835,71 @@
 
 static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+	{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
 	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
-	{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
-	{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
-	{0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
-	{0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
-	{0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
-	{0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
-	{0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
-	{0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
-	{0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
-	{0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
-	{0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
-	{0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
-	{0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
-	{0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
-	{0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
-	{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
-	{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
-	{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+	{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+	{0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+	{0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+	{0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+	{0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+	{0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+	{0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+	{0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+	{0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+	{0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+	{0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+	{0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+	{0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+	{0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+	{0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+	{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+	{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+	{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
 	{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
-	{0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
-	{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
-	{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
-	{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
-	{0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
-	{0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+	{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+	{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+	{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+	{0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+	{0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+	{0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
 	{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
-	{0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
-	{0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
-	{0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
-	{0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
-	{0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
-	{0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
-	{0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
-	{0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
-	{0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
-	{0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
-	{0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
-	{0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
-	{0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
-	{0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
-	{0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
-	{0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
-	{0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
-	{0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
-	{0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
-	{0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
-	{0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
-	{0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
-	{0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
-	{0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
-	{0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+	{0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+	{0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+	{0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+	{0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+	{0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+	{0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+	{0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+	{0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+	{0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+	{0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+	{0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+	{0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+	{0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+	{0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+	{0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+	{0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+	{0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+	{0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+	{0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+	{0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+	{0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+	{0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+	{0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+	{0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+	{0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
 	{0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
 	{0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
 	{0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -913,71 +913,71 @@
 
 static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+	{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
 	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
-	{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
-	{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
-	{0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
-	{0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
-	{0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
-	{0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
-	{0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
-	{0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
-	{0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
-	{0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
-	{0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
-	{0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
-	{0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
-	{0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
-	{0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
-	{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
-	{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
-	{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+	{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+	{0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+	{0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+	{0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+	{0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+	{0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+	{0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+	{0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+	{0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+	{0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+	{0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+	{0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+	{0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+	{0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+	{0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+	{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+	{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+	{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
 	{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
-	{0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
-	{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
-	{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
-	{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
-	{0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
-	{0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+	{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+	{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+	{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+	{0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+	{0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+	{0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
 	{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
-	{0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
-	{0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
-	{0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
-	{0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
-	{0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
-	{0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
-	{0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
-	{0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
-	{0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
-	{0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
-	{0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
-	{0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
-	{0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
-	{0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
-	{0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
-	{0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
-	{0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
-	{0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
-	{0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
-	{0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
-	{0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
-	{0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
-	{0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
-	{0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
-	{0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+	{0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+	{0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+	{0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+	{0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+	{0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+	{0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+	{0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+	{0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+	{0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+	{0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+	{0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+	{0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+	{0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+	{0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+	{0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+	{0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+	{0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+	{0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+	{0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+	{0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+	{0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+	{0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+	{0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+	{0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+	{0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
 	{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
 	{0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
 	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1781,4 +1781,4 @@
 	{0x00004044, 0x00000000},
 };
 
-#endif /* INITVALS_9003_H */
+#endif /* INITVALS_9003_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
similarity index 78%
copy from drivers/net/wireless/ath/ath9k/ar9003_initvals.h
copy to drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index db019dd..ec98ab5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -14,12 +14,12 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#ifndef INITVALS_9003_H
-#define INITVALS_9003_H
+#ifndef INITVALS_9003_2P2_H
+#define INITVALS_9003_2P2_H
 
-/* AR9003 2.0 */
+/* AR9003 2.2 */
 
-static const u32 ar9300_2p0_radio_postamble[][5] = {
+static const u32 ar9300_2p2_radio_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
 	{0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
@@ -32,7 +32,7 @@
 	{0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
 };
 
-static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
 	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -41,8 +41,8 @@
 	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
 	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
 	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
-	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
-	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+	{0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+	{0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
 	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
 	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
 	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
@@ -54,27 +54,27 @@
 	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
 	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
 	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
-	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
-	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
-	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
-	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
-	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
-	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
-	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+	{0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+	{0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+	{0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+	{0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+	{0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+	{0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+	{0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
 	{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
 	{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
 	{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
 	{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
 	{0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
 	{0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
-	{0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
-	{0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+	{0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+	{0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
 	{0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
 	{0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
 	{0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
@@ -86,19 +86,19 @@
 	{0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
 	{0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
 	{0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
-	{0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
-	{0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
-	{0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
-	{0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
-	{0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
-	{0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
-	{0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
+	{0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
+	{0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
+	{0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
+	{0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
+	{0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
+	{0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
 	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
 	{0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
 	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -110,7 +110,7 @@
 	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 };
 
-static const u32 ar9300Modes_fast_clock_2p0[][3] = {
+static const u32 ar9300Modes_fast_clock_2p2[][3] = {
 	/* Addr      5G_HT20     5G_HT40   */
 	{0x00001030, 0x00000268, 0x000004d0},
 	{0x00001070, 0x0000018c, 0x00000318},
@@ -123,7 +123,7 @@
 	{0x0000a254, 0x00000898, 0x00001130},
 };
 
-static const u32 ar9300_2p0_radio_core[][2] = {
+static const u32 ar9300_2p2_radio_core[][2] = {
 	/* Addr      allmodes  */
 	{0x00016000, 0x36db6db6},
 	{0x00016004, 0x6db6db40},
@@ -161,7 +161,7 @@
 	{0x00016284, 0x3d840208},
 	{0x00016288, 0x05a20408},
 	{0x0001628c, 0x00038c07},
-	{0x00016290, 0x40000004},
+	{0x00016290, 0x00000004},
 	{0x00016294, 0x458aa14f},
 	{0x00016380, 0x00000000},
 	{0x00016384, 0x00000000},
@@ -267,7 +267,7 @@
 	{0x00016bd4, 0x00000000},
 };
 
-static const u32 ar9300Common_rx_gain_table_merlin_2p0[][2] = {
+static const u32 ar9300Common_rx_gain_table_merlin_2p2[][2] = {
 	/* Addr      allmodes  */
 	{0x0000a000, 0x02000101},
 	{0x0000a004, 0x02000102},
@@ -527,7 +527,7 @@
 	{0x0000b1fc, 0x00000776},
 };
 
-static const u32 ar9300_2p0_mac_postamble[][5] = {
+static const u32 ar9300_2p2_mac_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
 	{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
@@ -539,12 +539,12 @@
 	{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
 };
 
-static const u32 ar9300_2p0_soc_postamble[][5] = {
+static const u32 ar9300_2p2_soc_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
 };
 
-static const u32 ar9200_merlin_2p0_radio_core[][2] = {
+static const u32 ar9200_merlin_2p2_radio_core[][2] = {
 	/* Addr      allmodes  */
 	{0x00007800, 0x00040000},
 	{0x00007804, 0xdb005012},
@@ -586,7 +586,7 @@
 	{0x00007894, 0x5a108000},
 };
 
-static const u32 ar9300_2p0_baseband_postamble[][5] = {
+static const u32 ar9300_2p2_baseband_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
 	{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
@@ -594,7 +594,7 @@
 	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
 	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
 	{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
-	{0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044},
+	{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
 	{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
 	{0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
 	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
@@ -610,6 +610,7 @@
 	{0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
 	{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
 	{0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+	{0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
 	{0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
 	{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
 	{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
@@ -639,7 +640,7 @@
 	{0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
 };
 
-static const u32 ar9300_2p0_baseband_core[][2] = {
+static const u32 ar9300_2p2_baseband_core[][2] = {
 	/* Addr      allmodes  */
 	{0x00009800, 0xafe68e30},
 	{0x00009804, 0xfd14e000},
@@ -694,7 +695,6 @@
 	{0x0000a224, 0x00000000},
 	{0x0000a228, 0x10002310},
 	{0x0000a22c, 0x01036a1e},
-	{0x0000a234, 0x10000fff},
 	{0x0000a23c, 0x00000000},
 	{0x0000a244, 0x0c000000},
 	{0x0000a2a0, 0x00000001},
@@ -756,7 +756,7 @@
 	{0x0000a43c, 0x00000000},
 	{0x0000a440, 0x00000000},
 	{0x0000a444, 0x00000000},
-	{0x0000a448, 0x04000080},
+	{0x0000a448, 0x06000080},
 	{0x0000a44c, 0x00000001},
 	{0x0000a450, 0x00010000},
 	{0x0000a458, 0x00000000},
@@ -833,73 +833,73 @@
 	{0x0000c420, 0x00000000},
 };
 
-static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
+static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+	{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
 	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
-	{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
-	{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
-	{0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
-	{0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
-	{0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
-	{0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
-	{0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
-	{0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
-	{0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
-	{0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
-	{0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
-	{0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
-	{0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
-	{0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
-	{0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
-	{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
-	{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
-	{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+	{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+	{0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+	{0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+	{0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+	{0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+	{0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+	{0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+	{0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+	{0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+	{0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+	{0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+	{0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+	{0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+	{0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+	{0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+	{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+	{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+	{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
 	{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
-	{0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
-	{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
-	{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
-	{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
-	{0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
-	{0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+	{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+	{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+	{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+	{0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+	{0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+	{0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
 	{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
-	{0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
-	{0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
-	{0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
-	{0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
-	{0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
-	{0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
-	{0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
-	{0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
-	{0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
-	{0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
-	{0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
-	{0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
-	{0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
-	{0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
-	{0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
-	{0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
-	{0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
-	{0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
-	{0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
-	{0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
-	{0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
-	{0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
-	{0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
-	{0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
-	{0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+	{0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+	{0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+	{0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+	{0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+	{0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+	{0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+	{0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+	{0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+	{0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+	{0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+	{0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+	{0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+	{0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+	{0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+	{0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+	{0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+	{0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+	{0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+	{0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+	{0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+	{0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+	{0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+	{0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+	{0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+	{0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
 	{0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
 	{0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
 	{0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -911,73 +911,73 @@
 	{0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
 };
 
-static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+	{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
 	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
-	{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
-	{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
-	{0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
-	{0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
-	{0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
-	{0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
-	{0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
-	{0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
-	{0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
-	{0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
-	{0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
-	{0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
-	{0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
-	{0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
-	{0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
-	{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
-	{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
-	{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+	{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+	{0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+	{0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+	{0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+	{0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+	{0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+	{0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+	{0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+	{0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+	{0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+	{0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+	{0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+	{0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+	{0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+	{0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+	{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+	{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+	{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
 	{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
-	{0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
-	{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
-	{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
-	{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
-	{0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
-	{0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+	{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+	{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+	{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+	{0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+	{0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+	{0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
 	{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
-	{0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
-	{0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
-	{0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
-	{0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
-	{0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
-	{0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
-	{0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
-	{0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
-	{0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
-	{0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
-	{0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
-	{0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
-	{0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
-	{0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
-	{0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
-	{0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
-	{0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
-	{0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
-	{0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
-	{0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
-	{0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
-	{0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
-	{0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
-	{0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
-	{0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
-	{0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+	{0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+	{0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+	{0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+	{0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+	{0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+	{0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+	{0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+	{0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+	{0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+	{0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+	{0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+	{0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+	{0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+	{0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+	{0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+	{0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+	{0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+	{0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+	{0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+	{0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+	{0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+	{0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+	{0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+	{0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+	{0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
 	{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
 	{0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
 	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -989,7 +989,7 @@
 	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 };
 
-static const u32 ar9300Common_rx_gain_table_2p0[][2] = {
+static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
 	/* Addr      allmodes  */
 	{0x0000a000, 0x00010000},
 	{0x0000a004, 0x00030002},
@@ -1249,7 +1249,7 @@
 	{0x0000b1fc, 0x00000196},
 };
 
-static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
 	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1258,8 +1258,8 @@
 	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
 	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
 	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
-	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
-	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+	{0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+	{0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
 	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
 	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
 	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
@@ -1271,27 +1271,27 @@
 	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
 	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
 	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
-	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
-	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
-	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
-	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
-	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
-	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
-	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+	{0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+	{0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+	{0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+	{0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+	{0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+	{0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+	{0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+	{0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
 	{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
 	{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
 	{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
 	{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
 	{0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
 	{0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
-	{0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
-	{0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+	{0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+	{0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
 	{0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
 	{0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
 	{0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
@@ -1303,31 +1303,31 @@
 	{0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
 	{0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
 	{0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
-	{0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
-	{0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
-	{0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
-	{0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
-	{0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
-	{0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
-	{0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
-	{0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
+	{0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
+	{0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
+	{0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
+	{0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
+	{0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
+	{0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+	{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
 	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
-	{0x00016048, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+	{0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
 	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 	{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
-	{0x00016448, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+	{0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
 	{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 	{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
-	{0x00016848, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+	{0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
 	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 };
 
-static const u32 ar9300_2p0_mac_core[][2] = {
+static const u32 ar9300_2p2_mac_core[][2] = {
 	/* Addr      allmodes  */
 	{0x00000008, 0x00000000},
 	{0x00000030, 0x00020085},
@@ -1437,7 +1437,7 @@
 	{0x00008258, 0x00000000},
 	{0x0000825c, 0x40000000},
 	{0x00008260, 0x00080922},
-	{0x00008264, 0x98a00010},
+	{0x00008264, 0x9bc00010},
 	{0x00008268, 0xffffffff},
 	{0x0000826c, 0x0000ffff},
 	{0x00008270, 0x00000000},
@@ -1491,7 +1491,7 @@
 	{0x000083d0, 0x000301ff},
 };
 
-static const u32 ar9300Common_wo_xlna_rx_gain_table_2p0[][2] = {
+static const u32 ar9300Common_wo_xlna_rx_gain_table_2p2[][2] = {
 	/* Addr      allmodes  */
 	{0x0000a000, 0x00010000},
 	{0x0000a004, 0x00030002},
@@ -1751,34 +1751,35 @@
 	{0x0000b1fc, 0x00000196},
 };
 
-static const u32 ar9300_2p0_soc_preamble[][2] = {
+static const u32 ar9300_2p2_soc_preamble[][2] = {
 	/* Addr      allmodes  */
 	{0x000040a4, 0x00a0c1c9},
 	{0x00007008, 0x00000000},
 	{0x00007020, 0x00000000},
 	{0x00007034, 0x00000002},
 	{0x00007038, 0x000004c2},
+	{0x00007048, 0x00000008},
 };
 
-static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p0[][2] = {
+static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
 	/* Addr      allmodes  */
 	{0x00004040, 0x08212e5e},
 	{0x00004040, 0x0008003b},
 	{0x00004044, 0x00000000},
 };
 
-static const u32 ar9300PciePhy_clkreq_enable_L1_2p0[][2] = {
+static const u32 ar9300PciePhy_clkreq_enable_L1_2p2[][2] = {
 	/* Addr      allmodes  */
 	{0x00004040, 0x08253e5e},
 	{0x00004040, 0x0008003b},
 	{0x00004044, 0x00000000},
 };
 
-static const u32 ar9300PciePhy_clkreq_disable_L1_2p0[][2] = {
+static const u32 ar9300PciePhy_clkreq_disable_L1_2p2[][2] = {
 	/* Addr      allmodes  */
 	{0x00004040, 0x08213e5e},
 	{0x00004040, 0x0008003b},
 	{0x00004044, 0x00000000},
 };
 
-#endif /* INITVALS_9003_H */
+#endif /* INITVALS_9003_2P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 56a9e5f..5a06503 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -739,6 +739,12 @@
 	 */
 	ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
 
+	/* Do Tx IQ Calibration */
+	ar9003_hw_tx_iq_cal(ah);
+	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+	udelay(5);
+	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
 	/* Calibrate the AGC */
 	REG_WRITE(ah, AR_PHY_AGC_CONTROL,
 		  REG_READ(ah, AR_PHY_AGC_CONTROL) |
@@ -753,10 +759,6 @@
 		return false;
 	}
 
-	/* Do Tx IQ Calibration */
-	if (ah->config.tx_iq_calibration)
-		ar9003_hw_tx_iq_cal(ah);
-
 	/* Revert chainmasks to their original values before NF cal */
 	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 23eb60e..343c9a4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -67,6 +67,7 @@
 		  * bit2 - enable fastClock - enabled
 		  * bit3 - enable doubling - enabled
 		  * bit4 - enable internal regulator - disabled
+		  * bit5 - enable pa predistortion - disabled
 		  */
 		.miscConfiguration = 0, /* bit0 - turn down drivestrength */
 		.eepromWriteEnableGpio = 3,
@@ -129,9 +130,11 @@
 		.txEndToRxOn = 0x2,
 		.txFrameToXpaOn = 0xe,
 		.thresh62 = 28,
-		.futureModal = { /* [32] */
+		.papdRateMaskHt20 = LE32(0x80c080),
+		.papdRateMaskHt40 = LE32(0x80c080),
+		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+			0, 0, 0, 0, 0, 0, 0, 0
 		},
 	 },
 	.calFreqPier2G = {
@@ -326,9 +329,11 @@
 		.txEndToRxOn = 0x2,
 		.txFrameToXpaOn = 0xe,
 		.thresh62 = 28,
+		.papdRateMaskHt20 = LE32(0xf0e0e0),
+		.papdRateMaskHt40 = LE32(0xf0e0e0),
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+			0, 0, 0, 0, 0, 0, 0, 0
 		},
 	 },
 	.calFreqPier5G = {
@@ -644,6 +649,8 @@
 		return (pBase->featureEnable & 0x10) >> 4;
 	case EEP_SWREG:
 		return le32_to_cpu(pBase->swreg);
+	case EEP_PAPRD:
+		return !!(pBase->featureEnable & BIT(5));
 	default:
 		return 0;
 	}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 23fb353..3c533bb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -234,7 +234,9 @@
 	u8 txEndToRxOn;
 	u8 txFrameToXpaOn;
 	u8 thresh62;
-	u8 futureModal[32];
+	__le32 papdRateMaskHt20;
+	__le32 papdRateMaskHt40;
+	u8 futureModal[24];
 } __packed;
 
 struct ar9300_cal_data_per_freq_op_loop {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index b15309c..0641689 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -16,7 +16,8 @@
 
 #include "hw.h"
 #include "ar9003_mac.h"
-#include "ar9003_initvals.h"
+#include "ar9003_2p0_initvals.h"
+#include "ar9003_2p2_initvals.h"
 
 /* General hardware code for the AR9003 hardware family */
 
@@ -31,12 +32,8 @@
 	return false;
 }
 
-/* AR9003 2.0 - new INI format (pre, core, post arrays per subsystem) */
-/*
- * XXX: move TX/RX gain INI to its own init_mode_gain_regs after
- * ensuring it does not affect hardware bring up
- */
-static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+/* AR9003 2.0 */
+static void ar9003_2p0_hw_init_mode_regs(struct ath_hw *ah)
 {
 	/* mac */
 	INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
@@ -106,27 +103,128 @@
 		       3);
 }
 
+/* AR9003 2.2 */
+static void ar9003_2p2_hw_init_mode_regs(struct ath_hw *ah)
+{
+	/* mac */
+	INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+	INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+		       ar9300_2p2_mac_core,
+		       ARRAY_SIZE(ar9300_2p2_mac_core), 2);
+	INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+		       ar9300_2p2_mac_postamble,
+		       ARRAY_SIZE(ar9300_2p2_mac_postamble), 5);
+
+	/* bb */
+	INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+	INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+		       ar9300_2p2_baseband_core,
+		       ARRAY_SIZE(ar9300_2p2_baseband_core), 2);
+	INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+		       ar9300_2p2_baseband_postamble,
+		       ARRAY_SIZE(ar9300_2p2_baseband_postamble), 5);
+
+	/* radio */
+	INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+	INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+		       ar9300_2p2_radio_core,
+		       ARRAY_SIZE(ar9300_2p2_radio_core), 2);
+	INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+		       ar9300_2p2_radio_postamble,
+		       ARRAY_SIZE(ar9300_2p2_radio_postamble), 5);
+
+	/* soc */
+	INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+		       ar9300_2p2_soc_preamble,
+		       ARRAY_SIZE(ar9300_2p2_soc_preamble), 2);
+	INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+	INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+		       ar9300_2p2_soc_postamble,
+		       ARRAY_SIZE(ar9300_2p2_soc_postamble), 5);
+
+	/* rx/tx gain */
+	INIT_INI_ARRAY(&ah->iniModesRxGain,
+		       ar9300Common_rx_gain_table_2p2,
+		       ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), 2);
+	INIT_INI_ARRAY(&ah->iniModesTxGain,
+		       ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
+		       ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
+		       5);
+
+	/* Load PCIE SERDES settings from INI */
+
+	/* Awake Setting */
+
+	INIT_INI_ARRAY(&ah->iniPcieSerdes,
+		       ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
+		       ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
+		       2);
+
+	/* Sleep Setting */
+
+	INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+		       ar9300PciePhy_clkreq_enable_L1_2p2,
+		       ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2),
+		       2);
+
+	/* Fast clock modal settings */
+	INIT_INI_ARRAY(&ah->iniModesAdditional,
+		       ar9300Modes_fast_clock_2p2,
+		       ARRAY_SIZE(ar9300Modes_fast_clock_2p2),
+		       3);
+}
+
+/*
+ * The AR9003 family uses a new INI format (pre, core, post
+ * arrays per subsystem).
+ */
+static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+{
+	if (AR_SREV_9300_20(ah))
+		ar9003_2p0_hw_init_mode_regs(ah);
+	else
+		ar9003_2p2_hw_init_mode_regs(ah);
+}
+
 static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
 {
 	switch (ar9003_hw_get_tx_gain_idx(ah)) {
 	case 0:
 	default:
-		INIT_INI_ARRAY(&ah->iniModesTxGain,
-			       ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
-			       ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
-			       5);
+		if (AR_SREV_9300_20(ah))
+			INIT_INI_ARRAY(&ah->iniModesTxGain,
+				       ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
+				       ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
+				       5);
+		else
+			INIT_INI_ARRAY(&ah->iniModesTxGain,
+				       ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
+				       ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
+				       5);
 		break;
 	case 1:
-		INIT_INI_ARRAY(&ah->iniModesTxGain,
-			       ar9300Modes_high_ob_db_tx_gain_table_2p0,
-			       ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0),
-			       5);
+		if (AR_SREV_9300_20(ah))
+			INIT_INI_ARRAY(&ah->iniModesTxGain,
+				       ar9300Modes_high_ob_db_tx_gain_table_2p0,
+				       ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0),
+				       5);
+		else
+			INIT_INI_ARRAY(&ah->iniModesTxGain,
+				       ar9300Modes_high_ob_db_tx_gain_table_2p2,
+				       ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
+				       5);
 		break;
 	case 2:
-		INIT_INI_ARRAY(&ah->iniModesTxGain,
-			       ar9300Modes_low_ob_db_tx_gain_table_2p0,
-			       ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0),
-			       5);
+		if (AR_SREV_9300_20(ah))
+			INIT_INI_ARRAY(&ah->iniModesTxGain,
+				       ar9300Modes_low_ob_db_tx_gain_table_2p0,
+				       ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0),
+				       5);
+		else
+			INIT_INI_ARRAY(&ah->iniModesTxGain,
+				       ar9300Modes_low_ob_db_tx_gain_table_2p2,
+				       ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
+				       5);
 		break;
 	}
 }
@@ -136,15 +234,28 @@
 	switch (ar9003_hw_get_rx_gain_idx(ah)) {
 	case 0:
 	default:
-		INIT_INI_ARRAY(&ah->iniModesRxGain, ar9300Common_rx_gain_table_2p0,
-			       ARRAY_SIZE(ar9300Common_rx_gain_table_2p0),
-			       2);
+		if (AR_SREV_9300_20(ah))
+			INIT_INI_ARRAY(&ah->iniModesRxGain,
+				       ar9300Common_rx_gain_table_2p0,
+				       ARRAY_SIZE(ar9300Common_rx_gain_table_2p0),
+				       2);
+		else
+			INIT_INI_ARRAY(&ah->iniModesRxGain,
+				       ar9300Common_rx_gain_table_2p2,
+				       ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
+				       2);
 		break;
 	case 1:
-		INIT_INI_ARRAY(&ah->iniModesRxGain,
-			       ar9300Common_wo_xlna_rx_gain_table_2p0,
-			       ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0),
-			       2);
+		if (AR_SREV_9300_20(ah))
+			INIT_INI_ARRAY(&ah->iniModesRxGain,
+				       ar9300Common_wo_xlna_rx_gain_table_2p0,
+				       ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0),
+				       2);
+		else
+			INIT_INI_ARRAY(&ah->iniModesRxGain,
+				       ar9300Common_wo_xlna_rx_gain_table_2p2,
+				       ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
+				       2);
 		break;
 	}
 }
@@ -184,6 +295,26 @@
 		/* Several PCIe messages to ensure proper behaviour */
 		if (ah->config.pcie_waen)
 			REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
+		else
+			REG_WRITE(ah, AR_WA, ah->WARegVal);
+	}
+
+	/*
+	 * Configure PCIe after INI init. SERDES values now come from the INI
+	 * file. This enables PCIe low power mode.
+	 */
+	if (ah->config.pcieSerDesWrite) {
+		unsigned int i;
+		struct ar5416IniArray *array;
+
+		array = power_off ? &ah->iniPcieSerdes :
+				    &ah->iniPcieSerdesLowPower;
+
+		for (i = 0; i < array->ia_rows; i++) {
+			REG_WRITE(ah,
+				  INI_RA(array, i, 0),
+				  INI_RA(array, i, 1));
+		}
 	}
 }
 
@@ -202,4 +333,6 @@
 	ar9003_hw_attach_phy_ops(ah);
 	ar9003_hw_attach_calib_ops(ah);
 	ar9003_hw_attach_mac_ops(ah);
+
+	ath9k_hw_attach_ani_ops_new(ah);
 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 37ba374..06ef710 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -90,6 +90,8 @@
 				  MAP_ISR_S2_CST);
 			mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
 				  MAP_ISR_S2_TSFOOR);
+			mask2 |= ((isr2 & AR_ISR_S2_BB_WATCHDOG) >>
+				  MAP_ISR_S2_BB_WATCHDOG);
 
 			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
 				REG_WRITE(ah, AR_ISR_S2, isr2);
@@ -167,6 +169,9 @@
 
 			(void) REG_READ(ah, AR_ISR);
 		}
+
+		if (*masked & ATH9K_INT_BB_WATCHDOG)
+			ar9003_hw_bb_watchdog_read(ah);
 	}
 
 	if (sync_cause) {
@@ -465,6 +470,14 @@
 		ads->ctl11 &= ~AR_VirtMoreFrag;
 }
 
+void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains)
+{
+	struct ar9003_txc *ads = ds;
+
+	ads->ctl12 |= SM(chains, AR_PAPRDChainMask);
+}
+EXPORT_SYMBOL(ar9003_hw_set_paprd_txdesc);
+
 void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
 {
 	struct ath_hw_ops *ops = ath9k_hw_ops(hw);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index f17558b..f76f27d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -40,6 +40,10 @@
 
 #define AR_Not_Sounding	0x20000000
 
+/* ctl 12 */
+#define AR_PAPRDChainMask	0x00000e00
+#define AR_PAPRDChainMask_S	9
+
 #define MAP_ISR_S2_CST          6
 #define MAP_ISR_S2_GTT          6
 #define MAP_ISR_S2_TIM          3
@@ -47,6 +51,7 @@
 #define MAP_ISR_S2_DTIMSYNC     7
 #define MAP_ISR_S2_DTIM         7
 #define MAP_ISR_S2_TSFOOR       4
+#define MAP_ISR_S2_BB_WATCHDOG  6
 
 #define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds)
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
new file mode 100644
index 0000000..49e0c86
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -0,0 +1,714 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+
+void ar9003_paprd_enable(struct ath_hw *ah, bool val)
+{
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
+		      AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B1,
+		      AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B2,
+		      AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+}
+EXPORT_SYMBOL(ar9003_paprd_enable);
+
+static void ar9003_paprd_setup_single_table(struct ath_hw *ah)
+{
+	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+	struct ar9300_modal_eep_header *hdr;
+	const u32 ctrl0[3] = {
+		AR_PHY_PAPRD_CTRL0_B0,
+		AR_PHY_PAPRD_CTRL0_B1,
+		AR_PHY_PAPRD_CTRL0_B2
+	};
+	const u32 ctrl1[3] = {
+		AR_PHY_PAPRD_CTRL1_B0,
+		AR_PHY_PAPRD_CTRL1_B1,
+		AR_PHY_PAPRD_CTRL1_B2
+	};
+	u32 am_mask, ht40_mask;
+	int i;
+
+	if (ah->curchan && IS_CHAN_5GHZ(ah->curchan))
+		hdr = &eep->modalHeader5G;
+	else
+		hdr = &eep->modalHeader2G;
+
+	am_mask = le32_to_cpu(hdr->papdRateMaskHt20);
+	ht40_mask = le32_to_cpu(hdr->papdRateMaskHt40);
+
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK, am_mask);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK, am_mask);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK, ht40_mask);
+
+	for (i = 0; i < 3; i++) {
+		REG_RMW_FIELD(ah, ctrl0[i],
+			      AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1);
+		REG_RMW_FIELD(ah, ctrl1[i],
+			      AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE, 1);
+		REG_RMW_FIELD(ah, ctrl1[i],
+			      AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE, 1);
+		REG_RMW_FIELD(ah, ctrl1[i],
+			      AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0);
+		REG_RMW_FIELD(ah, ctrl1[i],
+			      AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK, 181);
+		REG_RMW_FIELD(ah, ctrl1[i],
+			      AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT, 361);
+		REG_RMW_FIELD(ah, ctrl1[i],
+			      AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0);
+		REG_RMW_FIELD(ah, ctrl0[i],
+			      AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH, 3);
+	}
+
+	ar9003_paprd_enable(ah, false);
+
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+		      AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP, 0x30);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+		      AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE, 1);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+		      AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE, 1);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+		      AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE, 0);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+		      AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE, 0);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+		      AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+		      AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2,
+		      AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, 147);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+		      AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN, 4);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+		      AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN, 4);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+		      AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+		      AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+		      AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+		      AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
+		      -15);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+		      AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE, 1);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+		      AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA, 0);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+		      AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR, 400);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+		      AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES,
+		      100);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_0_B0,
+		      AR_PHY_PAPRD_PRE_POST_SCALING, 261376);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_1_B0,
+		      AR_PHY_PAPRD_PRE_POST_SCALING, 248079);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_2_B0,
+		      AR_PHY_PAPRD_PRE_POST_SCALING, 233759);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_3_B0,
+		      AR_PHY_PAPRD_PRE_POST_SCALING, 220464);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_4_B0,
+		      AR_PHY_PAPRD_PRE_POST_SCALING, 208194);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_5_B0,
+		      AR_PHY_PAPRD_PRE_POST_SCALING, 196949);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_6_B0,
+		      AR_PHY_PAPRD_PRE_POST_SCALING, 185706);
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_7_B0,
+		      AR_PHY_PAPRD_PRE_POST_SCALING, 175487);
+}
+
+static void ar9003_paprd_get_gain_table(struct ath_hw *ah)
+{
+	u32 *entry = ah->paprd_gain_table_entries;
+	u8 *index = ah->paprd_gain_table_index;
+	u32 reg = AR_PHY_TXGAIN_TABLE;
+	int i;
+
+	memset(entry, 0, sizeof(ah->paprd_gain_table_entries));
+	memset(index, 0, sizeof(ah->paprd_gain_table_index));
+
+	for (i = 0; i < 32; i++) {
+		entry[i] = REG_READ(ah, reg);
+		index[i] = (entry[i] >> 24) & 0xff;
+		reg += 4;
+	}
+}
+
+static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
+					    int target_power)
+{
+	int olpc_gain_delta = 0;
+	int alpha_therm, alpha_volt;
+	int therm_cal_value, volt_cal_value;
+	int therm_value, volt_value;
+	int thermal_gain_corr, voltage_gain_corr;
+	int desired_scale, desired_gain = 0;
+	u32 reg;
+
+	REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+		    AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+	desired_scale = REG_READ_FIELD(ah, AR_PHY_TPC_12,
+				       AR_PHY_TPC_12_DESIRED_SCALE_HT40_5);
+	alpha_therm = REG_READ_FIELD(ah, AR_PHY_TPC_19,
+				     AR_PHY_TPC_19_ALPHA_THERM);
+	alpha_volt = REG_READ_FIELD(ah, AR_PHY_TPC_19,
+				    AR_PHY_TPC_19_ALPHA_VOLT);
+	therm_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18,
+					 AR_PHY_TPC_18_THERM_CAL_VALUE);
+	volt_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18,
+					AR_PHY_TPC_18_VOLT_CAL_VALUE);
+	therm_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
+				     AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE);
+	volt_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
+				    AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE);
+
+	if (chain == 0)
+		reg = AR_PHY_TPC_11_B0;
+	else if (chain == 1)
+		reg = AR_PHY_TPC_11_B1;
+	else
+		reg = AR_PHY_TPC_11_B2;
+
+	olpc_gain_delta = REG_READ_FIELD(ah, reg,
+					 AR_PHY_TPC_11_OLPC_GAIN_DELTA);
+
+	if (olpc_gain_delta >= 128)
+		olpc_gain_delta = olpc_gain_delta - 256;
+
+	thermal_gain_corr = (alpha_therm * (therm_value - therm_cal_value) +
+			     (256 / 2)) / 256;
+	voltage_gain_corr = (alpha_volt * (volt_value - volt_cal_value) +
+			     (128 / 2)) / 128;
+	desired_gain = target_power - olpc_gain_delta - thermal_gain_corr -
+	    voltage_gain_corr + desired_scale;
+
+	return desired_gain;
+}
+
+static void ar9003_tx_force_gain(struct ath_hw *ah, unsigned int gain_index)
+{
+	int selected_gain_entry, txbb1dbgain, txbb6dbgain, txmxrgain;
+	int padrvgnA, padrvgnB, padrvgnC, padrvgnD;
+	u32 *gain_table_entries = ah->paprd_gain_table_entries;
+
+	selected_gain_entry = gain_table_entries[gain_index];
+	txbb1dbgain = selected_gain_entry & 0x7;
+	txbb6dbgain = (selected_gain_entry >> 3) & 0x3;
+	txmxrgain = (selected_gain_entry >> 5) & 0xf;
+	padrvgnA = (selected_gain_entry >> 9) & 0xf;
+	padrvgnB = (selected_gain_entry >> 13) & 0xf;
+	padrvgnC = (selected_gain_entry >> 17) & 0xf;
+	padrvgnD = (selected_gain_entry >> 21) & 0x3;
+
+	REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+		      AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN, txbb1dbgain);
+	REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+		      AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN, txbb6dbgain);
+	REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+		      AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN, txmxrgain);
+	REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+		      AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA, padrvgnA);
+	REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+		      AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB, padrvgnB);
+	REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+		      AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC, padrvgnC);
+	REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+		      AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND, padrvgnD);
+	REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+		      AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL, 0);
+	REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+		      AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN, 0);
+	REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCED_DAC_GAIN, 0);
+	REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCE_DAC_GAIN, 0);
+}
+
+static inline int find_expn(int num)
+{
+	return fls(num) - 1;
+}
+
+static inline int find_proper_scale(int expn, int N)
+{
+	return (expn > N) ? expn - 10 : 0;
+}
+
+#define NUM_BIN 23
+
+static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
+{
+	unsigned int thresh_accum_cnt;
+	int x_est[NUM_BIN + 1], Y[NUM_BIN + 1], theta[NUM_BIN + 1];
+	int PA_in[NUM_BIN + 1];
+	int B1_tmp[NUM_BIN + 1], B2_tmp[NUM_BIN + 1];
+	unsigned int B1_abs_max, B2_abs_max;
+	int max_index, scale_factor;
+	int y_est[NUM_BIN + 1];
+	int x_est_fxp1_nonlin, x_tilde[NUM_BIN + 1];
+	unsigned int x_tilde_abs;
+	int G_fxp, Y_intercept, order_x_by_y, M, I, L, sum_y_sqr, sum_y_quad;
+	int Q_x, Q_B1, Q_B2, beta_raw, alpha_raw, scale_B;
+	int Q_scale_B, Q_beta, Q_alpha, alpha, beta, order_1, order_2;
+	int order1_5x, order2_3x, order1_5x_rem, order2_3x_rem;
+	int y5, y3, tmp;
+	int theta_low_bin = 0;
+	int i;
+
+	/* disregard any bin that contains <= 16 samples */
+	thresh_accum_cnt = 16;
+	scale_factor = 5;
+	max_index = 0;
+	memset(theta, 0, sizeof(theta));
+	memset(x_est, 0, sizeof(x_est));
+	memset(Y, 0, sizeof(Y));
+	memset(y_est, 0, sizeof(y_est));
+	memset(x_tilde, 0, sizeof(x_tilde));
+
+	for (i = 0; i < NUM_BIN; i++) {
+		s32 accum_cnt, accum_tx, accum_rx, accum_ang;
+
+		/* number of samples */
+		accum_cnt = data_L[i] & 0xffff;
+
+		if (accum_cnt <= thresh_accum_cnt)
+			continue;
+
+		/* sum(tx amplitude) */
+		accum_tx = ((data_L[i] >> 16) & 0xffff) |
+		    ((data_U[i] & 0x7ff) << 16);
+
+		/* sum(rx amplitude distance to lower bin edge) */
+		accum_rx = ((data_U[i] >> 11) & 0x1f) |
+		    ((data_L[i + 23] & 0xffff) << 5);
+
+		/* sum(angles) */
+		accum_ang = ((data_L[i + 23] >> 16) & 0xffff) |
+		    ((data_U[i + 23] & 0x7ff) << 16);
+
+		accum_tx <<= scale_factor;
+		accum_rx <<= scale_factor;
+		x_est[i + 1] = (((accum_tx + accum_cnt) / accum_cnt) + 32) >>
+		    scale_factor;
+
+		Y[i + 1] = ((((accum_rx + accum_cnt) / accum_cnt) + 32) >>
+			    scale_factor) +
+			    (1 << scale_factor) * max_index + 16;
+
+		if (accum_ang >= (1 << 26))
+			accum_ang -= 1 << 27;
+
+		theta[i + 1] = ((accum_ang * (1 << scale_factor)) + accum_cnt) /
+		    accum_cnt;
+
+		max_index++;
+	}
+
+	/*
+	 * Find the average theta of the first 5 bins and set all of them to
+	 * that value. The curve is linear in that range.
+	 */
+	for (i = 1; i < 6; i++)
+		theta_low_bin += theta[i];
+
+	theta_low_bin = theta_low_bin / 5;
+	for (i = 1; i < 6; i++)
+		theta[i] = theta_low_bin;
+
+	/* Set values at origin */
+	theta[0] = theta_low_bin;
+	for (i = 0; i <= max_index; i++)
+		theta[i] -= theta_low_bin;
+
+	x_est[0] = 0;
+	Y[0] = 0;
+	scale_factor = 8;
+
+	/* low signal gain */
+	if (x_est[6] == x_est[3])
+		return false;
+
+	G_fxp =
+	    (((Y[6] - Y[3]) * 1 << scale_factor) +
+	     (x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
+
+	Y_intercept =
+	    (G_fxp * (x_est[0] - x_est[3]) +
+	     (1 << scale_factor)) / (1 << scale_factor) + Y[3];
+
+	for (i = 0; i <= max_index; i++)
+		y_est[i] = Y[i] - Y_intercept;
+
+	for (i = 0; i <= 3; i++) {
+		y_est[i] = i * 32;
+
+		/* prevent division by zero */
+		if (G_fxp == 0)
+			return false;
+
+		x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
+	}
+
+	x_est_fxp1_nonlin =
+	    x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
+				G_fxp) / G_fxp;
+
+	order_x_by_y =
+	    (x_est_fxp1_nonlin + y_est[max_index]) / y_est[max_index];
+
+	if (order_x_by_y == 0)
+		M = 10;
+	else if (order_x_by_y == 1)
+		M = 9;
+	else
+		M = 8;
+
+	I = (max_index > 15) ? 7 : max_index >> 1;
+	L = max_index - I;
+	scale_factor = 8;
+	sum_y_sqr = 0;
+	sum_y_quad = 0;
+	x_tilde_abs = 0;
+
+	for (i = 0; i <= L; i++) {
+		unsigned int y_sqr;
+		unsigned int y_quad;
+		unsigned int tmp_abs;
+
+		/* prevent division by zero */
+		if (y_est[i + I] == 0)
+			return false;
+
+		x_est_fxp1_nonlin =
+		    x_est[i + I] - ((1 << scale_factor) * y_est[i + I] +
+				    G_fxp) / G_fxp;
+
+		x_tilde[i] =
+		    (x_est_fxp1_nonlin * (1 << M) + y_est[i + I]) / y_est[i +
+									  I];
+		x_tilde[i] =
+		    (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I];
+		x_tilde[i] =
+		    (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I];
+		y_sqr =
+		    (y_est[i + I] * y_est[i + I] +
+		     (scale_factor * scale_factor)) / (scale_factor *
+						       scale_factor);
+		tmp_abs = abs(x_tilde[i]);
+		if (tmp_abs > x_tilde_abs)
+			x_tilde_abs = tmp_abs;
+
+		y_quad = y_sqr * y_sqr;
+		sum_y_sqr = sum_y_sqr + y_sqr;
+		sum_y_quad = sum_y_quad + y_quad;
+		B1_tmp[i] = y_sqr * (L + 1);
+		B2_tmp[i] = y_sqr;
+	}
+
+	B1_abs_max = 0;
+	B2_abs_max = 0;
+	for (i = 0; i <= L; i++) {
+		int abs_val;
+
+		B1_tmp[i] -= sum_y_sqr;
+		B2_tmp[i] = sum_y_quad - sum_y_sqr * B2_tmp[i];
+
+		abs_val = abs(B1_tmp[i]);
+		if (abs_val > B1_abs_max)
+			B1_abs_max = abs_val;
+
+		abs_val = abs(B2_tmp[i]);
+		if (abs_val > B2_abs_max)
+			B2_abs_max = abs_val;
+	}
+
+	Q_x = find_proper_scale(find_expn(x_tilde_abs), 10);
+	Q_B1 = find_proper_scale(find_expn(B1_abs_max), 10);
+	Q_B2 = find_proper_scale(find_expn(B2_abs_max), 10);
+
+	beta_raw = 0;
+	alpha_raw = 0;
+	for (i = 0; i <= L; i++) {
+		x_tilde[i] = x_tilde[i] / (1 << Q_x);
+		B1_tmp[i] = B1_tmp[i] / (1 << Q_B1);
+		B2_tmp[i] = B2_tmp[i] / (1 << Q_B2);
+		beta_raw = beta_raw + B1_tmp[i] * x_tilde[i];
+		alpha_raw = alpha_raw + B2_tmp[i] * x_tilde[i];
+	}
+
+	scale_B =
+	    ((sum_y_quad / scale_factor) * (L + 1) -
+	     (sum_y_sqr / scale_factor) * sum_y_sqr) * scale_factor;
+
+	Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
+	scale_B = scale_B / (1 << Q_scale_B);
+	Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
+	Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
+	beta_raw = beta_raw / (1 << Q_beta);
+	alpha_raw = alpha_raw / (1 << Q_alpha);
+	alpha = (alpha_raw << 10) / scale_B;
+	beta = (beta_raw << 10) / scale_B;
+	order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B;
+	order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B;
+	order1_5x = order_1 / 5;
+	order2_3x = order_2 / 3;
+	order1_5x_rem = order_1 - 5 * order1_5x;
+	order2_3x_rem = order_2 - 3 * order2_3x;
+
+	for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+		tmp = i * 32;
+		y5 = ((beta * tmp) >> 6) >> order1_5x;
+		y5 = (y5 * tmp) >> order1_5x;
+		y5 = (y5 * tmp) >> order1_5x;
+		y5 = (y5 * tmp) >> order1_5x;
+		y5 = (y5 * tmp) >> order1_5x;
+		y5 = y5 >> order1_5x_rem;
+		y3 = (alpha * tmp) >> order2_3x;
+		y3 = (y3 * tmp) >> order2_3x;
+		y3 = (y3 * tmp) >> order2_3x;
+		y3 = y3 >> order2_3x_rem;
+		PA_in[i] = y5 + y3 + (256 * tmp) / G_fxp;
+
+		if (i >= 2) {
+			tmp = PA_in[i] - PA_in[i - 1];
+			if (tmp < 0)
+				PA_in[i] =
+				    PA_in[i - 1] + (PA_in[i - 1] -
+						    PA_in[i - 2]);
+		}
+
+		PA_in[i] = (PA_in[i] < 1400) ? PA_in[i] : 1400;
+	}
+
+	beta_raw = 0;
+	alpha_raw = 0;
+
+	for (i = 0; i <= L; i++) {
+		int theta_tilde =
+		    ((theta[i + I] << M) + y_est[i + I]) / y_est[i + I];
+		theta_tilde =
+		    ((theta_tilde << M) + y_est[i + I]) / y_est[i + I];
+		theta_tilde =
+		    ((theta_tilde << M) + y_est[i + I]) / y_est[i + I];
+		beta_raw = beta_raw + B1_tmp[i] * theta_tilde;
+		alpha_raw = alpha_raw + B2_tmp[i] * theta_tilde;
+	}
+
+	Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
+	Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
+	beta_raw = beta_raw / (1 << Q_beta);
+	alpha_raw = alpha_raw / (1 << Q_alpha);
+
+	alpha = (alpha_raw << 10) / scale_B;
+	beta = (beta_raw << 10) / scale_B;
+	order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B + 5;
+	order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B + 5;
+	order1_5x = order_1 / 5;
+	order2_3x = order_2 / 3;
+	order1_5x_rem = order_1 - 5 * order1_5x;
+	order2_3x_rem = order_2 - 3 * order2_3x;
+
+	for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+		int PA_angle;
+
+		/* pa_table[4] is calculated from PA_angle for i=5 */
+		if (i == 4)
+			continue;
+
+		tmp = i * 32;
+		if (beta > 0)
+			y5 = (((beta * tmp - 64) >> 6) -
+			      (1 << order1_5x)) / (1 << order1_5x);
+		else
+			y5 = ((((beta * tmp - 64) >> 6) +
+			       (1 << order1_5x)) / (1 << order1_5x));
+
+		y5 = (y5 * tmp) / (1 << order1_5x);
+		y5 = (y5 * tmp) / (1 << order1_5x);
+		y5 = (y5 * tmp) / (1 << order1_5x);
+		y5 = (y5 * tmp) / (1 << order1_5x);
+		y5 = y5 / (1 << order1_5x_rem);
+
+		if (beta > 0)
+			y3 = (alpha * tmp -
+			      (1 << order2_3x)) / (1 << order2_3x);
+		else
+			y3 = (alpha * tmp +
+			      (1 << order2_3x)) / (1 << order2_3x);
+		y3 = (y3 * tmp) / (1 << order2_3x);
+		y3 = (y3 * tmp) / (1 << order2_3x);
+		y3 = y3 / (1 << order2_3x_rem);
+
+		if (i < 4) {
+			PA_angle = 0;
+		} else {
+			PA_angle = y5 + y3;
+			if (PA_angle < -150)
+				PA_angle = -150;
+			else if (PA_angle > 150)
+				PA_angle = 150;
+		}
+
+		pa_table[i] = ((PA_in[i] & 0x7ff) << 11) + (PA_angle & 0x7ff);
+		if (i == 5) {
+			PA_angle = (PA_angle + 2) >> 1;
+			pa_table[i - 1] = ((PA_in[i - 1] & 0x7ff) << 11) +
+			    (PA_angle & 0x7ff);
+		}
+	}
+
+	*gain = G_fxp;
+	return true;
+}
+
+void ar9003_paprd_populate_single_table(struct ath_hw *ah,
+					struct ath9k_channel *chan, int chain)
+{
+	u32 *paprd_table_val = chan->pa_table[chain];
+	u32 small_signal_gain = chan->small_signal_gain[chain];
+	u32 training_power;
+	u32 reg = 0;
+	int i;
+
+	training_power =
+	    REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
+			   AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
+	training_power -= 4;
+
+	if (chain == 0)
+		reg = AR_PHY_PAPRD_MEM_TAB_B0;
+	else if (chain == 1)
+		reg = AR_PHY_PAPRD_MEM_TAB_B1;
+	else if (chain == 2)
+		reg = AR_PHY_PAPRD_MEM_TAB_B2;
+
+	for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+		REG_WRITE(ah, reg, paprd_table_val[i]);
+		reg = reg + 4;
+	}
+
+	if (chain == 0)
+		reg = AR_PHY_PA_GAIN123_B0;
+	else if (chain == 1)
+		reg = AR_PHY_PA_GAIN123_B1;
+	else
+		reg = AR_PHY_PA_GAIN123_B2;
+
+	REG_RMW_FIELD(ah, reg, AR_PHY_PA_GAIN123_PA_GAIN1, small_signal_gain);
+
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B0,
+		      AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+		      training_power);
+
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B1,
+		      AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+		      training_power);
+
+	REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B2,
+		      AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+		      training_power);
+}
+EXPORT_SYMBOL(ar9003_paprd_populate_single_table);
+
+int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
+{
+
+	unsigned int i, desired_gain, gain_index;
+	unsigned int train_power;
+
+	train_power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
+				     AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
+
+	train_power = train_power - 4;
+
+	desired_gain = ar9003_get_desired_gain(ah, chain, train_power);
+
+	gain_index = 0;
+	for (i = 0; i < 32; i++) {
+		if (ah->paprd_gain_table_index[i] >= desired_gain)
+			break;
+		gain_index++;
+	}
+
+	ar9003_tx_force_gain(ah, gain_index);
+
+	REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+			AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+
+	return 0;
+}
+EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
+
+int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
+			      int chain)
+{
+	u16 *small_signal_gain = &chan->small_signal_gain[chain];
+	u32 *pa_table = chan->pa_table[chain];
+	u32 *data_L, *data_U;
+	int i, status = 0;
+	u32 *buf;
+	u32 reg;
+
+	memset(chan->pa_table[chain], 0, sizeof(chan->pa_table[chain]));
+
+	buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC);
+	if (!buf)
+		return -ENOMEM;
+
+	data_L = &buf[0];
+	data_U = &buf[48];
+
+	REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+		    AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
+
+	reg = AR_PHY_CHAN_INFO_TAB_0;
+	for (i = 0; i < 48; i++)
+		data_L[i] = REG_READ(ah, reg + (i << 2));
+
+	REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+		    AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
+
+	for (i = 0; i < 48; i++)
+		data_U[i] = REG_READ(ah, reg + (i << 2));
+
+	if (!create_pa_curve(data_L, data_U, pa_table, small_signal_gain))
+		status = -2;
+
+	REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+		    AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+
+	kfree(buf);
+
+	return status;
+}
+EXPORT_SYMBOL(ar9003_paprd_create_curve);
+
+int ar9003_paprd_init_table(struct ath_hw *ah)
+{
+	ar9003_paprd_setup_single_table(ah);
+	ar9003_paprd_get_gain_table(ah);
+	return 0;
+}
+EXPORT_SYMBOL(ar9003_paprd_init_table);
+
+bool ar9003_paprd_is_done(struct ath_hw *ah)
+{
+	return !!REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+				AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+}
+EXPORT_SYMBOL(ar9003_paprd_is_done);
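
The exported helpers above form a small per-chain calibration API; a minimal sketch of one possible calling order follows (the wrapper name, the -EINPROGRESS return and the training-frame step are assumptions, not something this patch defines):

/* Hypothetical caller sketch, illustration only. */
static int example_paprd_run(struct ath_hw *ah,
			     struct ath9k_channel *chan, int chain)
{
	int ret;

	/* program single-table mode and cache the tx gain table */
	ar9003_paprd_init_table(ah);

	/* force a tx gain close to the desired training power */
	ret = ar9003_paprd_setup_gain_table(ah, chain);
	if (ret)
		return ret;

	/*
	 * ...send a training frame whose descriptor was tagged with
	 * ar9003_hw_set_paprd_txdesc(), then wait for the trainer...
	 */
	if (!ar9003_paprd_is_done(ah))
		return -EINPROGRESS;

	/* turn the captured samples into a PA curve for this chain */
	ret = ar9003_paprd_create_curve(ah, chan, chain);
	if (ret)
		return ret;

	/* write the curve into the chip and switch predistortion on */
	ar9003_paprd_populate_single_table(ah, chan, chain);
	ar9003_paprd_enable(ah, true);
	return 0;
}
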
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 80431a2..19bc05c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -17,6 +17,28 @@
 #include "hw.h"
 #include "ar9003_phy.h"
 
+static const int firstep_table[] =
+/* level:  0   1   2   3   4   5   6   7   8  */
+	{ -4, -2,  0,  2,  4,  6,  8, 10, 12 }; /* lvl 0-8, default 2 */
+
+static const int cycpwrThr1_table[] =
+/* level:  0   1   2   3   4   5   6   7  */
+	{ -6, -4, -2,  0,  2,  4,  6,  8 };     /* lvl 0-7, default 3 */
+
+/*
+ * register values to turn OFDM weak signal detection OFF
+ */
+static const int m1ThreshLow_off = 127;
+static const int m2ThreshLow_off = 127;
+static const int m1Thresh_off = 127;
+static const int m2Thresh_off = 127;
+static const int m2CountThr_off =  31;
+static const int m2CountThrLow_off =  63;
+static const int m1ThreshLowExt_off = 127;
+static const int m2ThreshLowExt_off = 127;
+static const int m1ThreshExt_off = 127;
+static const int m2ThreshExt_off = 127;
+
 /**
  * ar9003_hw_set_channel - set channel on single-chip device
  * @ah: atheros hardware structure
@@ -94,7 +116,7 @@
 }
 
 /**
- * ar9003_hw_spur_mitigate - convert baseband spur frequency
+ * ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency
  * @ah: atheros hardware structure
  * @chan:
  *
@@ -521,15 +543,6 @@
 		u32 val = INI_RA(iniArr, i, column);
 
 		REG_WRITE(ah, reg, val);
-
-		/*
-		 * Determine if this is a shift register value, and insert the
-		 * configured delay if so.
-		 */
-		if (reg >= 0x16000 && reg < 0x17000
-		    && ah->config.analog_shiftreg)
-			udelay(100);
-
 		DO_DELAY(regWrites);
 	}
 }
@@ -732,71 +745,68 @@
 {
 	struct ar5416AniState *aniState = ah->curani;
 	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_channel *chan = ah->curchan;
+	s32 value, value2;
 
 	switch (cmd & ah->ani_function) {
-	case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
-		u32 level = param;
-
-		if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
-			ath_print(common, ATH_DBG_ANI,
-				  "level out of range (%u > %u)\n",
-				  level,
-				  (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
-			return false;
-		}
-
-		REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
-			      AR_PHY_DESIRED_SZ_TOT_DES,
-			      ah->totalSizeDesired[level]);
-		REG_RMW_FIELD(ah, AR_PHY_AGC,
-			      AR_PHY_AGC_COARSE_LOW,
-			      ah->coarse_low[level]);
-		REG_RMW_FIELD(ah, AR_PHY_AGC,
-			      AR_PHY_AGC_COARSE_HIGH,
-			      ah->coarse_high[level]);
-		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
-			      AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]);
-
-		if (level > aniState->noiseImmunityLevel)
-			ah->stats.ast_ani_niup++;
-		else if (level < aniState->noiseImmunityLevel)
-			ah->stats.ast_ani_nidown++;
-		aniState->noiseImmunityLevel = level;
-		break;
-	}
 	case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
-		const int m1ThreshLow[] = { 127, 50 };
-		const int m2ThreshLow[] = { 127, 40 };
-		const int m1Thresh[] = { 127, 0x4d };
-		const int m2Thresh[] = { 127, 0x40 };
-		const int m2CountThr[] = { 31, 16 };
-		const int m2CountThrLow[] = { 63, 48 };
+		/*
+		 * on == 1 means OFDM weak signal detection is ON
+		 * (the default, for less noise immunity)
+		 *
+		 * on == 0 means OFDM weak signal detection is OFF
+		 * (for more noise immunity)
+		 */
 		u32 on = param ? 1 : 0;
+		/*
+		 * make register setting for default
+		 * (weak sig detect ON) come from INI file
+		 */
+		int m1ThreshLow = on ?
+			aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
+		int m2ThreshLow = on ?
+			aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
+		int m1Thresh = on ?
+			aniState->iniDef.m1Thresh : m1Thresh_off;
+		int m2Thresh = on ?
+			aniState->iniDef.m2Thresh : m2Thresh_off;
+		int m2CountThr = on ?
+			aniState->iniDef.m2CountThr : m2CountThr_off;
+		int m2CountThrLow = on ?
+			aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
+		int m1ThreshLowExt = on ?
+			aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
+		int m2ThreshLowExt = on ?
+			aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
+		int m1ThreshExt = on ?
+			aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
+		int m2ThreshExt = on ?
+			aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
 
 		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
 			      AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
-			      m1ThreshLow[on]);
+			      m1ThreshLow);
 		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
 			      AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
-			      m2ThreshLow[on]);
+			      m2ThreshLow);
 		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]);
+			      AR_PHY_SFCORR_M1_THRESH, m1Thresh);
 		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]);
+			      AR_PHY_SFCORR_M2_THRESH, m2Thresh);
 		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]);
+			      AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
 		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
 			      AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
-			      m2CountThrLow[on]);
+			      m2CountThrLow);
 
 		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]);
+			      AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
 		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]);
+			      AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
 		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]);
+			      AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
 		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]);
+			      AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
 
 		if (on)
 			REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
@@ -806,6 +816,12 @@
 				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
 
 		if (!on != aniState->ofdmWeakSigDetectOff) {
+			ath_print(common, ATH_DBG_ANI,
+				  "** ch %d: ofdm weak signal: %s=>%s\n",
+				  chan->channel,
+				  !aniState->ofdmWeakSigDetectOff ?
+					"on" : "off",
+				  on ? "on" : "off");
 			if (on)
 				ah->stats.ast_ani_ofdmon++;
 			else
@@ -814,64 +830,167 @@
 		}
 		break;
 	}
-	case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
-		const int weakSigThrCck[] = { 8, 6 };
-		u32 high = param ? 1 : 0;
-
-		REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
-			      AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
-			      weakSigThrCck[high]);
-		if (high != aniState->cckWeakSigThreshold) {
-			if (high)
-				ah->stats.ast_ani_cckhigh++;
-			else
-				ah->stats.ast_ani_ccklow++;
-			aniState->cckWeakSigThreshold = high;
-		}
-		break;
-	}
 	case ATH9K_ANI_FIRSTEP_LEVEL:{
-		const int firstep[] = { 0, 4, 8 };
 		u32 level = param;
 
-		if (level >= ARRAY_SIZE(firstep)) {
+		if (level >= ARRAY_SIZE(firstep_table)) {
 			ath_print(common, ATH_DBG_ANI,
-				  "level out of range (%u > %u)\n",
+				  "ATH9K_ANI_FIRSTEP_LEVEL: level "
+				  "out of range (%u > %u)\n",
 				  level,
-				  (unsigned) ARRAY_SIZE(firstep));
+				  (unsigned) ARRAY_SIZE(firstep_table));
 			return false;
 		}
+
+		/*
+		 * make register setting relative to default
+		 * from INI file & cap value
+		 */
+		value = firstep_table[level] -
+			firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+			aniState->iniDef.firstep;
+		if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+			value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+		if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+			value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
 		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
 			      AR_PHY_FIND_SIG_FIRSTEP,
-			      firstep[level]);
-		if (level > aniState->firstepLevel)
-			ah->stats.ast_ani_stepup++;
-		else if (level < aniState->firstepLevel)
-			ah->stats.ast_ani_stepdown++;
-		aniState->firstepLevel = level;
+			      value);
+		/*
+		 * we need to set first step low register too
+		 * make register setting relative to default
+		 * from INI file & cap value
+		 */
+		value2 = firstep_table[level] -
+			 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+			 aniState->iniDef.firstepLow;
+		if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+			value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+		if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+			value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+
+		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
+			      AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
+
+		if (level != aniState->firstepLevel) {
+			ath_print(common, ATH_DBG_ANI,
+				  "** ch %d: level %d=>%d[def:%d] "
+				  "firstep[level]=%d ini=%d\n",
+				  chan->channel,
+				  aniState->firstepLevel,
+				  level,
+				  ATH9K_ANI_FIRSTEP_LVL_NEW,
+				  value,
+				  aniState->iniDef.firstep);
+			ath_print(common, ATH_DBG_ANI,
+				  "** ch %d: level %d=>%d[def:%d] "
+				  "firstep_low[level]=%d ini=%d\n",
+				  chan->channel,
+				  aniState->firstepLevel,
+				  level,
+				  ATH9K_ANI_FIRSTEP_LVL_NEW,
+				  value2,
+				  aniState->iniDef.firstepLow);
+			if (level > aniState->firstepLevel)
+				ah->stats.ast_ani_stepup++;
+			else if (level < aniState->firstepLevel)
+				ah->stats.ast_ani_stepdown++;
+			aniState->firstepLevel = level;
+		}
 		break;
 	}
 	case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
-		const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
 		u32 level = param;
 
-		if (level >= ARRAY_SIZE(cycpwrThr1)) {
+		if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
 			ath_print(common, ATH_DBG_ANI,
-				  "level out of range (%u > %u)\n",
+				  "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level "
+				  "out of range (%u > %u)\n",
 				  level,
-				  (unsigned) ARRAY_SIZE(cycpwrThr1));
+				  (unsigned) ARRAY_SIZE(cycpwrThr1_table));
 			return false;
 		}
+		/*
+		 * make register setting relative to default
+		 * from INI file & cap value
+		 */
+		value = cycpwrThr1_table[level] -
+			cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+			aniState->iniDef.cycpwrThr1;
+		if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+			value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+		if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+			value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
 		REG_RMW_FIELD(ah, AR_PHY_TIMING5,
 			      AR_PHY_TIMING5_CYCPWR_THR1,
-			      cycpwrThr1[level]);
-		if (level > aniState->spurImmunityLevel)
-			ah->stats.ast_ani_spurup++;
-		else if (level < aniState->spurImmunityLevel)
-			ah->stats.ast_ani_spurdown++;
-		aniState->spurImmunityLevel = level;
+			      value);
+
+		/*
+		 * set AR_PHY_EXT_CCA for extension channel
+		 * make register setting relative to default
+		 * from INI file & cap value
+		 */
+		value2 = cycpwrThr1_table[level] -
+			 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+			 aniState->iniDef.cycpwrThr1Ext;
+		if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+			value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+		if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+			value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+		REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+			      AR_PHY_EXT_CYCPWR_THR1, value2);
+
+		if (level != aniState->spurImmunityLevel) {
+			ath_print(common, ATH_DBG_ANI,
+				  "** ch %d: level %d=>%d[def:%d] "
+				  "cycpwrThr1[level]=%d ini=%d\n",
+				  chan->channel,
+				  aniState->spurImmunityLevel,
+				  level,
+				  ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+				  value,
+				  aniState->iniDef.cycpwrThr1);
+			ath_print(common, ATH_DBG_ANI,
+				  "** ch %d: level %d=>%d[def:%d] "
+				  "cycpwrThr1Ext[level]=%d ini=%d\n",
+				  chan->channel,
+				  aniState->spurImmunityLevel,
+				  level,
+				  ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+				  value2,
+				  aniState->iniDef.cycpwrThr1Ext);
+			if (level > aniState->spurImmunityLevel)
+				ah->stats.ast_ani_spurup++;
+			else if (level < aniState->spurImmunityLevel)
+				ah->stats.ast_ani_spurdown++;
+			aniState->spurImmunityLevel = level;
+		}
 		break;
 	}
+	case ATH9K_ANI_MRC_CCK:{
+		/*
+		 * is_on == 1 means MRC CCK is ON (default, less noise immunity)
+		 * is_on == 0 means MRC CCK is OFF (more noise immunity)
+		 */
+		bool is_on = param ? 1 : 0;
+		REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+			      AR_PHY_MRC_CCK_ENABLE, is_on);
+		REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+			      AR_PHY_MRC_CCK_MUX_REG, is_on);
+		if (!is_on != aniState->mrcCCKOff) {
+			ath_print(common, ATH_DBG_ANI,
+				  "** ch %d: MRC CCK: %s=>%s\n",
+				  chan->channel,
+				  !aniState->mrcCCKOff ? "on" : "off",
+				  is_on ? "on" : "off");
+			if (is_on)
+				ah->stats.ast_ani_ccklow++;
+			else
+				ah->stats.ast_ani_cckhigh++;
+			aniState->mrcCCKOff = !is_on;
+		}
+		break;
+	}
 	case ATH9K_ANI_PRESENT:
 		break;
 	default:
@@ -880,25 +999,19 @@
 		return false;
 	}
 
-	ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
 	ath_print(common, ATH_DBG_ANI,
-		  "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
-		  "ofdmWeakSigDetectOff=%d\n",
-		  aniState->noiseImmunityLevel,
+		  "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
+		  "MRCcck=%s listenTime=%d CC=%d listen=%d "
+		  "ofdmErrs=%d cckErrs=%d\n",
 		  aniState->spurImmunityLevel,
-		  !aniState->ofdmWeakSigDetectOff);
-	ath_print(common, ATH_DBG_ANI,
-		  "cckWeakSigThreshold=%d, "
-		  "firstepLevel=%d, listenTime=%d\n",
-		  aniState->cckWeakSigThreshold,
+		  !aniState->ofdmWeakSigDetectOff ? "on" : "off",
 		  aniState->firstepLevel,
-		  aniState->listenTime);
-	ath_print(common, ATH_DBG_ANI,
-		"cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
-		aniState->cycleCount,
-		aniState->ofdmPhyErrCount,
-		aniState->cckPhyErrCount);
-
+		  !aniState->mrcCCKOff ? "on" : "off",
+		  aniState->listenTime,
+		  aniState->cycleCount,
+		  aniState->listenTime,
+		  aniState->ofdmPhyErrCount,
+		  aniState->cckPhyErrCount);
 	return true;
 }
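
Both the firstep and spur-immunity cases above apply the same rule: take the table delta from the default level, add the baseline cached from the INI, then clamp. A minimal hedged sketch of that rule (the helper name is hypothetical; the bounds are the ATH9K_SIG_*_SETTING_MIN/MAX constants used above):

/* Hypothetical helper, illustrating the relative-to-INI rule used above. */
static int example_ani_relative_value(const int *table, u32 level,
				      u32 default_level, int ini_value,
				      int min, int max)
{
	int value = table[level] - table[default_level] + ini_value;

	if (value < min)
		value = min;
	if (value > max)
		value = max;
	return value;
}

For instance, the value written to AR_PHY_FIND_SIG_FIRSTEP corresponds to
example_ani_relative_value(firstep_table, level, ATH9K_ANI_FIRSTEP_LVL_NEW,
aniState->iniDef.firstep, ATH9K_SIG_FIRSTEP_SETTING_MIN,
ATH9K_SIG_FIRSTEP_SETTING_MAX).
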
 
@@ -1111,6 +1224,70 @@
 	}
 }
 
+/*
+ * Cache the default (INI) values of the ANI registers. This routine is
+ * called during a (full) hardware reset after all the registers have
+ * been initialised from the INI.
+ */
+static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+	struct ar5416AniState *aniState;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_channel *chan = ah->curchan;
+	struct ath9k_ani_default *iniDef;
+	int index;
+	u32 val;
+
+	index = ath9k_hw_get_ani_channel_idx(ah, chan);
+	aniState = &ah->ani[index];
+	ah->curani = aniState;
+	iniDef = &aniState->iniDef;
+
+	ath_print(common, ATH_DBG_ANI,
+		  "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+		  ah->hw_version.macVersion,
+		  ah->hw_version.macRev,
+		  ah->opmode,
+		  chan->channel,
+		  chan->channelFlags);
+
+	val = REG_READ(ah, AR_PHY_SFCORR);
+	iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
+	iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
+	iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
+
+	val = REG_READ(ah, AR_PHY_SFCORR_LOW);
+	iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
+	iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
+	iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
+
+	val = REG_READ(ah, AR_PHY_SFCORR_EXT);
+	iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
+	iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
+	iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
+	iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
+	iniDef->firstep = REG_READ_FIELD(ah,
+					 AR_PHY_FIND_SIG,
+					 AR_PHY_FIND_SIG_FIRSTEP);
+	iniDef->firstepLow = REG_READ_FIELD(ah,
+					    AR_PHY_FIND_SIG_LOW,
+					    AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW);
+	iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
+					    AR_PHY_TIMING5,
+					    AR_PHY_TIMING5_CYCPWR_THR1);
+	iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
+					       AR_PHY_EXT_CCA,
+					       AR_PHY_EXT_CYCPWR_THR1);
+
+	/* these levels just got reset to defaults by the INI */
+	aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
+	aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
+	aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
+	aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
+
+	aniState->cycleCount = 0;
+}
+
 void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
 {
 	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1131,4 +1308,124 @@
 	priv_ops->ani_control = ar9003_hw_ani_control;
 	priv_ops->do_getnf = ar9003_hw_do_getnf;
 	priv_ops->loadnf = ar9003_hw_loadnf;
+	priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
 }
+
+void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	u32 idle_tmo_ms = ah->bb_watchdog_timeout_ms;
+	u32 val, idle_count;
+
+	if (!idle_tmo_ms) {
+		/* disable IRQ, disable chip-reset for BB panic */
+		REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
+			  REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) &
+			  ~(AR_PHY_WATCHDOG_RST_ENABLE |
+			    AR_PHY_WATCHDOG_IRQ_ENABLE));
+
+		/* disable watchdog in both non-IDLE and IDLE modes */
+		REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
+			  REG_READ(ah, AR_PHY_WATCHDOG_CTL_1) &
+			  ~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
+			    AR_PHY_WATCHDOG_IDLE_ENABLE));
+
+		ath_print(common, ATH_DBG_RESET, "Disabled BB Watchdog\n");
+		return;
+	}
+
+	/* enable IRQ, disable chip-reset for BB watchdog */
+	val = REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) & AR_PHY_WATCHDOG_CNTL2_MASK;
+	REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
+		  (val | AR_PHY_WATCHDOG_IRQ_ENABLE) &
+		  ~AR_PHY_WATCHDOG_RST_ENABLE);
+
+	/* bound the timeout to 10 seconds */
+	if (idle_tmo_ms > 10000)
+		idle_tmo_ms = 10000;
+
+	/*
+	 * One watchdog time unit is 2^15 cycles of the 44/88 MHz baseband clock.
+	 *
+	 * For HT20 we have a time unit of 2^15/44 MHz = .74 ms per tick
+	 * For HT40 we have a time unit of 2^15/88 MHz = .37 ms per tick
+	 *
+	 * Since the fast clock is now used in 5 GHz, these time units are
+	 * common to both 2 GHz and 5 GHz.
+	 */
+	idle_count = (100 * idle_tmo_ms) / 74;
+	if (ah->curchan && IS_CHAN_HT40(ah->curchan))
+		idle_count = (100 * idle_tmo_ms) / 37;
+
+	/*
+	 * enable watchdog in non-IDLE mode, disable in IDLE mode,
+	 * set idle time-out.
+	 */
+	REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
+		  AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
+		  AR_PHY_WATCHDOG_IDLE_MASK |
+		  (AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));
+
+	ath_print(common, ATH_DBG_RESET,
+		  "Enabled BB Watchdog timeout (%u ms)\n",
+		  idle_tmo_ms);
+}
+
+void ar9003_hw_bb_watchdog_read(struct ath_hw *ah)
+{
+	/*
+	 * we want to avoid printing in ISR context so we save the
+	 * watchdog status to be printed later in bottom half context.
+	 */
+	ah->bb_watchdog_last_status = REG_READ(ah, AR_PHY_WATCHDOG_STATUS);
+
+	/*
+	 * the watchdog timer should reset on status read, but to be sure
+	 * we write 0 to the watchdog status bit.
+	 */
+	REG_WRITE(ah, AR_PHY_WATCHDOG_STATUS,
+		  ah->bb_watchdog_last_status & ~AR_PHY_WATCHDOG_STATUS_CLR);
+}
+
+void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	u32 rxc_pcnt = 0, rxf_pcnt = 0, txf_pcnt = 0, status;
+
+	if (likely(!(common->debug_mask & ATH_DBG_RESET)))
+		return;
+
+	status = ah->bb_watchdog_last_status;
+	ath_print(common, ATH_DBG_RESET,
+		  "\n==== BB update: BB status=0x%08x ====\n", status);
+	ath_print(common, ATH_DBG_RESET,
+		  "** BB state: wd=%u det=%u rdar=%u rOFDM=%d "
+		  "rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
+		  MS(status, AR_PHY_WATCHDOG_INFO),
+		  MS(status, AR_PHY_WATCHDOG_DET_HANG),
+		  MS(status, AR_PHY_WATCHDOG_RADAR_SM),
+		  MS(status, AR_PHY_WATCHDOG_RX_OFDM_SM),
+		  MS(status, AR_PHY_WATCHDOG_RX_CCK_SM),
+		  MS(status, AR_PHY_WATCHDOG_TX_OFDM_SM),
+		  MS(status, AR_PHY_WATCHDOG_TX_CCK_SM),
+		  MS(status, AR_PHY_WATCHDOG_AGC_SM),
+		  MS(status, AR_PHY_WATCHDOG_SRCH_SM));
+
+	ath_print(common, ATH_DBG_RESET,
+		  "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
+		  REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
+		  REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
+	ath_print(common, ATH_DBG_RESET,
+		  "** BB mode: BB_gen_controls=0x%08x **\n",
+		  REG_READ(ah, AR_PHY_GEN_CTRL));
+
+	if (ath9k_hw_GetMibCycleCountsPct(ah, &rxc_pcnt, &rxf_pcnt, &txf_pcnt))
+		ath_print(common, ATH_DBG_RESET,
+			  "** BB busy times: rx_clear=%d%%, "
+			  "rx_frame=%d%%, tx_frame=%d%% **\n",
+			  rxc_pcnt, rxf_pcnt, txf_pcnt);
+
+	ath_print(common, ATH_DBG_RESET,
+		  "==== BB update: done ====\n\n");
+}
+EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
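
A short sketch of the tick conversion performed by ar9003_hw_bb_watchdog_config() above (the helper name is hypothetical; the arithmetic mirrors the patch, with one tick being 2^15 baseband clock cycles, about 0.74 ms at 44 MHz for HT20 and 0.37 ms at 88 MHz for HT40):

/* Hypothetical helper showing the ms-to-tick conversion used above. */
static u32 example_bb_wd_idle_count(u32 idle_tmo_ms, bool is_ht40)
{
	/* same 10 second cap as ar9003_hw_bb_watchdog_config() */
	if (idle_tmo_ms > 10000)
		idle_tmo_ms = 10000;

	return is_ht40 ? (100 * idle_tmo_ms) / 37
		       : (100 * idle_tmo_ms) / 74;
}

For example, a 1000 ms timeout maps to 1351 ticks in HT20 and 2702 ticks in HT40.
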
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index f08cc8b..3394dfe 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -149,6 +149,8 @@
 #define AR_PHY_EXT_CCA_THRESH62_S       16
 #define AR_PHY_EXT_MINCCA_PWR   0x01FF0000
 #define AR_PHY_EXT_MINCCA_PWR_S 16
+#define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L
+#define AR_PHY_EXT_CYCPWR_THR1_S 9
 #define AR_PHY_TIMING5_CYCPWR_THR1  0x000000FE
 #define AR_PHY_TIMING5_CYCPWR_THR1_S    1
 #define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE  0x00000001
@@ -283,6 +285,12 @@
 #define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ                           0x1ffffe00
 #define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S                                  9
 
+#define AR_PHY_MRC_CCK_CTRL         (AR_AGC_BASE + 0x1d0)
+#define AR_PHY_MRC_CCK_ENABLE       0x00000001
+#define AR_PHY_MRC_CCK_ENABLE_S              0
+#define AR_PHY_MRC_CCK_MUX_REG      0x00000002
+#define AR_PHY_MRC_CCK_MUX_REG_S             1
+
 #define AR_PHY_RX_OCGAIN        (AR_AGC_BASE + 0x200)
 
 #define AR_PHY_CCA_NOM_VAL_9300_2GHZ          -110
@@ -451,7 +459,11 @@
 #define AR_PHY_TSTDAC            (AR_SM_BASE + 0x168)
 
 #define AR_PHY_CHAN_STATUS       (AR_SM_BASE + 0x16c)
-#define AR_PHY_CHAN_INFO_MEMORY  (AR_SM_BASE + 0x170)
+
+#define AR_PHY_CHAN_INFO_MEMORY				(AR_SM_BASE + 0x170)
+#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ	0x00000008
+#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S	3
+
 #define AR_PHY_CHNINFO_NOISEPWR  (AR_SM_BASE + 0x174)
 #define AR_PHY_CHNINFO_GAINDIFF  (AR_SM_BASE + 0x178)
 #define AR_PHY_CHNINFO_FINETIM   (AR_SM_BASE + 0x17c)
@@ -467,30 +479,86 @@
 #define AR_PHY_PWRTX_MAX         (AR_SM_BASE + 0x1f0)
 #define AR_PHY_POWER_TX_SUB      (AR_SM_BASE + 0x1f4)
 
-#define AR_PHY_TPC_4_B0          (AR_SM_BASE + 0x204)
-#define AR_PHY_TPC_5_B0          (AR_SM_BASE + 0x208)
-#define AR_PHY_TPC_6_B0          (AR_SM_BASE + 0x20c)
-#define AR_PHY_TPC_11_B0         (AR_SM_BASE + 0x220)
-#define AR_PHY_TPC_18            (AR_SM_BASE + 0x23c)
-#define AR_PHY_TPC_19            (AR_SM_BASE + 0x240)
+#define AR_PHY_TPC_1				(AR_SM_BASE + 0x1f8)
+#define AR_PHY_TPC_1_FORCED_DAC_GAIN		0x0000003e
+#define AR_PHY_TPC_1_FORCED_DAC_GAIN_S		1
+#define AR_PHY_TPC_1_FORCE_DAC_GAIN		0x00000001
+#define AR_PHY_TPC_1_FORCE_DAC_GAIN_S		0
 
-#define AR_PHY_TX_FORCED_GAIN    (AR_SM_BASE + 0x258)
+#define AR_PHY_TPC_4_B0				(AR_SM_BASE + 0x204)
+#define AR_PHY_TPC_5_B0				(AR_SM_BASE + 0x208)
+#define AR_PHY_TPC_6_B0				(AR_SM_BASE + 0x20c)
+
+#define AR_PHY_TPC_11_B0			(AR_SM_BASE + 0x220)
+#define AR_PHY_TPC_11_B1			(AR_SM1_BASE + 0x220)
+#define AR_PHY_TPC_11_B2			(AR_SM2_BASE + 0x220)
+#define AR_PHY_TPC_11_OLPC_GAIN_DELTA		0x00ff0000
+#define AR_PHY_TPC_11_OLPC_GAIN_DELTA_S		16
+
+#define AR_PHY_TPC_12				(AR_SM_BASE + 0x224)
+#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5	0x3e000000
+#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5_S	25
+
+#define AR_PHY_TPC_18				(AR_SM_BASE + 0x23c)
+#define AR_PHY_TPC_18_THERM_CAL_VALUE           0x000000ff
+#define AR_PHY_TPC_18_THERM_CAL_VALUE_S         0
+#define AR_PHY_TPC_18_VOLT_CAL_VALUE		0x0000ff00
+#define AR_PHY_TPC_18_VOLT_CAL_VALUE_S		8
+
+#define AR_PHY_TPC_19				(AR_SM_BASE + 0x240)
+#define AR_PHY_TPC_19_ALPHA_VOLT		0x001f0000
+#define AR_PHY_TPC_19_ALPHA_VOLT_S		16
+#define AR_PHY_TPC_19_ALPHA_THERM		0xff
+#define AR_PHY_TPC_19_ALPHA_THERM_S		0
+
+#define AR_PHY_TX_FORCED_GAIN				(AR_SM_BASE + 0x258)
+#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN		0x00000001
+#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN_S		0
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN	0x0000000e
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN_S	1
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN	0x00000030
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN_S	4
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN		0x000003c0
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN_S	6
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA		0x00003c00
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA_S		10
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB		0x0003c000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB_S		14
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC		0x003c0000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC_S		18
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND		0x00c00000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND_S		22
+#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL		0x01000000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL_S	24
+
 
 #define AR_PHY_PDADC_TAB_0       (AR_SM_BASE + 0x280)
 
+#define AR_PHY_TXGAIN_TABLE      (AR_SM_BASE + 0x300)
+
 #define AR_PHY_TX_IQCAL_CONTROL_1   (AR_SM_BASE + 0x448)
 #define AR_PHY_TX_IQCAL_START       (AR_SM_BASE + 0x440)
 #define AR_PHY_TX_IQCAL_STATUS_B0   (AR_SM_BASE + 0x48c)
 #define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0    (AR_SM_BASE + 0x450)
 
-#define AR_PHY_PANIC_WD_STATUS      (AR_SM_BASE + 0x5c0)
-#define AR_PHY_PANIC_WD_CTL_1       (AR_SM_BASE + 0x5c4)
-#define AR_PHY_PANIC_WD_CTL_2       (AR_SM_BASE + 0x5c8)
-#define AR_PHY_BT_CTL               (AR_SM_BASE + 0x5cc)
+#define AR_PHY_WATCHDOG_STATUS      (AR_SM_BASE + 0x5c0)
+#define AR_PHY_WATCHDOG_CTL_1       (AR_SM_BASE + 0x5c4)
+#define AR_PHY_WATCHDOG_CTL_2       (AR_SM_BASE + 0x5c8)
+#define AR_PHY_WATCHDOG_CTL         (AR_SM_BASE + 0x5cc)
 #define AR_PHY_ONLY_WARMRESET       (AR_SM_BASE + 0x5d0)
 #define AR_PHY_ONLY_CTL             (AR_SM_BASE + 0x5d4)
 #define AR_PHY_ECO_CTRL             (AR_SM_BASE + 0x5dc)
-#define AR_PHY_BB_THERM_ADC_1       (AR_SM_BASE + 0x248)
+
+#define AR_PHY_BB_THERM_ADC_1				(AR_SM_BASE + 0x248)
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM		0x000000ff
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S		0
+
+#define AR_PHY_BB_THERM_ADC_4				(AR_SM_BASE + 0x254)
+#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE	0x000000ff
+#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S	0
+#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE		0x0000ff00
+#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S	8
+
 
 #define AR_PHY_65NM_CH0_SYNTH4      0x1608c
 #define AR_PHY_SYNTH4_LONG_SHIFT_SELECT   0x00000002
@@ -660,17 +728,9 @@
 #define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE      0x00003fff
 #define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S    0
 
-#define AR_PHY_TPC_18_THERM_CAL_VALUE           0xff
-#define AR_PHY_TPC_18_THERM_CAL_VALUE_S         0
-#define AR_PHY_TPC_19_ALPHA_THERM               0xff
-#define AR_PHY_TPC_19_ALPHA_THERM_S             0
-
 #define AR_PHY_65NM_CH0_RXTX4_THERM_ON          0x10000000
 #define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S        28
 
-#define AR_PHY_BB_THERM_ADC_1_INIT_THERM        0x000000ff
-#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S      0
-
 /*
  * Channel 1 Register Map
  */
@@ -812,35 +872,173 @@
 #define AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
 #define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
 
-#define AR_PHY_BB_PANIC_NON_IDLE_ENABLE 0x00000001
-#define AR_PHY_BB_PANIC_IDLE_ENABLE     0x00000002
-#define AR_PHY_BB_PANIC_IDLE_MASK       0xFFFF0000
-#define AR_PHY_BB_PANIC_NON_IDLE_MASK   0x0000FFFC
+#define AR_PHY_WATCHDOG_NON_IDLE_ENABLE    0x00000001
+#define AR_PHY_WATCHDOG_IDLE_ENABLE        0x00000002
+#define AR_PHY_WATCHDOG_IDLE_MASK          0xFFFF0000
+#define AR_PHY_WATCHDOG_NON_IDLE_MASK      0x0000FFFC
 
-#define AR_PHY_BB_PANIC_RST_ENABLE      0x00000002
-#define AR_PHY_BB_PANIC_IRQ_ENABLE      0x00000004
-#define AR_PHY_BB_PANIC_CNTL2_MASK      0xFFFFFFF9
+#define AR_PHY_WATCHDOG_RST_ENABLE         0x00000002
+#define AR_PHY_WATCHDOG_IRQ_ENABLE         0x00000004
+#define AR_PHY_WATCHDOG_CNTL2_MASK         0xFFFFFFF9
 
-#define AR_PHY_BB_WD_STATUS             0x00000007
-#define AR_PHY_BB_WD_STATUS_S           0
-#define AR_PHY_BB_WD_DET_HANG           0x00000008
-#define AR_PHY_BB_WD_DET_HANG_S         3
-#define AR_PHY_BB_WD_RADAR_SM           0x000000F0
-#define AR_PHY_BB_WD_RADAR_SM_S         4
-#define AR_PHY_BB_WD_RX_OFDM_SM         0x00000F00
-#define AR_PHY_BB_WD_RX_OFDM_SM_S       8
-#define AR_PHY_BB_WD_RX_CCK_SM          0x0000F000
-#define AR_PHY_BB_WD_RX_CCK_SM_S        12
-#define AR_PHY_BB_WD_TX_OFDM_SM         0x000F0000
-#define AR_PHY_BB_WD_TX_OFDM_SM_S       16
-#define AR_PHY_BB_WD_TX_CCK_SM          0x00F00000
-#define AR_PHY_BB_WD_TX_CCK_SM_S        20
-#define AR_PHY_BB_WD_AGC_SM             0x0F000000
-#define AR_PHY_BB_WD_AGC_SM_S           24
-#define AR_PHY_BB_WD_SRCH_SM            0xF0000000
-#define AR_PHY_BB_WD_SRCH_SM_S          28
+#define AR_PHY_WATCHDOG_INFO               0x00000007
+#define AR_PHY_WATCHDOG_INFO_S             0
+#define AR_PHY_WATCHDOG_DET_HANG           0x00000008
+#define AR_PHY_WATCHDOG_DET_HANG_S         3
+#define AR_PHY_WATCHDOG_RADAR_SM           0x000000F0
+#define AR_PHY_WATCHDOG_RADAR_SM_S         4
+#define AR_PHY_WATCHDOG_RX_OFDM_SM         0x00000F00
+#define AR_PHY_WATCHDOG_RX_OFDM_SM_S       8
+#define AR_PHY_WATCHDOG_RX_CCK_SM          0x0000F000
+#define AR_PHY_WATCHDOG_RX_CCK_SM_S        12
+#define AR_PHY_WATCHDOG_TX_OFDM_SM         0x000F0000
+#define AR_PHY_WATCHDOG_TX_OFDM_SM_S       16
+#define AR_PHY_WATCHDOG_TX_CCK_SM          0x00F00000
+#define AR_PHY_WATCHDOG_TX_CCK_SM_S        20
+#define AR_PHY_WATCHDOG_AGC_SM             0x0F000000
+#define AR_PHY_WATCHDOG_AGC_SM_S           24
+#define AR_PHY_WATCHDOG_SRCH_SM            0xF0000000
+#define AR_PHY_WATCHDOG_SRCH_SM_S          28
 
-#define AR_PHY_BB_WD_STATUS_CLR         0x00000008
+#define AR_PHY_WATCHDOG_STATUS_CLR         0x00000008
+
+/*
+ * PAPRD registers
+ */
+#define AR_PHY_XPA_TIMING_CTL		(AR_SM_BASE + 0x64)
+
+#define AR_PHY_PAPRD_AM2AM		(AR_CHAN_BASE + 0xe4)
+#define AR_PHY_PAPRD_AM2AM_MASK		0x01ffffff
+#define AR_PHY_PAPRD_AM2AM_MASK_S	0
+
+#define AR_PHY_PAPRD_AM2PM		(AR_CHAN_BASE + 0xe8)
+#define AR_PHY_PAPRD_AM2PM_MASK		0x01ffffff
+#define AR_PHY_PAPRD_AM2PM_MASK_S	0
+
+#define AR_PHY_PAPRD_HT40		(AR_CHAN_BASE + 0xec)
+#define AR_PHY_PAPRD_HT40_MASK		0x01ffffff
+#define AR_PHY_PAPRD_HT40_MASK_S	0
+
+#define AR_PHY_PAPRD_CTRL0_B0				(AR_CHAN_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_B1				(AR_CHAN1_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_B2				(AR_CHAN2_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE			0x00000001
+#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE_S		0
+#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK	0x00000002
+#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK_S	1
+#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH		0xf8000000
+#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH_S		27
+
+#define AR_PHY_PAPRD_CTRL1_B0				(AR_CHAN_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_B1				(AR_CHAN1_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_B2				(AR_CHAN2_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA		0x00000001
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA_S	0
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE	0x00000002
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE_S	1
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE	0x00000004
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE_S	2
+#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL	0x000001f8
+#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL_S	3
+#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK	0x0001fe00
+#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK_S	9
+#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT		0x0ffe0000
+#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S	17
+
+#define AR_PHY_PAPRD_TRAINER_CNTL1				(AR_SM_BASE + 0x490)
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE	0x00000001
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S	0
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING	0x0000007e
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_S	1
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE	0x00000100
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE_S	8
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE	0x00000200
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE_S	9
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE	0x00000400
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE_S	10
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE		0x00000800
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE_S		11
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP		0x0003f000
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S		12
+
+#define AR_PHY_PAPRD_TRAINER_CNTL2				(AR_SM_BASE + 0x494)
+#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN	0xFFFFFFFF
+#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S	0
+
+#define AR_PHY_PAPRD_TRAINER_CNTL3				(AR_SM_BASE + 0x498)
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE	0x0000003f
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S	0
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP		0x00000fc0
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP_S	6
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL	0x0001f000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL_S	12
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES	0x000e0000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES_S	17
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN	0x00f00000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN_S	20
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN	0x0f000000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN_S	24
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE	0x20000000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S	29
+
+#define AR_PHY_PAPRD_TRAINER_CNTL4				(AR_SM_BASE + 0x49c)
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES	0x03ff0000
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S	16
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA	0x0000f000
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA_S	12
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR		0x00000fff
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR_S		0
+
+#define AR_PHY_PAPRD_PRE_POST_SCALE_0_B0			(AR_CHAN_BASE + 0x100)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_1_B0			(AR_CHAN_BASE + 0x104)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_2_B0			(AR_CHAN_BASE + 0x108)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_3_B0			(AR_CHAN_BASE + 0x10c)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_4_B0			(AR_CHAN_BASE + 0x110)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_5_B0			(AR_CHAN_BASE + 0x114)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_6_B0			(AR_CHAN_BASE + 0x118)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_7_B0			(AR_CHAN_BASE + 0x11c)
+#define AR_PHY_PAPRD_PRE_POST_SCALING				0x3FFFF
+#define AR_PHY_PAPRD_PRE_POST_SCALING_S				0
+
+#define AR_PHY_PAPRD_TRAINER_STAT1				(AR_SM_BASE + 0x4a0)
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE		0x00000001
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S		0
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE	0x00000002
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE_S	1
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR		0x00000004
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR_S		2
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE		0x00000008
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE_S		3
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX		0x000001f0
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX_S		4
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR		0x0001fe00
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S		9
+
+#define AR_PHY_PAPRD_TRAINER_STAT2				(AR_SM_BASE + 0x4a4)
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL		0x0000ffff
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S		0
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX		0x001f0000
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX_S		16
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX		0x00600000
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S		21
+
+#define AR_PHY_PAPRD_TRAINER_STAT3				(AR_SM_BASE + 0x4a8)
+#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT	0x000fffff
+#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S	0
+
+#define AR_PHY_PAPRD_MEM_TAB_B0			(AR_CHAN_BASE + 0x120)
+#define AR_PHY_PAPRD_MEM_TAB_B1			(AR_CHAN1_BASE + 0x120)
+#define AR_PHY_PAPRD_MEM_TAB_B2			(AR_CHAN2_BASE + 0x120)
+
+#define AR_PHY_PA_GAIN123_B0			(AR_CHAN_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_B1			(AR_CHAN1_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_B2			(AR_CHAN2_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_PA_GAIN1		0x3FF
+#define AR_PHY_PA_GAIN123_PA_GAIN1_S		0
+
+#define AR_PHY_POWERTX_RATE5			(AR_SM_BASE + 0x1d0)
+#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0	0x3F
+#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0_S	0
 
 void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
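Each register added above comes with the usual ath9k pairing of a bit mask and a matching _S shift, which is what lets callers compose register values field by field. A minimal sketch of that packing step, using the new AR_PHY_TPC_1 fields (the SM() definition is an assumption mirroring the driver's macro, and the gain value is arbitrary):

#include <stdio.h>
#include <stdint.h>

#define AR_PHY_TPC_1_FORCED_DAC_GAIN   0x0000003e
#define AR_PHY_TPC_1_FORCED_DAC_GAIN_S 1
#define AR_PHY_TPC_1_FORCE_DAC_GAIN    0x00000001
#define AR_PHY_TPC_1_FORCE_DAC_GAIN_S  0

/* SM(value, FIELD): shift a field value into position and mask it,
 * the write-side counterpart of MS(). */
#define SM(_v, _f) (((_v) << _f##_S) & _f)

int main(void)
{
	/* Force the DAC gain and program an illustrative gain of 10. */
	uint32_t regval = SM(1, AR_PHY_TPC_1_FORCE_DAC_GAIN) |
			  SM(10, AR_PHY_TPC_1_FORCED_DAC_GAIN);

	printf("AR_PHY_TPC_1 <- 0x%08x\n", (unsigned)regval);	/* 0x00000015 */
	return 0;
}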
 
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 5ea8773..72d5e52 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -20,6 +20,7 @@
 #include <linux/etherdevice.h>
 #include <linux/device.h>
 #include <linux/leds.h>
+#include <linux/completion.h>
 
 #include "debug.h"
 #include "common.h"
@@ -136,6 +137,8 @@
 #define ATH_MAX_ANTENNA         3
 #define ATH_RXBUF               512
 #define ATH_TXBUF               512
+#define ATH_TXBUF_RESERVE       5
+#define ATH_MAX_QDEPTH          (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
 #define ATH_TXMAXTRY            13
 #define ATH_MGT_TXMAXTRY        4
 
@@ -192,6 +195,7 @@
 
 #define ATH_TXFIFO_DEPTH 8
 struct ath_txq {
+	int axq_class;
 	u32 axq_qnum;
 	u32 *axq_link;
 	struct list_head axq_q;
@@ -206,6 +210,71 @@
 	u8 txq_tailidx;
 };
 
+struct ath_atx_ac {
+	int sched;
+	int qnum;
+	struct list_head list;
+	struct list_head tid_q;
+};
+
+struct ath_buf_state {
+	int bfs_nframes;
+	u16 bfs_al;
+	u16 bfs_frmlen;
+	int bfs_seqno;
+	int bfs_tidno;
+	int bfs_retries;
+	u8 bf_type;
+	u8 bfs_paprd;
+	unsigned long bfs_paprd_timestamp;
+	u32 bfs_keyix;
+	enum ath9k_key_type bfs_keytype;
+};
+
+struct ath_buf {
+	struct list_head list;
+	struct ath_buf *bf_lastbf;	/* last buf of this unit (a frame or
+					   an aggregate) */
+	struct ath_buf *bf_next;	/* next subframe in the aggregate */
+	struct sk_buff *bf_mpdu;	/* enclosing frame structure */
+	void *bf_desc;			/* virtual addr of desc */
+	dma_addr_t bf_daddr;		/* physical addr of desc */
+	dma_addr_t bf_buf_addr;		/* physical addr of data buffer */
+	bool bf_stale;
+	bool bf_isnullfunc;
+	bool bf_tx_aborted;
+	u16 bf_flags;
+	struct ath_buf_state bf_state;
+	dma_addr_t bf_dmacontext;
+	struct ath_wiphy *aphy;
+};
+
+struct ath_atx_tid {
+	struct list_head list;
+	struct list_head buf_q;
+	struct ath_node *an;
+	struct ath_atx_ac *ac;
+	struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
+	u16 seq_start;
+	u16 seq_next;
+	u16 baw_size;
+	int tidno;
+	int baw_head;   /* first un-acked tx buffer */
+	int baw_tail;   /* next unused tx buffer slot */
+	int sched;
+	int paused;
+	u8 state;
+};
+
+struct ath_node {
+	struct ath_common *common;
+	struct ath_atx_tid tid[WME_NUM_TID];
+	struct ath_atx_ac ac[WME_NUM_AC];
+	u16 maxampdu;
+	u8 mpdudensity;
+	int last_rssi;
+};
+
 #define AGGR_CLEANUP         BIT(1)
 #define AGGR_ADDBA_COMPLETE  BIT(2)
 #define AGGR_ADDBA_PROGRESS  BIT(3)
@@ -214,6 +283,7 @@
 	struct ath_txq *txq;
 	int if_id;
 	enum ath9k_internal_frame_type frame_type;
+	u8 paprd;
 };
 
 #define ATH_TX_ERROR        0x01
@@ -223,11 +293,12 @@
 struct ath_tx {
 	u16 seq_no;
 	u32 txqsetup;
-	int hwq_map[ATH9K_WME_AC_VO+1];
+	int hwq_map[WME_NUM_AC];
 	spinlock_t txbuflock;
 	struct list_head txbuf;
 	struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
 	struct ath_descdma txdma;
+	int pending_frames[WME_NUM_AC];
 };
 
 struct ath_rx_edma {
@@ -267,7 +338,6 @@
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
 int ath_tx_init(struct ath_softc *sc, int nbufs);
 void ath_tx_cleanup(struct ath_softc *sc);
-struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
 int ath_txq_update(struct ath_softc *sc, int qnum,
 		   struct ath9k_tx_queue_info *q);
 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -351,10 +421,14 @@
 
 #define ATH_STA_SHORT_CALINTERVAL 1000    /* 1 second */
 #define ATH_AP_SHORT_CALINTERVAL  100     /* 100 ms */
-#define ATH_ANI_POLLINTERVAL      100     /* 100 ms */
+#define ATH_ANI_POLLINTERVAL_OLD  100     /* 100 ms */
+#define ATH_ANI_POLLINTERVAL_NEW  1000    /* 1000 ms */
 #define ATH_LONG_CALINTERVAL      30000   /* 30 seconds */
 #define ATH_RESTART_CALINTERVAL   1200000 /* 20 minutes */
 
+#define ATH_PAPRD_TIMEOUT	100 /* msecs */
+
+void ath_paprd_calibrate(struct work_struct *work);
 void ath_ani_calibrate(unsigned long data);
 
 /**********/
@@ -487,6 +561,8 @@
 	spinlock_t sc_serial_rw;
 	spinlock_t sc_pm_lock;
 	struct mutex mutex;
+	struct work_struct paprd_work;
+	struct completion paprd_complete;
 
 	u32 intrstatus;
 	u32 sc_flags; /* SC_OP_* */
@@ -545,7 +621,6 @@
 
 void ath9k_tasklet(unsigned long data);
 int ath_reset(struct ath_softc *sc, bool retry_tx);
-int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
 int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
 int ath_cabq_update(struct ath_softc *);
 
@@ -556,13 +631,12 @@
 
 extern struct ieee80211_ops ath9k_ops;
 extern int modparam_nohwcrypt;
+extern int led_blink;
 
 irqreturn_t ath_isr(int irq, void *dev);
 int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
 		    const struct ath_bus_ops *bus_ops);
 void ath9k_deinit_device(struct ath_softc *sc);
-const char *ath_mac_bb_name(u32 mac_bb_version);
-const char *ath_rf_name(u16 rf_version);
 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
 void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
 			   struct ath9k_channel *ichan);
@@ -613,8 +687,6 @@
 void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
 void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
 
-int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
-
 void ath_start_rfkill_poll(struct ath_softc *sc);
 extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
 
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index f43d85a..4d4b22d 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -38,8 +38,7 @@
 		qi.tqi_cwmax = 0;
 	} else {
 		/* Adhoc mode; important thing is to use 2x cwmin. */
-		qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA,
-				       ATH9K_WME_AC_BE);
+		qnum = sc->tx.hwq_map[WME_AC_BE];
 		ath9k_hw_get_txq_props(ah, qnum, &qi_be);
 		qi.tqi_aifs = qi_be.tqi_aifs;
 		qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 7707341..16e2849 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -27,270 +27,6 @@
 MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
 MODULE_LICENSE("Dual BSD/GPL");
 
-/* Common RX processing */
-
-/* Assumes you've already done the endian to CPU conversion */
-static bool ath9k_rx_accept(struct ath_common *common,
-			    struct sk_buff *skb,
-			    struct ieee80211_rx_status *rxs,
-			    struct ath_rx_status *rx_stats,
-			    bool *decrypt_error)
-{
-	struct ath_hw *ah = common->ah;
-	struct ieee80211_hdr *hdr;
-	__le16 fc;
-
-	hdr = (struct ieee80211_hdr *) skb->data;
-	fc = hdr->frame_control;
-
-	if (!rx_stats->rs_datalen)
-		return false;
-        /*
-         * rs_status follows rs_datalen so if rs_datalen is too large
-         * we can take a hint that hardware corrupted it, so ignore
-         * those frames.
-         */
-	if (rx_stats->rs_datalen > common->rx_bufsize)
-		return false;
-
-	/*
-	 * rs_more indicates chained descriptors which can be used
-	 * to link buffers together for a sort of scatter-gather
-	 * operation.
-	 * reject the frame, we don't support scatter-gather yet and
-	 * the frame is probably corrupt anyway
-	 */
-	if (rx_stats->rs_more)
-		return false;
-
-	/*
-	 * The rx_stats->rs_status will not be set until the end of the
-	 * chained descriptors so it can be ignored if rs_more is set. The
-	 * rs_more will be false at the last element of the chained
-	 * descriptors.
-	 */
-	if (rx_stats->rs_status != 0) {
-		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
-			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
-		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
-			return false;
-
-		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
-			*decrypt_error = true;
-		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
-			if (ieee80211_is_ctl(fc))
-				/*
-				 * Sometimes, we get invalid
-				 * MIC failures on valid control frames.
-				 * Remove these mic errors.
-				 */
-				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
-			else
-				rxs->flag |= RX_FLAG_MMIC_ERROR;
-		}
-		/*
-		 * Reject error frames with the exception of
-		 * decryption and MIC failures. For monitor mode,
-		 * we also ignore the CRC error.
-		 */
-		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
-			if (rx_stats->rs_status &
-			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
-			      ATH9K_RXERR_CRC))
-				return false;
-		} else {
-			if (rx_stats->rs_status &
-			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
-				return false;
-			}
-		}
-	}
-	return true;
-}
-
-static int ath9k_process_rate(struct ath_common *common,
-			      struct ieee80211_hw *hw,
-			      struct ath_rx_status *rx_stats,
-			      struct ieee80211_rx_status *rxs,
-			      struct sk_buff *skb)
-{
-	struct ieee80211_supported_band *sband;
-	enum ieee80211_band band;
-	unsigned int i = 0;
-
-	band = hw->conf.channel->band;
-	sband = hw->wiphy->bands[band];
-
-	if (rx_stats->rs_rate & 0x80) {
-		/* HT rate */
-		rxs->flag |= RX_FLAG_HT;
-		if (rx_stats->rs_flags & ATH9K_RX_2040)
-			rxs->flag |= RX_FLAG_40MHZ;
-		if (rx_stats->rs_flags & ATH9K_RX_GI)
-			rxs->flag |= RX_FLAG_SHORT_GI;
-		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
-		return 0;
-	}
-
-	for (i = 0; i < sband->n_bitrates; i++) {
-		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
-			rxs->rate_idx = i;
-			return 0;
-		}
-		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
-			rxs->flag |= RX_FLAG_SHORTPRE;
-			rxs->rate_idx = i;
-			return 0;
-		}
-	}
-
-	/*
-	 * No valid hardware bitrate found -- we should not get here
-	 * because hardware has already validated this frame as OK.
-	 */
-	ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
-		  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);
-	if ((common->debug_mask & ATH_DBG_XMIT))
-		print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len);
-
-	return -EINVAL;
-}
-
-static void ath9k_process_rssi(struct ath_common *common,
-			       struct ieee80211_hw *hw,
-			       struct sk_buff *skb,
-			       struct ath_rx_status *rx_stats)
-{
-	struct ath_hw *ah = common->ah;
-	struct ieee80211_sta *sta;
-	struct ieee80211_hdr *hdr;
-	struct ath_node *an;
-	int last_rssi = ATH_RSSI_DUMMY_MARKER;
-	__le16 fc;
-
-	hdr = (struct ieee80211_hdr *)skb->data;
-	fc = hdr->frame_control;
-
-	rcu_read_lock();
-	/*
-	 * XXX: use ieee80211_find_sta! This requires quite a bit of work
-	 * under the current ath9k virtual wiphy implementation as we have
-	 * no way of tying a vif to wiphy. Typically vifs are attached to
-	 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
-	 * wiphy you'd have to iterate over every wiphy and each sdata.
-	 */
-	sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
-	if (sta) {
-		an = (struct ath_node *) sta->drv_priv;
-		if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
-		   !rx_stats->rs_moreaggr)
-			ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
-		last_rssi = an->last_rssi;
-	}
-	rcu_read_unlock();
-
-	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
-		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
-					      ATH_RSSI_EP_MULTIPLIER);
-	if (rx_stats->rs_rssi < 0)
-		rx_stats->rs_rssi = 0;
-
-	/* Update Beacon RSSI, this is used by ANI. */
-	if (ieee80211_is_beacon(fc))
-		ah->stats.avgbrssi = rx_stats->rs_rssi;
-}
-
-/*
- * For Decrypt or Demic errors, we only mark packet status here and always push
- * up the frame up to let mac80211 handle the actual error case, be it no
- * decryption key or real decryption error. This let us keep statistics there.
- */
-int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
-				struct ieee80211_hw *hw,
-				struct sk_buff *skb,
-				struct ath_rx_status *rx_stats,
-				struct ieee80211_rx_status *rx_status,
-				bool *decrypt_error)
-{
-	struct ath_hw *ah = common->ah;
-
-	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
-
-	/*
-	 * everything but the rate is checked here, the rate check is done
-	 * separately to avoid doing two lookups for a rate for each frame.
-	 */
-	if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error))
-		return -EINVAL;
-
-	ath9k_process_rssi(common, hw, skb, rx_stats);
-
-	if (ath9k_process_rate(common, hw, rx_stats, rx_status, skb))
-		return -EINVAL;
-
-	rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
-	rx_status->band = hw->conf.channel->band;
-	rx_status->freq = hw->conf.channel->center_freq;
-	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
-	rx_status->antenna = rx_stats->rs_antenna;
-	rx_status->flag |= RX_FLAG_TSFT;
-
-	return 0;
-}
-EXPORT_SYMBOL(ath9k_cmn_rx_skb_preprocess);
-
-void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
-				  struct sk_buff *skb,
-				  struct ath_rx_status *rx_stats,
-				  struct ieee80211_rx_status *rxs,
-				  bool decrypt_error)
-{
-	struct ath_hw *ah = common->ah;
-	struct ieee80211_hdr *hdr;
-	int hdrlen, padpos, padsize;
-	u8 keyix;
-	__le16 fc;
-
-	/* see if any padding is done by the hw and remove it */
-	hdr = (struct ieee80211_hdr *) skb->data;
-	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-	fc = hdr->frame_control;
-	padpos = ath9k_cmn_padpos(hdr->frame_control);
-
-	/* The MAC header is padded to have 32-bit boundary if the
-	 * packet payload is non-zero. The general calculation for
-	 * padsize would take into account odd header lengths:
-	 * padsize = (4 - padpos % 4) % 4; However, since only
-	 * even-length headers are used, padding can only be 0 or 2
-	 * bytes and we can optimize this a bit. In addition, we must
-	 * not try to remove padding from short control frames that do
-	 * not have payload. */
-	padsize = padpos & 3;
-	if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
-		memmove(skb->data + padsize, skb->data, padpos);
-		skb_pull(skb, padsize);
-	}
-
-	keyix = rx_stats->rs_keyix;
-
-	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
-	    ieee80211_has_protected(fc)) {
-		rxs->flag |= RX_FLAG_DECRYPTED;
-	} else if (ieee80211_has_protected(fc)
-		   && !decrypt_error && skb->len >= hdrlen + 4) {
-		keyix = skb->data[hdrlen + 3] >> 6;
-
-		if (test_bit(keyix, common->keymap))
-			rxs->flag |= RX_FLAG_DECRYPTED;
-	}
-	if (ah->sw_mgmt_crypto &&
-	    (rxs->flag & RX_FLAG_DECRYPTED) &&
-	    ieee80211_is_mgmt(fc))
-		/* Use software decrypt for management frames. */
-		rxs->flag &= ~RX_FLAG_DECRYPTED;
-}
-EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
-
 int ath9k_cmn_padpos(__le16 frame_control)
 {
 	int padpos = 24;
@@ -475,10 +211,14 @@
 	return -1;
 }
 
-static int ath_reserve_key_cache_slot(struct ath_common *common)
+static int ath_reserve_key_cache_slot(struct ath_common *common,
+				      enum ieee80211_key_alg alg)
 {
 	int i;
 
+	if (alg == ALG_TKIP)
+		return ath_reserve_key_cache_slot_tkip(common);
+
 	/* First, try to find slots that would not be available for TKIP. */
 	if (common->splitmic) {
 		for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
@@ -547,6 +287,7 @@
 	struct ath_hw *ah = common->ah;
 	struct ath9k_keyval hk;
 	const u8 *mac = NULL;
+	u8 gmac[ETH_ALEN];
 	int ret = 0;
 	int idx;
 
@@ -570,9 +311,23 @@
 	memcpy(hk.kv_val, key->key, key->keylen);
 
 	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
-		/* For now, use the default keys for broadcast keys. This may
-		 * need to change with virtual interfaces. */
-		idx = key->keyidx;
+		switch (vif->type) {
+		case NL80211_IFTYPE_AP:
+			memcpy(gmac, vif->addr, ETH_ALEN);
+			gmac[0] |= 0x01;
+			mac = gmac;
+			idx = ath_reserve_key_cache_slot(common, key->alg);
+			break;
+		case NL80211_IFTYPE_ADHOC:
+			memcpy(gmac, sta->addr, ETH_ALEN);
+			gmac[0] |= 0x01;
+			mac = gmac;
+			idx = ath_reserve_key_cache_slot(common, key->alg);
+			break;
+		default:
+			idx = key->keyidx;
+			break;
+		}
 	} else if (key->keyidx) {
 		if (WARN_ON(!sta))
 			return -EOPNOTSUPP;
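The new AP/ADHOC branches above derive the MAC address used for the group key by copying the interface (or station) address and setting the low bit of the first octet, i.e. the individual/group (multicast) bit. A minimal, stand-alone sketch of that derivation (the address below is made up):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

int main(void)
{
	/* Hypothetical interface address. */
	uint8_t vif_addr[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t gmac[ETH_ALEN];

	memcpy(gmac, vif_addr, ETH_ALEN);
	gmac[0] |= 0x01;	/* mark it as a group (multicast) address */

	printf("group key MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       gmac[0], gmac[1], gmac[2], gmac[3], gmac[4], gmac[5]);
	return 0;
}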
@@ -589,14 +344,12 @@
 			return -EOPNOTSUPP;
 		mac = sta->addr;
 
-		if (key->alg == ALG_TKIP)
-			idx = ath_reserve_key_cache_slot_tkip(common);
-		else
-			idx = ath_reserve_key_cache_slot(common);
-		if (idx < 0)
-			return -ENOSPC; /* no free key cache entries */
+		idx = ath_reserve_key_cache_slot(common, key->alg);
 	}
 
+	if (idx < 0)
+		return -ENOSPC; /* no free key cache entries */
+
 	if (key->alg == ALG_TKIP)
 		ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
 				      vif->type == NL80211_IFTYPE_AP);
@@ -644,6 +397,19 @@
 }
 EXPORT_SYMBOL(ath9k_cmn_key_delete);
 
+int ath9k_cmn_count_streams(unsigned int chainmask, int max)
+{
+	int streams = 0;
+
+	do {
+		if (++streams == max)
+			break;
+	} while ((chainmask = chainmask & (chainmask - 1)));
+
+	return streams;
+}
+EXPORT_SYMBOL(ath9k_cmn_count_streams);
+
 static int __init ath9k_cmn_init(void)
 {
 	return 0;
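ath9k_cmn_count_streams() added above counts the spatial streams implied by a chainmask with the classic n & (n - 1) step, which clears the lowest set bit on every iteration, capping the result at max. A quick userspace check of the same loop:

#include <stdio.h>

/* Same logic as ath9k_cmn_count_streams(): one stream per set bit in
 * chainmask, but never more than 'max'. */
static int count_streams(unsigned int chainmask, int max)
{
	int streams = 0;

	do {
		if (++streams == max)
			break;
	} while ((chainmask = chainmask & (chainmask - 1)));

	return streams;
}

int main(void)
{
	printf("%d\n", count_streams(0x5, 3));	/* chains 0 and 2 -> 2 */
	printf("%d\n", count_streams(0x7, 2));	/* three chains, capped at 2 */
	return 0;
}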
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index e08f7e5..97809d3 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -52,82 +52,6 @@
 #define ATH_EP_RND(x, mul) 						\
 	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
 
-struct ath_atx_ac {
-	int sched;
-	int qnum;
-	struct list_head list;
-	struct list_head tid_q;
-};
-
-struct ath_buf_state {
-	int bfs_nframes;
-	u16 bfs_al;
-	u16 bfs_frmlen;
-	int bfs_seqno;
-	int bfs_tidno;
-	int bfs_retries;
-	u8 bf_type;
-	u32 bfs_keyix;
-	enum ath9k_key_type bfs_keytype;
-};
-
-struct ath_buf {
-	struct list_head list;
-	struct ath_buf *bf_lastbf;	/* last buf of this unit (a frame or
-					   an aggregate) */
-	struct ath_buf *bf_next;	/* next subframe in the aggregate */
-	struct sk_buff *bf_mpdu;	/* enclosing frame structure */
-	void *bf_desc;			/* virtual addr of desc */
-	dma_addr_t bf_daddr;		/* physical addr of desc */
-	dma_addr_t bf_buf_addr;		/* physical addr of data buffer */
-	bool bf_stale;
-	bool bf_isnullfunc;
-	bool bf_tx_aborted;
-	u16 bf_flags;
-	struct ath_buf_state bf_state;
-	dma_addr_t bf_dmacontext;
-	struct ath_wiphy *aphy;
-};
-
-struct ath_atx_tid {
-	struct list_head list;
-	struct list_head buf_q;
-	struct ath_node *an;
-	struct ath_atx_ac *ac;
-	struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
-	u16 seq_start;
-	u16 seq_next;
-	u16 baw_size;
-	int tidno;
-	int baw_head;   /* first un-acked tx buffer */
-	int baw_tail;   /* next unused tx buffer slot */
-	int sched;
-	int paused;
-	u8 state;
-};
-
-struct ath_node {
-	struct ath_common *common;
-	struct ath_atx_tid tid[WME_NUM_TID];
-	struct ath_atx_ac ac[WME_NUM_AC];
-	u16 maxampdu;
-	u8 mpdudensity;
-	int last_rssi;
-};
-
-int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
-				struct ieee80211_hw *hw,
-				struct sk_buff *skb,
-				struct ath_rx_status *rx_stats,
-				struct ieee80211_rx_status *rx_status,
-				bool *decrypt_error);
-
-void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
-				  struct sk_buff *skb,
-				  struct ath_rx_status *rx_stats,
-				  struct ieee80211_rx_status *rxs,
-				  bool decrypt_error);
-
 int ath9k_cmn_padpos(__le16 frame_control);
 int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
 void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
@@ -140,3 +64,4 @@
 			 struct ieee80211_key_conf *key);
 void ath9k_cmn_key_delete(struct ath_common *common,
 			  struct ieee80211_key_conf *key);
+int ath9k_cmn_count_streams(unsigned int chainmask, int max);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 29898f8..54aae93 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -42,7 +42,7 @@
 	char buf[32];
 	unsigned int len;
 
-	len = snprintf(buf, sizeof(buf), "0x%08x\n", common->debug_mask);
+	len = sprintf(buf, "0x%08x\n", common->debug_mask);
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -57,7 +57,7 @@
 
 	len = min(count, sizeof(buf) - 1);
 	if (copy_from_user(buf, user_buf, len))
-		return -EINVAL;
+		return -EFAULT;
 
 	buf[len] = '\0';
 	if (strict_strtoul(buf, 0, &mask))
@@ -86,7 +86,7 @@
 	char buf[32];
 	unsigned int len;
 
-	len = snprintf(buf, sizeof(buf), "0x%08x\n", common->tx_chainmask);
+	len = sprintf(buf, "0x%08x\n", common->tx_chainmask);
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -101,7 +101,7 @@
 
 	len = min(count, sizeof(buf) - 1);
 	if (copy_from_user(buf, user_buf, len))
-		return -EINVAL;
+		return -EFAULT;
 
 	buf[len] = '\0';
 	if (strict_strtoul(buf, 0, &mask))
@@ -128,7 +128,7 @@
 	char buf[32];
 	unsigned int len;
 
-	len = snprintf(buf, sizeof(buf), "0x%08x\n", common->rx_chainmask);
+	len = sprintf(buf, "0x%08x\n", common->rx_chainmask);
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -143,7 +143,7 @@
 
 	len = min(count, sizeof(buf) - 1);
 	if (copy_from_user(buf, user_buf, len))
-		return -EINVAL;
+		return -EFAULT;
 
 	buf[len] = '\0';
 	if (strict_strtoul(buf, 0, &mask))
@@ -176,7 +176,7 @@
 
 	buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL);
 	if (!buf)
-		return 0;
+		return -ENOMEM;
 
 	ath9k_ps_wakeup(sc);
 
@@ -248,6 +248,9 @@
 
 	ath9k_ps_restore(sc);
 
+	if (len > DMA_BUF_LEN)
+		len = DMA_BUF_LEN;
+
 	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
 	kfree(buf);
 	return retval;
@@ -269,6 +272,8 @@
 			sc->debug.stats.istats.rxlp++;
 		if (status & ATH9K_INT_RXHP)
 			sc->debug.stats.istats.rxhp++;
+		if (status & ATH9K_INT_BB_WATCHDOG)
+			sc->debug.stats.istats.bb_watchdog++;
 	} else {
 		if (status & ATH9K_INT_RX)
 			sc->debug.stats.istats.rxok++;
@@ -319,6 +324,9 @@
 			"%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
 		len += snprintf(buf + len, sizeof(buf) - len,
 			"%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
+		len += snprintf(buf + len, sizeof(buf) - len,
+			"%8s: %10u\n", "WATCHDOG",
+			sc->debug.stats.istats.bb_watchdog);
 	} else {
 		len += snprintf(buf + len, sizeof(buf) - len,
 			"%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
@@ -358,6 +366,9 @@
 	len += snprintf(buf + len, sizeof(buf) - len,
 		"%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total);
 
+	if (len > sizeof(buf))
+		len = sizeof(buf);
+
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -397,11 +408,10 @@
 	if (sc->cur_rate_table == NULL)
 		return 0;
 
-	max = 80 + sc->cur_rate_table->rate_cnt * 1024;
-	buf = kmalloc(max + 1, GFP_KERNEL);
+	max = 80 + sc->cur_rate_table->rate_cnt * 1024 + 1;
+	buf = kmalloc(max, GFP_KERNEL);
 	if (buf == NULL)
-		return 0;
-	buf[max] = 0;
+		return -ENOMEM;
 
 	len += sprintf(buf, "%6s %6s %6s "
 		       "%10s %10s %10s %10s\n",
@@ -443,6 +453,9 @@
 			stats->per);
 	}
 
+	if (len > max)
+		len = max;
+
 	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
 	kfree(buf);
 	return retval;
@@ -505,6 +518,9 @@
 	len += snprintf(buf + len, sizeof(buf) - len,
 			"addrmask: %pM\n", addr);
 
+	if (len > sizeof(buf))
+		len = sizeof(buf);
+
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -614,10 +630,10 @@
 	do {								\
 		len += snprintf(buf + len, size - len,			\
 				"%s%13u%11u%10u%10u\n", str,		\
-		sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BE]].elem, \
-		sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BK]].elem, \
-		sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VI]].elem, \
-		sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VO]].elem); \
+		sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BE]].elem, \
+		sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BK]].elem, \
+		sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VI]].elem, \
+		sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VO]].elem); \
 } while(0)
 
 static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
@@ -630,7 +646,7 @@
 
 	buf = kzalloc(size, GFP_KERNEL);
 	if (buf == NULL)
-		return 0;
+		return -ENOMEM;
 
 	len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
 
@@ -648,6 +664,9 @@
 	PR("DATA Underrun:   ", data_underrun);
 	PR("DELIM Underrun:  ", delim_underrun);
 
+	if (len > size)
+		len = size;
+
 	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
 	kfree(buf);
 
@@ -700,7 +719,7 @@
 
 	buf = kzalloc(size, GFP_KERNEL);
 	if (buf == NULL)
-		return 0;
+		return -ENOMEM;
 
 	len += snprintf(buf + len, size - len,
 			"%18s : %10u\n", "CRC ERR",
@@ -751,6 +770,9 @@
 	PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
 	PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
 
+	if (len > size)
+		len = size;
+
 	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
 	kfree(buf);
 
@@ -802,7 +824,7 @@
 	char buf[32];
 	unsigned int len;
 
-	len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.regidx);
+	len = sprintf(buf, "0x%08x\n", sc->debug.regidx);
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -816,7 +838,7 @@
 
 	len = min(count, sizeof(buf) - 1);
 	if (copy_from_user(buf, user_buf, len))
-		return -EINVAL;
+		return -EFAULT;
 
 	buf[len] = '\0';
 	if (strict_strtoul(buf, 0, &regidx))
@@ -843,7 +865,7 @@
 	u32 regval;
 
 	regval = REG_READ_D(ah, sc->debug.regidx);
-	len = snprintf(buf, sizeof(buf), "0x%08x\n", regval);
+	len = sprintf(buf, "0x%08x\n", regval);
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -858,7 +880,7 @@
 
 	len = min(count, sizeof(buf) - 1);
 	if (copy_from_user(buf, user_buf, len))
-		return -EINVAL;
+		return -EFAULT;
 
 	buf[len] = '\0';
 	if (strict_strtoul(buf, 0, &regval))
@@ -934,6 +956,10 @@
 			sc->debug.debugfs_phy, sc, &fops_regval))
 		goto err;
 
+	if (!debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR,
+			sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca))
+		goto err;
+
 	sc->debug.regidx = 0;
 	return 0;
 err:
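Several of the hunks above clamp len before handing the buffer to simple_read_from_buffer(). The reason is that snprintf() returns the length the output would have had without truncation, so an accumulated len can exceed the size of the on-stack buffer once enough counters are printed. A small userspace illustration of that behaviour and of the clamp:

#include <stdio.h>

int main(void)
{
	char buf[16];
	size_t len;

	/* snprintf() never overruns buf, but it reports the untruncated
	 * length, which here is larger than the buffer. */
	len = snprintf(buf, sizeof(buf), "%8s: %10u\n", "WATCHDOG", 12345u);

	printf("reported len = %zu, buffer holds %zu bytes\n",
	       len, sizeof(buf));

	if (len > sizeof(buf))	/* the clamp the driver now applies */
		len = sizeof(buf);

	printf("bytes safe to copy out: %zu\n", len);
	return 0;
}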
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 5147b87..5d21704 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -53,6 +53,7 @@
  * @cabend: RX End of CAB traffic
  * @dtimsync: DTIM sync lossage
  * @dtim: RX Beacon with DTIM
+ * @bb_watchdog: Baseband watchdog
  */
 struct ath_interrupt_stats {
 	u32 total;
@@ -76,6 +77,7 @@
 	u32 cabend;
 	u32 dtimsync;
 	u32 dtim;
+	u32 bb_watchdog;
 };
 
 struct ath_rc_stats {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index ca8704a..1266333 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -24,6 +24,14 @@
 	return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
 }
 
+void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
+{
+	REG_WRITE(ah, reg, val);
+
+	if (ah->config.analog_shiftreg)
+		udelay(100);
+}
+
 void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
 			       u32 shift, u32 val)
 {
@@ -250,6 +258,27 @@
 	return twiceMaxEdgePower;
 }
 
+void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+
+	switch (ar5416_get_ntxchains(ah->txchainmask)) {
+	case 1:
+		break;
+	case 2:
+		regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
+		break;
+	case 3:
+		regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
+		break;
+	default:
+		ath_print(common, ATH_DBG_EEPROM,
+			  "Invalid chainmask configuration\n");
+		break;
+	}
+}
+
 int ath9k_hw_eeprom_init(struct ath_hw *ah)
 {
 	int status;
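ath9k_hw_update_regulatory_maxpower() above raises the regulatory limit when two or three TX chains are active, reflecting the array gain of driving the same signal from multiple chains. In the driver's half-dB units that gain is 2 * 10 * log10(nchains); the INCREASE_MAXPOW_BY_TWO/THREE_CHAIN constants (defined elsewhere in eeprom.h, not shown in this hunk) are assumed to encode exactly that. A quick check of the arithmetic (compile with -lm):

#include <stdio.h>
#include <math.h>

int main(void)
{
	/* Array gain from transmitting on n chains, in the half-dB units
	 * used by max_power_level. */
	for (int n = 1; n <= 3; n++)
		printf("%d chain(s): +%.0f half-dB\n", n, 2.0 * 10.0 * log10(n));
	return 0;
}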
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 21354c1..bdd8aa0 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -263,7 +263,8 @@
 	EEP_PWR_TABLE_OFFSET,
 	EEP_DRIVE_STRENGTH,
 	EEP_INTERNAL_REGULATOR,
-	EEP_SWREG
+	EEP_SWREG,
+	EEP_PAPRD,
 };
 
 enum ar5416_rates {
@@ -679,6 +680,7 @@
 	u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
 };
 
+void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val);
 void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
 			       u32 shift, u32 val);
 int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
@@ -704,6 +706,7 @@
 				u16 numRates, bool isHt40Target);
 u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
 				bool is2GHz, int num_band_edges);
+void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah);
 int ath9k_hw_eeprom_init(struct ath_hw *ah);
 
 #define ar5416_get_ntxchains(_txchainmask)			\
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 41a77d1..e25a2ab 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -249,6 +249,7 @@
 	struct chan_centers centers;
 #define PD_GAIN_BOUNDARY_DEFAULT 58;
 
+	memset(minPwrT4, 0, sizeof(minPwrT4));
 	ath9k_hw_get_channel_centers(ah, chan, &centers);
 
 	for (numPiers = 0; numPiers < availPiers; numPiers++) {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index b471db5..39a4105 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -17,17 +17,19 @@
 #include "hw.h"
 #include "ar9002_phy.h"
 
-static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah)
+#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16))
+
+static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
 {
 	return (ah->eeprom.map9287.baseEepHeader.version >> 12) & 0xF;
 }
 
-static int ath9k_hw_AR9287_get_eeprom_rev(struct ath_hw *ah)
+static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
 {
 	return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
 }
 
-static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
+static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
 {
 	struct ar9287_eeprom *eep = &ah->eeprom.map9287;
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -40,20 +42,20 @@
 			  "Reading from EEPROM, not flash\n");
 	}
 
-	for (addr = 0; addr < sizeof(struct ar9287_eeprom) / sizeof(u16);
-			addr++)	{
-		if (!ath9k_hw_nvram_read(common,
-					 addr + eep_start_loc, eep_data)) {
+	for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+		if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
+					 eep_data)) {
 			ath_print(common, ATH_DBG_EEPROM,
 				  "Unable to read eeprom region\n");
 			return false;
 		}
 		eep_data++;
 	}
+
 	return true;
 }
 
-static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
+static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
 {
 	u32 sum = 0, el, integer;
 	u16 temp, word, magic, magic2, *eepdata;
@@ -63,8 +65,8 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 
 	if (!ath9k_hw_use_flash(ah)) {
-		if (!ath9k_hw_nvram_read(common,
-					 AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
+		if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
+					 &magic)) {
 			ath_print(common, ATH_DBG_FATAL,
 				  "Reading Magic # failed\n");
 			return false;
@@ -72,6 +74,7 @@
 
 		ath_print(common, ATH_DBG_EEPROM,
 			  "Read Magic = 0x%04X\n", magic);
+
 		if (magic != AR5416_EEPROM_MAGIC) {
 			magic2 = swab16(magic);
 
@@ -79,9 +82,7 @@
 				need_swap = true;
 				eepdata = (u16 *)(&ah->eeprom);
 
-				for (addr = 0;
-				     addr < sizeof(struct ar9287_eeprom) / sizeof(u16);
-				     addr++) {
+				for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
 					temp = swab16(*eepdata);
 					*eepdata = temp;
 					eepdata++;
@@ -89,13 +90,14 @@
 			} else {
 				ath_print(common, ATH_DBG_FATAL,
 					  "Invalid EEPROM Magic. "
-					  "endianness mismatch.\n");
+					  "Endianness mismatch.\n");
 				return -EINVAL;
 			}
 		}
 	}
-	ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ?
-		  "True" : "False");
+
+	ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+		  need_swap ? "True" : "False");
 
 	if (need_swap)
 		el = swab16(ah->eeprom.map9287.baseEepHeader.length);
@@ -108,6 +110,7 @@
 		el = el / sizeof(u16);
 
 	eepdata = (u16 *)(&ah->eeprom);
+
 	for (i = 0; i < el; i++)
 		sum ^= *eepdata++;
 
@@ -161,7 +164,7 @@
 	return 0;
 }
 
-static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
+static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
 				      enum eeprom_param param)
 {
 	struct ar9287_eeprom *eep = &ah->eeprom.map9287;
@@ -170,6 +173,7 @@
 	u16 ver_minor;
 
 	ver_minor = pBase->version & AR9287_EEP_VER_MINOR_MASK;
+
 	switch (param) {
 	case EEP_NFTHRESH_2:
 		return pModal->noiseFloorThreshCh[0];
@@ -214,29 +218,30 @@
 	}
 }
 
-
-static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
-				   struct ath9k_channel *chan,
-				   struct cal_data_per_freq_ar9287 *pRawDataSet,
-				   u8 *bChans,  u16 availPiers,
-				   u16 tPdGainOverlap, int16_t *pMinCalPower,
-				   u16 *pPdGainBoundaries, u8 *pPDADCValues,
-				   u16 numXpdGains)
+static void ath9k_hw_get_ar9287_gain_boundaries_pdadcs(struct ath_hw *ah,
+			       struct ath9k_channel *chan,
+			       struct cal_data_per_freq_ar9287 *pRawDataSet,
+			       u8 *bChans, u16 availPiers,
+			       u16 tPdGainOverlap,
+			       int16_t *pMinCalPower,
+			       u16 *pPdGainBoundaries,
+			       u8 *pPDADCValues,
+			       u16 numXpdGains)
 {
-#define TMP_VAL_VPD_TABLE \
+#define TMP_VAL_VPD_TABLE						\
 	((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep));
 
-	int       i, j, k;
-	int16_t   ss;
-	u16  idxL = 0, idxR = 0, numPiers;
-	u8   *pVpdL, *pVpdR, *pPwrL, *pPwrR;
-	u8   minPwrT4[AR9287_NUM_PD_GAINS];
-	u8   maxPwrT4[AR9287_NUM_PD_GAINS];
-	int16_t   vpdStep;
-	int16_t   tmpVal;
-	u16  sizeCurrVpdTable, maxIndex, tgtIndex;
-	bool    match;
-	int16_t  minDelta = 0;
+	int i, j, k;
+	int16_t ss;
+	u16 idxL = 0, idxR = 0, numPiers;
+	u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
+	u8 minPwrT4[AR9287_NUM_PD_GAINS];
+	u8 maxPwrT4[AR9287_NUM_PD_GAINS];
+	int16_t vpdStep;
+	int16_t tmpVal;
+	u16 sizeCurrVpdTable, maxIndex, tgtIndex;
+	bool match;
+	int16_t minDelta = 0;
 	struct chan_centers centers;
 	static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS]
 		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
@@ -245,6 +250,7 @@
 	static u8 vpdTableI[AR5416_EEP4K_NUM_PD_GAINS]
 		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
 
+	memset(minPwrT4, 0, sizeof(minPwrT4));
 	ath9k_hw_get_channel_centers(ah, chan, &centers);
 
 	for (numPiers = 0; numPiers < availPiers; numPiers++) {
@@ -253,18 +259,18 @@
 	}
 
 	match = ath9k_hw_get_lower_upper_index(
-				   (u8)FREQ2FBIN(centers.synth_center,
-				    IS_CHAN_2GHZ(chan)), bChans, numPiers,
-				    &idxL, &idxR);
+		(u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
+		bChans, numPiers, &idxL, &idxR);
 
 	if (match) {
 		for (i = 0; i < numXpdGains; i++) {
 			minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
 			maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
 			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
-					pRawDataSet[idxL].pwrPdg[i],
-					pRawDataSet[idxL].vpdPdg[i],
-					AR9287_PD_GAIN_ICEPTS, vpdTableI[i]);
+						pRawDataSet[idxL].pwrPdg[i],
+						pRawDataSet[idxL].vpdPdg[i],
+						AR9287_PD_GAIN_ICEPTS,
+						vpdTableI[i]);
 		}
 	} else {
 		for (i = 0; i < numXpdGains; i++) {
@@ -275,61 +281,59 @@
 
 			minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
 
-			maxPwrT4[i] =
-				min(pPwrL[AR9287_PD_GAIN_ICEPTS - 1],
-				    pPwrR[AR9287_PD_GAIN_ICEPTS - 1]);
+			maxPwrT4[i] = min(pPwrL[AR9287_PD_GAIN_ICEPTS - 1],
+					  pPwrR[AR9287_PD_GAIN_ICEPTS - 1]);
 
 			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
-					pPwrL, pVpdL,
-					AR9287_PD_GAIN_ICEPTS,
-					vpdTableL[i]);
+						pPwrL, pVpdL,
+						AR9287_PD_GAIN_ICEPTS,
+						vpdTableL[i]);
 			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
-					pPwrR, pVpdR,
-					AR9287_PD_GAIN_ICEPTS,
-					vpdTableR[i]);
+						pPwrR, pVpdR,
+						AR9287_PD_GAIN_ICEPTS,
+						vpdTableR[i]);
 
 			for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
-				vpdTableI[i][j] =
-					(u8)(ath9k_hw_interpolate((u16)
-					FREQ2FBIN(centers. synth_center,
-					IS_CHAN_2GHZ(chan)),
-					bChans[idxL], bChans[idxR],
-					vpdTableL[i][j], vpdTableR[i][j]));
+				vpdTableI[i][j] = (u8)(ath9k_hw_interpolate(
+				       (u16)FREQ2FBIN(centers.synth_center,
+						      IS_CHAN_2GHZ(chan)),
+				       bChans[idxL], bChans[idxR],
+				       vpdTableL[i][j], vpdTableR[i][j]));
 			}
 		}
 	}
-	*pMinCalPower = (int16_t)(minPwrT4[0] / 2);
 
+	*pMinCalPower = (int16_t)(minPwrT4[0] / 2);
 	k = 0;
+
 	for (i = 0; i < numXpdGains; i++) {
 		if (i == (numXpdGains - 1))
-			pPdGainBoundaries[i] = (u16)(maxPwrT4[i] / 2);
+			pPdGainBoundaries[i] =
+				(u16)(maxPwrT4[i] / 2);
 		else
-			pPdGainBoundaries[i] = (u16)((maxPwrT4[i] +
-						      minPwrT4[i+1]) / 4);
+			pPdGainBoundaries[i] =
+				(u16)((maxPwrT4[i] + minPwrT4[i+1]) / 4);
 
 		pPdGainBoundaries[i] = min((u16)AR5416_MAX_RATE_POWER,
-					    pPdGainBoundaries[i]);
+					   pPdGainBoundaries[i]);
 
 
-		if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) {
-			minDelta = pPdGainBoundaries[0] - 23;
-			pPdGainBoundaries[0] = 23;
-		} else
-			minDelta = 0;
+		minDelta = 0;
 
 		if (i == 0) {
 			if (AR_SREV_9280_10_OR_LATER(ah))
 				ss = (int16_t)(0 - (minPwrT4[i] / 2));
 			else
 				ss = 0;
-		} else
+		} else {
 			ss = (int16_t)((pPdGainBoundaries[i-1] -
-				       (minPwrT4[i] / 2)) -
+					(minPwrT4[i] / 2)) -
 				       tPdGainOverlap + 1 + minDelta);
+		}
 
 		vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
 		vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
+
 		while ((ss < 0) && (k < (AR9287_NUM_PDADC_VALUES - 1)))	{
 			tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
 			pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
@@ -348,12 +352,13 @@
 		vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
 				    vpdTableI[i][sizeCurrVpdTable - 2]);
 		vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
+
 		if (tgtIndex > maxIndex) {
 			while ((ss <= tgtIndex) &&
 				(k < (AR9287_NUM_PDADC_VALUES - 1))) {
 				tmpVal = (int16_t) TMP_VAL_VPD_TABLE;
-				pPDADCValues[k++] = (u8)((tmpVal > 255) ?
-							  255 : tmpVal);
+				pPDADCValues[k++] =
+					(u8)((tmpVal > 255) ? 255 : tmpVal);
 				ss++;
 			}
 		}
@@ -375,10 +380,9 @@
 static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah,
 			    struct ath9k_channel *chan,
 			    struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop,
-			    u8 *pCalChans,  u16 availPiers,
-			    int8_t *pPwr)
+			    u8 *pCalChans,  u16 availPiers, int8_t *pPwr)
 {
-	u16  idxL = 0, idxR = 0, numPiers;
+	u16 idxL = 0, idxR = 0, numPiers;
 	bool match;
 	struct chan_centers centers;
 
@@ -390,15 +394,14 @@
 	}
 
 	match = ath9k_hw_get_lower_upper_index(
-			(u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
-			pCalChans, numPiers,
-			&idxL, &idxR);
+		(u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
+		pCalChans, numPiers, &idxL, &idxR);
 
 	if (match) {
 		*pPwr = (int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0];
 	} else {
 		*pPwr = ((int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0] +
-			    (int8_t) pRawDatasetOpLoop[idxR].pwrPdg[0][0])/2;
+			 (int8_t) pRawDatasetOpLoop[idxR].pwrPdg[0][0])/2;
 	}
 
 }
@@ -409,16 +412,22 @@
 	u32 tmpVal;
 	u32 a;
 
+	/* Enable OLPC for chain 0 */
+
 	tmpVal = REG_READ(ah, 0xa270);
 	tmpVal = tmpVal & 0xFCFFFFFF;
 	tmpVal = tmpVal | (0x3 << 24);
 	REG_WRITE(ah, 0xa270, tmpVal);
 
+	/* Enable OLPC for chain 1 */
+
 	tmpVal = REG_READ(ah, 0xb270);
 	tmpVal = tmpVal & 0xFCFFFFFF;
 	tmpVal = tmpVal | (0x3 << 24);
 	REG_WRITE(ah, 0xb270, tmpVal);
 
+	/* Write the OLPC ref power for chain 0 */
+
 	if (chain == 0) {
 		tmpVal = REG_READ(ah, 0xa398);
 		tmpVal = tmpVal & 0xff00ffff;
@@ -427,6 +436,8 @@
 		REG_WRITE(ah, 0xa398, tmpVal);
 	}
 
+	/* Write the OLPC ref power for chain 1 */
+
 	if (chain == 1) {
 		tmpVal = REG_READ(ah, 0xb398);
 		tmpVal = tmpVal & 0xff00ffff;
@@ -436,28 +447,29 @@
 	}
 }
 
-static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
+static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
 						struct ath9k_channel *chan,
 						int16_t *pTxPowerIndexOffset)
 {
-	struct ath_common *common = ath9k_hw_common(ah);
 	struct cal_data_per_freq_ar9287 *pRawDataset;
 	struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop;
-	u8  *pCalBChans = NULL;
+	u8 *pCalBChans = NULL;
 	u16 pdGainOverlap_t2;
-	u8  pdadcValues[AR9287_NUM_PDADC_VALUES];
+	u8 pdadcValues[AR9287_NUM_PDADC_VALUES];
 	u16 gainBoundaries[AR9287_PD_GAINS_IN_MASK];
 	u16 numPiers = 0, i, j;
-	int16_t  tMinCalPower;
+	int16_t tMinCalPower;
 	u16 numXpdGain, xpdMask;
 	u16 xpdGainValues[AR9287_NUM_PD_GAINS] = {0, 0, 0, 0};
-	u32 reg32, regOffset, regChainOffset;
-	int16_t   modalIdx, diff = 0;
+	u32 reg32, regOffset, regChainOffset, regval;
+	int16_t modalIdx, diff = 0;
 	struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
+
 	modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
 	xpdMask = pEepData->modalHeader.xpdGain;
+
 	if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >=
-			AR9287_EEP_MINOR_VER_2)
+	    AR9287_EEP_MINOR_VER_2)
 		pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap;
 	else
 		pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
@@ -466,15 +478,16 @@
 	if (IS_CHAN_2GHZ(chan)) {
 		pCalBChans = pEepData->calFreqPier2G;
 		numPiers = AR9287_NUM_2G_CAL_PIERS;
-		if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+		if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
 			pRawDatasetOpenLoop =
-				(struct cal_data_op_loop_ar9287 *)
-				pEepData->calPierData2G[0];
+			(struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[0];
 			ah->initPDADC = pRawDatasetOpenLoop->vpdPdg[0][0];
 		}
 	}
 
 	numXpdGain = 0;
+
+	/* Calculate the value of xpdgains from the xpdGain Mask */
 	for (i = 1; i <= AR9287_PD_GAINS_IN_MASK; i++) {
 		if ((xpdMask >> (AR9287_PD_GAINS_IN_MASK - i)) & 1) {
 			if (numXpdGain >= AR9287_NUM_PD_GAINS)
@@ -496,99 +509,80 @@
 
 	for (i = 0; i < AR9287_MAX_CHAINS; i++)	{
 		regChainOffset = i * 0x1000;
+
 		if (pEepData->baseEepHeader.txMask & (1 << i)) {
-			pRawDatasetOpenLoop = (struct cal_data_op_loop_ar9287 *)
-					       pEepData->calPierData2G[i];
-			if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+			pRawDatasetOpenLoop =
+			(struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[i];
+
+			if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
 				int8_t txPower;
 				ar9287_eeprom_get_tx_gain_index(ah, chan,
-							  pRawDatasetOpenLoop,
-							  pCalBChans, numPiers,
-							  &txPower);
+							pRawDatasetOpenLoop,
+							pCalBChans, numPiers,
+							&txPower);
 				ar9287_eeprom_olpc_set_pdadcs(ah, txPower, i);
 			} else {
 				pRawDataset =
 					(struct cal_data_per_freq_ar9287 *)
 					pEepData->calPierData2G[i];
-				ath9k_hw_get_AR9287_gain_boundaries_pdadcs(
-						  ah, chan, pRawDataset,
-						  pCalBChans, numPiers,
-						  pdGainOverlap_t2,
-						  &tMinCalPower, gainBoundaries,
-						  pdadcValues, numXpdGain);
+
+				ath9k_hw_get_ar9287_gain_boundaries_pdadcs(ah, chan,
+							   pRawDataset,
+							   pCalBChans, numPiers,
+							   pdGainOverlap_t2,
+							   &tMinCalPower,
+							   gainBoundaries,
+							   pdadcValues,
+							   numXpdGain);
 			}
 
 			if (i == 0) {
-				if (!ath9k_hw_AR9287_get_eeprom(
-					    ah, EEP_OL_PWRCTRL)) {
-					REG_WRITE(ah, AR_PHY_TPCRG5 +
-					  regChainOffset,
-					  SM(pdGainOverlap_t2,
-					     AR_PHY_TPCRG5_PD_GAIN_OVERLAP) |
-					  SM(gainBoundaries[0],
-					     AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
-					  | SM(gainBoundaries[1],
-					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
-					  | SM(gainBoundaries[2],
-					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
-					  | SM(gainBoundaries[3],
-					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
+				if (!ath9k_hw_ar9287_get_eeprom(ah,
+							EEP_OL_PWRCTRL)) {
+
+					regval = SM(pdGainOverlap_t2,
+						    AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
+						| SM(gainBoundaries[0],
+						     AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
+						| SM(gainBoundaries[1],
+						     AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
+						| SM(gainBoundaries[2],
+						     AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
+						| SM(gainBoundaries[3],
+						     AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4);
+
+					REG_WRITE(ah,
+						  AR_PHY_TPCRG5 + regChainOffset,
+						  regval);
 				}
 			}
 
 			if ((int32_t)AR9287_PWR_TABLE_OFFSET_DB !=
-				     pEepData->baseEepHeader.pwrTableOffset) {
-				diff = (u16)
-				       (pEepData->baseEepHeader.pwrTableOffset
-					- (int32_t)AR9287_PWR_TABLE_OFFSET_DB);
+			    pEepData->baseEepHeader.pwrTableOffset) {
+				diff = (u16)(pEepData->baseEepHeader.pwrTableOffset -
+					     (int32_t)AR9287_PWR_TABLE_OFFSET_DB);
 				diff *= 2;
 
-				for (j = 0;
-				     j < ((u16)AR9287_NUM_PDADC_VALUES-diff);
-				     j++)
+				for (j = 0; j < ((u16)AR9287_NUM_PDADC_VALUES-diff); j++)
 					pdadcValues[j] = pdadcValues[j+diff];
 
 				for (j = (u16)(AR9287_NUM_PDADC_VALUES-diff);
 				     j < AR9287_NUM_PDADC_VALUES; j++)
 					pdadcValues[j] =
-					  pdadcValues[
-					  AR9287_NUM_PDADC_VALUES-diff];
+					  pdadcValues[AR9287_NUM_PDADC_VALUES-diff];
 			}
 
-			if (!ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
-				regOffset = AR_PHY_BASE + (672 << 2) +
-							   regChainOffset;
+			if (!ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+				regOffset = AR_PHY_BASE +
+					(672 << 2) + regChainOffset;
+
 				for (j = 0; j < 32; j++) {
-					reg32 = ((pdadcValues[4*j + 0]
-						  & 0xFF) << 0)  |
-						((pdadcValues[4*j + 1]
-						  & 0xFF) << 8)  |
-						((pdadcValues[4*j + 2]
-						  & 0xFF) << 16) |
-						((pdadcValues[4*j + 3]
-						  & 0xFF) << 24) ;
+					reg32 = ((pdadcValues[4*j + 0] & 0xFF) << 0)
+						| ((pdadcValues[4*j + 1] & 0xFF) << 8)
+						| ((pdadcValues[4*j + 2] & 0xFF) << 16)
+						| ((pdadcValues[4*j + 3] & 0xFF) << 24);
+
 					REG_WRITE(ah, regOffset, reg32);
-
-					ath_print(common, ATH_DBG_EEPROM,
-						  "PDADC (%d,%4x): %4.4x "
-						  "%8.8x\n",
-						  i, regChainOffset, regOffset,
-						  reg32);
-
-					ath_print(common, ATH_DBG_EEPROM,
-						  "PDADC: Chain %d | "
-						  "PDADC %3d Value %3d | "
-						  "PDADC %3d Value %3d | "
-						  "PDADC %3d Value %3d | "
-						  "PDADC %3d Value %3d |\n",
-						  i, 4 * j, pdadcValues[4 * j],
-						  4 * j + 1,
-						  pdadcValues[4 * j + 1],
-						  4 * j + 2,
-						  pdadcValues[4 * j + 2],
-						  4 * j + 3,
-						  pdadcValues[4 * j + 3]);
-
 					regOffset += 4;
 				}
 			}
@@ -598,30 +592,45 @@
 	*pTxPowerIndexOffset = 0;
 }
 
-static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
-		struct ath9k_channel *chan, int16_t *ratesArray, u16 cfgCtl,
-		u16 AntennaReduction, u16 twiceMaxRegulatoryPower,
-		u16 powerLimit)
+static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
+						     struct ath9k_channel *chan,
+						     int16_t *ratesArray,
+						     u16 cfgCtl,
+						     u16 AntennaReduction,
+						     u16 twiceMaxRegulatoryPower,
+						     u16 powerLimit)
 {
+#define CMP_CTL \
+	(((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
+	 pEepData->ctlIndex[i])
+
+#define CMP_NO_CTL \
+	(((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
+	 ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))
+
 #define REDUCE_SCALED_POWER_BY_TWO_CHAIN     6
 #define REDUCE_SCALED_POWER_BY_THREE_CHAIN   10
+
 	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
 	u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
 	static const u16 tpScaleReductionTable[5] =
 		{ 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
 	int i;
-	int16_t  twiceLargestAntenna;
+	int16_t twiceLargestAntenna;
 	struct cal_ctl_data_ar9287 *rep;
 	struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
 				    targetPowerCck = {0, {0, 0, 0, 0} };
 	struct cal_target_power_leg targetPowerOfdmExt = {0, {0, 0, 0, 0} },
 				    targetPowerCckExt = {0, {0, 0, 0, 0} };
-	struct cal_target_power_ht  targetPowerHt20,
+	struct cal_target_power_ht targetPowerHt20,
 				    targetPowerHt40 = {0, {0, 0, 0, 0} };
 	u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
-	u16 ctlModesFor11g[] =
-		{CTL_11B, CTL_11G, CTL_2GHT20,
-		 CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40};
+	u16 ctlModesFor11g[] = {CTL_11B,
+				CTL_11G,
+				CTL_2GHT20,
+				CTL_11B_EXT,
+				CTL_11G_EXT,
+				CTL_2GHT40};
 	u16 numCtlModes = 0, *pCtlMode = NULL, ctlMode, freq;
 	struct chan_centers centers;
 	int tx_chainmask;
@@ -631,19 +640,28 @@
 
 	ath9k_hw_get_channel_centers(ah, chan, &centers);
 
+	/* Compute TxPower reduction due to Antenna Gain */
 	twiceLargestAntenna = max(pEepData->modalHeader.antennaGainCh[0],
 				  pEepData->modalHeader.antennaGainCh[1]);
+	twiceLargestAntenna = (int16_t)min((AntennaReduction) -
+					   twiceLargestAntenna, 0);
 
-	twiceLargestAntenna =  (int16_t)min((AntennaReduction) -
-					    twiceLargestAntenna, 0);
-
+	/*
+	 * scaledPower is the minimum of the user input power level
+	 * and the regulatory allowed power level.
+	 */
 	maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
+
 	if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX)
 		maxRegAllowedPower -=
 			(tpScaleReductionTable[(regulatory->tp_scale)] * 2);
 
 	scaledPower = min(powerLimit, maxRegAllowedPower);
 
+	/*
+	 * Reduce scaled Power by number of chains active
+	 * to get the per chain tx power level.
+	 */
 	switch (ar5416_get_ntxchains(tx_chainmask)) {
 	case 1:
 		break;
@@ -656,9 +674,14 @@
 	}
 	scaledPower = max((u16)0, scaledPower);
 
+	/*
+	 * Get TX power from EEPROM.
+	 */
 	if (IS_CHAN_2GHZ(chan))	{
+		/* CTL_11B, CTL_11G, CTL_2GHT20 */
 		numCtlModes =
 			ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40;
+
 		pCtlMode = ctlModesFor11g;
 
 		ath9k_hw_get_legacy_target_powers(ah, chan,
@@ -675,6 +698,7 @@
 					   &targetPowerHt20, 8, false);
 
 		if (IS_CHAN_HT40(chan))	{
+			/* All 2G CTLs */
 			numCtlModes = ARRAY_SIZE(ctlModesFor11g);
 			ath9k_hw_get_target_powers(ah, chan,
 						   pEepData->calTargetPower2GHT40,
@@ -692,8 +716,9 @@
 	}
 
 	for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
-		bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
-				     (pCtlMode[ctlMode] == CTL_2GHT40);
+		bool isHt40CtlMode =
+			(pCtlMode[ctlMode] == CTL_2GHT40) ? true : false;
+
 		if (isHt40CtlMode)
 			freq = centers.synth_center;
 		else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
@@ -701,31 +726,28 @@
 		else
 			freq = centers.ctl_center;
 
-		if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
-		    ah->eep_ops->get_eeprom_rev(ah) <= 2)
-			twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
-
+		/* Walk through the CTL indices stored in EEPROM */
 		for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
-			if ((((cfgCtl & ~CTL_MODE_M) |
-			      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
-			     pEepData->ctlIndex[i]) ||
-			    (((cfgCtl & ~CTL_MODE_M) |
-			      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
-			     ((pEepData->ctlIndex[i] &
-			       CTL_MODE_M) | SD_NO_CTL))) {
+			struct cal_ctl_edges *pRdEdgesPower;
 
+			/*
+			 * Compare test group from regulatory channel list
+			 * with test mode from pCtlMode list
+			 */
+			if (CMP_CTL || CMP_NO_CTL) {
 				rep = &(pEepData->ctlData[i]);
-				twiceMinEdgePower = ath9k_hw_get_max_edge_power(
-				    freq,
-				    rep->ctlEdges[ar5416_get_ntxchains(
-				    tx_chainmask) - 1],
-				    IS_CHAN_2GHZ(chan), AR5416_NUM_BAND_EDGES);
+				pRdEdgesPower =
+				rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1];
 
-				if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
-					twiceMaxEdgePower = min(
-							    twiceMaxEdgePower,
-							    twiceMinEdgePower);
-				else {
+				twiceMinEdgePower = ath9k_hw_get_max_edge_power(freq,
+								pRdEdgesPower,
+								IS_CHAN_2GHZ(chan),
+								AR5416_NUM_BAND_EDGES);
+
+				if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
+					twiceMaxEdgePower = min(twiceMaxEdgePower,
+								twiceMinEdgePower);
+				} else {
 					twiceMaxEdgePower = twiceMinEdgePower;
 					break;
 				}
@@ -734,55 +756,48 @@
 
 		minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
 
+		/* Apply ctl mode to correct target power set */
 		switch (pCtlMode[ctlMode]) {
 		case CTL_11B:
-			for (i = 0;
-			     i < ARRAY_SIZE(targetPowerCck.tPow2x);
-			     i++) {
-				targetPowerCck.tPow2x[i] = (u8)min(
-					(u16)targetPowerCck.tPow2x[i],
-					minCtlPower);
+			for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) {
+				targetPowerCck.tPow2x[i] =
+					(u8)min((u16)targetPowerCck.tPow2x[i],
+						minCtlPower);
 			}
 			break;
 		case CTL_11A:
 		case CTL_11G:
-			for (i = 0;
-			     i < ARRAY_SIZE(targetPowerOfdm.tPow2x);
-			     i++) {
-				targetPowerOfdm.tPow2x[i] = (u8)min(
-					(u16)targetPowerOfdm.tPow2x[i],
-					minCtlPower);
+			for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) {
+				targetPowerOfdm.tPow2x[i] =
+					(u8)min((u16)targetPowerOfdm.tPow2x[i],
+						minCtlPower);
 			}
 			break;
 		case CTL_5GHT20:
 		case CTL_2GHT20:
-			for (i = 0;
-			     i < ARRAY_SIZE(targetPowerHt20.tPow2x);
-			     i++) {
-				targetPowerHt20.tPow2x[i] = (u8)min(
-					(u16)targetPowerHt20.tPow2x[i],
-					minCtlPower);
+			for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) {
+				targetPowerHt20.tPow2x[i] =
+					(u8)min((u16)targetPowerHt20.tPow2x[i],
+						minCtlPower);
 			}
 			break;
 		case CTL_11B_EXT:
-			targetPowerCckExt.tPow2x[0] = (u8)min(
-				    (u16)targetPowerCckExt.tPow2x[0],
-				    minCtlPower);
+			targetPowerCckExt.tPow2x[0] =
+				(u8)min((u16)targetPowerCckExt.tPow2x[0],
+					minCtlPower);
 			break;
 		case CTL_11A_EXT:
 		case CTL_11G_EXT:
-			targetPowerOfdmExt.tPow2x[0] = (u8)min(
-				    (u16)targetPowerOfdmExt.tPow2x[0],
-				    minCtlPower);
+			targetPowerOfdmExt.tPow2x[0] =
+				(u8)min((u16)targetPowerOfdmExt.tPow2x[0],
+					minCtlPower);
 			break;
 		case CTL_5GHT40:
 		case CTL_2GHT40:
-			for (i = 0;
-			     i < ARRAY_SIZE(targetPowerHt40.tPow2x);
-			     i++) {
-				targetPowerHt40.tPow2x[i] = (u8)min(
-					(u16)targetPowerHt40.tPow2x[i],
-					minCtlPower);
+			for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
+				targetPowerHt40.tPow2x[i] =
+					(u8)min((u16)targetPowerHt40.tPow2x[i],
+						minCtlPower);
 			}
 			break;
 		default:
@@ -790,12 +805,13 @@
 		}
 	}
 
+	/* Now set the rates array */
+
 	ratesArray[rate6mb] =
 	ratesArray[rate9mb] =
 	ratesArray[rate12mb] =
 	ratesArray[rate18mb] =
-	ratesArray[rate24mb] =
-	targetPowerOfdm.tPow2x[0];
+	ratesArray[rate24mb] = targetPowerOfdm.tPow2x[0];
 
 	ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
 	ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
@@ -807,12 +823,12 @@
 
 	if (IS_CHAN_2GHZ(chan))	{
 		ratesArray[rate1l] = targetPowerCck.tPow2x[0];
-		ratesArray[rate2s] = ratesArray[rate2l] =
-			targetPowerCck.tPow2x[1];
-		ratesArray[rate5_5s] = ratesArray[rate5_5l] =
-			targetPowerCck.tPow2x[2];
-		ratesArray[rate11s] = ratesArray[rate11l] =
-			targetPowerCck.tPow2x[3];
+		ratesArray[rate2s] =
+		ratesArray[rate2l] = targetPowerCck.tPow2x[1];
+		ratesArray[rate5_5s] =
+		ratesArray[rate5_5l] = targetPowerCck.tPow2x[2];
+		ratesArray[rate11s] =
+		ratesArray[rate11l] = targetPowerCck.tPow2x[3];
 	}
 	if (IS_CHAN_HT40(chan))	{
 		for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++)
@@ -821,28 +837,28 @@
 		ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
 		ratesArray[rateDupCck]  = targetPowerHt40.tPow2x[0];
 		ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
+
 		if (IS_CHAN_2GHZ(chan))
 			ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0];
 	}
 
+#undef CMP_CTL
+#undef CMP_NO_CTL
 #undef REDUCE_SCALED_POWER_BY_TWO_CHAIN
 #undef REDUCE_SCALED_POWER_BY_THREE_CHAIN
 }
 
-static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
+static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
 					struct ath9k_channel *chan, u16 cfgCtl,
 					u8 twiceAntennaReduction,
 					u8 twiceMaxRegulatoryPower,
 					u8 powerLimit)
 {
-#define INCREASE_MAXPOW_BY_TWO_CHAIN     6
-#define INCREASE_MAXPOW_BY_THREE_CHAIN   10
-	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
 	struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
 	struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader;
 	int16_t ratesArray[Ar5416RateSize];
-	int16_t  txPowerIndexOffset = 0;
+	int16_t txPowerIndexOffset = 0;
 	u8 ht40PowerIncForPdadc = 2;
 	int i;
 
@@ -852,13 +868,13 @@
 	    AR9287_EEP_MINOR_VER_2)
 		ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
 
-	ath9k_hw_set_AR9287_power_per_rate_table(ah, chan,
+	ath9k_hw_set_ar9287_power_per_rate_table(ah, chan,
 						 &ratesArray[0], cfgCtl,
 						 twiceAntennaReduction,
 						 twiceMaxRegulatoryPower,
 						 powerLimit);
 
-	ath9k_hw_set_AR9287_power_cal_table(ah, chan, &txPowerIndexOffset);
+	ath9k_hw_set_ar9287_power_cal_table(ah, chan, &txPowerIndexOffset);
 
 	for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
 		ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
@@ -871,6 +887,7 @@
 			ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
 	}
 
+	/* OFDM power per rate */
 	REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
 		  ATH9K_POW_SM(ratesArray[rate18mb], 24)
 		  | ATH9K_POW_SM(ratesArray[rate12mb], 16)
@@ -883,6 +900,7 @@
 		  | ATH9K_POW_SM(ratesArray[rate36mb], 8)
 		  | ATH9K_POW_SM(ratesArray[rate24mb], 0));
 
+	/* CCK power per rate */
 	if (IS_CHAN_2GHZ(chan))	{
 		REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
 			  ATH9K_POW_SM(ratesArray[rate2s], 24)
@@ -896,6 +914,7 @@
 			  | ATH9K_POW_SM(ratesArray[rate5_5l], 0));
 	}
 
+	/* HT20 power per rate */
 	REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
 		  ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
 		  | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
@@ -908,8 +927,9 @@
 		  | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
 		  | ATH9K_POW_SM(ratesArray[rateHt20_4], 0));
 
+	/* HT40 power per rate */
 	if (IS_CHAN_HT40(chan))	{
-		if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+		if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
 			REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
 				  ATH9K_POW_SM(ratesArray[rateHt40_3], 24)
 				  | ATH9K_POW_SM(ratesArray[rateHt40_2], 16)
@@ -943,6 +963,7 @@
 						 ht40PowerIncForPdadc, 0));
 		}
 
+		/* Dup/Ext power per rate */
 		REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
 			  ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
 			  | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
@@ -960,37 +981,20 @@
 			ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2;
 	else
 		regulatory->max_power_level = ratesArray[i];
-
-	switch (ar5416_get_ntxchains(ah->txchainmask)) {
-	case 1:
-		break;
-	case 2:
-		regulatory->max_power_level +=
-			INCREASE_MAXPOW_BY_TWO_CHAIN;
-		break;
-	case 3:
-		regulatory->max_power_level +=
-			INCREASE_MAXPOW_BY_THREE_CHAIN;
-		break;
-	default:
-		ath_print(common, ATH_DBG_EEPROM,
-			  "Invalid chainmask configuration\n");
-		break;
-	}
 }
 
-static void ath9k_hw_AR9287_set_addac(struct ath_hw *ah,
+static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah,
 				      struct ath9k_channel *chan)
 {
 }
 
-static void ath9k_hw_AR9287_set_board_values(struct ath_hw *ah,
+static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
 					     struct ath9k_channel *chan)
 {
 	struct ar9287_eeprom *eep = &ah->eeprom.map9287;
 	struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
 	u16 antWrites[AR9287_ANT_16S];
-	u32 regChainOffset;
+	u32 regChainOffset, regval;
 	u8 txRxAttenLocal;
 	int i, j, offset_num;
 
@@ -1077,42 +1081,37 @@
 	REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
 		      AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62);
 
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, AR9287_AN_RF2G3_DB1,
-				  AR9287_AN_RF2G3_DB1_S, pModal->db1);
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, AR9287_AN_RF2G3_DB2,
-				  AR9287_AN_RF2G3_DB2_S, pModal->db2);
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0,
-				  AR9287_AN_RF2G3_OB_CCK,
-				  AR9287_AN_RF2G3_OB_CCK_S, pModal->ob_cck);
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0,
-				  AR9287_AN_RF2G3_OB_PSK,
-				  AR9287_AN_RF2G3_OB_PSK_S, pModal->ob_psk);
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0,
-				  AR9287_AN_RF2G3_OB_QAM,
-				  AR9287_AN_RF2G3_OB_QAM_S, pModal->ob_qam);
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0,
-				  AR9287_AN_RF2G3_OB_PAL_OFF,
-				  AR9287_AN_RF2G3_OB_PAL_OFF_S,
-				  pModal->ob_pal_off);
+	regval = REG_READ(ah, AR9287_AN_RF2G3_CH0);
+	regval &= ~(AR9287_AN_RF2G3_DB1 |
+		    AR9287_AN_RF2G3_DB2 |
+		    AR9287_AN_RF2G3_OB_CCK |
+		    AR9287_AN_RF2G3_OB_PSK |
+		    AR9287_AN_RF2G3_OB_QAM |
+		    AR9287_AN_RF2G3_OB_PAL_OFF);
+	regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) |
+		   SM(pModal->db2, AR9287_AN_RF2G3_DB2) |
+		   SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) |
+		   SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) |
+		   SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) |
+		   SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF));
 
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1,
-				  AR9287_AN_RF2G3_DB1, AR9287_AN_RF2G3_DB1_S,
-				  pModal->db1);
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, AR9287_AN_RF2G3_DB2,
-				  AR9287_AN_RF2G3_DB2_S, pModal->db2);
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1,
-				  AR9287_AN_RF2G3_OB_CCK,
-				  AR9287_AN_RF2G3_OB_CCK_S, pModal->ob_cck);
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1,
-				  AR9287_AN_RF2G3_OB_PSK,
-				  AR9287_AN_RF2G3_OB_PSK_S, pModal->ob_psk);
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1,
-				  AR9287_AN_RF2G3_OB_QAM,
-				  AR9287_AN_RF2G3_OB_QAM_S, pModal->ob_qam);
-	ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1,
-				  AR9287_AN_RF2G3_OB_PAL_OFF,
-				  AR9287_AN_RF2G3_OB_PAL_OFF_S,
-				  pModal->ob_pal_off);
+	ath9k_hw_analog_shift_regwrite(ah, AR9287_AN_RF2G3_CH0, regval);
+
+	regval = REG_READ(ah, AR9287_AN_RF2G3_CH1);
+	regval &= ~(AR9287_AN_RF2G3_DB1 |
+		    AR9287_AN_RF2G3_DB2 |
+		    AR9287_AN_RF2G3_OB_CCK |
+		    AR9287_AN_RF2G3_OB_PSK |
+		    AR9287_AN_RF2G3_OB_QAM |
+		    AR9287_AN_RF2G3_OB_PAL_OFF);
+	regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) |
+		   SM(pModal->db2, AR9287_AN_RF2G3_DB2) |
+		   SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) |
+		   SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) |
+		   SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) |
+		   SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF));
+
+	ath9k_hw_analog_shift_regwrite(ah, AR9287_AN_RF2G3_CH1, regval);
 
 	REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
 		      AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart);
@@ -1125,13 +1124,13 @@
 				  pModal->xpaBiasLvl);
 }
 
-static u8 ath9k_hw_AR9287_get_num_ant_config(struct ath_hw *ah,
+static u8 ath9k_hw_ar9287_get_num_ant_config(struct ath_hw *ah,
 					     enum ieee80211_band freq_band)
 {
 	return 1;
 }
 
-static u16 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
+static u16 ath9k_hw_ar9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
 						  struct ath9k_channel *chan)
 {
 	struct ar9287_eeprom *eep = &ah->eeprom.map9287;
@@ -1140,11 +1139,12 @@
 	return pModal->antCtrlCommon & 0xFFFF;
 }
 
-static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
+static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
 					    u16 i, bool is2GHz)
 {
 #define EEP_MAP9287_SPURCHAN \
 	(ah->eeprom.map9287.modalHeader.spurChans[i].spurChan)
+
 	struct ath_common *common = ath9k_hw_common(ah);
 	u16 spur_val = AR_NO_SPUR;
 
@@ -1171,15 +1171,15 @@
 }
 
 const struct eeprom_ops eep_ar9287_ops = {
-	.check_eeprom		= ath9k_hw_AR9287_check_eeprom,
-	.get_eeprom		= ath9k_hw_AR9287_get_eeprom,
-	.fill_eeprom		= ath9k_hw_AR9287_fill_eeprom,
-	.get_eeprom_ver		= ath9k_hw_AR9287_get_eeprom_ver,
-	.get_eeprom_rev		= ath9k_hw_AR9287_get_eeprom_rev,
-	.get_num_ant_config	= ath9k_hw_AR9287_get_num_ant_config,
-	.get_eeprom_antenna_cfg	= ath9k_hw_AR9287_get_eeprom_antenna_cfg,
-	.set_board_values	= ath9k_hw_AR9287_set_board_values,
-	.set_addac		= ath9k_hw_AR9287_set_addac,
-	.set_txpower		= ath9k_hw_AR9287_set_txpower,
-	.get_spur_channel	= ath9k_hw_AR9287_get_spur_channel
+	.check_eeprom		= ath9k_hw_ar9287_check_eeprom,
+	.get_eeprom		= ath9k_hw_ar9287_get_eeprom,
+	.fill_eeprom		= ath9k_hw_ar9287_fill_eeprom,
+	.get_eeprom_ver		= ath9k_hw_ar9287_get_eeprom_ver,
+	.get_eeprom_rev		= ath9k_hw_ar9287_get_eeprom_rev,
+	.get_num_ant_config	= ath9k_hw_ar9287_get_num_ant_config,
+	.get_eeprom_antenna_cfg	= ath9k_hw_ar9287_get_eeprom_antenna_cfg,
+	.set_board_values	= ath9k_hw_ar9287_set_board_values,
+	.set_addac		= ath9k_hw_ar9287_set_addac,
+	.set_txpower		= ath9k_hw_ar9287_set_txpower,
+	.get_spur_channel	= ath9k_hw_ar9287_get_spur_channel
 };
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 7e1ed78..77b1433 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -617,6 +617,7 @@
 	int16_t minDelta = 0;
 	struct chan_centers centers;
 
+	memset(&minPwrT4, 0, sizeof(minPwrT4));
 	ath9k_hw_get_channel_centers(ah, chan, &centers);
 
 	for (numPiers = 0; numPiers < availPiers; numPiers++) {
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 0ee75e7..3a8ee99 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -76,7 +76,8 @@
 	case LED_FULL:
 		if (led->led_type == ATH_LED_ASSOC) {
 			sc->sc_flags |= SC_OP_LED_ASSOCIATED;
-			ieee80211_queue_delayed_work(sc->hw,
+			if (led_blink)
+				ieee80211_queue_delayed_work(sc->hw,
 						     &sc->ath_led_blink_work, 0);
 		} else if (led->led_type == ATH_LED_RADIO) {
 			ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
@@ -143,7 +144,8 @@
 	/* LED off, active low */
 	ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
 
-	INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
+	if (led_blink)
+		INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
 
 	trigger = ieee80211_get_radio_led_name(sc->hw);
 	snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
@@ -180,7 +182,8 @@
 	return;
 
 fail:
-	cancel_delayed_work_sync(&sc->ath_led_blink_work);
+	if (led_blink)
+		cancel_delayed_work_sync(&sc->ath_led_blink_work);
 	ath_deinit_leds(sc);
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 77b3591..ad9134b 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -16,12 +16,27 @@
 
 #include "htc.h"
 
-#define ATH9K_FW_USB_DEV(devid, fw)					\
-	{ USB_DEVICE(0x0cf3, devid), .driver_info = (unsigned long) fw }
+/* identify firmware images */
+#define FIRMWARE_AR7010		"ar7010.fw"
+#define FIRMWARE_AR7010_1_1	"ar7010_1_1.fw"
+#define FIRMWARE_AR9271		"ar9271.fw"
+
+MODULE_FIRMWARE(FIRMWARE_AR7010);
+MODULE_FIRMWARE(FIRMWARE_AR7010_1_1);
+MODULE_FIRMWARE(FIRMWARE_AR9271);
 
 static struct usb_device_id ath9k_hif_usb_ids[] = {
-	ATH9K_FW_USB_DEV(0x9271, "ar9271.fw"),
-	ATH9K_FW_USB_DEV(0x1006, "ar9271.fw"),
+	{ USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
+	{ USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
+	{ USB_DEVICE(0x0cf3, 0x7010) }, /* Atheros */
+	{ USB_DEVICE(0x0cf3, 0x7015) }, /* Atheros */
+	{ USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
+	{ USB_DEVICE(0x0846, 0x9018) }, /* Netgear WNDA3200 */
+	{ USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
+	{ USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
+	{ USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
+	{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
+	{ USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
 	{ },
 };
 
@@ -756,6 +771,7 @@
 	size_t len = hif_dev->firmware->size;
 	u32 addr = AR9271_FIRMWARE;
 	u8 *buf = kzalloc(4096, GFP_KERNEL);
+	u32 firm_offset;
 
 	if (!buf)
 		return -ENOMEM;
@@ -779,32 +795,37 @@
 	}
 	kfree(buf);
 
+	if (hif_dev->device_id == 0x7010)
+		firm_offset = AR7010_FIRMWARE_TEXT;
+	else
+		firm_offset = AR9271_FIRMWARE_TEXT;
+
 	/*
 	 * Issue FW download complete command to firmware.
 	 */
 	err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
 			      FIRMWARE_DOWNLOAD_COMP,
 			      0x40 | USB_DIR_OUT,
-			      AR9271_FIRMWARE_TEXT >> 8, 0, NULL, 0, HZ);
+			      firm_offset >> 8, 0, NULL, 0, HZ);
 	if (err)
 		return -EIO;
 
 	dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
-		 "ar9271.fw", (unsigned long) hif_dev->firmware->size);
+		 hif_dev->fw_name, (unsigned long) hif_dev->firmware->size);
 
 	return 0;
 }
 
-static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev,
-				  const char *fw_name)
+static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
 {
 	int ret;
 
 	/* Request firmware */
-	ret = request_firmware(&hif_dev->firmware, fw_name, &hif_dev->udev->dev);
+	ret = request_firmware(&hif_dev->firmware, hif_dev->fw_name,
+			       &hif_dev->udev->dev);
 	if (ret) {
 		dev_err(&hif_dev->udev->dev,
-			"ath9k_htc: Firmware - %s not found\n", fw_name);
+			"ath9k_htc: Firmware - %s not found\n", hif_dev->fw_name);
 		goto err_fw_req;
 	}
 
@@ -820,7 +841,8 @@
 	ret = ath9k_hif_usb_download_fw(hif_dev);
 	if (ret) {
 		dev_err(&hif_dev->udev->dev,
-			"ath9k_htc: Firmware - %s download failed\n", fw_name);
+			"ath9k_htc: Firmware - %s download failed\n",
+			hif_dev->fw_name);
 		goto err_fw_download;
 	}
 
@@ -847,7 +869,6 @@
 {
 	struct usb_device *udev = interface_to_usbdev(interface);
 	struct hif_device_usb *hif_dev;
-	const char *fw_name = (const char *) id->driver_info;
 	int ret = 0;
 
 	hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
@@ -872,7 +893,27 @@
 		goto err_htc_hw_alloc;
 	}
 
-	ret = ath9k_hif_usb_dev_init(hif_dev, fw_name);
+	/* Find out which firmware to load */
+
+	switch (hif_dev->device_id) {
+	case 0x7010:
+	case 0x9018:
+		if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
+			hif_dev->fw_name = FIRMWARE_AR7010_1_1;
+		else
+			hif_dev->fw_name = FIRMWARE_AR7010;
+		break;
+	default:
+		hif_dev->fw_name = FIRMWARE_AR9271;
+		break;
+	}
+
+	if (!hif_dev->fw_name) {
+		dev_err(&udev->dev, "Can't determine firmware!\n");
+		goto err_htc_hw_alloc;
+	}
+
+	ret = ath9k_hif_usb_dev_init(hif_dev);
 	if (ret) {
 		ret = -EINVAL;
 		goto err_hif_init_usb;
@@ -907,12 +948,10 @@
 	void *buf;
 	int ret;
 
-	buf = kmalloc(4, GFP_KERNEL);
+	buf = kmemdup(&reboot_cmd, 4, GFP_KERNEL);
 	if (!buf)
 		return;
 
-	memcpy(buf, &reboot_cmd, 4);
-
 	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
 			   buf, 4, NULL, HZ);
 	if (ret)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 0aca49b6..2daf97b 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -19,6 +19,7 @@
 
 #define AR9271_FIRMWARE       0x501000
 #define AR9271_FIRMWARE_TEXT  0x903000
+#define AR7010_FIRMWARE_TEXT  0x906000
 
 #define FIRMWARE_DOWNLOAD       0x30
 #define FIRMWARE_DOWNLOAD_COMP  0x31
@@ -90,6 +91,7 @@
 	struct usb_anchor regout_submitted;
 	struct usb_anchor rx_submitted;
 	struct sk_buff *remain_skb;
+	const char *fw_name;
 	int rx_remain_len;
 	int rx_pkt_len;
 	int rx_transfer_len;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index c251603..3756400 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -223,15 +223,6 @@
 	enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
 };
 
-struct ath9k_htc_aggr_work {
-	u16 tid;
-	u8 sta_addr[ETH_ALEN];
-	struct ieee80211_hw *hw;
-	struct ieee80211_vif *vif;
-	enum ieee80211_ampdu_mlme_action action;
-	struct mutex mutex;
-};
-
 #define ATH9K_HTC_RXBUF 256
 #define HTC_RX_FRAME_HEADER_SIZE 40
 
@@ -257,12 +248,15 @@
 #define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
 #define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
 
+#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
+
 struct ath_tx_stats {
 	u32 buf_queued;
 	u32 buf_completed;
 	u32 skb_queued;
 	u32 skb_completed;
 	u32 skb_dropped;
+	u32 queue_stats[WME_NUM_AC];
 };
 
 struct ath_rx_stats {
@@ -286,11 +280,14 @@
 #define TX_STAT_INC(c) do { } while (0)
 #define RX_STAT_INC(c) do { } while (0)
 
+#define TX_QSTAT_INC(c) do { } while (0)
+
 #endif /* CONFIG_ATH9K_HTC_DEBUGFS */
 
 #define ATH_LED_PIN_DEF             1
 #define ATH_LED_PIN_9287            8
 #define ATH_LED_PIN_9271            15
+#define ATH_LED_PIN_7010            12
 #define ATH_LED_ON_DURATION_IDLE    350	/* in msecs */
 #define ATH_LED_OFF_DURATION_IDLE   250	/* in msecs */
 
@@ -326,11 +323,10 @@
 #define OP_LED_ON         BIT(4)
 #define OP_PREAMBLE_SHORT BIT(5)
 #define OP_PROTECT_ENABLE BIT(6)
-#define OP_TXAGGR         BIT(7)
-#define OP_ASSOCIATED     BIT(8)
-#define OP_ENABLE_BEACON  BIT(9)
-#define OP_LED_DEINIT     BIT(10)
-#define OP_UNPLUGGED      BIT(11)
+#define OP_ASSOCIATED     BIT(7)
+#define OP_ENABLE_BEACON  BIT(8)
+#define OP_LED_DEINIT     BIT(9)
+#define OP_UNPLUGGED      BIT(10)
 
 struct ath9k_htc_priv {
 	struct device *dev;
@@ -371,8 +367,6 @@
 	struct ath9k_htc_rx rx;
 	struct tasklet_struct tx_tasklet;
 	struct sk_buff_head tx_queue;
-	struct ath9k_htc_aggr_work aggr_work;
-	struct delayed_work ath9k_aggr_work;
 	struct delayed_work ath9k_ani_work;
 	struct work_struct ps_work;
 
@@ -390,13 +384,14 @@
 	int led_off_duration;
 	int led_on_cnt;
 	int led_off_cnt;
-	int hwq_map[ATH9K_WME_AC_VO+1];
+
+	int beaconq;
+	int cabq;
+	int hwq_map[WME_NUM_AC];
 
 #ifdef CONFIG_ATH9K_HTC_DEBUGFS
 	struct ath9k_debug debug;
 #endif
-	struct ath9k_htc_target_rate tgt_rate;
-
 	struct mutex mutex;
 };
 
@@ -405,6 +400,7 @@
 	common->bus_ops->read_cachesize(common, csz);
 }
 
+void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
 void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
 			     struct ieee80211_vif *vif);
 void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
@@ -424,8 +420,8 @@
 void ath9k_tx_tasklet(unsigned long data);
 int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb);
 void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
-bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
-			 enum ath9k_tx_queue_subtype qtype);
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
+int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv);
 int get_hw_qnum(u16 queue, int *hwq_map);
 int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
 		       struct ath9k_tx_queue_info *qinfo);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index c10c7d0..bd1506e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -222,6 +222,29 @@
 	spin_unlock_bh(&priv->beacon_lock);
 }
 
+/* Currently, only for IBSS */
+void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
+{
+	struct ath_hw *ah = priv->ah;
+	struct ath9k_tx_queue_info qi, qi_be;
+	int qnum = priv->hwq_map[WME_AC_BE];
+
+	memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
+	memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
+
+	ath9k_hw_get_txq_props(ah, qnum, &qi_be);
+
+	qi.tqi_aifs = qi_be.tqi_aifs;
+	qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
+	qi.tqi_cwmax = qi_be.tqi_cwmax;
+
+	if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) {
+		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+			  "Unable to update beacon queue %u!\n", qnum);
+	} else {
+		ath9k_hw_resettxqueue(ah, priv->beaconq);
+	}
+}
 
 void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
 			     struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index dc01507..148b433 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -34,6 +34,13 @@
 	.max_power = 20, \
 }
 
+#define CHAN5G(_freq, _idx) { \
+	.band = IEEE80211_BAND_5GHZ, \
+	.center_freq = (_freq), \
+	.hw_value = (_idx), \
+	.max_power = 20, \
+}
+
 static struct ieee80211_channel ath9k_2ghz_channels[] = {
 	CHAN2G(2412, 0), /* Channel 1 */
 	CHAN2G(2417, 1), /* Channel 2 */
@@ -51,6 +58,37 @@
 	CHAN2G(2484, 13), /* Channel 14 */
 };
 
+static struct ieee80211_channel ath9k_5ghz_channels[] = {
+	/* _We_ call this UNII 1 */
+	CHAN5G(5180, 14), /* Channel 36 */
+	CHAN5G(5200, 15), /* Channel 40 */
+	CHAN5G(5220, 16), /* Channel 44 */
+	CHAN5G(5240, 17), /* Channel 48 */
+	/* _We_ call this UNII 2 */
+	CHAN5G(5260, 18), /* Channel 52 */
+	CHAN5G(5280, 19), /* Channel 56 */
+	CHAN5G(5300, 20), /* Channel 60 */
+	CHAN5G(5320, 21), /* Channel 64 */
+	/* _We_ call this "Middle band" */
+	CHAN5G(5500, 22), /* Channel 100 */
+	CHAN5G(5520, 23), /* Channel 104 */
+	CHAN5G(5540, 24), /* Channel 108 */
+	CHAN5G(5560, 25), /* Channel 112 */
+	CHAN5G(5580, 26), /* Channel 116 */
+	CHAN5G(5600, 27), /* Channel 120 */
+	CHAN5G(5620, 28), /* Channel 124 */
+	CHAN5G(5640, 29), /* Channel 128 */
+	CHAN5G(5660, 30), /* Channel 132 */
+	CHAN5G(5680, 31), /* Channel 136 */
+	CHAN5G(5700, 32), /* Channel 140 */
+	/* _We_ call this UNII 3 */
+	CHAN5G(5745, 33), /* Channel 149 */
+	CHAN5G(5765, 34), /* Channel 153 */
+	CHAN5G(5785, 35), /* Channel 157 */
+	CHAN5G(5805, 36), /* Channel 161 */
+	CHAN5G(5825, 37), /* Channel 165 */
+};
+
 /* Atheros hardware rate code addition for short preamble */
 #define SHPCHECK(__hw_rate, __flags) \
 	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
@@ -141,7 +179,7 @@
 	return htc_connect_service(priv->htc, &req, ep_id);
 }
 
-static int ath9k_init_htc_services(struct ath9k_htc_priv *priv)
+static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
 {
 	int ret;
 
@@ -199,10 +237,28 @@
 	if (ret)
 		goto err;
 
+	/*
+	 * Setup required credits before initializing HTC.
+	 * This is a bit hacky, but since queuing is done in
+	 * the HIF layer, it shouldn't matter much.
+	 */
+
+	switch (devid) {
+	case 0x7010:
+	case 0x9018:
+		priv->htc->credits = 45;
+		break;
+	default:
+		priv->htc->credits = 33;
+	}
+
 	ret = htc_init(priv->htc);
 	if (ret)
 		goto err;
 
+	dev_info(priv->dev, "ath9k_htc: HTC initialized with %d credits\n",
+		 priv->htc->credits);
+
 	return 0;
 
 err:
@@ -398,17 +454,43 @@
 static void setup_ht_cap(struct ath9k_htc_priv *priv,
 			 struct ieee80211_sta_ht_cap *ht_info)
 {
+	struct ath_common *common = ath9k_hw_common(priv->ah);
+	u8 tx_streams, rx_streams;
+	int i;
+
 	ht_info->ht_supported = true;
 	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
 		       IEEE80211_HT_CAP_SM_PS |
 		       IEEE80211_HT_CAP_SGI_40 |
 		       IEEE80211_HT_CAP_DSSSCCK40;
 
+	if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+	ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
 	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
 	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
 
 	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-	ht_info->mcs.rx_mask[0] = 0xff;
+
+	/* ath9k_htc supports only 1 or 2 stream devices */
+	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, 2);
+	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, 2);
+
+	ath_print(common, ATH_DBG_CONFIG,
+		  "TX streams %d, RX streams: %d\n",
+		  tx_streams, rx_streams);
+
+	if (tx_streams != rx_streams) {
+		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
+					   IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+	}
+
+	for (i = 0; i < rx_streams; i++)
+		ht_info->mcs.rx_mask[i] = 0xff;
+
 	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
 }
 
@@ -420,23 +502,37 @@
 	for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
 		priv->hwq_map[i] = -1;
 
-	if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BE)) {
+	priv->beaconq = ath9k_hw_beaconq_setup(priv->ah);
+	if (priv->beaconq == -1) {
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to setup BEACON xmit queue\n");
+		goto err;
+	}
+
+	priv->cabq = ath9k_htc_cabq_setup(priv);
+	if (priv->cabq == -1) {
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to setup CAB xmit queue\n");
+		goto err;
+	}
+
+	if (!ath9k_htc_txq_setup(priv, WME_AC_BE)) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to setup xmit queue for BE traffic\n");
 		goto err;
 	}
 
-	if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BK)) {
+	if (!ath9k_htc_txq_setup(priv, WME_AC_BK)) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to setup xmit queue for BK traffic\n");
 		goto err;
 	}
-	if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VI)) {
+	if (!ath9k_htc_txq_setup(priv, WME_AC_VI)) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to setup xmit queue for VI traffic\n");
 		goto err;
 	}
-	if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VO)) {
+	if (!ath9k_htc_txq_setup(priv, WME_AC_VO)) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to setup xmit queue for VO traffic\n");
 		goto err;
@@ -468,36 +564,6 @@
 	 */
 	for (i = 0; i < common->keymax; i++)
 		ath9k_hw_keyreset(priv->ah, (u16) i);
-
-	if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
-				   ATH9K_CIPHER_TKIP, NULL)) {
-		/*
-		 * Whether we should enable h/w TKIP MIC.
-		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
-		 * report WMM capable, so it's always safe to turn on
-		 * TKIP MIC in this case.
-		 */
-		ath9k_hw_setcapability(priv->ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
-	}
-
-	/*
-	 * Check whether the separate key cache entries
-	 * are required to handle both tx+rx MIC keys.
-	 * With split mic keys the number of stations is limited
-	 * to 27 otherwise 59.
-	 */
-	if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
-				   ATH9K_CIPHER_TKIP, NULL)
-	    && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
-				      ATH9K_CIPHER_MIC, NULL)
-	    && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_TKIP_SPLIT,
-				      0, NULL))
-		common->splitmic = 1;
-
-	/* turn on mcast key search if possible */
-	if (!ath9k_hw_getcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
-		(void)ath9k_hw_setcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH,
-					     1, 1, NULL);
 }
 
 static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
@@ -512,6 +578,17 @@
 		priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
 			ARRAY_SIZE(ath9k_legacy_rates);
 	}
+
+	if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes)) {
+		priv->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_channels;
+		priv->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
+		priv->sbands[IEEE80211_BAND_5GHZ].n_channels =
+			ARRAY_SIZE(ath9k_5ghz_channels);
+		priv->sbands[IEEE80211_BAND_5GHZ].bitrates =
+			ath9k_legacy_rates + 4;
+		priv->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+			ARRAY_SIZE(ath9k_legacy_rates) - 4;
+	}
 }
 
 static void ath9k_init_misc(struct ath9k_htc_priv *priv)
@@ -524,7 +601,6 @@
 	if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
 		memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
 
-	priv->op_flags |= OP_TXAGGR;
 	priv->ah->opmode = NL80211_IFTYPE_STATION;
 }
 
@@ -556,14 +632,12 @@
 	spin_lock_init(&priv->beacon_lock);
 	spin_lock_init(&priv->tx_lock);
 	mutex_init(&priv->mutex);
-	mutex_init(&priv->aggr_work.mutex);
 	mutex_init(&priv->htc_pm_lock);
 	tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
 		     (unsigned long)priv);
 	tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
 		     (unsigned long)priv);
 	tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
-	INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work);
 	INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
 	INIT_WORK(&priv->ps_work, ath9k_ps_work);
 
@@ -643,11 +717,17 @@
 	if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
 			&priv->sbands[IEEE80211_BAND_2GHZ];
+	if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes))
+		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+			&priv->sbands[IEEE80211_BAND_5GHZ];
 
 	if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
 		if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
 			setup_ht_cap(priv,
 				     &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+		if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes))
+			setup_ht_cap(priv,
+				     &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
 	}
 
 	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
@@ -747,7 +827,7 @@
 		goto err_free;
 	}
 
-	ret = ath9k_init_htc_services(priv);
+	ret = ath9k_init_htc_services(priv, devid);
 	if (ret)
 		goto err_init;
 
@@ -790,7 +870,8 @@
 	if (ret)
 		return ret;
 
-	ret = ath9k_init_htc_services(htc_handle->drv_priv);
+	ret = ath9k_init_htc_services(htc_handle->drv_priv,
+			      htc_handle->drv_priv->ah->hw_version.devid);
 	return ret;
 }
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9d371c1..e38ca66 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -27,13 +27,11 @@
 static void ath_update_txpow(struct ath9k_htc_priv *priv)
 {
 	struct ath_hw *ah = priv->ah;
-	u32 txpow;
 
 	if (priv->curtxpow != priv->txpowlimit) {
 		ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
 		/* read back in case value is clamped */
-		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
-		priv->curtxpow = txpow;
+		priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
 	}
 }
 
@@ -325,142 +323,128 @@
 	tcap.flags_ext = 0x80601000;
 	tcap.ampdu_limit = 0xffff0000;
 	tcap.ampdu_subframes = 20;
-	tcap.tx_chainmask_legacy = 1;
+	tcap.tx_chainmask_legacy = priv->ah->caps.tx_chainmask;
 	tcap.protmode = 1;
-	tcap.tx_chainmask = 1;
+	tcap.tx_chainmask = priv->ah->caps.tx_chainmask;
 
 	WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap);
 
 	return ret;
 }
 
-static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
-				 struct ieee80211_vif *vif,
-				 struct ieee80211_sta *sta)
+static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
+				 struct ieee80211_sta *sta,
+				 struct ath9k_htc_target_rate *trate)
 {
-	struct ath_common *common = ath9k_hw_common(priv->ah);
 	struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
 	struct ieee80211_supported_band *sband;
-	struct ath9k_htc_target_rate trate;
 	u32 caps = 0;
-	u8 cmd_rsp;
-	int i, j, ret;
+	int i, j;
 
-	memset(&trate, 0, sizeof(trate));
-
-	/* Only 2GHz is supported */
-	sband = priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+	sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band];
 
 	for (i = 0, j = 0; i < sband->n_bitrates; i++) {
 		if (sta->supp_rates[sband->band] & BIT(i)) {
-			priv->tgt_rate.rates.legacy_rates.rs_rates[j]
+			trate->rates.legacy_rates.rs_rates[j]
 				= (sband->bitrates[i].bitrate * 2) / 10;
 			j++;
 		}
 	}
-	priv->tgt_rate.rates.legacy_rates.rs_nrates = j;
+	trate->rates.legacy_rates.rs_nrates = j;
 
 	if (sta->ht_cap.ht_supported) {
 		for (i = 0, j = 0; i < 77; i++) {
 			if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
-				priv->tgt_rate.rates.ht_rates.rs_rates[j++] = i;
+				trate->rates.ht_rates.rs_rates[j++] = i;
 			if (j == ATH_HTC_RATE_MAX)
 				break;
 		}
-		priv->tgt_rate.rates.ht_rates.rs_nrates = j;
+		trate->rates.ht_rates.rs_nrates = j;
 
 		caps = WLAN_RC_HT_FLAG;
+		if (sta->ht_cap.mcs.rx_mask[1])
+			caps |= WLAN_RC_DS_FLAG;
 		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
 			caps |= WLAN_RC_40_FLAG;
-		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+		if (conf_is_ht40(&priv->hw->conf) &&
+		    (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
 			caps |= WLAN_RC_SGI_FLAG;
-
+		else if (conf_is_ht20(&priv->hw->conf) &&
+			 (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20))
+			caps |= WLAN_RC_SGI_FLAG;
 	}
 
-	priv->tgt_rate.sta_index = ista->index;
-	priv->tgt_rate.isnew = 1;
-	trate = priv->tgt_rate;
-	priv->tgt_rate.capflags = cpu_to_be32(caps);
-	trate.capflags = cpu_to_be32(caps);
+	trate->sta_index = ista->index;
+	trate->isnew = 1;
+	trate->capflags = cpu_to_be32(caps);
+}
 
-	WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
+static int ath9k_htc_send_rate_cmd(struct ath9k_htc_priv *priv,
+				    struct ath9k_htc_target_rate *trate)
+{
+	struct ath_common *common = ath9k_hw_common(priv->ah);
+	int ret;
+	u8 cmd_rsp;
+
+	WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, trate);
 	if (ret) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to initialize Rate information on target\n");
-		return ret;
 	}
 
-	ath_print(common, ATH_DBG_CONFIG,
-		  "Updated target STA: %pM (caps: 0x%x)\n", sta->addr, caps);
-	return 0;
+	return ret;
 }
 
-static bool check_rc_update(struct ieee80211_hw *hw, bool *cw40)
+static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
+				struct ieee80211_sta *sta)
 {
-	struct ath9k_htc_priv *priv = hw->priv;
-	struct ieee80211_conf *conf = &hw->conf;
-
-	if (!conf_is_ht(conf))
-		return false;
-
-	if (!(priv->op_flags & OP_ASSOCIATED) ||
-	    (priv->op_flags & OP_SCANNING))
-		return false;
-
-	if (conf_is_ht40(conf)) {
-		if (priv->ah->curchan->chanmode &
-			(CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)) {
-			return false;
-		} else {
-			*cw40 = true;
-			return true;
-		}
-	} else {  /* ht20 */
-		if (priv->ah->curchan->chanmode & CHANNEL_HT20)
-			return false;
-		else
-			return true;
-	}
-}
-
-static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40)
-{
-	struct ath9k_htc_target_rate trate;
 	struct ath_common *common = ath9k_hw_common(priv->ah);
+	struct ath9k_htc_target_rate trate;
 	int ret;
-	u32 caps = be32_to_cpu(priv->tgt_rate.capflags);
-	u8 cmd_rsp;
 
-	memset(&trate, 0, sizeof(trate));
+	memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+	ath9k_htc_setup_rate(priv, sta, &trate);
+	ret = ath9k_htc_send_rate_cmd(priv, &trate);
+	if (!ret)
+		ath_print(common, ATH_DBG_CONFIG,
+			  "Updated target sta: %pM, rate caps: 0x%X\n",
+			  sta->addr, be32_to_cpu(trate.capflags));
+}
 
-	trate = priv->tgt_rate;
+static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
+				  struct ieee80211_vif *vif,
+				  struct ieee80211_bss_conf *bss_conf)
+{
+	struct ath_common *common = ath9k_hw_common(priv->ah);
+	struct ath9k_htc_target_rate trate;
+	struct ieee80211_sta *sta;
+	int ret;
 
-	if (is_cw40)
-		caps |= WLAN_RC_40_FLAG;
-	else
-		caps &= ~WLAN_RC_40_FLAG;
+	memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
 
-	priv->tgt_rate.capflags = cpu_to_be32(caps);
-	trate.capflags = cpu_to_be32(caps);
-
-	WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
-	if (ret) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "Unable to update Rate information on target\n");
+	rcu_read_lock();
+	sta = ieee80211_find_sta(vif, bss_conf->bssid);
+	if (!sta) {
+		rcu_read_unlock();
 		return;
 	}
+	ath9k_htc_setup_rate(priv, sta, &trate);
+	rcu_read_unlock();
 
-	ath_print(common, ATH_DBG_CONFIG, "Rate control updated with "
-		  "caps:0x%x on target\n", priv->tgt_rate.capflags);
+	ret = ath9k_htc_send_rate_cmd(priv, &trate);
+	if (!ret)
+		ath_print(common, ATH_DBG_CONFIG,
+			  "Updated target sta: %pM, rate caps: 0x%X\n",
+			  bss_conf->bssid, be32_to_cpu(trate.capflags));
 }
 
-static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
-			       struct ieee80211_vif *vif,
-			       u8 *sta_addr, u8 tid, bool oper)
+int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
+			   struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta,
+			   enum ieee80211_ampdu_mlme_action action, u16 tid)
 {
 	struct ath_common *common = ath9k_hw_common(priv->ah);
 	struct ath9k_htc_target_aggr aggr;
-	struct ieee80211_sta *sta = NULL;
 	struct ath9k_htc_sta *ista;
 	int ret = 0;
 	u8 cmd_rsp;
@@ -469,74 +453,30 @@
 		return -EINVAL;
 
 	memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
-
-	rcu_read_lock();
-
-	/* Check if we are able to retrieve the station */
-	sta = ieee80211_find_sta(vif, sta_addr);
-	if (!sta) {
-		rcu_read_unlock();
-		return -EINVAL;
-	}
-
 	ista = (struct ath9k_htc_sta *) sta->drv_priv;
 
-	if (oper)
-		ista->tid_state[tid] = AGGR_START;
-	else
-		ista->tid_state[tid] = AGGR_STOP;
-
 	aggr.sta_index = ista->index;
-
-	rcu_read_unlock();
-
-	aggr.tidno = tid;
-	aggr.aggr_enable = oper;
+	aggr.tidno = tid & 0xf;
+	aggr.aggr_enable = (action == IEEE80211_AMPDU_TX_START) ? true : false;
 
 	WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
 	if (ret)
 		ath_print(common, ATH_DBG_CONFIG,
 			  "Unable to %s TX aggregation for (%pM, %d)\n",
-			  (oper) ? "start" : "stop", sta->addr, tid);
+			  (aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
 	else
 		ath_print(common, ATH_DBG_CONFIG,
-			  "%s aggregation for (%pM, %d)\n",
-			  (oper) ? "Starting" : "Stopping", sta->addr, tid);
+			  "%s TX aggregation for (%pM, %d)\n",
+			  (aggr.aggr_enable) ? "Starting" : "Stopping",
+			  sta->addr, tid);
+
+	spin_lock_bh(&priv->tx_lock);
+	ista->tid_state[tid] = (aggr.aggr_enable && !ret) ? AGGR_START : AGGR_STOP;
+	spin_unlock_bh(&priv->tx_lock);
 
 	return ret;
 }
 
-void ath9k_htc_aggr_work(struct work_struct *work)
-{
-	int ret = 0;
-	struct ath9k_htc_priv *priv =
-		container_of(work, struct ath9k_htc_priv,
-			     ath9k_aggr_work.work);
-	struct ath9k_htc_aggr_work *wk = &priv->aggr_work;
-
-	mutex_lock(&wk->mutex);
-
-	switch (wk->action) {
-	case IEEE80211_AMPDU_TX_START:
-		ret = ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
-					  wk->tid, true);
-		if (!ret)
-			ieee80211_start_tx_ba_cb(wk->vif, wk->sta_addr,
-						 wk->tid);
-		break;
-	case IEEE80211_AMPDU_TX_STOP:
-		ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
-				    wk->tid, false);
-		ieee80211_stop_tx_ba_cb(wk->vif, wk->sta_addr, wk->tid);
-		break;
-	default:
-		ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
-			  "Unknown AMPDU action\n");
-	}
-
-	mutex_unlock(&wk->mutex);
-}
-
 /*********/
 /* DEBUG */
 /*********/
@@ -617,6 +557,19 @@
 			"%20s : %10u\n", "SKBs dropped",
 			priv->debug.tx_stats.skb_dropped);
 
+	len += snprintf(buf + len, sizeof(buf) - len,
+			"%20s : %10u\n", "BE queued",
+			priv->debug.tx_stats.queue_stats[WME_AC_BE]);
+	len += snprintf(buf + len, sizeof(buf) - len,
+			"%20s : %10u\n", "BK queued",
+			priv->debug.tx_stats.queue_stats[WME_AC_BK]);
+	len += snprintf(buf + len, sizeof(buf) - len,
+			"%20s : %10u\n", "VI queued",
+			priv->debug.tx_stats.queue_stats[WME_AC_VI]);
+	len += snprintf(buf + len, sizeof(buf) - len,
+			"%20s : %10u\n", "VO queued",
+			priv->debug.tx_stats.queue_stats[WME_AC_VO]);
+
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -978,6 +931,8 @@
 		priv->ah->led_pin = ATH_LED_PIN_9287;
 	else if (AR_SREV_9271(priv->ah))
 		priv->ah->led_pin = ATH_LED_PIN_9271;
+	else if (AR_DEVID_7010(priv->ah))
+		priv->ah->led_pin = ATH_LED_PIN_7010;
 	else
 		priv->ah->led_pin = ATH_LED_PIN_DEF;
 
@@ -1054,6 +1009,95 @@
 		wiphy_rfkill_start_polling(priv->hw->wiphy);
 }
 
+static void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
+{
+	struct ath9k_htc_priv *priv = hw->priv;
+	struct ath_hw *ah = priv->ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	int ret;
+	u8 cmd_rsp;
+
+	if (!ah->curchan)
+		ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+
+	/* Reset the HW */
+	ret = ath9k_hw_reset(ah, ah->curchan, false);
+	if (ret) {
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to reset hardware; reset status %d "
+			  "(freq %u MHz)\n", ret, ah->curchan->channel);
+	}
+
+	ath_update_txpow(priv);
+
+	/* Start RX */
+	WMI_CMD(WMI_START_RECV_CMDID);
+	ath9k_host_rx_init(priv);
+
+	/* Start TX */
+	htc_start(priv->htc);
+	spin_lock_bh(&priv->tx_lock);
+	priv->tx_queues_stop = false;
+	spin_unlock_bh(&priv->tx_lock);
+	ieee80211_wake_queues(hw);
+
+	WMI_CMD(WMI_ENABLE_INTR_CMDID);
+
+	/* Enable LED */
+	ath9k_hw_cfg_output(ah, ah->led_pin,
+			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+	ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+}
+
+static void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
+{
+	struct ath9k_htc_priv *priv = hw->priv;
+	struct ath_hw *ah = priv->ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	int ret;
+	u8 cmd_rsp;
+
+	ath9k_htc_ps_wakeup(priv);
+
+	/* Disable LED */
+	ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+	ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+
+	WMI_CMD(WMI_DISABLE_INTR_CMDID);
+
+	/* Stop TX */
+	ieee80211_stop_queues(hw);
+	htc_stop(priv->htc);
+	WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+	skb_queue_purge(&priv->tx_queue);
+
+	/* Stop RX */
+	WMI_CMD(WMI_STOP_RECV_CMDID);
+
+	/*
+	 * The MIB counters have to be disabled here,
+	 * since the target doesn't do it.
+	 */
+	ath9k_hw_disable_mib_counters(ah);
+
+	if (!ah->curchan)
+		ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+
+	/* Reset the HW */
+	ret = ath9k_hw_reset(ah, ah->curchan, false);
+	if (ret) {
+		ath_print(common, ATH_DBG_FATAL,
+			  "Unable to reset hardware; reset status %d "
+			  "(freq %u MHz)\n", ret, ah->curchan->channel);
+	}
+
+	/* Disable the PHY */
+	ath9k_hw_phy_disable(ah);
+
+	ath9k_htc_ps_restore(priv);
+	ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
+}
+
 /**********************/
 /* mac80211 Callbacks */
 /**********************/
@@ -1099,7 +1143,7 @@
 	return 0;
 }
 
-static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
+static int ath9k_htc_start(struct ieee80211_hw *hw)
 {
 	struct ath9k_htc_priv *priv = hw->priv;
 	struct ath_hw *ah = priv->ah;
@@ -1111,10 +1155,16 @@
 	__be16 htc_mode;
 	u8 cmd_rsp;
 
+	mutex_lock(&priv->mutex);
+
 	ath_print(common, ATH_DBG_CONFIG,
 		  "Starting driver with initial channel: %d MHz\n",
 		  curchan->center_freq);
 
+	/* Ensure that HW is awake before flushing RX */
+	ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+	WMI_CMD(WMI_FLUSH_RECV_CMDID);
+
 	/* setup initial channel */
 	init_channel = ath9k_cmn_get_curchannel(hw, ah);
 
@@ -1127,6 +1177,7 @@
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to reset hardware; reset status %d "
 			  "(freq %u MHz)\n", ret, curchan->center_freq);
+		mutex_unlock(&priv->mutex);
 		return ret;
 	}
 
@@ -1147,31 +1198,14 @@
 	priv->tx_queues_stop = false;
 	spin_unlock_bh(&priv->tx_lock);
 
-	if (led) {
-		/* Enable LED */
-		ath9k_hw_cfg_output(ah, ah->led_pin,
-				    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-		ath9k_hw_set_gpio(ah, ah->led_pin, 0);
-	}
-
 	ieee80211_wake_queues(hw);
 
-	return ret;
-}
-
-static int ath9k_htc_start(struct ieee80211_hw *hw)
-{
-	struct ath9k_htc_priv *priv = hw->priv;
-	int ret = 0;
-
-	mutex_lock(&priv->mutex);
-	ret = ath9k_htc_radio_enable(hw, false);
 	mutex_unlock(&priv->mutex);
 
 	return ret;
 }
 
-static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
+static void ath9k_htc_stop(struct ieee80211_hw *hw)
 {
 	struct ath9k_htc_priv *priv = hw->priv;
 	struct ath_hw *ah = priv->ah;
@@ -1179,21 +1213,17 @@
 	int ret = 0;
 	u8 cmd_rsp;
 
+	mutex_lock(&priv->mutex);
+
 	if (priv->op_flags & OP_INVALID) {
 		ath_print(common, ATH_DBG_ANY, "Device not present\n");
+		mutex_unlock(&priv->mutex);
 		return;
 	}
 
-	if (led) {
-		/* Disable LED */
-		ath9k_hw_set_gpio(ah, ah->led_pin, 1);
-		ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
-	}
-
 	/* Cancel all the running timers/work .. */
 	cancel_work_sync(&priv->ps_work);
 	cancel_delayed_work_sync(&priv->ath9k_ani_work);
-	cancel_delayed_work_sync(&priv->ath9k_aggr_work);
 	cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
 	ath9k_led_stop_brightness(priv);
 
@@ -1202,12 +1232,6 @@
 	WMI_CMD(WMI_DISABLE_INTR_CMDID);
 	WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
 	WMI_CMD(WMI_STOP_RECV_CMDID);
-	ath9k_hw_phy_disable(ah);
-	ath9k_hw_disable(ah);
-	ath9k_hw_configpcipowersave(ah, 1, 1);
-	ath9k_htc_ps_restore(priv);
-	ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
-
 	skb_queue_purge(&priv->tx_queue);
 
 	/* Remove monitor interface here */
@@ -1220,21 +1244,18 @@
 				  "Monitor interface removed\n");
 	}
 
+	ath9k_hw_phy_disable(ah);
+	ath9k_hw_disable(ah);
+	ath9k_hw_configpcipowersave(ah, 1, 1);
+	ath9k_htc_ps_restore(priv);
+	ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
+
 	priv->op_flags |= OP_INVALID;
 
 	ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
-}
-
-static void ath9k_htc_stop(struct ieee80211_hw *hw)
-{
-	struct ath9k_htc_priv *priv = hw->priv;
-
-	mutex_lock(&priv->mutex);
-	ath9k_htc_radio_disable(hw, false);
 	mutex_unlock(&priv->mutex);
 }
 
-
 static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
 				   struct ieee80211_vif *vif)
 {
@@ -1302,6 +1323,7 @@
 out:
 	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
+
 	return ret;
 }
 
@@ -1318,6 +1340,7 @@
 	ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
 
 	mutex_lock(&priv->mutex);
+	ath9k_htc_ps_wakeup(priv);
 
 	memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
 	memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
@@ -1328,6 +1351,7 @@
 	ath9k_htc_remove_station(priv, vif, NULL);
 	priv->vif = NULL;
 
+	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -1343,30 +1367,27 @@
 		bool enable_radio = false;
 		bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
 
+		mutex_lock(&priv->htc_pm_lock);
 		if (!idle && priv->ps_idle)
 			enable_radio = true;
-
 		priv->ps_idle = idle;
+		mutex_unlock(&priv->htc_pm_lock);
 
 		if (enable_radio) {
-			ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
-			ath9k_htc_radio_enable(hw, true);
 			ath_print(common, ATH_DBG_CONFIG,
 				  "not-idle: enabling radio\n");
+			ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+			ath9k_htc_radio_enable(hw);
 		}
 	}
 
 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
 		struct ieee80211_channel *curchan = hw->conf.channel;
 		int pos = curchan->hw_value;
-		bool is_cw40 = false;
 
 		ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
 			  curchan->center_freq);
 
-		if (check_rc_update(hw, &is_cw40))
-			ath9k_htc_rc_update(priv, is_cw40);
-
 		ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]);
 
 		if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
@@ -1399,14 +1420,21 @@
 		}
 	}
 
-	if (priv->ps_idle) {
+	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+		mutex_lock(&priv->htc_pm_lock);
+		if (!priv->ps_idle) {
+			mutex_unlock(&priv->htc_pm_lock);
+			goto out;
+		}
+		mutex_unlock(&priv->htc_pm_lock);
+
 		ath_print(common, ATH_DBG_CONFIG,
 			  "idle: disabling radio\n");
-		ath9k_htc_radio_disable(hw, true);
+		ath9k_htc_radio_disable(hw);
 	}
 
+out:
 	mutex_unlock(&priv->mutex);
-
 	return 0;
 }
 
@@ -1428,8 +1456,8 @@
 	u32 rfilt;
 
 	mutex_lock(&priv->mutex);
-
 	ath9k_htc_ps_wakeup(priv);
+
 	changed_flags &= SUPPORTED_FILTERS;
 	*total_flags &= SUPPORTED_FILTERS;
 
@@ -1444,30 +1472,38 @@
 	mutex_unlock(&priv->mutex);
 }
 
-static void ath9k_htc_sta_notify(struct ieee80211_hw *hw,
-				 struct ieee80211_vif *vif,
-				 enum sta_notify_cmd cmd,
-				 struct ieee80211_sta *sta)
+static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta)
 {
 	struct ath9k_htc_priv *priv = hw->priv;
 	int ret;
 
 	mutex_lock(&priv->mutex);
-
-	switch (cmd) {
-	case STA_NOTIFY_ADD:
-		ret = ath9k_htc_add_station(priv, vif, sta);
-		if (!ret)
-			ath9k_htc_init_rate(priv, vif, sta);
-		break;
-	case STA_NOTIFY_REMOVE:
-		ath9k_htc_remove_station(priv, vif, sta);
-		break;
-	default:
-		break;
-	}
-
+	ath9k_htc_ps_wakeup(priv);
+	ret = ath9k_htc_add_station(priv, vif, sta);
+	if (!ret)
+		ath9k_htc_init_rate(priv, sta);
+	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
+
+	return ret;
+}
+
+static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta)
+{
+	struct ath9k_htc_priv *priv = hw->priv;
+	int ret;
+
+	mutex_lock(&priv->mutex);
+	ath9k_htc_ps_wakeup(priv);
+	ret = ath9k_htc_remove_station(priv, vif, sta);
+	ath9k_htc_ps_restore(priv);
+	mutex_unlock(&priv->mutex);
+
+	return ret;
 }
 
 static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -1482,6 +1518,7 @@
 		return 0;
 
 	mutex_lock(&priv->mutex);
+	ath9k_htc_ps_wakeup(priv);
 
 	memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
 
@@ -1499,9 +1536,16 @@
 		  params->cw_max, params->txop);
 
 	ret = ath_htc_txq_update(priv, qnum, &qi);
-	if (ret)
+	if (ret) {
 		ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
+		goto out;
+	}
 
+	if ((priv->ah->opmode == NL80211_IFTYPE_ADHOC) &&
+	    (qnum == priv->hwq_map[WME_AC_BE]))
+		    ath9k_htc_beaconq_config(priv);
+out:
+	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
 
 	return ret;
@@ -1574,7 +1618,6 @@
 			ath_start_ani(priv);
 		} else {
 			priv->op_flags &= ~OP_ASSOCIATED;
-			cancel_work_sync(&priv->ps_work);
 			cancel_delayed_work_sync(&priv->ath9k_ani_work);
 		}
 	}
@@ -1631,6 +1674,9 @@
 		ath9k_hw_init_global_settings(ah);
 	}
 
+	if (changed & BSS_CHANGED_HT)
+		ath9k_htc_update_rate(priv, vif, bss_conf);
+
 	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
 }
@@ -1641,7 +1687,9 @@
 	u64 tsf;
 
 	mutex_lock(&priv->mutex);
+	ath9k_htc_ps_wakeup(priv);
 	tsf = ath9k_hw_gettsf64(priv->ah);
+	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
 
 	return tsf;
@@ -1652,7 +1700,9 @@
 	struct ath9k_htc_priv *priv = hw->priv;
 
 	mutex_lock(&priv->mutex);
+	ath9k_htc_ps_wakeup(priv);
 	ath9k_hw_settsf64(priv->ah, tsf);
+	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -1660,11 +1710,11 @@
 {
 	struct ath9k_htc_priv *priv = hw->priv;
 
-	ath9k_htc_ps_wakeup(priv);
 	mutex_lock(&priv->mutex);
+	ath9k_htc_ps_wakeup(priv);
 	ath9k_hw_reset_tsf(priv->ah);
-	mutex_unlock(&priv->mutex);
 	ath9k_htc_ps_restore(priv);
+	mutex_unlock(&priv->mutex);
 }
 
 static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
@@ -1674,8 +1724,8 @@
 				  u16 tid, u16 *ssn)
 {
 	struct ath9k_htc_priv *priv = hw->priv;
-	struct ath9k_htc_aggr_work *work = &priv->aggr_work;
 	struct ath9k_htc_sta *ista;
+	int ret = 0;
 
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
@@ -1683,26 +1733,26 @@
 	case IEEE80211_AMPDU_RX_STOP:
 		break;
 	case IEEE80211_AMPDU_TX_START:
+		ret = ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
+		if (!ret)
+			ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
 	case IEEE80211_AMPDU_TX_STOP:
-		if (!(priv->op_flags & OP_TXAGGR))
-			return -ENOTSUPP;
-		memcpy(work->sta_addr, sta->addr, ETH_ALEN);
-		work->hw = hw;
-		work->vif = vif;
-		work->action = action;
-		work->tid = tid;
-		ieee80211_queue_delayed_work(hw, &priv->ath9k_aggr_work, 0);
+		ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
 		ista = (struct ath9k_htc_sta *) sta->drv_priv;
+		spin_lock_bh(&priv->tx_lock);
 		ista->tid_state[tid] = AGGR_OPERATIONAL;
+		spin_unlock_bh(&priv->tx_lock);
 		break;
 	default:
 		ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
 			  "Unknown AMPDU action\n");
 	}
 
-	return 0;
+	return ret;
 }
 
 static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
@@ -1722,8 +1772,8 @@
 {
 	struct ath9k_htc_priv *priv = hw->priv;
 
-	ath9k_htc_ps_wakeup(priv);
 	mutex_lock(&priv->mutex);
+	ath9k_htc_ps_wakeup(priv);
 	spin_lock_bh(&priv->beacon_lock);
 	priv->op_flags &= ~OP_SCANNING;
 	spin_unlock_bh(&priv->beacon_lock);
@@ -1731,8 +1781,8 @@
 	if (priv->op_flags & OP_ASSOCIATED)
 		ath9k_htc_beacon_config(priv, priv->vif);
 	ath_start_ani(priv);
-	mutex_unlock(&priv->mutex);
 	ath9k_htc_ps_restore(priv);
+	mutex_unlock(&priv->mutex);
 }
 
 static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
@@ -1746,8 +1796,10 @@
 	struct ath9k_htc_priv *priv = hw->priv;
 
 	mutex_lock(&priv->mutex);
+	ath9k_htc_ps_wakeup(priv);
 	priv->ah->coverage_class = coverage_class;
 	ath9k_hw_init_global_settings(priv->ah);
+	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -1759,7 +1811,8 @@
 	.remove_interface   = ath9k_htc_remove_interface,
 	.config             = ath9k_htc_config,
 	.configure_filter   = ath9k_htc_configure_filter,
-	.sta_notify         = ath9k_htc_sta_notify,
+	.sta_add            = ath9k_htc_sta_add,
+	.sta_remove         = ath9k_htc_sta_remove,
 	.conf_tx            = ath9k_htc_conf_tx,
 	.bss_info_changed   = ath9k_htc_bss_info_changed,
 	.set_key            = ath9k_htc_set_key,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 2571b44..bd0b4ac 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -20,19 +20,29 @@
 /* TX */
 /******/
 
+#define ATH9K_HTC_INIT_TXQ(subtype) do {			\
+		qi.tqi_subtype = subtype;			\
+		qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;		\
+		qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;		\
+		qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;		\
+		qi.tqi_physCompBuf = 0;				\
+		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |	\
+			TXQ_FLAG_TXDESCINT_ENABLE;		\
+	} while (0)
+
 int get_hw_qnum(u16 queue, int *hwq_map)
 {
 	switch (queue) {
 	case 0:
-		return hwq_map[ATH9K_WME_AC_VO];
+		return hwq_map[WME_AC_VO];
 	case 1:
-		return hwq_map[ATH9K_WME_AC_VI];
+		return hwq_map[WME_AC_VI];
 	case 2:
-		return hwq_map[ATH9K_WME_AC_BE];
+		return hwq_map[WME_AC_BE];
 	case 3:
-		return hwq_map[ATH9K_WME_AC_BK];
+		return hwq_map[WME_AC_BK];
 	default:
-		return hwq_map[ATH9K_WME_AC_BE];
+		return hwq_map[WME_AC_BE];
 	}
 }
 
@@ -71,7 +81,7 @@
 	struct ath9k_htc_vif *avp;
 	struct ath9k_htc_tx_ctl tx_ctl;
 	enum htc_endpoint_id epid;
-	u16 qnum, hw_qnum;
+	u16 qnum;
 	__le16 fc;
 	u8 *tx_fhdr;
 	u8 sta_idx;
@@ -131,20 +141,23 @@
 		memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
 
 		qnum = skb_get_queue_mapping(skb);
-		hw_qnum = get_hw_qnum(qnum, priv->hwq_map);
 
-		switch (hw_qnum) {
+		switch (qnum) {
 		case 0:
-			epid = priv->data_be_ep;
-			break;
-		case 2:
-			epid = priv->data_vi_ep;
-			break;
-		case 3:
+			TX_QSTAT_INC(WME_AC_VO);
 			epid = priv->data_vo_ep;
 			break;
 		case 1:
+			TX_QSTAT_INC(WME_AC_VI);
+			epid = priv->data_vi_ep;
+			break;
+		case 2:
+			TX_QSTAT_INC(WME_AC_BE);
+			epid = priv->data_be_ep;
+			break;
+		case 3:
 		default:
+			TX_QSTAT_INC(WME_AC_BK);
 			epid = priv->data_bk_ep;
 			break;
 		}
@@ -174,6 +187,19 @@
 	return htc_send(priv->htc, skb, epid, &tx_ctl);
 }
 
+static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
+				    struct ath9k_htc_sta *ista, u8 tid)
+{
+	bool ret = false;
+
+	spin_lock_bh(&priv->tx_lock);
+	if ((tid < ATH9K_HTC_MAX_TID) && (ista->tid_state[tid] == AGGR_STOP))
+		ret = true;
+	spin_unlock_bh(&priv->tx_lock);
+
+	return ret;
+}
+
 void ath9k_tx_tasklet(unsigned long data)
 {
 	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
@@ -203,8 +229,7 @@
 		/* Check if we need to start aggregation */
 
 		if (sta && conf_is_ht(&priv->hw->conf) &&
-		    (priv->op_flags & OP_TXAGGR)
-		    && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+		    !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
 			if (ieee80211_is_data_qos(fc)) {
 				u8 *qc, tid;
 				struct ath9k_htc_sta *ista;
@@ -213,10 +238,11 @@
 				tid = qc[0] & 0xf;
 				ista = (struct ath9k_htc_sta *)sta->drv_priv;
 
-				if ((tid < ATH9K_HTC_MAX_TID) &&
-				    ista->tid_state[tid] == AGGR_STOP) {
+				if (ath9k_htc_check_tx_aggr(priv, ista, tid)) {
 					ieee80211_start_tx_ba_session(sta, tid);
+					spin_lock_bh(&priv->tx_lock);
 					ista->tid_state[tid] = AGGR_PROGRESS;
+					spin_unlock_bh(&priv->tx_lock);
 				}
 			}
 		}
@@ -284,8 +310,7 @@
 
 }
 
-bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
-			 enum ath9k_tx_queue_subtype subtype)
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype)
 {
 	struct ath_hw *ah = priv->ah;
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -293,13 +318,7 @@
 	int qnum;
 
 	memset(&qi, 0, sizeof(qi));
-
-	qi.tqi_subtype = subtype;
-	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
-	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
-	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
-	qi.tqi_physCompBuf = 0;
-	qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE;
+	ATH9K_HTC_INIT_TXQ(subtype);
 
 	qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
 	if (qnum == -1)
@@ -317,6 +336,16 @@
 	return true;
 }
 
+int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv)
+{
+	struct ath9k_tx_queue_info qi;
+
+	memset(&qi, 0, sizeof(qi));
+	ATH9K_HTC_INIT_TXQ(0);
+
+	return ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_CAB, &qi);
+}
+
 /******/
 /* RX */
 /******/
@@ -387,9 +416,6 @@
 	/* configure operational mode */
 	ath9k_hw_setopmode(ah);
 
-	/* Handle any link-level address change. */
-	ath9k_hw_setmac(ah, common->macaddr);
-
 	/* calculate and install multicast filter */
 	mfilt[0] = mfilt[1] = ~0;
 	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -399,7 +425,7 @@
 {
 	ath9k_hw_rxena(priv->ah);
 	ath9k_htc_opmode_init(priv);
-	ath9k_hw_startpcureceive(priv->ah);
+	ath9k_hw_startpcureceive(priv->ah, (priv->op_flags & OP_SCANNING));
 	priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 064397f..705c0f3 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -89,7 +89,6 @@
 	struct htc_endpoint *endpoint;
 	struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf;
 
-	target->credits = be16_to_cpu(htc_ready_msg->credits);
 	target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
 
 	endpoint = &target->endpoint[ENDPOINT0];
@@ -159,7 +158,7 @@
 
 	cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID);
 	cp_msg->pipe_id = USB_WLAN_TX_PIPE;
-	cp_msg->credits = 28;
+	cp_msg->credits = target->credits;
 
 	target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
 
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 624422a..381da6c 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -128,6 +128,17 @@
 	ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf);
 }
 
+static inline void ath9k_hw_procmibevent(struct ath_hw *ah)
+{
+	ath9k_hw_ops(ah)->ani_proc_mib_event(ah);
+}
+
+static inline void ath9k_hw_ani_monitor(struct ath_hw *ah,
+					struct ath9k_channel *chan)
+{
+	ath9k_hw_ops(ah)->ani_monitor(ah, chan);
+}
+
 /* Private hardware call ops */
 
 /* PHY ops */
@@ -277,4 +288,9 @@
 	return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType);
 }
 
+static inline void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
+{
+	ath9k_hw_private_ops(ah)->ani_reset(ah, is_scanning);
+}
+
 #endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index c33f17d..3ed5c9e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -23,11 +23,6 @@
 #include "rc.h"
 #include "ar9003_mac.h"
 
-#define ATH9K_CLOCK_RATE_CCK		22
-#define ATH9K_CLOCK_RATE_5GHZ_OFDM	40
-#define ATH9K_CLOCK_RATE_2GHZ_OFDM	44
-#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
-
 static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
 
 MODULE_AUTHOR("Atheros Communications");
@@ -80,6 +75,15 @@
 	ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
 }
 
+static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+	/* You will not have this callback if using the old ANI */
+	if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
+		return;
+
+	ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
+}
+
 /********************/
 /* Helper Functions */
 /********************/
@@ -371,13 +375,7 @@
 	ah->config.ofdm_trig_high = 500;
 	ah->config.cck_trig_high = 200;
 	ah->config.cck_trig_low = 100;
-
-	/*
-	 * For now ANI is disabled for AR9003, it is still
-	 * being tested.
-	 */
-	if (!AR_SREV_9300_20_OR_LATER(ah))
-		ah->config.enable_ani = 1;
+	ah->config.enable_ani = true;
 
 	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
 		ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -390,12 +388,7 @@
 		ah->config.ht_enable = 0;
 
 	ah->config.rx_intr_mitigation = true;
-
-	/*
-	 * Tx IQ Calibration (ah->config.tx_iq_calibration) is only
-	 * used by AR9003, but it is showing reliability issues.
-	 * It will take a while to fix so this is currently disabled.
-	 */
+	ah->config.pcieSerDesWrite = true;
 
 	/*
 	 * We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -433,7 +426,9 @@
 		ah->ah_flags = AH_USE_EEPROM;
 
 	ah->atim_window = 0;
-	ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE;
+	ah->sta_id1_defaults =
+		AR_STA_ID1_CRPT_MIC_ENABLE |
+		AR_STA_ID1_MCAST_KSRCH;
 	ah->beacon_interval = 100;
 	ah->enable_32kHz_clock = DONT_USE_32KHZ;
 	ah->slottime = (u32) -1;
@@ -571,28 +566,19 @@
 	ah->ani_function = ATH9K_ANI_ALL;
 	if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
 		ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
+	if (!AR_SREV_9300_20_OR_LATER(ah))
+		ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
 
 	ath9k_hw_init_mode_regs(ah);
 
 	/*
-	 * Configire PCIE after Ini init. SERDES values now come from ini file
-	 * This enables PCIe low power mode.
+	 * Read back AR_WA into a permanent copy and set bits 14 and 17.
+	 * We need to do this to avoid RMW of this register. We cannot
+	 * read this register when the chip is asleep.
 	 */
-	if (AR_SREV_9300_20_OR_LATER(ah)) {
-		u32 regval;
-		unsigned int i;
-
-		/* Set Bits 16 and 17 in the AR_WA register. */
-		regval = REG_READ(ah, AR_WA);
-		regval |= 0x00030000;
-		REG_WRITE(ah, AR_WA, regval);
-
-		for (i = 0; i < ah->iniPcieSerdesLowPower.ia_rows; i++) {
-			REG_WRITE(ah,
-				  INI_RA(&ah->iniPcieSerdesLowPower, i, 0),
-				  INI_RA(&ah->iniPcieSerdesLowPower, i, 1));
-		}
-	}
+	ah->WARegVal = REG_READ(ah, AR_WA);
+	ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+			 AR_WA_ASPM_TIMER_BASED_DISABLE);
 
 	if (ah->is_pciexpress)
 		ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -627,6 +613,7 @@
 		ar9003_hw_set_nf_limits(ah);
 
 	ath9k_init_nfcal_hist_buffer(ah);
+	ah->bb_watchdog_timeout_ms = 25;
 
 	common->state = ATH_HW_INITIALIZED;
 
@@ -1012,6 +999,11 @@
 
 	ENABLE_REGWRITE_BUFFER(ah);
 
+	if (AR_SREV_9300_20_OR_LATER(ah)) {
+		REG_WRITE(ah, AR_WA, ah->WARegVal);
+		udelay(10);
+	}
+
 	REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
 		  AR_RTC_FORCE_WAKE_ON_INT);
 
@@ -1066,6 +1058,11 @@
 {
 	ENABLE_REGWRITE_BUFFER(ah);
 
+	if (AR_SREV_9300_20_OR_LATER(ah)) {
+		REG_WRITE(ah, AR_WA, ah->WARegVal);
+		udelay(10);
+	}
+
 	REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
 		  AR_RTC_FORCE_WAKE_ON_INT);
 
@@ -1073,6 +1070,7 @@
 		REG_WRITE(ah, AR_RC, AR_RC_AHB);
 
 	REG_WRITE(ah, AR_RTC_RESET, 0);
+	udelay(2);
 
 	REGWRITE_BUFFER_FLUSH(ah);
 	DISABLE_REGWRITE_BUFFER(ah);
@@ -1102,6 +1100,11 @@
 
 static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
 {
+	if (AR_SREV_9300_20_OR_LATER(ah)) {
+		REG_WRITE(ah, AR_WA, ah->WARegVal);
+		udelay(10);
+	}
+
 	REG_WRITE(ah, AR_RTC_FORCE_WAKE,
 		  AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
 
@@ -1265,7 +1268,8 @@
 	macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
 
 	/* For chips on which RTC reset is done, save TSF before it gets cleared */
-	if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+	if (AR_SREV_9100(ah) ||
+	    (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
 		tsf = ath9k_hw_gettsf64(ah);
 
 	saveLedState = REG_READ(ah, AR_CFG_LED) &
@@ -1297,16 +1301,30 @@
 	}
 
 	/* Restore TSF */
-	if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+	if (tsf)
 		ath9k_hw_settsf64(ah, tsf);
 
 	if (AR_SREV_9280_10_OR_LATER(ah))
 		REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
 
+	if (!AR_SREV_9300_20_OR_LATER(ah))
+		ar9002_hw_enable_async_fifo(ah);
+
 	r = ath9k_hw_process_ini(ah, chan);
 	if (r)
 		return r;
 
+	/*
+	 * Some AR91xx SoC devices frequently fail to accept TSF writes
+	 * right after the chip reset. When that happens, write a new
+	 * value after the initvals have been applied, with an offset
+	 * based on the measured time difference.
+	 */
+	if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
+		tsf += 1500;
+		ath9k_hw_settsf64(ah, tsf);
+	}
+
 	/* Setup MFP options for CCMP */
 	if (AR_SREV_9280_20_OR_LATER(ah)) {
 		/* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
@@ -1367,6 +1385,7 @@
 		ath9k_hw_resettxqueue(ah, i);
 
 	ath9k_hw_init_interrupt_masks(ah, ah->opmode);
+	ath9k_hw_ani_cache_ini_regs(ah);
 	ath9k_hw_init_qos(ah);
 
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
@@ -1375,7 +1394,7 @@
 	ath9k_hw_init_global_settings(ah);
 
 	if (!AR_SREV_9300_20_OR_LATER(ah)) {
-		ar9002_hw_enable_async_fifo(ah);
+		ar9002_hw_update_async_fifo(ah);
 		ar9002_hw_enable_wep_aggregation(ah);
 	}
 
@@ -1426,9 +1445,13 @@
 				"Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
 		}
 	} else {
-		/* Configure AR9271 target WLAN */
-                if (AR_SREV_9271(ah))
-			REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
+		if (common->bus_ops->ath_bus_type == ATH_USB) {
+			/* Configure AR9271 target WLAN */
+			if (AR_SREV_9271(ah))
+				REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
+			else
+				REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
+		}
 #ifdef __BIG_ENDIAN
                 else
 			REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -1441,6 +1464,7 @@
 	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		ath9k_hw_loadnf(ah, curchan);
 		ath9k_hw_start_nfcal(ah);
+		ar9003_hw_bb_watchdog_config(ah);
 	}
 
 	return 0;
@@ -1486,9 +1510,10 @@
 }
 EXPORT_SYMBOL(ath9k_hw_keyreset);
 
-bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
+static bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
 {
 	u32 macHi, macLo;
+	u32 unicast_flag = AR_KEYTABLE_VALID;
 
 	if (entry >= ah->caps.keycache_size) {
 		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
@@ -1497,6 +1522,16 @@
 	}
 
 	if (mac != NULL) {
+		/*
+		 * AR_KEYTABLE_VALID indicates that the address is a unicast
+		 * address, which must match the transmitter address for
+		 * decrypting frames.
+		 * Not setting this bit allows the hardware to use the key
+		 * for multicast frame decryption.
+		 */
+		if (mac[0] & 0x01)
+			unicast_flag = 0;
+
 		macHi = (mac[5] << 8) | mac[4];
 		macLo = (mac[3] << 24) |
 			(mac[2] << 16) |
@@ -1509,11 +1544,10 @@
 		macLo = macHi = 0;
 	}
 	REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
-	REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID);
+	REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);
 
 	return true;
 }
-EXPORT_SYMBOL(ath9k_hw_keysetmac);
 
 bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
 				 const struct ath9k_keyval *k,
@@ -1714,17 +1748,6 @@
 }
 EXPORT_SYMBOL(ath9k_hw_set_keycache_entry);
 
-bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry)
-{
-	if (entry < ah->caps.keycache_size) {
-		u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry));
-		if (val & AR_KEYTABLE_VALID)
-			return true;
-	}
-	return false;
-}
-EXPORT_SYMBOL(ath9k_hw_keyisvalid);
-
 /******************************/
 /* Power Management (Chipset) */
 /******************************/
@@ -1751,6 +1774,11 @@
 			REG_CLR_BIT(ah, (AR_RTC_RESET),
 				    AR_RTC_RESET_EN);
 	}
+
+	/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
+	if (AR_SREV_9300_20_OR_LATER(ah))
+		REG_WRITE(ah, AR_WA,
+			  ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
 }
 
 /*
@@ -1777,6 +1805,10 @@
 				    AR_RTC_FORCE_WAKE_EN);
 		}
 	}
+
+	/* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
+	if (AR_SREV_9300_20_OR_LATER(ah))
+		REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
 }
 
 static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
@@ -1784,6 +1816,12 @@
 	u32 val;
 	int i;
 
+	/* Set Bits 14 and 17 of AR_WA before powering on the chip. */
+	if (AR_SREV_9300_20_OR_LATER(ah)) {
+		REG_WRITE(ah, AR_WA, ah->WARegVal);
+		udelay(10);
+	}
+
 	if (setChip) {
 		if ((REG_READ(ah, AR_RTC_STATUS) &
 		     AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
@@ -2138,6 +2176,8 @@
 
 	if (AR_SREV_9271(ah))
 		pCap->num_gpio_pins = AR9271_NUM_GPIO;
+	else if (AR_DEVID_7010(ah))
+		pCap->num_gpio_pins = AR7010_NUM_GPIO;
 	else if (AR_SREV_9285_10_OR_LATER(ah))
 		pCap->num_gpio_pins = AR9285_NUM_GPIO;
 	else if (AR_SREV_9280_10_OR_LATER(ah))
@@ -2165,7 +2205,7 @@
 		pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
 	}
 #endif
-	if (AR_SREV_9271(ah))
+	if (AR_SREV_9271(ah) || AR_SREV_9300_20_OR_LATER(ah))
 		pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
 	else
 		pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
@@ -2220,6 +2260,8 @@
 		pCap->rx_status_len = sizeof(struct ar9003_rxs);
 		pCap->tx_desc_len = sizeof(struct ar9003_txc);
 		pCap->txs_len = sizeof(struct ar9003_txs);
+		if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+			pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
 	} else {
 		pCap->tx_desc_len = sizeof(struct ath_desc);
 		if (AR_SREV_9280_20(ah) &&
@@ -2232,101 +2274,12 @@
 	if (AR_SREV_9300_20_OR_LATER(ah))
 		pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
 
+	if (AR_SREV_9287_10_OR_LATER(ah) || AR_SREV_9271(ah))
+		pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
+
 	return 0;
 }
 
-bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
-			    u32 capability, u32 *result)
-{
-	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
-	switch (type) {
-	case ATH9K_CAP_CIPHER:
-		switch (capability) {
-		case ATH9K_CIPHER_AES_CCM:
-		case ATH9K_CIPHER_AES_OCB:
-		case ATH9K_CIPHER_TKIP:
-		case ATH9K_CIPHER_WEP:
-		case ATH9K_CIPHER_MIC:
-		case ATH9K_CIPHER_CLR:
-			return true;
-		default:
-			return false;
-		}
-	case ATH9K_CAP_TKIP_MIC:
-		switch (capability) {
-		case 0:
-			return true;
-		case 1:
-			return (ah->sta_id1_defaults &
-				AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
-			false;
-		}
-	case ATH9K_CAP_TKIP_SPLIT:
-		return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
-			false : true;
-	case ATH9K_CAP_MCAST_KEYSRCH:
-		switch (capability) {
-		case 0:
-			return true;
-		case 1:
-			if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
-				return false;
-			} else {
-				return (ah->sta_id1_defaults &
-					AR_STA_ID1_MCAST_KSRCH) ? true :
-					false;
-			}
-		}
-		return false;
-	case ATH9K_CAP_TXPOW:
-		switch (capability) {
-		case 0:
-			return 0;
-		case 1:
-			*result = regulatory->power_limit;
-			return 0;
-		case 2:
-			*result = regulatory->max_power_level;
-			return 0;
-		case 3:
-			*result = regulatory->tp_scale;
-			return 0;
-		}
-		return false;
-	case ATH9K_CAP_DS:
-		return (AR_SREV_9280_20_OR_LATER(ah) &&
-			(ah->eep_ops->get_eeprom(ah, EEP_RC_CHAIN_MASK) == 1))
-			? false : true;
-	default:
-		return false;
-	}
-}
-EXPORT_SYMBOL(ath9k_hw_getcapability);
-
-bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
-			    u32 capability, u32 setting, int *status)
-{
-	switch (type) {
-	case ATH9K_CAP_TKIP_MIC:
-		if (setting)
-			ah->sta_id1_defaults |=
-				AR_STA_ID1_CRPT_MIC_ENABLE;
-		else
-			ah->sta_id1_defaults &=
-				~AR_STA_ID1_CRPT_MIC_ENABLE;
-		return true;
-	case ATH9K_CAP_MCAST_KEYSRCH:
-		if (setting)
-			ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
-		else
-			ah->sta_id1_defaults &= ~AR_STA_ID1_MCAST_KSRCH;
-		return true;
-	default:
-		return false;
-	}
-}
-EXPORT_SYMBOL(ath9k_hw_setcapability);
-
 /****************************/
 /* GPIO / RFKILL / Antennae */
 /****************************/
@@ -2365,8 +2318,15 @@
 
 	BUG_ON(gpio >= ah->caps.num_gpio_pins);
 
-	gpio_shift = gpio << 1;
+	if (AR_DEVID_7010(ah)) {
+		gpio_shift = gpio;
+		REG_RMW(ah, AR7010_GPIO_OE,
+			(AR7010_GPIO_OE_AS_INPUT << gpio_shift),
+			(AR7010_GPIO_OE_MASK << gpio_shift));
+		return;
+	}
 
+	gpio_shift = gpio << 1;
 	REG_RMW(ah,
 		AR_GPIO_OE_OUT,
 		(AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
@@ -2382,7 +2342,11 @@
 	if (gpio >= ah->caps.num_gpio_pins)
 		return 0xffffffff;
 
-	if (AR_SREV_9300_20_OR_LATER(ah))
+	if (AR_DEVID_7010(ah)) {
+		u32 val;
+		val = REG_READ(ah, AR7010_GPIO_IN);
+		return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
+	} else if (AR_SREV_9300_20_OR_LATER(ah))
 		return MS_REG_READ(AR9300, gpio) != 0;
 	else if (AR_SREV_9271(ah))
 		return MS_REG_READ(AR9271, gpio) != 0;
@@ -2402,10 +2366,16 @@
 {
 	u32 gpio_shift;
 
+	if (AR_DEVID_7010(ah)) {
+		gpio_shift = gpio;
+		REG_RMW(ah, AR7010_GPIO_OE,
+			(AR7010_GPIO_OE_AS_OUTPUT << gpio_shift),
+			(AR7010_GPIO_OE_MASK << gpio_shift));
+		return;
+	}
+
 	ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
-
 	gpio_shift = 2 * gpio;
-
 	REG_RMW(ah,
 		AR_GPIO_OE_OUT,
 		(AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
@@ -2415,6 +2385,13 @@
 
 void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
 {
+	if (AR_DEVID_7010(ah)) {
+		val = val ? 0 : 1;
+		REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio),
+			AR_GPIO_BIT(gpio));
+		return;
+	}
+
 	if (AR_SREV_9271(ah))
 		val = ~val;
 
@@ -2520,12 +2497,6 @@
 }
 EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
 
-void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac)
-{
-	memcpy(ath9k_hw_common(ah)->macaddr, mac, ETH_ALEN);
-}
-EXPORT_SYMBOL(ath9k_hw_setmac);
-
 void ath9k_hw_setopmode(struct ath_hw *ah)
 {
 	ath9k_hw_set_operating_mode(ah, ah->opmode);
@@ -2598,21 +2569,6 @@
 }
 EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
 
-/*
- *  Extend 15-bit time stamp from rx descriptor to
- *  a full 64-bit TSF using the current h/w TSF.
-*/
-u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
-{
-	u64 tsf;
-
-	tsf = ath9k_hw_gettsf64(ah);
-	if ((tsf & 0x7fff) < rstamp)
-		tsf -= 0x8000;
-	return (tsf & ~0x7fff) | rstamp;
-}
-EXPORT_SYMBOL(ath9k_hw_extend_tsf);
-
 void ath9k_hw_set11nmac2040(struct ath_hw *ah)
 {
 	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 77245df..bb99e2e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -158,6 +158,9 @@
 #define ATH9K_HW_RX_HP_QDEPTH	16
 #define ATH9K_HW_RX_LP_QDEPTH	128
 
+#define PAPRD_GAIN_TABLE_ENTRIES    32
+#define PAPRD_TABLE_SZ              24
+
 enum ath_ini_subsys {
 	ATH_INI_PRE = 0,
 	ATH_INI_CORE,
@@ -199,15 +202,8 @@
 	ATH9K_HW_CAP_RAC_SUPPORTED		= BIT(18),
 	ATH9K_HW_CAP_LDPC			= BIT(19),
 	ATH9K_HW_CAP_FASTCLOCK			= BIT(20),
-};
-
-enum ath9k_capability_type {
-	ATH9K_CAP_CIPHER = 0,
-	ATH9K_CAP_TKIP_MIC,
-	ATH9K_CAP_TKIP_SPLIT,
-	ATH9K_CAP_TXPOW,
-	ATH9K_CAP_MCAST_KEYSRCH,
-	ATH9K_CAP_DS
+	ATH9K_HW_CAP_SGI_20			= BIT(21),
+	ATH9K_HW_CAP_PAPRD			= BIT(22),
 };
 
 struct ath9k_hw_capabilities {
@@ -237,8 +233,9 @@
 	int sw_beacon_response_time;
 	int additional_swba_backoff;
 	int ack_6mb;
-	int cwm_ignore_extcca;
+	u32 cwm_ignore_extcca;
 	u8 pcie_powersave_enable;
+	bool pcieSerDesWrite;
 	u8 pcie_clock_req;
 	u32 pcie_waen;
 	u8 analog_shiftreg;
@@ -262,10 +259,10 @@
 #define AR_BASE_FREQ_5GHZ   	4900
 #define AR_SPUR_FEEQ_BOUND_HT40 19
 #define AR_SPUR_FEEQ_BOUND_HT20 10
-	bool tx_iq_calibration; /* Only available for >= AR9003 */
 	int spurmode;
 	u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
 	u8 max_txtrig_level;
+	u16 ani_poll_interval; /* ANI poll interval in ms */
 };
 
 enum ath9k_int {
@@ -279,6 +276,7 @@
 	ATH9K_INT_TX = 0x00000040,
 	ATH9K_INT_TXDESC = 0x00000080,
 	ATH9K_INT_TIM_TIMER = 0x00000100,
+	ATH9K_INT_BB_WATCHDOG = 0x00000400,
 	ATH9K_INT_TXURN = 0x00000800,
 	ATH9K_INT_MIB = 0x00001000,
 	ATH9K_INT_RXPHY = 0x00004000,
@@ -358,6 +356,9 @@
 	int8_t iCoff;
 	int8_t qCoff;
 	int16_t rawNoiseFloor;
+	bool paprd_done;
+	u16 small_signal_gain[AR9300_MAX_CHAINS];
+	u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
 };
 
 #define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
@@ -459,7 +460,7 @@
 #define AR_GENTMR_BIT(_index)	(1 << (_index))
 
 /*
- * Using de Bruijin sequence to to look up 1's index in a 32 bit number
+ * Using the de Bruijn sequence to look up 1's index in a 32 bit number
  * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
  */
 #define debruijn32 0x077CB531U
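The debruijn32 constant above is the multiplier for the standard multiply-and-lookup trick: isolate the lowest set bit, multiply by the de Bruijn constant, and the top 5 bits of the product index a 32-entry position table. A standalone sketch of that lookup (illustrative helper names, not the driver's exact code):

static const u8 debruijn32_pos[32] = {
	 0,  1, 28,  2, 29, 14, 24,  3, 30, 22, 20, 15, 25, 17,  4,  8,
	31, 27, 13, 23, 21, 19, 16,  7, 26, 12, 18,  6, 11,  5, 10,  9
};

static u32 lowest_set_bit_index(u32 val)
{
	/* Isolate the lowest set bit, then hash it to a unique 5-bit index */
	return debruijn32_pos[((val & -val) * debruijn32) >> 27];
}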
@@ -510,6 +511,17 @@
  * @setup_calibration: set up calibration
  * @iscal_supported: used to query if a type of calibration is supported
  * @loadnf: load noise floor read from each chain on the CCA registers
+ *
+ * @ani_reset: reset ANI parameters to default values
+ * @ani_lower_immunity: lower the noise immunity level. The level controls
+ *	the power-based packet detection in hardware. If a power jump is
+ *	detected, the adapter takes it as an indication that a packet has
+ *	arrived. The level ranges from 0-5. Each level corresponds to a
+ *	few dB more of noise immunity. If strong time-varying interference
+ *	is causing false detections (OFDM timing errors or CCK timing
+ *	errors), the level can be increased.
+ * @ani_cache_ini_regs: cache the ANI values from the initial register
+ *	settings during register initialization.
  */
 struct ath_hw_private_ops {
 	/* Calibration ops */
@@ -553,6 +565,11 @@
 			    int param);
 	void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
 	void (*loadnf)(struct ath_hw *ah, struct ath9k_channel *chan);
+
+	/* ANI */
+	void (*ani_reset)(struct ath_hw *ah, bool is_scanning);
+	void (*ani_lower_immunity)(struct ath_hw *ah);
+	void (*ani_cache_ini_regs)(struct ath_hw *ah);
 };
 
 /**
@@ -563,6 +580,11 @@
  *
  * @config_pci_powersave:
  * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
+ *
+ * @ani_proc_mib_event: process MIB events; this happens when specific ANI
+ *	thresholds are reached or the MIB counters overflow.
+ * @ani_monitor: called periodically by the core driver to collect
+ *	MIB stats and adjust ANI if specific thresholds have been reached.
  */
 struct ath_hw_ops {
 	void (*config_pci_powersave)(struct ath_hw *ah,
@@ -603,6 +625,9 @@
 				     u32 burstDuration);
 	void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
 				       u32 vmf);
+
+	void (*ani_proc_mib_event)(struct ath_hw *ah);
+	void (*ani_monitor)(struct ath_hw *ah, struct ath9k_channel *chan);
 };
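The two new callbacks are reached through the ath9k_hw_procmibevent() and ath9k_hw_ani_monitor() wrappers added to hw-ops.h above. A minimal sketch of how the core driver is expected to drive them (illustrative only; the real callers are the MIB interrupt path and the periodic calibration timer in main.c):

static void example_ani_poll(struct ath_hw *ah, struct ath9k_channel *chan)
{
	/* Periodic poll: collect MIB stats and adjust ANI levels */
	if (ah->config.enable_ani)
		ath9k_hw_ani_monitor(ah, chan);
}

static void example_mib_isr(struct ath_hw *ah)
{
	/* A MIB counter threshold was reached or overflowed */
	ath9k_hw_procmibevent(ah);
}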
 
 struct ath_hw {
@@ -789,6 +814,18 @@
 	u32 ts_paddr_end;
 	u16 ts_tail;
 	u8 ts_size;
+
+	u32 bb_watchdog_last_status;
+	u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
+
+	u32 paprd_gain_table_entries[PAPRD_GAIN_TABLE_ENTRIES];
+	u8 paprd_gain_table_index[PAPRD_GAIN_TABLE_ENTRIES];
+	/*
+	 * Store the permanent value of Reg 0x4004 in WARegVal
+	 * so we don't have to R/M/W. We should not be reading
+	 * this register when in sleep states.
+	 */
+	u32 WARegVal;
 };
 
 static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -818,19 +855,13 @@
 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 		   bool bChannelChange);
 int ath9k_hw_fill_cap_info(struct ath_hw *ah);
-bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
-			    u32 capability, u32 *result);
-bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
-			    u32 capability, u32 setting, int *status);
 u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
 
 /* Key Cache Management */
 bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
-bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac);
 bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
 				 const struct ath9k_keyval *k,
 				 const u8 *mac);
-bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry);
 
 /* GPIO / RFKILL / Antennae */
 void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio);
@@ -856,7 +887,6 @@
 bool ath9k_hw_phy_disable(struct ath_hw *ah);
 bool ath9k_hw_disable(struct ath_hw *ah);
 void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
-void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac);
 void ath9k_hw_setopmode(struct ath_hw *ah);
 void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
 void ath9k_hw_setbssidmask(struct ath_hw *ah);
@@ -865,7 +895,6 @@
 void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
 void ath9k_hw_reset_tsf(struct ath_hw *ah);
 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
-u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
 void ath9k_hw_init_global_settings(struct ath_hw *ah);
 void ath9k_hw_set11nmac2040(struct ath_hw *ah);
 void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
@@ -907,13 +936,26 @@
 void ar9002_hw_cck_chan14_spread(struct ath_hw *ah);
 int ar9002_hw_rf_claim(struct ath_hw *ah);
 void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
+void ar9002_hw_update_async_fifo(struct ath_hw *ah);
 void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
 
 /*
- * Code specifric to AR9003, we stuff these here to avoid callbacks
+ * Code specific to AR9003, we stuff these here to avoid callbacks
  * for older families
  */
 void ar9003_hw_set_nf_limits(struct ath_hw *ah);
+void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
+void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
+void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
+void ar9003_paprd_enable(struct ath_hw *ah, bool val);
+void ar9003_paprd_populate_single_table(struct ath_hw *ah,
+					struct ath9k_channel *chan, int chain);
+int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
+			      int chain);
+int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
+int ar9003_paprd_init_table(struct ath_hw *ah);
+bool ar9003_paprd_is_done(struct ath_hw *ah);
+void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains);
 
 /* Hardware family op attach helpers */
 void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
@@ -926,8 +968,24 @@
 void ar9002_hw_attach_ops(struct ath_hw *ah);
 void ar9003_hw_attach_ops(struct ath_hw *ah);
 
+/*
+ * ANI work can be shared between all families, but the next-generation
+ * ANI implementation is used only for AR9003 for now, as the other
+ * families still need to be tested with it. Feel free to start testing
+ * it on the older families (AR5008, AR9001, AR9002) by using
+ * modparam_force_new_ani.
+ */
+extern int modparam_force_new_ani;
+void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah);
+void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah);
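A minimal sketch (hypothetical attach helper, assumed name only) of how a hardware family setup path could select between the two implementations declared above, honouring modparam_force_new_ani for the older families:

static void example_attach_ani_ops(struct ath_hw *ah)
{
	if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani)
		ath9k_hw_attach_ani_ops_new(ah);
	else
		ath9k_hw_attach_ani_ops_old(ah);
}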
+
 #define ATH_PCIE_CAP_LINK_CTRL	0x70
 #define ATH_PCIE_CAP_LINK_L0S	1
 #define ATH_PCIE_CAP_LINK_L1	2
 
+#define ATH9K_CLOCK_RATE_CCK		22
+#define ATH9K_CLOCK_RATE_5GHZ_OFDM	40
+#define ATH9K_CLOCK_RATE_2GHZ_OFDM	44
+#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
+
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d457cb3..8700e3d 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -33,6 +33,10 @@
 module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
 
+int led_blink = 1;
+module_param_named(blink, led_blink, int, 0444);
+MODULE_PARM_DESC(blink, "Enable LED blink on activity");
+
 /* We use the hw_value as an index into our private channel structure */
 
 #define CHAN2G(_freq, _idx)  { \
@@ -175,18 +179,6 @@
 	.write = ath9k_iowrite32,
 };
 
-static int count_streams(unsigned int chainmask, int max)
-{
-	int streams = 0;
-
-	do {
-		if (++streams == max)
-			break;
-	} while ((chainmask = chainmask & (chainmask - 1)));
-
-	return streams;
-}
-
 /**************************/
 /*     Initialization     */
 /**************************/
@@ -208,6 +200,9 @@
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
 		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
 
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
 	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
 	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
 
@@ -224,8 +219,8 @@
 
 	/* set up supported mcs set */
 	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-	tx_streams = count_streams(common->tx_chainmask, max_streams);
-	rx_streams = count_streams(common->rx_chainmask, max_streams);
+	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
+	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
 
 	ath_print(common, ATH_DBG_CONFIG,
 		  "TX streams %d, RX streams: %d\n",
@@ -388,36 +383,14 @@
 	for (i = 0; i < common->keymax; i++)
 		ath9k_hw_keyreset(sc->sc_ah, (u16) i);
 
-	if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
-				   ATH9K_CIPHER_TKIP, NULL)) {
-		/*
-		 * Whether we should enable h/w TKIP MIC.
-		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
-		 * report WMM capable, so it's always safe to turn on
-		 * TKIP MIC in this case.
-		 */
-		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
-	}
-
 	/*
 	 * Check whether the separate key cache entries
 	 * are required to handle both tx+rx MIC keys.
 	 * With split mic keys the number of stations is limited
 	 * to 27 otherwise 59.
 	 */
-	if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
-				   ATH9K_CIPHER_TKIP, NULL)
-	    && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
-				      ATH9K_CIPHER_MIC, NULL)
-	    && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
-				      0, NULL))
+	if (!(sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA))
 		common->splitmic = 1;
-
-	/* turn on mcast key search if possible */
-	if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
-		(void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
-					     1, 1, NULL);
-
 }
 
 static int ath9k_init_btcoex(struct ath_softc *sc)
@@ -435,7 +408,7 @@
 		r = ath_init_btcoex_timer(sc);
 		if (r)
 			return -1;
-		qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+		qnum = sc->tx.hwq_map[WME_AC_BE];
 		ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
 		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
 		break;
@@ -472,23 +445,23 @@
 	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
 	ath_cabq_update(sc);
 
-	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
+	if (!ath_tx_setup(sc, WME_AC_BK)) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to setup xmit queue for BK traffic\n");
 		goto err;
 	}
 
-	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
+	if (!ath_tx_setup(sc, WME_AC_BE)) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to setup xmit queue for BE traffic\n");
 		goto err;
 	}
-	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
+	if (!ath_tx_setup(sc, WME_AC_VI)) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to setup xmit queue for VI traffic\n");
 		goto err;
 	}
-	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
+	if (!ath_tx_setup(sc, WME_AC_VO)) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to setup xmit queue for VO traffic\n");
 		goto err;
@@ -745,6 +718,7 @@
 			goto error_world;
 	}
 
+	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
 	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
 	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
 	sc->wiphy_scheduler_int = msecs_to_jiffies(500);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 0e425cb..e955bb9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -15,6 +15,7 @@
  */
 
 #include "hw.h"
+#include "hw-ops.h"
 
 static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
 					struct ath9k_tx_queue_info *qi)
@@ -554,8 +555,13 @@
 		REGWRITE_BUFFER_FLUSH(ah);
 		DISABLE_REGWRITE_BUFFER(ah);
 
-		/* cwmin and cwmax should be 0 for beacon queue */
-		if (AR_SREV_9300_20_OR_LATER(ah)) {
+		/*
+		 * cwmin and cwmax should be 0 for the beacon queue,
+		 * but not for IBSS, as that would create an imbalance
+		 * in beaconing fairness among participating nodes.
+		 */
+		if (AR_SREV_9300_20_OR_LATER(ah) &&
+		    ah->opmode != NL80211_IFTYPE_ADHOC) {
 			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
 				  | SM(0, AR_D_LCL_IFS_CWMAX)
 				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
@@ -756,11 +762,11 @@
 }
 EXPORT_SYMBOL(ath9k_hw_putrxbuf);
 
-void ath9k_hw_startpcureceive(struct ath_hw *ah)
+void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
 {
 	ath9k_enable_mib_counters(ah);
 
-	ath9k_ani_reset(ah);
+	ath9k_ani_reset(ah, is_scanning);
 
 	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
 }
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 00f3e0c..7559fb2b 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -577,13 +577,8 @@
 
 #define	ATH9K_NUM_TX_QUEUES 10
 
-enum ath9k_tx_queue_subtype {
-	ATH9K_WME_AC_BK = 0,
-	ATH9K_WME_AC_BE,
-	ATH9K_WME_AC_VI,
-	ATH9K_WME_AC_VO,
-	ATH9K_WME_UPSD
-};
+/* Used as a queue subtype instead of a WMM AC */
+#define ATH9K_WME_UPSD	4
 
 enum ath9k_tx_queue_flags {
 	TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
@@ -617,7 +612,7 @@
 struct ath9k_tx_queue_info {
 	u32 tqi_ver;
 	enum ath9k_tx_queue tqi_type;
-	enum ath9k_tx_queue_subtype tqi_subtype;
+	int tqi_subtype;
 	enum ath9k_tx_queue_flags tqi_qflags;
 	u32 tqi_priority;
 	u32 tqi_aifs;
@@ -715,7 +710,7 @@
 			  u32 size, u32 flags);
 bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
 void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
-void ath9k_hw_startpcureceive(struct ath_hw *ah);
+void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
 void ath9k_hw_stoppcurecv(struct ath_hw *ah);
 void ath9k_hw_abortpcurecv(struct ath_hw *ah);
 bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 1e2a68e..efbf535 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -51,13 +51,11 @@
 static void ath_update_txpow(struct ath_softc *sc)
 {
 	struct ath_hw *ah = sc->sc_ah;
-	u32 txpow;
 
 	if (sc->curtxpow != sc->config.txpowlimit) {
 		ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
 		/* read back in case value is clamped */
-		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
-		sc->curtxpow = txpow;
+		sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
 	}
 }
 
@@ -232,6 +230,114 @@
 	return r;
 }
 
+static void ath_paprd_activate(struct ath_softc *sc)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	int chain;
+
+	if (!ah->curchan->paprd_done)
+		return;
+
+	ath9k_ps_wakeup(sc);
+	for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+		if (!(ah->caps.tx_chainmask & BIT(chain)))
+			continue;
+
+		ar9003_paprd_populate_single_table(ah, ah->curchan, chain);
+	}
+
+	ar9003_paprd_enable(ah, true);
+	ath9k_ps_restore(sc);
+}
+
+void ath_paprd_calibrate(struct work_struct *work)
+{
+	struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
+	struct ieee80211_hw *hw = sc->hw;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff *skb = NULL;
+	struct ieee80211_tx_info *tx_info;
+	int band = hw->conf.channel->band;
+	struct ieee80211_supported_band *sband = &sc->sbands[band];
+	struct ath_tx_control txctl;
+	int qnum, ftype;
+	int chain_ok = 0;
+	int chain;
+	int len = 1800;
+	int time_left;
+	int i;
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return;
+
+	tx_info = IEEE80211_SKB_CB(skb);
+
+	skb_put(skb, len);
+	memset(skb->data, 0, len);
+	hdr = (struct ieee80211_hdr *)skb->data;
+	ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
+	hdr->frame_control = cpu_to_le16(ftype);
+	hdr->duration_id = 10;
+	memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+	memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+	memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+	memset(&txctl, 0, sizeof(txctl));
+	qnum = sc->tx.hwq_map[WME_AC_BE];
+	txctl.txq = &sc->tx.txq[qnum];
+
+	ath9k_ps_wakeup(sc);
+	ar9003_paprd_init_table(ah);
+	for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+		if (!(ah->caps.tx_chainmask & BIT(chain)))
+			continue;
+
+		chain_ok = 0;
+		memset(tx_info, 0, sizeof(*tx_info));
+		tx_info->band = band;
+
+		for (i = 0; i < 4; i++) {
+			tx_info->control.rates[i].idx = sband->n_bitrates - 1;
+			tx_info->control.rates[i].count = 6;
+		}
+
+		init_completion(&sc->paprd_complete);
+		ar9003_paprd_setup_gain_table(ah, chain);
+		txctl.paprd = BIT(chain);
+		if (ath_tx_start(hw, skb, &txctl) != 0)
+			break;
+
+		time_left = wait_for_completion_timeout(&sc->paprd_complete,
+				msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
+		if (!time_left) {
+			ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+				  "Timeout waiting for paprd training on "
+				  "TX chain %d\n",
+				  chain);
+			goto fail_paprd;
+		}
+
+		if (!ar9003_paprd_is_done(ah))
+			break;
+
+		if (ar9003_paprd_create_curve(ah, ah->curchan, chain) != 0)
+			break;
+
+		chain_ok = 1;
+	}
+	kfree_skb(skb);
+
+	if (chain_ok) {
+		ah->curchan->paprd_done = true;
+		ath_paprd_activate(sc);
+	}
+
+fail_paprd:
+	ath9k_ps_restore(sc);
+}
+
 /*
  *  This routine performs the periodic noise floor calibration function
  *  that is used to adjust and optimize the chip performance.  This
@@ -285,7 +391,8 @@
 	}
 
 	/* Verify whether we must check ANI */
-	if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+	if ((timestamp - common->ani.checkani_timer) >=
+	     ah->config.ani_poll_interval) {
 		aniflag = true;
 		common->ani.checkani_timer = timestamp;
 	}
@@ -326,15 +433,24 @@
 	*/
 	cal_interval = ATH_LONG_CALINTERVAL;
 	if (sc->sc_ah->config.enable_ani)
-		cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
+		cal_interval = min(cal_interval,
+				   (u32)ah->config.ani_poll_interval);
 	if (!common->ani.caldone)
 		cal_interval = min(cal_interval, (u32)short_cal_interval);
 
 	mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
+	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) &&
+	    !(sc->sc_flags & SC_OP_SCANNING)) {
+		if (!sc->sc_ah->curchan->paprd_done)
+			ieee80211_queue_work(sc->hw, &sc->paprd_work);
+		else
+			ath_paprd_activate(sc);
+	}
 }
 
 static void ath_start_ani(struct ath_common *common)
 {
+	struct ath_hw *ah = common->ah;
 	unsigned long timestamp = jiffies_to_msecs(jiffies);
 	struct ath_softc *sc = (struct ath_softc *) common->priv;
 
@@ -346,7 +462,8 @@
 	common->ani.checkani_timer = timestamp;
 
 	mod_timer(&common->ani.timer,
-		  jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
+		  jiffies +
+			msecs_to_jiffies((u32)ah->config.ani_poll_interval));
 }
 
 /*
@@ -524,6 +641,12 @@
 	    !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
 		goto chip_reset;
 
+	if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+	    (status & ATH9K_INT_BB_WATCHDOG)) {
+		ar9003_hw_bb_watchdog_dbg_info(ah);
+		goto chip_reset;
+	}
+
 	if (status & ATH9K_INT_SWBA)
 		tasklet_schedule(&sc->bcon_tasklet);
 
@@ -619,234 +742,6 @@
 	return chanmode;
 }
 
-static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
-			   struct ath9k_keyval *hk, const u8 *addr,
-			   bool authenticator)
-{
-	struct ath_hw *ah = common->ah;
-	const u8 *key_rxmic;
-	const u8 *key_txmic;
-
-	key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
-	key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
-
-	if (addr == NULL) {
-		/*
-		 * Group key installation - only two key cache entries are used
-		 * regardless of splitmic capability since group key is only
-		 * used either for TX or RX.
-		 */
-		if (authenticator) {
-			memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
-			memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
-		} else {
-			memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
-			memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
-		}
-		return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
-	}
-	if (!common->splitmic) {
-		/* TX and RX keys share the same key cache entry. */
-		memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
-		memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
-		return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
-	}
-
-	/* Separate key cache entries for TX and RX */
-
-	/* TX key goes at first index, RX key at +32. */
-	memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
-	if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
-		/* TX MIC entry failed. No need to proceed further */
-		ath_print(common, ATH_DBG_FATAL,
-			  "Setting TX MIC Key Failed\n");
-		return 0;
-	}
-
-	memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
-	/* XXX delete tx key on failure? */
-	return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
-}
-
-static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
-{
-	int i;
-
-	for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
-		if (test_bit(i, common->keymap) ||
-		    test_bit(i + 64, common->keymap))
-			continue; /* At least one part of TKIP key allocated */
-		if (common->splitmic &&
-		    (test_bit(i + 32, common->keymap) ||
-		     test_bit(i + 64 + 32, common->keymap)))
-			continue; /* At least one part of TKIP key allocated */
-
-		/* Found a free slot for a TKIP key */
-		return i;
-	}
-	return -1;
-}
-
-static int ath_reserve_key_cache_slot(struct ath_common *common)
-{
-	int i;
-
-	/* First, try to find slots that would not be available for TKIP. */
-	if (common->splitmic) {
-		for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
-			if (!test_bit(i, common->keymap) &&
-			    (test_bit(i + 32, common->keymap) ||
-			     test_bit(i + 64, common->keymap) ||
-			     test_bit(i + 64 + 32, common->keymap)))
-				return i;
-			if (!test_bit(i + 32, common->keymap) &&
-			    (test_bit(i, common->keymap) ||
-			     test_bit(i + 64, common->keymap) ||
-			     test_bit(i + 64 + 32, common->keymap)))
-				return i + 32;
-			if (!test_bit(i + 64, common->keymap) &&
-			    (test_bit(i , common->keymap) ||
-			     test_bit(i + 32, common->keymap) ||
-			     test_bit(i + 64 + 32, common->keymap)))
-				return i + 64;
-			if (!test_bit(i + 64 + 32, common->keymap) &&
-			    (test_bit(i, common->keymap) ||
-			     test_bit(i + 32, common->keymap) ||
-			     test_bit(i + 64, common->keymap)))
-				return i + 64 + 32;
-		}
-	} else {
-		for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
-			if (!test_bit(i, common->keymap) &&
-			    test_bit(i + 64, common->keymap))
-				return i;
-			if (test_bit(i, common->keymap) &&
-			    !test_bit(i + 64, common->keymap))
-				return i + 64;
-		}
-	}
-
-	/* No partially used TKIP slots, pick any available slot */
-	for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
-		/* Do not allow slots that could be needed for TKIP group keys
-		 * to be used. This limitation could be removed if we know that
-		 * TKIP will not be used. */
-		if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
-			continue;
-		if (common->splitmic) {
-			if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
-				continue;
-			if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
-				continue;
-		}
-
-		if (!test_bit(i, common->keymap))
-			return i; /* Found a free slot for a key */
-	}
-
-	/* No free slot found */
-	return -1;
-}
-
-static int ath_key_config(struct ath_common *common,
-			  struct ieee80211_vif *vif,
-			  struct ieee80211_sta *sta,
-			  struct ieee80211_key_conf *key)
-{
-	struct ath_hw *ah = common->ah;
-	struct ath9k_keyval hk;
-	const u8 *mac = NULL;
-	int ret = 0;
-	int idx;
-
-	memset(&hk, 0, sizeof(hk));
-
-	switch (key->alg) {
-	case ALG_WEP:
-		hk.kv_type = ATH9K_CIPHER_WEP;
-		break;
-	case ALG_TKIP:
-		hk.kv_type = ATH9K_CIPHER_TKIP;
-		break;
-	case ALG_CCMP:
-		hk.kv_type = ATH9K_CIPHER_AES_CCM;
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	hk.kv_len = key->keylen;
-	memcpy(hk.kv_val, key->key, key->keylen);
-
-	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
-		/* For now, use the default keys for broadcast keys. This may
-		 * need to change with virtual interfaces. */
-		idx = key->keyidx;
-	} else if (key->keyidx) {
-		if (WARN_ON(!sta))
-			return -EOPNOTSUPP;
-		mac = sta->addr;
-
-		if (vif->type != NL80211_IFTYPE_AP) {
-			/* Only keyidx 0 should be used with unicast key, but
-			 * allow this for client mode for now. */
-			idx = key->keyidx;
-		} else
-			return -EIO;
-	} else {
-		if (WARN_ON(!sta))
-			return -EOPNOTSUPP;
-		mac = sta->addr;
-
-		if (key->alg == ALG_TKIP)
-			idx = ath_reserve_key_cache_slot_tkip(common);
-		else
-			idx = ath_reserve_key_cache_slot(common);
-		if (idx < 0)
-			return -ENOSPC; /* no free key cache entries */
-	}
-
-	if (key->alg == ALG_TKIP)
-		ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
-				      vif->type == NL80211_IFTYPE_AP);
-	else
-		ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
-
-	if (!ret)
-		return -EIO;
-
-	set_bit(idx, common->keymap);
-	if (key->alg == ALG_TKIP) {
-		set_bit(idx + 64, common->keymap);
-		if (common->splitmic) {
-			set_bit(idx + 32, common->keymap);
-			set_bit(idx + 64 + 32, common->keymap);
-		}
-	}
-
-	return idx;
-}
-
-static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
-{
-	struct ath_hw *ah = common->ah;
-
-	ath9k_hw_keyreset(ah, key->hw_key_idx);
-	if (key->hw_key_idx < IEEE80211_WEP_NKID)
-		return;
-
-	clear_bit(key->hw_key_idx, common->keymap);
-	if (key->alg != ALG_TKIP)
-		return;
-
-	clear_bit(key->hw_key_idx + 64, common->keymap);
-	if (common->splitmic) {
-		ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
-		clear_bit(key->hw_key_idx + 32, common->keymap);
-		clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
-	}
-}
-
 static void ath9k_bss_assoc_info(struct ath_softc *sc,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_bss_conf *bss_conf)
@@ -1032,25 +927,25 @@
 	return r;
 }
 
-int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
+static int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
 {
 	int qnum;
 
 	switch (queue) {
 	case 0:
-		qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
+		qnum = sc->tx.hwq_map[WME_AC_VO];
 		break;
 	case 1:
-		qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
+		qnum = sc->tx.hwq_map[WME_AC_VI];
 		break;
 	case 2:
-		qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
+		qnum = sc->tx.hwq_map[WME_AC_BE];
 		break;
 	case 3:
-		qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
+		qnum = sc->tx.hwq_map[WME_AC_BK];
 		break;
 	default:
-		qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
+		qnum = sc->tx.hwq_map[WME_AC_BE];
 		break;
 	}
 
@@ -1062,16 +957,16 @@
 	int qnum;
 
 	switch (queue) {
-	case ATH9K_WME_AC_VO:
+	case WME_AC_VO:
 		qnum = 0;
 		break;
-	case ATH9K_WME_AC_VI:
+	case WME_AC_VI:
 		qnum = 1;
 		break;
-	case ATH9K_WME_AC_BE:
+	case WME_AC_BE:
 		qnum = 2;
 		break;
-	case ATH9K_WME_AC_BK:
+	case WME_AC_BK:
 		qnum = 3;
 		break;
 	default:
@@ -1201,7 +1096,9 @@
 		    ATH9K_INT_GLOBAL;
 
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
-		ah->imask |= ATH9K_INT_RXHP | ATH9K_INT_RXLP;
+		ah->imask |= ATH9K_INT_RXHP |
+			     ATH9K_INT_RXLP |
+			     ATH9K_INT_BB_WATCHDOG;
 	else
 		ah->imask |= ATH9K_INT_RX;
 
@@ -1251,6 +1148,7 @@
 	struct ath_tx_control txctl;
 	int padpos, padsize;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	int qnum;
 
 	if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
 		ath_print(common, ATH_DBG_XMIT,
@@ -1280,7 +1178,8 @@
 		 * completed and if needed, also for RX of buffered frames.
 		 */
 		ath9k_ps_wakeup(sc);
-		ath9k_hw_setrxabort(sc->sc_ah, 0);
+		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+			ath9k_hw_setrxabort(sc->sc_ah, 0);
 		if (ieee80211_is_pspoll(hdr->frame_control)) {
 			ath_print(common, ATH_DBG_PS,
 				  "Sending PS-Poll to pick a buffered frame\n");
@@ -1322,11 +1221,8 @@
 		memmove(skb->data, skb->data + padsize, padpos);
 	}
 
-	/* Check if a tx queue is available */
-
-	txctl.txq = ath_test_get_txq(sc, skb);
-	if (!txctl.txq)
-		goto exit;
+	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+	txctl.txq = &sc->tx.txq[qnum];
 
 	ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
 
@@ -1352,8 +1248,11 @@
 
 	aphy->state = ATH_WIPHY_INACTIVE;
 
-	cancel_delayed_work_sync(&sc->ath_led_blink_work);
+	if (led_blink)
+		cancel_delayed_work_sync(&sc->ath_led_blink_work);
+
 	cancel_delayed_work_sync(&sc->tx_complete_work);
+	cancel_work_sync(&sc->paprd_work);
 
 	if (!sc->num_sec_wiphy) {
 		cancel_delayed_work_sync(&sc->wiphy_work);
@@ -1547,8 +1446,8 @@
 			ah->imask |= ATH9K_INT_TIM_TIMER;
 			ath9k_hw_set_interrupts(ah, ah->imask);
 		}
+		ath9k_hw_setrxabort(ah, 1);
 	}
-	ath9k_hw_setrxabort(ah, 1);
 }
 
 static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1785,7 +1684,7 @@
 		ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
 
 	if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
-		if ((qnum == sc->tx.hwq_map[ATH9K_WME_AC_BE]) && !ret)
+		if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret)
 			ath_beaconq_config(sc);
 
 	mutex_unlock(&sc->mutex);
@@ -1813,7 +1712,7 @@
 
 	switch (cmd) {
 	case SET_KEY:
-		ret = ath_key_config(common, vif, sta, key);
+		ret = ath9k_cmn_key_config(common, vif, sta, key);
 		if (ret >= 0) {
 			key->hw_key_idx = ret;
 			/* push IV and Michael MIC generation to stack */
@@ -1826,7 +1725,7 @@
 		}
 		break;
 	case DISABLE_KEY:
-		ath_key_delete(common, key);
+		ath9k_cmn_key_delete(common, key);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1999,6 +1898,8 @@
 	struct ath_softc *sc = aphy->sc;
 	int ret = 0;
 
+	local_bh_disable();
+
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
 		if (!(sc->sc_flags & SC_OP_RXAGGR))
@@ -2028,6 +1929,8 @@
 			  "Unknown AMPDU action\n");
 	}
 
+	local_bh_enable();
+
 	return ret;
 }
 
@@ -2072,6 +1975,7 @@
 	ath9k_wiphy_pause_all_forced(sc, aphy);
 	sc->sc_flags |= SC_OP_SCANNING;
 	del_timer_sync(&common->ani.timer);
+	cancel_work_sync(&sc->paprd_work);
 	cancel_delayed_work_sync(&sc->tx_complete_work);
 	mutex_unlock(&sc->mutex);
 }
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 1ec836c..257b10b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -28,6 +28,7 @@
 	{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
 	{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI   */
 	{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
+	{ PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E  AR9300 */
 	{ 0 }
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 8519452..600ee0b 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -20,7 +20,7 @@
 #include "ath9k.h"
 
 static const struct ath_rate_table ar5416_11na_ratetable = {
-	42,
+	43,
 	8, /* MCS start */
 	{
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
@@ -40,73 +40,75 @@
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
 			29300, 7, 108, 4, 7, 7, 7, 7 },
 		{ VALID_2040, VALID_2040, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */
-			6400, 0, 0, 0, 8, 24, 8, 24 },
+			6400, 0, 0, 0, 8, 25, 8, 25 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */
-			12700, 1, 1, 2, 9, 25, 9, 25 },
+			12700, 1, 1, 2, 9, 26, 9, 26 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */
-			18800, 2, 2, 2, 10, 26, 10, 26 },
+			18800, 2, 2, 2, 10, 27, 10, 27 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */
-			25000, 3, 3, 4, 11, 27, 11, 27 },
+			25000, 3, 3, 4, 11, 28, 11, 28 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */
-			36700, 4, 4, 4, 12, 28, 12, 28 },
+			36700, 4, 4, 4, 12, 29, 12, 29 },
 		{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */
-			48100, 5, 5, 4, 13, 29, 13, 29 },
+			48100, 5, 5, 4, 13, 30, 13, 30 },
 		{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */
-			53500, 6, 6, 4, 14, 30, 14, 30 },
+			53500, 6, 6, 4, 14, 31, 14, 31 },
 		{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */
-			59000, 7, 7, 4, 15, 31, 15, 32 },
+			59000, 7, 7, 4, 15, 32, 15, 33 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */
-			12700, 8, 8, 3, 16, 33, 16, 33 },
+			12700, 8, 8, 3, 16, 34, 16, 34 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */
-			24800, 9, 9, 2, 17, 34, 17, 34 },
+			24800, 9, 9, 2, 17, 35, 17, 35 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */
-			36600, 10, 10, 2, 18, 35, 18, 35 },
+			36600, 10, 10, 2, 18, 36, 18, 36 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */
-			48100, 11, 11, 4, 19, 36, 19, 36 },
+			48100, 11, 11, 4, 19, 37, 19, 37 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */
-			69500, 12, 12, 4, 20, 37, 20, 37 },
+			69500, 12, 12, 4, 20, 38, 20, 38 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */
-			89500, 13, 13, 4, 21, 38, 21, 38 },
+			89500, 13, 13, 4, 21, 39, 21, 39 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */
-			98900, 14, 14, 4, 22, 39, 22, 39 },
+			98900, 14, 14, 4, 22, 40, 22, 40 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */
-			108300, 15, 15, 4, 23, 40, 23, 41 },
+			108300, 15, 15, 4, 23, 41, 24, 42 },
+		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS_HGI, 144400, /* 144.4 Mb */
+			120000, 15, 15, 4, 23, 41, 24, 42 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */
-			13200, 0, 0, 0, 8, 24, 24, 24 },
+			13200, 0, 0, 0, 8, 25, 25, 25 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */
-			25900, 1, 1, 2, 9, 25, 25, 25 },
+			25900, 1, 1, 2, 9, 26, 26, 26 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */
-			38600, 2, 2, 2, 10, 26, 26, 26 },
+			38600, 2, 2, 2, 10, 27, 27, 27 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */
-			49800, 3, 3, 4, 11, 27, 27, 27 },
+			49800, 3, 3, 4, 11, 28, 28, 28 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */
-			72200, 4, 4, 4, 12, 28, 28, 28 },
+			72200, 4, 4, 4, 12, 29, 29, 29 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */
-			92900, 5, 5, 4, 13, 29, 29, 29 },
+			92900, 5, 5, 4, 13, 30, 30, 30 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */
-			102700, 6, 6, 4, 14, 30, 30, 30 },
+			102700, 6, 6, 4, 14, 31, 31, 31 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */
-			112000, 7, 7, 4, 15, 31, 32, 32 },
+			112000, 7, 7, 4, 15, 32, 33, 33 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
-			122000, 7, 7, 4, 15, 31, 32, 32 },
+			122000, 7, 7, 4, 15, 32, 33, 33 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */
-			25800, 8, 8, 0, 16, 33, 33, 33 },
+			25800, 8, 8, 0, 16, 34, 34, 34 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */
-			49800, 9, 9, 2, 17, 34, 34, 34 },
+			49800, 9, 9, 2, 17, 35, 35, 35 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */
-			71900, 10, 10, 2, 18, 35, 35, 35 },
+			71900, 10, 10, 2, 18, 36, 36, 36 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */
-			92500, 11, 11, 4, 19, 36, 36, 36 },
+			92500, 11, 11, 4, 19, 37, 37, 37 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */
-			130300, 12, 12, 4, 20, 37, 37, 37 },
+			130300, 12, 12, 4, 20, 38, 38, 38 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */
-			162800, 13, 13, 4, 21, 38, 38, 38 },
+			162800, 13, 13, 4, 21, 39, 39, 39 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */
-			178200, 14, 14, 4, 22, 39, 39, 39 },
+			178200, 14, 14, 4, 22, 40, 40, 40 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */
-			192100, 15, 15, 4, 23, 40, 41, 41 },
+			192100, 15, 15, 4, 23, 41, 42, 42 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
-			207000, 15, 15, 4, 23, 40, 41, 41 },
+			207000, 15, 15, 4, 23, 41, 42, 42 },
 	},
 	50,  /* probe interval */
 	WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
@@ -116,7 +118,7 @@
  * for HT are the 64K max aggregate limit */
 
 static const struct ath_rate_table ar5416_11ng_ratetable = {
-	46,
+	47,
 	12, /* MCS start */
 	{
 		{ VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
@@ -144,73 +146,75 @@
 		{ VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
 			30900, 11, 108, 8, 11, 11, 11, 11 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */
-			6400, 0, 0, 4, 12, 28, 12, 28 },
+			6400, 0, 0, 4, 12, 29, 12, 29 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */
-			12700, 1, 1, 6, 13, 29, 13, 29 },
+			12700, 1, 1, 6, 13, 30, 13, 30 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */
-			18800, 2, 2, 6, 14, 30, 14, 30 },
+			18800, 2, 2, 6, 14, 31, 14, 31 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */
-			25000, 3, 3, 8, 15, 31, 15, 31 },
+			25000, 3, 3, 8, 15, 32, 15, 32 },
 		{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */
-			36700, 4, 4, 8, 16, 32, 16, 32 },
+			36700, 4, 4, 8, 16, 33, 16, 33 },
 		{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */
-			48100, 5, 5, 8, 17, 33, 17, 33 },
+			48100, 5, 5, 8, 17, 34, 17, 34 },
 		{ INVALID,  VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */
-			53500, 6, 6, 8, 18, 34, 18, 34 },
+			53500, 6, 6, 8, 18, 35, 18, 35 },
 		{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */
-			59000, 7, 7, 8, 19, 35, 19, 36 },
+			59000, 7, 7, 8, 19, 36, 19, 37 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */
-			12700, 8, 8, 4, 20, 37, 20, 37 },
+			12700, 8, 8, 4, 20, 38, 20, 38 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */
-			24800, 9, 9, 6, 21, 38, 21, 38 },
+			24800, 9, 9, 6, 21, 39, 21, 39 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */
-			36600, 10, 10, 6, 22, 39, 22, 39 },
+			36600, 10, 10, 6, 22, 40, 22, 40 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */
-			48100, 11, 11, 8, 23, 40, 23, 40 },
+			48100, 11, 11, 8, 23, 41, 23, 41 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */
-			69500, 12, 12, 8, 24, 41, 24, 41 },
+			69500, 12, 12, 8, 24, 42, 24, 42 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */
-			89500, 13, 13, 8, 25, 42, 25, 42 },
+			89500, 13, 13, 8, 25, 43, 25, 43 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */
-			98900, 14, 14, 8, 26, 43, 26, 44 },
+			98900, 14, 14, 8, 26, 44, 26, 44 },
 		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */
-			108300, 15, 15, 8, 27, 44, 27, 45 },
+			108300, 15, 15, 8, 27, 45, 28, 46 },
+		{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS_HGI, 144400, /* 144.4 Mb */
+			120000, 15, 15, 8, 27, 45, 28, 46 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */
-			13200, 0, 0, 8, 12, 28, 28, 28 },
+			13200, 0, 0, 8, 12, 29, 29, 29 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */
-			25900, 1, 1, 8, 13, 29, 29, 29 },
+			25900, 1, 1, 8, 13, 30, 30, 30 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */
-			38600, 2, 2, 8, 14, 30, 30, 30 },
+			38600, 2, 2, 8, 14, 31, 31, 31 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */
-			49800, 3, 3, 8,  15, 31, 31, 31 },
+			49800, 3, 3, 8,  15, 32, 32, 32 },
 		{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */
-			72200, 4, 4, 8, 16, 32, 32, 32 },
+			72200, 4, 4, 8, 16, 33, 33, 33 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */
-			92900, 5, 5, 8, 17, 33, 33, 33 },
+			92900, 5, 5, 8, 17, 34, 34, 34 },
 		{ INVALID,  VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */
-			102700, 6, 6, 8, 18, 34, 34, 34 },
+			102700, 6, 6, 8, 18, 35, 35, 35 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */
-			112000, 7, 7, 8, 19, 35, 36, 36 },
+			112000, 7, 7, 8, 19, 36, 37, 37 },
 		{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
-			122000, 7, 7, 8, 19, 35, 36, 36 },
+			122000, 7, 7, 8, 19, 36, 37, 37 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */
-			25800, 8, 8, 8, 20, 37, 37, 37 },
+			25800, 8, 8, 8, 20, 38, 38, 38 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */
-			49800, 9, 9, 8, 21, 38, 38, 38 },
+			49800, 9, 9, 8, 21, 39, 39, 39 },
 		{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */
-			71900, 10, 10, 8, 22, 39, 39, 39 },
+			71900, 10, 10, 8, 22, 40, 40, 40 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */
-			92500, 11, 11, 8, 23, 40, 40, 40 },
+			92500, 11, 11, 8, 23, 41, 41, 41 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */
-			130300, 12, 12, 8, 24, 41, 41, 41 },
+			130300, 12, 12, 8, 24, 42, 42, 42 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */
-			162800, 13, 13, 8, 25, 42, 42, 42 },
+			162800, 13, 13, 8, 25, 43, 43, 43 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */
-			178200, 14, 14, 8, 26, 43, 43, 43 },
+			178200, 14, 14, 8, 26, 44, 44, 44 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */
-			192100, 15, 15, 8, 27, 44, 45, 45 },
+			192100, 15, 15, 8, 27, 45, 46, 46 },
 		{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
-			207000, 15, 15, 8, 27, 44, 45, 45 },
+			207000, 15, 15, 8, 27, 45, 46, 46 },
 	},
 	50,  /* probe interval */
 	WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
@@ -1193,21 +1197,19 @@
 }
 
 static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
-			       bool is_cw40, bool is_sgi40)
+			       bool is_cw40, bool is_sgi)
 {
 	u8 caps = 0;
 
 	if (sta->ht_cap.ht_supported) {
 		caps = WLAN_RC_HT_FLAG;
-		if (sc->sc_ah->caps.tx_chainmask != 1 &&
-		    ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) {
-			if (sta->ht_cap.mcs.rx_mask[1])
-				caps |= WLAN_RC_DS_FLAG;
-		}
+		if (sta->ht_cap.mcs.rx_mask[1])
+			caps |= WLAN_RC_DS_FLAG;
 		if (is_cw40)
 			caps |= WLAN_RC_40_FLAG;
-		if (is_sgi40)
+		if (is_sgi)
 			caps |= WLAN_RC_SGI_FLAG;
+
 	}
 
 	return caps;
@@ -1300,7 +1302,7 @@
 	struct ath_softc *sc = priv;
 	struct ath_rate_priv *ath_rc_priv = priv_sta;
 	const struct ath_rate_table *rate_table;
-	bool is_cw40, is_sgi40;
+	bool is_cw40, is_sgi = false;
 	int i, j = 0;
 
 	for (i = 0; i < sband->n_bitrates; i++) {
@@ -1323,7 +1325,11 @@
 	}
 
 	is_cw40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-	is_sgi40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+
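+	/*
+	 * At 40 MHz the station's short-GI-40 capability is used; at 20 MHz
+	 * short GI is advertised only if the hardware supports it
+	 * (ATH9K_HW_CAP_SGI_20).
+	 */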
+	if (is_cw40)
+		is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+	else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+		is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
 
 	/* Choose rate table first */
 
@@ -1336,7 +1342,7 @@
 		rate_table = hw_rate_table[sc->cur_rate_mode];
 	}
 
-	ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi40);
+	ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi);
 	ath_rc_init(sc, priv_sta, sband, sta, rate_table);
 }
 
@@ -1347,10 +1353,10 @@
 	struct ath_softc *sc = priv;
 	struct ath_rate_priv *ath_rc_priv = priv_sta;
 	const struct ath_rate_table *rate_table = NULL;
-	bool oper_cw40 = false, oper_sgi40;
+	bool oper_cw40 = false, oper_sgi;
 	bool local_cw40 = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG) ?
 		true : false;
-	bool local_sgi40 = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ?
+	bool local_sgi = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ?
 		true : false;
 
 	/* FIXME: Handle AP mode later when we support CWM */
@@ -1363,15 +1369,21 @@
 		    oper_chan_type == NL80211_CHAN_HT40PLUS)
 			oper_cw40 = true;
 
-		oper_sgi40 = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
-			true : false;
+		if (oper_cw40)
+			oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+				   true : false;
+		else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+			oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+				   true : false;
+		else
+			oper_sgi = false;
 
-		if ((local_cw40 != oper_cw40) || (local_sgi40 != oper_sgi40)) {
+		if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) {
 			rate_table = ath_choose_rate_table(sc, sband->band,
 						   sta->ht_cap.ht_supported,
 						   oper_cw40);
 			ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
-						   oper_cw40, oper_sgi40);
+						   oper_cw40, oper_sgi);
 			ath_rc_init(sc, priv_sta, sband, sta, rate_table);
 
 			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ca6065b..da0cfe9 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -116,9 +116,6 @@
 	/* configure operational mode */
 	ath9k_hw_setopmode(ah);
 
-	/* Handle any link-level address change. */
-	ath9k_hw_setmac(ah, common->macaddr);
-
 	/* calculate and install multicast filter */
 	mfilt[0] = mfilt[1] = ~0;
 	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -295,7 +292,7 @@
 
 	ath_opmode_init(sc);
 
-	ath9k_hw_startpcureceive(sc->sc_ah);
+	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING));
 }
 
 static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -501,7 +498,7 @@
 start_recv:
 	spin_unlock_bh(&sc->rx.rxbuflock);
 	ath_opmode_init(sc);
-	ath9k_hw_startpcureceive(ah);
+	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING));
 
 	return 0;
 }
@@ -700,12 +697,16 @@
 	bf = SKB_CB_ATHBUF(skb);
 	BUG_ON(!bf);
 
-	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
 				common->rx_bufsize, DMA_FROM_DEVICE);
 
 	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
-	if (ret == -EINPROGRESS)
+	if (ret == -EINPROGRESS) {
+		/* Give ownership of the buffer back to the device */
+		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+				common->rx_bufsize, DMA_FROM_DEVICE);
 		return false;
+	}
 
 	__skb_unlink(skb, &rx_edma->rx_fifo);
 	if (ret == -EINVAL) {
@@ -814,13 +815,263 @@
 	 * 1. accessing the frame
 	 * 2. requeueing the same buffer to h/w
 	 */
-	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
 			common->rx_bufsize,
 			DMA_FROM_DEVICE);
 
 	return bf;
 }
 
+/* Assumes you've already done the endian to CPU conversion */
+static bool ath9k_rx_accept(struct ath_common *common,
+			    struct ieee80211_hdr *hdr,
+			    struct ieee80211_rx_status *rxs,
+			    struct ath_rx_status *rx_stats,
+			    bool *decrypt_error)
+{
+	struct ath_hw *ah = common->ah;
+	__le16 fc;
+	u8 rx_status_len = ah->caps.rx_status_len;
+
+	fc = hdr->frame_control;
+
+	if (!rx_stats->rs_datalen)
+		return false;
+	/*
+	 * rs_status follows rs_datalen so if rs_datalen is too large
+	 * we can take a hint that hardware corrupted it, so ignore
+	 * those frames.
+	 */
+	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
+		return false;
+
+	/*
+	 * rs_more indicates chained descriptors which can be used
+	 * to link buffers together for a sort of scatter-gather
+	 * operation.
+	 * Reject the frame: we don't support scatter-gather yet, and
+	 * the frame is probably corrupt anyway.
+	 */
+	if (rx_stats->rs_more)
+		return false;
+
+	/*
+	 * The rx_stats->rs_status will not be set until the end of the
+	 * chained descriptors so it can be ignored if rs_more is set. The
+	 * rs_more will be false at the last element of the chained
+	 * descriptors.
+	 */
+	if (rx_stats->rs_status != 0) {
+		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
+			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
+			return false;
+
+		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
+			*decrypt_error = true;
+		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
+			if (ieee80211_is_ctl(fc))
+				/*
+				 * Sometimes, we get invalid
+				 * MIC failures on valid control frames.
+				 * Remove these mic errors.
+				 */
+				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
+			else
+				rxs->flag |= RX_FLAG_MMIC_ERROR;
+		}
+		/*
+		 * Reject error frames with the exception of
+		 * decryption and MIC failures. For monitor mode,
+		 * we also ignore the CRC error.
+		 */
+		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
+			if (rx_stats->rs_status &
+			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+			      ATH9K_RXERR_CRC))
+				return false;
+		} else {
+			if (rx_stats->rs_status &
+			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
+				return false;
+			}
+		}
+	}
+	return true;
+}
+
+static int ath9k_process_rate(struct ath_common *common,
+			      struct ieee80211_hw *hw,
+			      struct ath_rx_status *rx_stats,
+			      struct ieee80211_rx_status *rxs)
+{
+	struct ieee80211_supported_band *sband;
+	enum ieee80211_band band;
+	unsigned int i = 0;
+
+	band = hw->conf.channel->band;
+	sband = hw->wiphy->bands[band];
+
+	if (rx_stats->rs_rate & 0x80) {
+		/* HT rate */
+		rxs->flag |= RX_FLAG_HT;
+		if (rx_stats->rs_flags & ATH9K_RX_2040)
+			rxs->flag |= RX_FLAG_40MHZ;
+		if (rx_stats->rs_flags & ATH9K_RX_GI)
+			rxs->flag |= RX_FLAG_SHORT_GI;
+		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+		return 0;
+	}
+
+	for (i = 0; i < sband->n_bitrates; i++) {
+		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+			rxs->rate_idx = i;
+			return 0;
+		}
+		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
+			rxs->flag |= RX_FLAG_SHORTPRE;
+			rxs->rate_idx = i;
+			return 0;
+		}
+	}
+
+	/*
+	 * No valid hardware bitrate found -- we should not get here
+	 * because hardware has already validated this frame as OK.
+	 */
+	ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
+		  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);
+
+	return -EINVAL;
+}
+
+static void ath9k_process_rssi(struct ath_common *common,
+			       struct ieee80211_hw *hw,
+			       struct ieee80211_hdr *hdr,
+			       struct ath_rx_status *rx_stats)
+{
+	struct ath_hw *ah = common->ah;
+	struct ieee80211_sta *sta;
+	struct ath_node *an;
+	int last_rssi = ATH_RSSI_DUMMY_MARKER;
+	__le16 fc;
+
+	fc = hdr->frame_control;
+
+	rcu_read_lock();
+	/*
+	 * XXX: use ieee80211_find_sta! This requires quite a bit of work
+	 * under the current ath9k virtual wiphy implementation as we have
+	 * no way of tying a vif to a wiphy. Typically vifs are attached to
+	 * at least one sdata of a wiphy in mac80211, but with ath9k virtual
+	 * wiphys you'd have to iterate over every wiphy and each sdata.
+	 */
+	sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
+	if (sta) {
+		an = (struct ath_node *) sta->drv_priv;
+		if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
+		   !rx_stats->rs_moreaggr)
+			ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
+		last_rssi = an->last_rssi;
+	}
+	rcu_read_unlock();
+
+	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
+					      ATH_RSSI_EP_MULTIPLIER);
+	if (rx_stats->rs_rssi < 0)
+		rx_stats->rs_rssi = 0;
+
+	/* Update Beacon RSSI, this is used by ANI. */
+	if (ieee80211_is_beacon(fc))
+		ah->stats.avgbrssi = rx_stats->rs_rssi;
+}
+
+/*
+ * For decryption or MIC errors, we only mark the packet status here and always
+ * push the frame up to let mac80211 handle the actual error case, be it a
+ * missing decryption key or a real decryption error. This lets us keep the
+ * statistics there.
+ */
+static int ath9k_rx_skb_preprocess(struct ath_common *common,
+				   struct ieee80211_hw *hw,
+				   struct ieee80211_hdr *hdr,
+				   struct ath_rx_status *rx_stats,
+				   struct ieee80211_rx_status *rx_status,
+				   bool *decrypt_error)
+{
+	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+	/*
+	 * everything but the rate is checked here, the rate check is done
+	 * separately to avoid doing two lookups for a rate for each frame.
+	 */
+	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
+		return -EINVAL;
+
+	ath9k_process_rssi(common, hw, hdr, rx_stats);
+
+	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
+		return -EINVAL;
+
+	rx_status->band = hw->conf.channel->band;
+	rx_status->freq = hw->conf.channel->center_freq;
+	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
+	rx_status->antenna = rx_stats->rs_antenna;
+	rx_status->flag |= RX_FLAG_TSFT;
+
+	return 0;
+}
+
+static void ath9k_rx_skb_postprocess(struct ath_common *common,
+				     struct sk_buff *skb,
+				     struct ath_rx_status *rx_stats,
+				     struct ieee80211_rx_status *rxs,
+				     bool decrypt_error)
+{
+	struct ath_hw *ah = common->ah;
+	struct ieee80211_hdr *hdr;
+	int hdrlen, padpos, padsize;
+	u8 keyix;
+	__le16 fc;
+
+	/* see if any padding is done by the hw and remove it */
+	hdr = (struct ieee80211_hdr *) skb->data;
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	fc = hdr->frame_control;
+	padpos = ath9k_cmn_padpos(hdr->frame_control);
+
+	/* The MAC header is padded to have 32-bit boundary if the
+	 * packet payload is non-zero. The general calculation for
+	 * padsize would take into account odd header lengths:
+	 * padsize = (4 - padpos % 4) % 4; However, since only
+	 * even-length headers are used, padding can only be 0 or 2
+	 * bytes and we can optimize this a bit. In addition, we must
+	 * not try to remove padding from short control frames that do
+	 * not have payload. */
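+	/* e.g. a QoS data header (padpos 26) would give padsize 26 & 3 = 2 */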
+	padsize = padpos & 3;
+	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
+		memmove(skb->data + padsize, skb->data, padpos);
+		skb_pull(skb, padsize);
+	}
+
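+	/*
+	 * If the descriptor carries a valid key cache index, the hardware
+	 * already decrypted the frame. Otherwise, for protected frames,
+	 * recover the key index from the KeyID bits of the IV and check it
+	 * against our key map before marking the frame as decrypted.
+	 */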
+	keyix = rx_stats->rs_keyix;
+
+	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
+	    ieee80211_has_protected(fc)) {
+		rxs->flag |= RX_FLAG_DECRYPTED;
+	} else if (ieee80211_has_protected(fc)
+		   && !decrypt_error && skb->len >= hdrlen + 4) {
+		keyix = skb->data[hdrlen + 3] >> 6;
+
+		if (test_bit(keyix, common->keymap))
+			rxs->flag |= RX_FLAG_DECRYPTED;
+	}
+	if (ah->sw_mgmt_crypto &&
+	    (rxs->flag & RX_FLAG_DECRYPTED) &&
+	    ieee80211_is_mgmt(fc))
+		/* Use software decrypt for management frames. */
+		rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
 
 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 {
@@ -842,15 +1093,21 @@
 	enum ath9k_rx_qtype qtype;
 	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
 	int dma_type;
+	u8 rx_status_len = ah->caps.rx_status_len;
+	u64 tsf = 0;
+	u32 tsf_lower = 0;
 
 	if (edma)
-		dma_type = DMA_FROM_DEVICE;
-	else
 		dma_type = DMA_BIDIRECTIONAL;
+	else
+		dma_type = DMA_FROM_DEVICE;
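+	/*
+	 * EDMA chips keep the RX status words in the buffer itself and the
+	 * driver clears that area before handing the buffer back, so those
+	 * buffers must be mapped bidirectionally; legacy chips only receive
+	 * into the buffer.
+	 */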
 
 	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
 	spin_lock_bh(&sc->rx.rxbuflock);
 
+	tsf = ath9k_hw_gettsf64(ah);
+	tsf_lower = tsf & 0xffffffff;
+
 	do {
 		/* If handling rx interrupt and flush is in progress => exit */
 		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
@@ -869,7 +1126,7 @@
 		if (!skb)
 			continue;
 
-		hdr = (struct ieee80211_hdr *) skb->data;
+		hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
 		rxs =  IEEE80211_SKB_RXCB(skb);
 
 		hw = ath_get_virt_hw(sc, hdr);
@@ -883,8 +1140,17 @@
 		if (flush)
 			goto requeue;
 
-		retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
-						     rxs, &decrypt_error);
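+		/*
+		 * The descriptor carries only the low 32 bits of the TSF;
+		 * splice them into the 64-bit TSF read above and correct
+		 * for the cases where the 32-bit counter wrapped between
+		 * the frame's reception and the TSF read.
+		 */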
+		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
+		if (rs.rs_tstamp > tsf_lower &&
+		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
+			rxs->mactime -= 0x100000000ULL;
+
+		if (rs.rs_tstamp < tsf_lower &&
+		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
+			rxs->mactime += 0x100000000ULL;
+
+		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
+						 rxs, &decrypt_error);
 		if (retval)
 			goto requeue;
 
@@ -908,8 +1174,8 @@
 		if (ah->caps.rx_status_len)
 			skb_pull(skb, ah->caps.rx_status_len);
 
-		ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
-					     rxs, decrypt_error);
+		ath9k_rx_skb_postprocess(common, skb, &rs,
+					 rxs, decrypt_error);
 
 		/* We will now give hardware our shiny new allocated skb */
 		bf->bf_mpdu = requeue_skb;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index d4371a4..633e3d9 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -222,6 +222,7 @@
 
 #define AR_ISR_S2              0x008c
 #define AR_ISR_S2_QCU_TXURN    0x000003FF
+#define AR_ISR_S2_BB_WATCHDOG  0x00010000
 #define AR_ISR_S2_CST          0x00400000
 #define AR_ISR_S2_GTT          0x00800000
 #define AR_ISR_S2_TIM          0x01000000
@@ -699,7 +700,15 @@
 #define AR_RC_HOSTIF         0x00000100
 
 #define AR_WA                		0x4004
+#define AR_WA_BIT6			(1 << 6)
+#define AR_WA_BIT7			(1 << 7)
+#define AR_WA_BIT23			(1 << 23)
 #define AR_WA_D3_L1_DISABLE		(1 << 14)
+#define AR_WA_D3_TO_L1_DISABLE_REAL     (1 << 16)
+#define AR_WA_ASPM_TIMER_BASED_DISABLE  (1 << 17)
+#define AR_WA_RESET_EN                  (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
+#define AR_WA_ANALOG_SHIFT              (1 << 20)
+#define AR_WA_POR_SHORT                 (1 << 21) /* PCI-E Phy reset control */
 #define AR9285_WA_DEFAULT		0x004a050b
 #define AR9280_WA_DEFAULT           	0x0040073b
 #define AR_WA_DEFAULT               	0x0000073f
@@ -756,32 +765,33 @@
 #define AR_SREV_REVISION2        	      0x00000F00
 #define AR_SREV_REVISION2_S     	      8
 
-#define AR_SREV_VERSION_5416_PCI               0xD
-#define AR_SREV_VERSION_5416_PCIE              0xC
-#define AR_SREV_REVISION_5416_10               0
-#define AR_SREV_REVISION_5416_20               1
-#define AR_SREV_REVISION_5416_22               2
-#define AR_SREV_VERSION_9100                  0x14
-#define AR_SREV_VERSION_9160        	      0x40
-#define AR_SREV_REVISION_9160_10    	      0
-#define AR_SREV_REVISION_9160_11    	      1
-#define AR_SREV_VERSION_9280                0x80
-#define AR_SREV_REVISION_9280_10            0
-#define AR_SREV_REVISION_9280_20            1
-#define AR_SREV_REVISION_9280_21            2
-#define AR_SREV_VERSION_9285                  0xC0
-#define AR_SREV_REVISION_9285_10              0
-#define AR_SREV_REVISION_9285_11              1
-#define AR_SREV_REVISION_9285_12              2
-#define AR_SREV_VERSION_9287                  0x180
-#define AR_SREV_REVISION_9287_10              0
-#define AR_SREV_REVISION_9287_11              1
-#define AR_SREV_REVISION_9287_12              2
-#define AR_SREV_VERSION_9271			0x140
-#define AR_SREV_REVISION_9271_10		0
-#define AR_SREV_REVISION_9271_11		1
-#define AR_SREV_VERSION_9300                  0x1c0
-#define AR_SREV_REVISION_9300_20              2 /* 2.0 and 2.1 */
+#define AR_SREV_VERSION_5416_PCI	0xD
+#define AR_SREV_VERSION_5416_PCIE	0xC
+#define AR_SREV_REVISION_5416_10	0
+#define AR_SREV_REVISION_5416_20	1
+#define AR_SREV_REVISION_5416_22	2
+#define AR_SREV_VERSION_9100		0x14
+#define AR_SREV_VERSION_9160		0x40
+#define AR_SREV_REVISION_9160_10	0
+#define AR_SREV_REVISION_9160_11	1
+#define AR_SREV_VERSION_9280		0x80
+#define AR_SREV_REVISION_9280_10	0
+#define AR_SREV_REVISION_9280_20	1
+#define AR_SREV_REVISION_9280_21	2
+#define AR_SREV_VERSION_9285		0xC0
+#define AR_SREV_REVISION_9285_10	0
+#define AR_SREV_REVISION_9285_11	1
+#define AR_SREV_REVISION_9285_12	2
+#define AR_SREV_VERSION_9287		0x180
+#define AR_SREV_REVISION_9287_10	0
+#define AR_SREV_REVISION_9287_11	1
+#define AR_SREV_REVISION_9287_12	2
+#define AR_SREV_REVISION_9287_13	3
+#define AR_SREV_VERSION_9271		0x140
+#define AR_SREV_REVISION_9271_10	0
+#define AR_SREV_REVISION_9271_11	1
+#define AR_SREV_VERSION_9300		0x1c0
+#define AR_SREV_REVISION_9300_20	2 /* 2.0 and 2.1 */
 
 #define AR_SREV_5416(_ah) \
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -859,6 +869,11 @@
 	(((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \
 	 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
 	  ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_12)))
+#define AR_SREV_9287_13_OR_LATER(_ah) \
+	(((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \
+	 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
+	  ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_13)))
+
 #define AR_SREV_9271(_ah) \
     (((_ah))->hw_version.macVersion == AR_SREV_VERSION_9271)
 #define AR_SREV_9271_10(_ah) \
@@ -867,6 +882,7 @@
 #define AR_SREV_9271_11(_ah) \
     (AR_SREV_9271(_ah) && \
      ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11))
+
 #define AR_SREV_9300(_ah) \
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
 #define AR_SREV_9300_20(_ah) \
@@ -881,6 +897,10 @@
     (AR_SREV_9285_12_OR_LATER(_ah) && \
      ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
 
+#define AR_DEVID_7010(_ah) \
+	(((_ah)->hw_version.devid == 0x7010) || \
+	 ((_ah)->hw_version.devid == 0x9018))
+
 #define AR_RADIO_SREV_MAJOR                   0xf0
 #define AR_RAD5133_SREV_MAJOR                 0xc0
 #define AR_RAD2133_SREV_MAJOR                 0xd0
@@ -978,6 +998,7 @@
 #define AR9287_NUM_GPIO                          11
 #define AR9271_NUM_GPIO                          16
 #define AR9300_NUM_GPIO                          17
+#define AR7010_NUM_GPIO                          16
 
 #define AR_GPIO_IN_OUT                           0x4048
 #define AR_GPIO_IN_VAL                           0x0FFFC000
@@ -992,6 +1013,8 @@
 #define AR9271_GPIO_IN_VAL_S                     16
 #define AR9300_GPIO_IN_VAL                       0x0001FFFF
 #define AR9300_GPIO_IN_VAL_S                     0
+#define AR7010_GPIO_IN_VAL                       0x0000FFFF
+#define AR7010_GPIO_IN_VAL_S                     0
 
 #define AR_GPIO_OE_OUT                           (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
 #define AR_GPIO_OE_OUT_DRV                       0x3
@@ -1000,6 +1023,21 @@
 #define AR_GPIO_OE_OUT_DRV_HI                    0x2
 #define AR_GPIO_OE_OUT_DRV_ALL                   0x3
 
+#define AR7010_GPIO_OE                           0x52000
+#define AR7010_GPIO_OE_MASK                      0x1
+#define AR7010_GPIO_OE_AS_OUTPUT                 0x0
+#define AR7010_GPIO_OE_AS_INPUT                  0x1
+#define AR7010_GPIO_IN                           0x52004
+#define AR7010_GPIO_OUT                          0x52008
+#define AR7010_GPIO_SET                          0x5200C
+#define AR7010_GPIO_CLEAR                        0x52010
+#define AR7010_GPIO_INT                          0x52014
+#define AR7010_GPIO_INT_TYPE                     0x52018
+#define AR7010_GPIO_INT_POLARITY                 0x5201C
+#define AR7010_GPIO_PENDING                      0x52020
+#define AR7010_GPIO_INT_MASK                     0x52024
+#define AR7010_GPIO_FUNCTION                     0x52028
+
 #define AR_GPIO_INTR_POL                         (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050)
 #define AR_GPIO_INTR_POL_VAL                     0x0001FFFF
 #define AR_GPIO_INTR_POL_VAL_S                   0
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 105ad40..89423ca 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -219,7 +219,7 @@
 	info->control.rates[1].idx = -1;
 
 	memset(&txctl, 0, sizeof(struct ath_tx_control));
-	txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
+	txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]];
 	txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
 
 	if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index e23172c..6260faa 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -279,9 +279,6 @@
 	if (wmi->drv_priv->op_flags & OP_UNPLUGGED)
 		return 0;
 
-	if (!wmi)
-		return -EINVAL;
-
 	skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
 	if (!skb)
 		return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 859aa4a..c3681a1 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -328,6 +328,7 @@
 	u32 ba[WME_BA_BMP_SIZE >> 5];
 	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
 	bool rc_update = true;
+	struct ieee80211_tx_rate rates[4];
 
 	skb = bf->bf_mpdu;
 	hdr = (struct ieee80211_hdr *)skb->data;
@@ -335,6 +336,8 @@
 	tx_info = IEEE80211_SKB_CB(skb);
 	hw = bf->aphy->hw;
 
+	memcpy(rates, tx_info->control.rates, sizeof(rates));
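+	/*
+	 * tx_info is re-read for each subframe of an aggregate below; the
+	 * control rates of the first frame apply to the whole aggregate,
+	 * so they are saved here and restored for the rate control update.
+	 */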
+
 	rcu_read_lock();
 
 	/* XXX: use ieee80211_find_sta! */
@@ -375,6 +378,9 @@
 		txfail = txpending = 0;
 		bf_next = bf->bf_next;
 
+		skb = bf->bf_mpdu;
+		tx_info = IEEE80211_SKB_CB(skb);
+
 		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
 			/* transmit completion, subframe is
 			 * acked by block ack */
@@ -428,6 +434,7 @@
 			spin_unlock_bh(&txq->axq_lock);
 
 			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
+				memcpy(tx_info->control.rates, rates, sizeof(rates));
 				ath_tx_rc_status(bf, ts, nbad, txok, true);
 				rc_update = false;
 			} else {
@@ -941,6 +948,7 @@
 	if (!ATH_TXQ_SETUP(sc, qnum)) {
 		struct ath_txq *txq = &sc->tx.txq[qnum];
 
+		txq->axq_class = subtype;
 		txq->axq_qnum = qnum;
 		txq->axq_link = NULL;
 		INIT_LIST_HEAD(&txq->axq_q);
@@ -958,58 +966,6 @@
 	return &sc->tx.txq[qnum];
 }
 
-int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
-{
-	int qnum;
-
-	switch (qtype) {
-	case ATH9K_TX_QUEUE_DATA:
-		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
-			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
-				  "HAL AC %u out of range, max %zu!\n",
-				  haltype, ARRAY_SIZE(sc->tx.hwq_map));
-			return -1;
-		}
-		qnum = sc->tx.hwq_map[haltype];
-		break;
-	case ATH9K_TX_QUEUE_BEACON:
-		qnum = sc->beacon.beaconq;
-		break;
-	case ATH9K_TX_QUEUE_CAB:
-		qnum = sc->beacon.cabq->axq_qnum;
-		break;
-	default:
-		qnum = -1;
-	}
-	return qnum;
-}
-
-struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
-{
-	struct ath_txq *txq = NULL;
-	u16 skb_queue = skb_get_queue_mapping(skb);
-	int qnum;
-
-	qnum = ath_get_hal_qnum(skb_queue, sc);
-	txq = &sc->tx.txq[qnum];
-
-	spin_lock_bh(&txq->axq_lock);
-
-	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
-		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
-			  "TX queue: %d is full, depth: %d\n",
-			  qnum, txq->axq_depth);
-		ath_mac80211_stop_queue(sc, skb_queue);
-		txq->stopped = 1;
-		spin_unlock_bh(&txq->axq_lock);
-		return NULL;
-	}
-
-	spin_unlock_bh(&txq->axq_lock);
-
-	return txq;
-}
-
 int ath_txq_update(struct ath_softc *sc, int qnum,
 		   struct ath9k_tx_queue_info *qinfo)
 {
@@ -1688,12 +1644,15 @@
 		bf->bf_frmlen -= padsize;
 	}
 
-	if (conf_is_ht(&hw->conf)) {
+	if (!txctl->paprd && conf_is_ht(&hw->conf)) {
 		bf->bf_state.bf_type |= BUF_HT;
 		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
 			use_ldpc = true;
 	}
 
+	bf->bf_state.bfs_paprd = txctl->paprd;
+	if (txctl->paprd)
+		bf->bf_state.bfs_paprd_timestamp = jiffies;
 	bf->bf_flags = setup_tx_flags(skb, use_ldpc);
 
 	bf->bf_keytype = get_hw_crypto_keytype(skb);
@@ -1768,6 +1727,9 @@
 			    bf->bf_buf_addr,
 			    txctl->txq->axq_qnum);
 
+	if (bf->bf_state.bfs_paprd)
+		ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
+
 	spin_lock_bh(&txctl->txq->axq_lock);
 
 	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
@@ -1809,8 +1771,9 @@
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_txq *txq = txctl->txq;
 	struct ath_buf *bf;
-	int r;
+	int q, r;
 
 	bf = ath_tx_get_buffer(sc);
 	if (!bf) {
@@ -1820,8 +1783,6 @@
 
 	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
 	if (unlikely(r)) {
-		struct ath_txq *txq = txctl->txq;
-
 		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
 
 		/* upon ath_tx_processq() this TX queue will be resumed, we
@@ -1829,7 +1790,7 @@
 		 * will at least have to run TX completion on one buffer
 		 * on the queue */
 		spin_lock_bh(&txq->axq_lock);
-		if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
+		if (!txq->stopped && txq->axq_depth > 1) {
 			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
 			txq->stopped = 1;
 		}
@@ -1840,6 +1801,17 @@
 		return r;
 	}
 
+	q = skb_get_queue_mapping(skb);
+	if (q >= 4)
+		q = 0;
+
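+	/*
+	 * Software flow control: track the number of frames pending per
+	 * mac80211 queue and stop the queue once ATH_MAX_QDEPTH is
+	 * exceeded; it is restarted from the TX completion path when the
+	 * count drops below the threshold again.
+	 */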
+	spin_lock_bh(&txq->axq_lock);
+	if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
+		ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
+		txq->stopped = 1;
+	}
+	spin_unlock_bh(&txq->axq_lock);
+
 	ath_tx_start_dma(sc, bf, txctl);
 
 	return 0;
@@ -1909,7 +1881,7 @@
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
-	int padpos, padsize;
+	int q, padpos, padsize;
 
 	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
 
@@ -1948,8 +1920,16 @@
 
 	if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
 		ath9k_tx_status(hw, skb);
-	else
+	else {
+		q = skb_get_queue_mapping(skb);
+		if (q >= 4)
+			q = 0;
+
+		if (--sc->tx.pending_frames[q] < 0)
+			sc->tx.pending_frames[q] = 0;
+
 		ieee80211_tx_status(hw, skb);
+	}
 }
 
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1971,8 +1951,18 @@
 	}
 
 	dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
-	ath_tx_complete(sc, skb, bf->aphy, tx_flags);
-	ath_debug_stat_tx(sc, txq, bf, ts);
+
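+	/*
+	 * PAPRD calibration frames are generated by the driver itself, so
+	 * they are not handed back to mac80211: signal the waiter unless
+	 * it has already timed out, in which case just free the skb.
+	 */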
+	if (bf->bf_state.bfs_paprd) {
+		if (time_after(jiffies,
+			       bf->bf_state.bfs_paprd_timestamp +
+			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
+			dev_kfree_skb_any(skb);
+		else
+			complete(&sc->paprd_complete);
+	} else {
+		ath_tx_complete(sc, skb, bf->aphy, tx_flags);
+		ath_debug_stat_tx(sc, txq, bf, ts);
+	}
 
 	/*
 	 * Return the list of ath_buf of this mpdu to free queue
@@ -2050,21 +2040,21 @@
 		tx_info->status.rates[i].idx = -1;
 	}
 
-	tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
+	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
 }
 
 static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
 {
 	int qnum;
 
+	qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
+	if (qnum == -1)
+		return;
+
 	spin_lock_bh(&txq->axq_lock);
-	if (txq->stopped &&
-	    sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
-		qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
-		if (qnum != -1) {
-			ath_mac80211_start_queue(sc, qnum);
-			txq->stopped = 0;
-		}
+	if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
+		ath_mac80211_start_queue(sc, qnum);
+		txq->stopped = 0;
 	}
 	spin_unlock_bh(&txq->axq_lock);
 }
@@ -2161,7 +2151,6 @@
 			 * This frame is sent out as a single frame.
 			 * Use hardware retry status for this frame.
 			 */
-			bf->bf_retries = ts.ts_longretry;
 			if (ts.ts_status & ATH9K_TXERR_XRETRY)
 				bf->bf_state.bf_type |= BUF_XRETRY;
 			ath_tx_rc_status(bf, &ts, 0, txok, true);
@@ -2279,8 +2268,18 @@
 
 		txok = !(txs.ts_status & ATH9K_TXERR_MASK);
 
+		/*
+		 * Make sure the null function frame is ACKed before
+		 * configuring the hardware into PS mode.
+		 */
+		if (bf->bf_isnullfunc && txok) {
+			if ((sc->ps_flags & PS_ENABLED))
+				ath9k_enable_ps(sc);
+			else
+				sc->ps_flags |= PS_NULLFUNC_COMPLETED;
+		}
+
 		if (!bf_isampdu(bf)) {
-			bf->bf_retries = txs.ts_longretry;
 			if (txs.ts_status & ATH9K_TXERR_XRETRY)
 				bf->bf_state.bf_type |= BUF_XRETRY;
 			ath_tx_rc_status(bf, &txs, 0, txok, true);
@@ -2424,26 +2423,8 @@
 	for (acno = 0, ac = &an->ac[acno];
 	     acno < WME_NUM_AC; acno++, ac++) {
 		ac->sched    = false;
+		ac->qnum = sc->tx.hwq_map[acno];
 		INIT_LIST_HEAD(&ac->tid_q);
-
-		switch (acno) {
-		case WME_AC_BE:
-			ac->qnum = ath_tx_get_qnum(sc,
-				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
-			break;
-		case WME_AC_BK:
-			ac->qnum = ath_tx_get_qnum(sc,
-				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
-			break;
-		case WME_AC_VI:
-			ac->qnum = ath_tx_get_qnum(sc,
-				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
-			break;
-		case WME_AC_VO:
-			ac->qnum = ath_tx_get_qnum(sc,
-				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
-			break;
-		}
 	}
 }
 
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 3a003e6..8674a9935 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -530,7 +530,7 @@
 	/* Size of the data. For ucode and PCM this is in bytes.
 	 * For IV this is number-of-ivs. */
 	__be32 size;
-} __attribute__((__packed__));
+} __packed;
 
 /* Initial Value file format */
 #define B43_IV_OFFSET_MASK	0x7FFF
@@ -540,8 +540,8 @@
 	union {
 		__be16 d16;
 		__be32 d32;
-	} data __attribute__((__packed__));
-} __attribute__((__packed__));
+	} data __packed;
+} __packed;
 
 
 /* Data structures for DMA transmission, per 80211 core. */
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index fa40fdf..10d0aaf 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -333,11 +333,11 @@
 	dma_addr_t dmaaddr;
 
 	if (tx) {
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
-					     buf, len, DMA_TO_DEVICE);
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+					 buf, len, DMA_TO_DEVICE);
 	} else {
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
-					     buf, len, DMA_FROM_DEVICE);
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+					 buf, len, DMA_FROM_DEVICE);
 	}
 
 	return dmaaddr;
@@ -348,11 +348,11 @@
 			  dma_addr_t addr, size_t len, int tx)
 {
 	if (tx) {
-		ssb_dma_unmap_single(ring->dev->dev,
-				     addr, len, DMA_TO_DEVICE);
+		dma_unmap_single(ring->dev->dev->dma_dev,
+				 addr, len, DMA_TO_DEVICE);
 	} else {
-		ssb_dma_unmap_single(ring->dev->dev,
-				     addr, len, DMA_FROM_DEVICE);
+		dma_unmap_single(ring->dev->dev->dma_dev,
+				 addr, len, DMA_FROM_DEVICE);
 	}
 }
 
@@ -361,7 +361,7 @@
 				 dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_cpu(ring->dev->dev,
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
 				    addr, len, DMA_FROM_DEVICE);
 }
 
@@ -370,8 +370,8 @@
 				    dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_device(ring->dev->dev,
-				       addr, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
+				   addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -401,9 +401,9 @@
 	 */
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
-						  B43_DMA_RINGMEMSIZE,
-						  &(ring->dmabase), flags);
+	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+					    B43_DMA_RINGMEMSIZE,
+					    &(ring->dmabase), flags);
 	if (!ring->descbase) {
 		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
 		return -ENOMEM;
@@ -420,8 +420,8 @@
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
 
-	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, flags);
+	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
+			  ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
@@ -528,7 +528,7 @@
 				  dma_addr_t addr,
 				  size_t buffersize, bool dma_to_device)
 {
-	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
 		return 1;
 
 	switch (ring->type) {
@@ -874,10 +874,10 @@
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = ssb_dma_map_single(dev->dev,
-					      ring->txhdr_cache,
-					      b43_txhdr_size(dev),
-					      DMA_TO_DEVICE);
+		dma_test = dma_map_single(dev->dev->dma_dev,
+					  ring->txhdr_cache,
+					  b43_txhdr_size(dev),
+					  DMA_TO_DEVICE);
 
 		if (b43_dma_mapping_error(ring, dma_test,
 					  b43_txhdr_size(dev), 1)) {
@@ -889,10 +889,10 @@
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = ssb_dma_map_single(dev->dev,
-						      ring->txhdr_cache,
-						      b43_txhdr_size(dev),
-						      DMA_TO_DEVICE);
+			dma_test = dma_map_single(dev->dev->dma_dev,
+						  ring->txhdr_cache,
+						  b43_txhdr_size(dev),
+						  DMA_TO_DEVICE);
 
 			if (b43_dma_mapping_error(ring, dma_test,
 						  b43_txhdr_size(dev), 1)) {
@@ -903,9 +903,9 @@
 			}
 		}
 
-		ssb_dma_unmap_single(dev->dev,
-				     dma_test, b43_txhdr_size(dev),
-				     DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev->dma_dev,
+				 dma_test, b43_txhdr_size(dev),
+				 DMA_TO_DEVICE);
 	}
 
 	err = alloc_ringmemory(ring);
@@ -1018,9 +1018,12 @@
 	/* Try to set the DMA mask. If it fails, try falling back to a
 	 * lower mask, as we can always also support a lower one. */
 	while (1) {
-		err = ssb_dma_set_mask(dev->dev, mask);
-		if (!err)
-			break;
+		err = dma_set_mask(dev->dev->dma_dev, mask);
+		if (!err) {
+			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+			if (!err)
+				break;
+		}
 		if (mask == DMA_BIT_MASK(64)) {
 			mask = DMA_BIT_MASK(32);
 			fallback = 1;
@@ -1221,14 +1224,14 @@
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
 	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-		priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA);
+		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
+						  GFP_ATOMIC | GFP_DMA);
 		if (!priv_info->bouncebuffer) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 			err = -ENOMEM;
 			goto out_unmap_hdr;
 		}
-		memcpy(priv_info->bouncebuffer, skb->data, skb->len);
 
 		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
 		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index dc91944..a01c210 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -67,7 +67,7 @@
 struct b43_dmadesc32 {
 	__le32 control;
 	__le32 address;
-} __attribute__ ((__packed__));
+} __packed;
 #define B43_DMA32_DCTL_BYTECNT		0x00001FFF
 #define B43_DMA32_DCTL_ADDREXT_MASK		0x00030000
 #define B43_DMA32_DCTL_ADDREXT_SHIFT	16
@@ -140,7 +140,7 @@
 	__le32 control1;
 	__le32 address_low;
 	__le32 address_high;
-} __attribute__ ((__packed__));
+} __packed;
 #define B43_DMA64_DCTL0_DTABLEEND		0x10000000
 #define B43_DMA64_DCTL0_IRQ			0x20000000
 #define B43_DMA64_DCTL0_FRAMEEND		0x40000000
@@ -153,8 +153,8 @@
 	union {
 		struct b43_dmadesc32 dma32;
 		struct b43_dmadesc64 dma64;
-	} __attribute__ ((__packed__));
-} __attribute__ ((__packed__));
+	} __packed;
+} __packed;
 
 /* Misc DMA constants */
 #define B43_DMA_RINGMEMSIZE		PAGE_SIZE
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 7965b70..8e24379 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1804,7 +1804,7 @@
 			       dma_reason[2], dma_reason[3],
 			       dma_reason[4], dma_reason[5]);
 			b43err(dev->wl, "This device does not support DMA "
-			       "on your system. Please use PIO instead.\n");
+			       "on your system. It will now be switched to PIO.\n");
 			/* Fall back to PIO transfers if we get fatal DMA errors! */
 			dev->use_pio = 1;
 			b43_controller_restart(dev, "DMA error");
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 4e56b7b..45933cf 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -182,6 +182,7 @@
 
 static const struct sdio_device_id b43_sdio_ids[] = {
 	{ SDIO_DEVICE(0x02d0, 0x044b) }, /* Nintendo Wii WLAN daughter card */
+	{ SDIO_DEVICE(0x0092, 0x0004) }, /* C-guys, Inc. EW-CG1102GC */
 	{ },
 };
 
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index d23ff9f..d4cf9b3 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -10,8 +10,8 @@
 		union {				\
 			__le32 data;		\
 			__u8 raw[size];		\
-		} __attribute__((__packed__));	\
-	} __attribute__((__packed__))
+		} __packed;	\
+	} __packed
 
 /* struct b43_plcp_hdr4 */
 _b43_declare_plcp_hdr(4);
@@ -57,7 +57,7 @@
 			__u8 rts_frame[16];		/* The RTS frame (if used) */
 			PAD_BYTES(2);
 			struct b43_plcp_hdr6 plcp;	/* Main PLCP header */
-		} new_format __attribute__ ((__packed__));
+		} new_format __packed;
 
 		/* The old r351 format. */
 		struct {
@@ -68,10 +68,10 @@
 			__u8 rts_frame[16];		/* The RTS frame (if used) */
 			PAD_BYTES(2);
 			struct b43_plcp_hdr6 plcp;	/* Main PLCP header */
-		} old_format __attribute__ ((__packed__));
+		} old_format __packed;
 
-	} __attribute__ ((__packed__));
-} __attribute__ ((__packed__));
+	} __packed;
+} __packed;
 
 /* MAC TX control */
 #define B43_TXH_MAC_USEFBR		0x10000000 /* Use fallback rate for this AMPDU */
@@ -218,20 +218,20 @@
 		struct {
 			__u8 jssi;	/* PHY RX Status 1: JSSI */
 			__u8 sig_qual;	/* PHY RX Status 1: Signal Quality */
-		} __attribute__ ((__packed__));
+		} __packed;
 
 		/* RSSI for N-PHYs */
 		struct {
 			__s8 power0;	/* PHY RX Status 1: Power 0 */
 			__s8 power1;	/* PHY RX Status 1: Power 1 */
-		} __attribute__ ((__packed__));
-	} __attribute__ ((__packed__));
+		} __packed;
+	} __packed;
 	__le16 phy_status2;	/* PHY RX Status 2 */
 	__le16 phy_status3;	/* PHY RX Status 3 */
 	__le32 mac_status;	/* MAC RX status */
 	__le16 mac_time;
 	__le16 channel;
-} __attribute__ ((__packed__));
+} __packed;
 
 /* PHY RX Status 0 */
 #define B43_RX_PHYST0_GAINCTL		0x4000 /* Gain Control */
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 89fe2f9..c81b2f5 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -372,7 +372,7 @@
 	/* Size of the data. For ucode and PCM this is in bytes.
 	 * For IV this is number-of-ivs. */
 	__be32 size;
-} __attribute__((__packed__));
+} __packed;
 
 /* Initial Value file format */
 #define B43legacy_IV_OFFSET_MASK	0x7FFF
@@ -382,8 +382,8 @@
 	union {
 		__be16 d16;
 		__be32 d32;
-	} data __attribute__((__packed__));
-} __attribute__((__packed__));
+	} data __packed;
+} __packed;
 
 #define B43legacy_PHYMODE(phytype)	(1 << (phytype))
 #define B43legacy_PHYMODE_B		B43legacy_PHYMODE	\
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index e91520d..e03e01d 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -394,11 +394,11 @@
 	dma_addr_t dmaaddr;
 
 	if (tx)
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					     buf, len,
 					     DMA_TO_DEVICE);
 	else
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					     buf, len,
 					     DMA_FROM_DEVICE);
 
@@ -412,11 +412,11 @@
 		      int tx)
 {
 	if (tx)
-		ssb_dma_unmap_single(ring->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				     addr, len,
 				     DMA_TO_DEVICE);
 	else
-		ssb_dma_unmap_single(ring->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				     addr, len,
 				     DMA_FROM_DEVICE);
 }
@@ -428,8 +428,8 @@
 {
 	B43legacy_WARN_ON(ring->tx);
 
-	ssb_dma_sync_single_for_cpu(ring->dev->dev,
-				    addr, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
+				addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -439,8 +439,8 @@
 {
 	B43legacy_WARN_ON(ring->tx);
 
-	ssb_dma_sync_single_for_device(ring->dev->dev,
-				       addr, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
+				   addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -460,10 +460,10 @@
 static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 {
 	/* GFP flags must match the flags in free_ringmemory()! */
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
-						  B43legacy_DMA_RINGMEMSIZE,
-						  &(ring->dmabase),
-						  GFP_KERNEL);
+	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+					    B43legacy_DMA_RINGMEMSIZE,
+					    &(ring->dmabase),
+					    GFP_KERNEL);
 	if (!ring->descbase) {
 		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
 			     " failed\n");
@@ -476,8 +476,8 @@
 
 static void free_ringmemory(struct b43legacy_dmaring *ring)
 {
-	ssb_dma_free_consistent(ring->dev->dev, B43legacy_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, GFP_KERNEL);
+	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
+			  ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
@@ -589,7 +589,7 @@
 					 size_t buffersize,
 					 bool dma_to_device)
 {
-	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
 		return 1;
 
 	switch (ring->type) {
@@ -906,7 +906,7 @@
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = ssb_dma_map_single(dev->dev, ring->txhdr_cache,
+		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
 					      sizeof(struct b43legacy_txhdr_fw3),
 					      DMA_TO_DEVICE);
 
@@ -920,7 +920,7 @@
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = ssb_dma_map_single(dev->dev,
+			dma_test = dma_map_single(dev->dev->dma_dev,
 					ring->txhdr_cache,
 					sizeof(struct b43legacy_txhdr_fw3),
 					DMA_TO_DEVICE);
@@ -930,9 +930,9 @@
 				goto err_kfree_txhdr_cache;
 		}
 
-		ssb_dma_unmap_single(dev->dev, dma_test,
-				     sizeof(struct b43legacy_txhdr_fw3),
-				     DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev->dma_dev, dma_test,
+				 sizeof(struct b43legacy_txhdr_fw3),
+				 DMA_TO_DEVICE);
 	}
 
 	ring->nr_slots = nr_slots;
@@ -1040,9 +1040,12 @@
 	/* Try to set the DMA mask. If it fails, try falling back to a
 	 * lower mask, as we can always also support a lower one. */
 	while (1) {
-		err = ssb_dma_set_mask(dev->dev, mask);
-		if (!err)
-			break;
+		err = dma_set_mask(dev->dev->dma_dev, mask);
+		if (!err) {
+			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+			if (!err)
+				break;
+		}
 		if (mask == DMA_BIT_MASK(64)) {
 			mask = DMA_BIT_MASK(32);
 			fallback = 1;
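
[Editor's note: the b43legacy/dma.c hunks above replace the ssb-specific DMA wrappers with the generic DMA API on the underlying struct device. A minimal sketch of the mask-with-fallback pattern adopted in the last hunk; everything except the dma_* calls is hypothetical, and which mask widths are tried depends on the DMA engine type.]

	#include <linux/dma-mapping.h>

	/* Hypothetical helper mirroring the fallback loop above: a
	 * narrower mask is always acceptable, so step down until one
	 * is accepted. */
	static int example_select_dma_mask(struct device *dma_dev)
	{
		u64 mask = DMA_BIT_MASK(64);

		while (1) {
			/* Both the streaming and the coherent mask must
			 * be accepted before the mask can be used. */
			if (!dma_set_mask(dma_dev, mask) &&
			    !dma_set_coherent_mask(dma_dev, mask))
				return 0;
			if (mask == DMA_BIT_MASK(64))
				mask = DMA_BIT_MASK(32);
			else if (mask == DMA_BIT_MASK(32))
				mask = DMA_BIT_MASK(30);
			else
				return -EOPNOTSUPP;
		}
	}
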
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index f968104..f89c342 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -72,7 +72,7 @@
 struct b43legacy_dmadesc32 {
 	__le32 control;
 	__le32 address;
-} __attribute__((__packed__));
+} __packed;
 #define B43legacy_DMA32_DCTL_BYTECNT		0x00001FFF
 #define B43legacy_DMA32_DCTL_ADDREXT_MASK	0x00030000
 #define B43legacy_DMA32_DCTL_ADDREXT_SHIFT	16
@@ -147,7 +147,7 @@
 	__le32 control1;
 	__le32 address_low;
 	__le32 address_high;
-} __attribute__((__packed__));
+} __packed;
 #define B43legacy_DMA64_DCTL0_DTABLEEND		0x10000000
 #define B43legacy_DMA64_DCTL0_IRQ		0x20000000
 #define B43legacy_DMA64_DCTL0_FRAMEEND		0x40000000
@@ -162,8 +162,8 @@
 	union {
 		struct b43legacy_dmadesc32 dma32;
 		struct b43legacy_dmadesc64 dma64;
-	} __attribute__((__packed__));
-} __attribute__((__packed__));
+	} __packed;
+} __packed;
 
 
 /* Misc DMA constants */
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h
index 9163308..289db00 100644
--- a/drivers/net/wireless/b43legacy/xmit.h
+++ b/drivers/net/wireless/b43legacy/xmit.h
@@ -9,8 +9,8 @@
 		union {				\
 			__le32 data;		\
 			__u8 raw[size];		\
-		} __attribute__((__packed__));	\
-	} __attribute__((__packed__))
+		} __packed;	\
+	} __packed
 
 /* struct b43legacy_plcp_hdr4 */
 _b43legacy_declare_plcp_hdr(4);
@@ -39,7 +39,7 @@
 	struct b43legacy_plcp_hdr6 rts_plcp;	/* RTS PLCP */
 	__u8 rts_frame[18];			/* The RTS frame (if used) */
 	struct b43legacy_plcp_hdr6 plcp;
-} __attribute__((__packed__));
+} __packed;
 
 /* MAC TX control */
 #define B43legacy_TX4_MAC_KEYIDX	0x0FF00000 /* Security key index */
@@ -123,7 +123,7 @@
 	__le16 seq;
 	u8 phy_stat;
 	PAD_BYTES(1);
-} __attribute__((__packed__));
+} __packed;
 
 
 /* Receive header for v3 firmware. */
@@ -138,7 +138,7 @@
 	__le16 mac_status;	/* MAC RX status */
 	__le16 mac_time;
 	__le16 channel;
-} __attribute__((__packed__));
+} __packed;
 
 
 /* PHY RX Status 0 */
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h
index 7f9d8d9..ed98ce7 100644
--- a/drivers/net/wireless/hostap/hostap_80211.h
+++ b/drivers/net/wireless/hostap/hostap_80211.h
@@ -19,35 +19,35 @@
 			__le16 status_code;
 			/* possibly followed by Challenge text */
 			u8 variable[0];
-		} __attribute__ ((packed)) auth;
+		} __packed auth;
 		struct {
 			__le16 reason_code;
-		} __attribute__ ((packed)) deauth;
+		} __packed deauth;
 		struct {
 			__le16 capab_info;
 			__le16 listen_interval;
 			/* followed by SSID and Supported rates */
 			u8 variable[0];
-		} __attribute__ ((packed)) assoc_req;
+		} __packed assoc_req;
 		struct {
 			__le16 capab_info;
 			__le16 status_code;
 			__le16 aid;
 			/* followed by Supported rates */
 			u8 variable[0];
-		} __attribute__ ((packed)) assoc_resp, reassoc_resp;
+		} __packed assoc_resp, reassoc_resp;
 		struct {
 			__le16 capab_info;
 			__le16 listen_interval;
 			u8 current_ap[6];
 			/* followed by SSID and Supported rates */
 			u8 variable[0];
-		} __attribute__ ((packed)) reassoc_req;
+		} __packed reassoc_req;
 		struct {
 			__le16 reason_code;
-		} __attribute__ ((packed)) disassoc;
+		} __packed disassoc;
 		struct {
-		} __attribute__ ((packed)) probe_req;
+		} __packed probe_req;
 		struct {
 			u8 timestamp[8];
 			__le16 beacon_int;
@@ -55,9 +55,9 @@
 			/* followed by some of SSID, Supported rates,
 			 * FH Params, DS Params, CF Params, IBSS Params, TIM */
 			u8 variable[0];
-		} __attribute__ ((packed)) beacon, probe_resp;
+		} __packed beacon, probe_resp;
 	} u;
-} __attribute__ ((packed));
+} __packed;
 
 
 #define IEEE80211_MGMT_HDR_LEN 24
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 231dbd7..9cadaa2 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -688,7 +688,7 @@
 	struct ap_data *ap = data;
 	struct net_device *dev = ap->local->dev;
 	struct ieee80211_hdr *hdr;
-	u16 fc, status;
+	u16 status;
 	__le16 *pos;
 	struct sta_info *sta = NULL;
 	char *txt = NULL;
@@ -699,7 +699,6 @@
 	}
 
 	hdr = (struct ieee80211_hdr *) skb->data;
-	fc = le16_to_cpu(hdr->frame_control);
 	if ((!ieee80211_is_assoc_resp(hdr->frame_control) &&
 	     !ieee80211_is_reassoc_resp(hdr->frame_control)) ||
 	    skb->len < IEEE80211_MGMT_HDR_LEN + 4) {
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h
index 90b64b09..4230102 100644
--- a/drivers/net/wireless/hostap/hostap_common.h
+++ b/drivers/net/wireless/hostap/hostap_common.h
@@ -179,7 +179,7 @@
 	__le16 variant;
 	__le16 major;
 	__le16 minor;
-} __attribute__ ((packed));
+} __packed;
 
 #define HFA384X_COMP_ID_PRI 0x15
 #define HFA384X_COMP_ID_STA 0x1f
@@ -192,14 +192,14 @@
 	__le16 variant;
 	__le16 bottom;
 	__le16 top;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct hfa384x_build_id
 {
 	__le16 pri_seq;
 	__le16 sec_seq;
-} __attribute__ ((packed));
+} __packed;
 
 /* FD01 - Download Buffer */
 struct hfa384x_rid_download_buffer
@@ -207,14 +207,14 @@
 	__le16 page;
 	__le16 offset;
 	__le16 length;
-} __attribute__ ((packed));
+} __packed;
 
 /* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */
 struct hfa384x_comms_quality {
 	__le16 comm_qual; /* 0 .. 92 */
 	__le16 signal_level; /* 27 .. 154 */
 	__le16 noise_level; /* 27 .. 154 */
-} __attribute__ ((packed));
+} __packed;
 
 
 /* netdevice private ioctls (used, e.g., with iwpriv from user space) */
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index eb57d1e..eaee84b 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -741,9 +741,7 @@
 	local_info_t *local =
 		container_of(work, local_info_t, set_multicast_list_queue);
 	struct net_device *dev = local->dev;
-	struct hostap_interface *iface;
 
-	iface = netdev_priv(dev);
 	if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
 			    local->is_promisc)) {
 		printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index 1ba33be..1c66b3c 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -31,14 +31,14 @@
 	u32 did;
 	u16 status, len;
 	u32 data;
-} __attribute__ ((packed));
+} __packed;
 
 struct linux_wlan_ng_prism_hdr {
 	u32 msgcode, msglen;
 	char devname[16];
 	struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal,
 		noise, rate, istx, frmlen;
-} __attribute__ ((packed));
+} __packed;
 
 struct linux_wlan_ng_cap_hdr {
 	__be32 version;
@@ -55,7 +55,7 @@
 	__be32 ssi_noise;
 	__be32 preamble;
 	__be32 encoding;
-} __attribute__ ((packed));
+} __packed;
 
 struct hostap_radiotap_rx {
 	struct ieee80211_radiotap_header hdr;
@@ -66,7 +66,7 @@
 	__le16 chan_flags;
 	s8 dbm_antsignal;
 	s8 dbm_antnoise;
-} __attribute__ ((packed));
+} __packed;
 
 #define LWNG_CAP_DID_BASE   (4 | (1 << 6)) /* section 4, group 1 */
 #define LWNG_CAPHDR_VERSION 0x80211001
@@ -97,7 +97,7 @@
 	__be16 len;
 
 	/* followed by frame data; max 2304 bytes */
-} __attribute__ ((packed));
+} __packed;
 
 
 struct hfa384x_tx_frame {
@@ -126,14 +126,14 @@
 	__be16 len;
 
 	/* followed by frame data; max 2304 bytes */
-} __attribute__ ((packed));
+} __packed;
 
 
 struct hfa384x_rid_hdr
 {
 	__le16 len;
 	__le16 rid;
-} __attribute__ ((packed));
+} __packed;
 
 
 /* Macro for converting signal levels (range 27 .. 154) to wireless ext
@@ -145,24 +145,24 @@
 struct hfa384x_scan_request {
 	__le16 channel_list;
 	__le16 txrate; /* HFA384X_RATES_* */
-} __attribute__ ((packed));
+} __packed;
 
 struct hfa384x_hostscan_request {
 	__le16 channel_list;
 	__le16 txrate;
 	__le16 target_ssid_len;
 	u8 target_ssid[32];
-} __attribute__ ((packed));
+} __packed;
 
 struct hfa384x_join_request {
 	u8 bssid[6];
 	__le16 channel;
-} __attribute__ ((packed));
+} __packed;
 
 struct hfa384x_info_frame {
 	__le16 len;
 	__le16 type;
-} __attribute__ ((packed));
+} __packed;
 
 struct hfa384x_comm_tallies {
 	__le16 tx_unicast_frames;
@@ -186,7 +186,7 @@
 	__le16 rx_discards_wep_undecryptable;
 	__le16 rx_message_in_msg_fragments;
 	__le16 rx_message_in_bad_msg_fragments;
-} __attribute__ ((packed));
+} __packed;
 
 struct hfa384x_comm_tallies32 {
 	__le32 tx_unicast_frames;
@@ -210,7 +210,7 @@
 	__le32 rx_discards_wep_undecryptable;
 	__le32 rx_message_in_msg_fragments;
 	__le32 rx_message_in_bad_msg_fragments;
-} __attribute__ ((packed));
+} __packed;
 
 struct hfa384x_scan_result_hdr {
 	__le16 reserved;
@@ -219,7 +219,7 @@
 #define HFA384X_SCAN_HOST_INITIATED 1
 #define HFA384X_SCAN_FIRMWARE_INITIATED 2
 #define HFA384X_SCAN_INQUIRY_FROM_HOST 3
-} __attribute__ ((packed));
+} __packed;
 
 #define HFA384X_SCAN_MAX_RESULTS 32
 
@@ -234,7 +234,7 @@
 	u8 ssid[32];
 	u8 sup_rates[10];
 	__le16 rate;
-} __attribute__ ((packed));
+} __packed;
 
 struct hfa384x_hostscan_result {
 	__le16 chid;
@@ -248,7 +248,7 @@
 	u8 sup_rates[10];
 	__le16 rate;
 	__le16 atim;
-} __attribute__ ((packed));
+} __packed;
 
 struct comm_tallies_sums {
 	unsigned int tx_unicast_frames;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 0bd4dfa..56350d5 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -3467,10 +3467,8 @@
 	dma_addr_t p;
 
 	priv->msg_buffers =
-	    (struct ipw2100_tx_packet *)kmalloc(IPW_COMMAND_POOL_SIZE *
-						sizeof(struct
-						       ipw2100_tx_packet),
-						GFP_KERNEL);
+	    kmalloc(IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet),
+		    GFP_KERNEL);
 	if (!priv->msg_buffers) {
 		printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for msg "
 		       "buffers.\n", priv->net_dev->name);
@@ -4499,10 +4497,8 @@
 	}
 
 	priv->tx_buffers =
-	    (struct ipw2100_tx_packet *)kmalloc(TX_PENDED_QUEUE_LENGTH *
-						sizeof(struct
-						       ipw2100_tx_packet),
-						GFP_ATOMIC);
+	    kmalloc(TX_PENDED_QUEUE_LENGTH * sizeof(struct ipw2100_tx_packet),
+		    GFP_ATOMIC);
 	if (!priv->tx_buffers) {
 		printk(KERN_ERR DRV_NAME
 		       ": %s: alloc failed form tx buffers.\n",
@@ -4651,9 +4647,9 @@
 	/*
 	 * allocate packets
 	 */
-	priv->rx_buffers = (struct ipw2100_rx_packet *)
-	    kmalloc(RX_QUEUE_LENGTH * sizeof(struct ipw2100_rx_packet),
-		    GFP_KERNEL);
+	priv->rx_buffers = kmalloc(RX_QUEUE_LENGTH *
+				   sizeof(struct ipw2100_rx_packet),
+				   GFP_KERNEL);
 	if (!priv->rx_buffers) {
 		IPW_DEBUG_INFO("can't allocate rx packet buffer table\n");
 
@@ -5233,7 +5229,7 @@
 	u8 auth_mode;
 	u8 replay_counters_number;
 	u8 unicast_using_group;
-} __attribute__ ((packed));
+} __packed;
 
 static int ipw2100_set_security_information(struct ipw2100_priv *priv,
 					    int auth_mode,
@@ -8475,7 +8471,7 @@
 	short mode;
 	unsigned int fw_size;
 	unsigned int uc_size;
-} __attribute__ ((packed));
+} __packed;
 
 static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
 {
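
[Editor's note: the ipw2100.c hunks above drop the explicit casts on kmalloc(). In C the void * return value converts implicitly to any object pointer type, so the cast only adds noise and can hide a missing prototype. A hedged before/after sketch; the structure and helper names are illustrative, not the driver's.]

	#include <linux/slab.h>

	struct example_pkt { int dummy; };

	static struct example_pkt *example_alloc(size_t count)
	{
		/* Preferred: no cast; kmalloc() returns void *, which
		 * converts implicitly to the target pointer type. */
		return kmalloc(count * sizeof(struct example_pkt),
			       GFP_KERNEL);
	}

In later kernels kcalloc(count, size, GFP_KERNEL) is often preferred for array allocations, since it also zero-initializes and checks the multiplication for overflow; the patch above intentionally keeps the existing kmalloc form.
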
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 1eab0d6..838002b 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -164,7 +164,7 @@
 		} fields;
 		u8 field;
 	} info;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw2100_bd {
 	u32 host_addr;
@@ -174,7 +174,7 @@
 	 * 1st TBD) */
 	u8 num_fragments;
 	u8 reserved[6];
-} __attribute__ ((packed));
+} __packed;
 
 #define IPW_BD_QUEUE_LENGTH(n) (1<<n)
 #define IPW_BD_ALIGNMENT(L)    (L*sizeof(struct ipw2100_bd))
@@ -232,7 +232,7 @@
 #define IPW_STATUS_FLAG_WEP_ENCRYPTED	(1<<1)
 #define IPW_STATUS_FLAG_CRC_ERROR       (1<<2)
 	u8 rssi;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw2100_status_queue {
 	/* driver (virtual) pointer to queue */
@@ -293,7 +293,7 @@
 	u32 reserved1[3];
 	u32 *ordinal1_ptr;
 	u32 *ordinal2_ptr;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw2100_data_header {
 	u32 host_command_reg;
@@ -307,7 +307,7 @@
 	u8 src_addr[ETH_ALEN];
 	u8 dst_addr[ETH_ALEN];
 	u16 fragment_size;
-} __attribute__ ((packed));
+} __packed;
 
 /* Host command data structure */
 struct host_command {
@@ -316,7 +316,7 @@
 	u32 host_command_sequence;	// UNIQUE COMMAND NUMBER (ID)
 	u32 host_command_length;	// LENGTH
 	u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN];	// COMMAND PARAMETERS
-} __attribute__ ((packed));
+} __packed;
 
 typedef enum {
 	POWER_ON_RESET,
@@ -382,7 +382,7 @@
 	u32 hnhdr_size;		/* size in bytes of data
 				   or number of entries, if table.
 				   Does NOT include header */
-} __attribute__ ((packed));
+} __packed;
 
 #define MAX_KEY_SIZE	16
 #define	MAX_KEYS	8
@@ -814,7 +814,7 @@
 		struct ipw2100_notification notification;
 		struct ipw2100_cmd_header command;
 	} rx_data;
-} __attribute__ ((packed));
+} __packed;
 
 /* Bit 0-7 are for 802.11b tx rates - .  Bit 5-7 are reserved */
 #define TX_RATE_1_MBIT              0x0001
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 3aa3bb1..cb2552a 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -96,7 +96,7 @@
 static u32 ipw_debug_level;
 static int associate;
 static int auto_create = 1;
-static int led_support = 0;
+static int led_support = 1;
 static int disable = 0;
 static int bt_coexist = 0;
 static int hwcrypto = 0;
@@ -6624,13 +6624,12 @@
 		return -EINVAL;
 
 	if (wrqu->data.length) {
-		buf = kmalloc(wrqu->data.length, GFP_KERNEL);
+		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
 		if (buf == NULL) {
 			err = -ENOMEM;
 			goto out;
 		}
 
-		memcpy(buf, extra, wrqu->data.length);
 		kfree(ieee->wpa_ie);
 		ieee->wpa_ie = buf;
 		ieee->wpa_ie_len = wrqu->data.length;
@@ -12083,7 +12082,7 @@
 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
 
 module_param_named(led, led_support, int, 0444);
-MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
+MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
 
 module_param(debug, int, 0444);
 MODULE_PARM_DESC(debug, "debug output mask");
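
[Editor's note: the ipw2200.c change above folds a kmalloc()+memcpy() pair into kmemdup(). A minimal sketch of the idiom; the wrapper and its names are illustrative only.]

	#include <linux/slab.h>

	/* Equivalent to kmalloc(len, GFP_KERNEL) followed by
	 * memcpy(buf, src, len); returns NULL if the allocation
	 * fails, which the caller typically maps to -ENOMEM. */
	static u8 *example_dup_ie(const u8 *src, size_t len)
	{
		return kmemdup(src, len, GFP_KERNEL);
	}
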
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index bf0eeb2..d7d049c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -388,7 +388,7 @@
 	dma_addr_t dma_addr;		/**< physical addr for BD's */
 	int low_mark;		       /**< low watermark, resume queue if free space more than this */
 	int high_mark;		       /**< high watermark, stop queue if free space less than this */
-} __attribute__ ((packed)); /* XXX */
+} __packed; /* XXX */
 
 struct machdr32 {
 	__le16 frame_ctl;
@@ -399,7 +399,7 @@
 	__le16 seq_ctrl;		// more endians!
 	u8 addr4[MACADRR_BYTE_LEN];
 	__le16 qos_ctrl;
-} __attribute__ ((packed));
+} __packed;
 
 struct machdr30 {
 	__le16 frame_ctl;
@@ -409,7 +409,7 @@
 	u8 addr3[MACADRR_BYTE_LEN];
 	__le16 seq_ctrl;		// more endians!
 	u8 addr4[MACADRR_BYTE_LEN];
-} __attribute__ ((packed));
+} __packed;
 
 struct machdr26 {
 	__le16 frame_ctl;
@@ -419,7 +419,7 @@
 	u8 addr3[MACADRR_BYTE_LEN];
 	__le16 seq_ctrl;		// more endians!
 	__le16 qos_ctrl;
-} __attribute__ ((packed));
+} __packed;
 
 struct machdr24 {
 	__le16 frame_ctl;
@@ -428,20 +428,20 @@
 	u8 addr2[MACADRR_BYTE_LEN];
 	u8 addr3[MACADRR_BYTE_LEN];
 	__le16 seq_ctrl;		// more endians!
-} __attribute__ ((packed));
+} __packed;
 
 // TX TFD with 32 byte MAC Header
 struct tx_tfd_32 {
 	struct machdr32 mchdr;	// 32
 	__le32 uivplaceholder[2];	// 8
-} __attribute__ ((packed));
+} __packed;
 
 // TX TFD with 30 byte MAC Header
 struct tx_tfd_30 {
 	struct machdr30 mchdr;	// 30
 	u8 reserved[2];		// 2
 	__le32 uivplaceholder[2];	// 8
-} __attribute__ ((packed));
+} __packed;
 
 // tx tfd with 26 byte mac header
 struct tx_tfd_26 {
@@ -449,14 +449,14 @@
 	u8 reserved1[2];	// 2
 	__le32 uivplaceholder[2];	// 8
 	u8 reserved2[4];	// 4
-} __attribute__ ((packed));
+} __packed;
 
 // tx tfd with 24 byte mac header
 struct tx_tfd_24 {
 	struct machdr24 mchdr;	// 24
 	__le32 uivplaceholder[2];	// 8
 	u8 reserved[8];		// 8
-} __attribute__ ((packed));
+} __packed;
 
 #define DCT_WEP_KEY_FIELD_LENGTH 16
 
@@ -465,7 +465,7 @@
 	u8 length;
 	__le16 reserved;
 	u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct tfd_data {
 	/* Header */
@@ -504,14 +504,14 @@
 	__le32 num_chunks;
 	__le32 chunk_ptr[NUM_TFD_CHUNKS];
 	__le16 chunk_len[NUM_TFD_CHUNKS];
-} __attribute__ ((packed));
+} __packed;
 
 struct txrx_control_flags {
 	u8 message_type;
 	u8 rx_seq_num;
 	u8 control_bits;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 #define  TFD_SIZE                           128
 #define  TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH   (TFD_SIZE - sizeof(struct txrx_control_flags))
@@ -523,7 +523,7 @@
 		struct tfd_command cmd;
 		u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
 	} u;
-} __attribute__ ((packed));
+} __packed;
 
 typedef void destructor_func(const void *);
 
@@ -559,7 +559,7 @@
 		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
 		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
 	} failed;
-} __attribute__ ((packed));
+} __packed;
 
 /* statistics command response */
 struct ipw_cmd_stats {
@@ -586,13 +586,13 @@
 	__le16 rx_autodetec_no_ofdm;
 	__le16 rx_autodetec_no_barker;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct notif_channel_result {
 	u8 channel_num;
 	struct ipw_cmd_stats stats;
 	u8 uReserved;
-} __attribute__ ((packed));
+} __packed;
 
 #define SCAN_COMPLETED_STATUS_COMPLETE  1
 #define SCAN_COMPLETED_STATUS_ABORTED   2
@@ -602,24 +602,24 @@
 	u8 num_channels;
 	u8 status;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct notif_frag_length {
 	__le16 frag_length;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct notif_beacon_state {
 	__le32 state;
 	__le32 number;
-} __attribute__ ((packed));
+} __packed;
 
 struct notif_tgi_tx_key {
 	u8 key_state;
 	u8 security_type;
 	u8 station_index;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 #define SILENCE_OVER_THRESH (1)
 #define SILENCE_UNDER_THRESH (2)
@@ -631,25 +631,25 @@
 	struct rate_histogram histogram;
 	u8 silence_notification_type;	/* SILENCE_OVER/UNDER_THRESH */
 	__le16 silence_count;
-} __attribute__ ((packed));
+} __packed;
 
 struct notif_association {
 	u8 state;
-} __attribute__ ((packed));
+} __packed;
 
 struct notif_authenticate {
 	u8 state;
 	struct machdr24 addr;
 	__le16 status;
-} __attribute__ ((packed));
+} __packed;
 
 struct notif_calibration {
 	u8 data[104];
-} __attribute__ ((packed));
+} __packed;
 
 struct notif_noise {
 	__le32 value;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_rx_notification {
 	u8 reserved[8];
@@ -669,7 +669,7 @@
 		struct notif_noise noise;
 		u8 raw[0];
 	} u;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_rx_frame {
 	__le32 reserved1;
@@ -692,14 +692,14 @@
 	u8 rtscts_seen;		// 0x1 RTS seen ; 0x2 CTS seen
 	__le16 length;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_rx_header {
 	u8 message_type;
 	u8 rx_seq_num;
 	u8 control_bits;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_rx_packet {
 	struct ipw_rx_header header;
@@ -707,7 +707,7 @@
 		struct ipw_rx_frame frame;
 		struct ipw_rx_notification notification;
 	} u;
-} __attribute__ ((packed));
+} __packed;
 
 #define IPW_RX_NOTIFICATION_SIZE sizeof(struct ipw_rx_header) + 12
 #define IPW_RX_FRAME_SIZE        (unsigned int)(sizeof(struct ipw_rx_header) + \
@@ -717,7 +717,7 @@
 	dma_addr_t dma_addr;
 	struct sk_buff *skb;
 	struct list_head list;
-};				/* Not transferred over network, so not  __attribute__ ((packed)) */
+};				/* Not transferred over network, so not  __packed */
 
 struct ipw_rx_queue {
 	struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
@@ -730,7 +730,7 @@
 	struct list_head rx_free;	/* Own an SKBs */
 	struct list_head rx_used;	/* No SKB allocated */
 	spinlock_t lock;
-};				/* Not transferred over network, so not  __attribute__ ((packed)) */
+};				/* Not transferred over network, so not  __packed */
 
 struct alive_command_responce {
 	u8 alive_command;
@@ -745,21 +745,21 @@
 	__le16 reserved4;
 	u8 time_stamp[5];	/* month, day, year, hours, minutes */
 	u8 ucode_valid;
-} __attribute__ ((packed));
+} __packed;
 
 #define IPW_MAX_RATES 12
 
 struct ipw_rates {
 	u8 num_rates;
 	u8 rates[IPW_MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
 
 struct command_block {
 	unsigned int control;
 	u32 source_addr;
 	u32 dest_addr;
 	unsigned int status;
-} __attribute__ ((packed));
+} __packed;
 
 #define CB_NUMBER_OF_ELEMENTS_SMALL 64
 struct fw_image_desc {
@@ -792,7 +792,7 @@
 	u8 accept_all_mgmt_frames;
 	u8 pass_noise_stats_to_host;
 	u8 reserved3;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_multicast_addr {
 	u8 num_of_multicast_addresses;
@@ -801,7 +801,7 @@
 	u8 mac2[6];
 	u8 mac3[6];
 	u8 mac4[6];
-} __attribute__ ((packed));
+} __packed;
 
 #define DCW_WEP_KEY_INDEX_MASK		0x03	/* bits [0:1] */
 #define DCW_WEP_KEY_SEC_TYPE_MASK	0x30	/* bits [4:5] */
@@ -822,7 +822,7 @@
 	u8 key_index;
 	u8 key_size;
 	u8 key[16];
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_tgi_tx_key {
 	u8 key_id;
@@ -831,7 +831,7 @@
 	u8 flags;
 	u8 key[16];
 	__le32 tx_counter[2];
-} __attribute__ ((packed));
+} __packed;
 
 #define IPW_SCAN_CHANNELS 54
 
@@ -840,7 +840,7 @@
 	__le16 dwell_time;
 	u8 channels_list[IPW_SCAN_CHANNELS];
 	u8 channels_reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 enum {
 	IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0,
@@ -857,7 +857,7 @@
 	u8 scan_type[IPW_SCAN_CHANNELS / 2];
 	u8 reserved;
 	__le16 dwell_time[IPW_SCAN_TYPES];
-} __attribute__ ((packed));
+} __packed;
 
 static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index)
 {
@@ -902,7 +902,7 @@
 	u8 smr;
 	u8 reserved1;
 	__le16 reserved2;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_supported_rates {
 	u8 ieee_mode;
@@ -910,36 +910,36 @@
 	u8 purpose;
 	u8 reserved;
 	u8 supported_rates[IPW_MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_rts_threshold {
 	__le16 rts_threshold;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_frag_threshold {
 	__le16 frag_threshold;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_retry_limit {
 	u8 short_retry_limit;
 	u8 long_retry_limit;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_dino_config {
 	__le32 dino_config_addr;
 	__le16 dino_config_size;
 	u8 dino_response;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_aironet_info {
 	u8 id;
 	u8 length;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_rx_key {
 	u8 station_index;
@@ -950,25 +950,25 @@
 	u8 station_address[6];
 	u8 key_index;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_country_channel_info {
 	u8 first_channel;
 	u8 no_channels;
 	s8 max_tx_power;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_country_info {
 	u8 id;
 	u8 length;
 	u8 country_str[3];
 	struct ipw_country_channel_info groups[7];
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_channel_tx_power {
 	u8 channel_number;
 	s8 tx_power;
-} __attribute__ ((packed));
+} __packed;
 
 #define SCAN_ASSOCIATED_INTERVAL (HZ)
 #define SCAN_INTERVAL (HZ / 10)
@@ -979,18 +979,18 @@
 	u8 num_channels;
 	u8 ieee_mode;
 	struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS];
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_rsn_capabilities {
 	u8 id;
 	u8 length;
 	__le16 version;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_sensitivity_calib {
 	__le16 beacon_rssi_raw;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * Host command structure.
@@ -1019,7 +1019,7 @@
    * nParams=(len+3)/4+status_len
    */
 	u32 param[0];
-} __attribute__ ((packed));
+} __packed;
 
 #define STATUS_HCMD_ACTIVE      (1<<0)	/**< host command in progress */
 
@@ -1114,7 +1114,7 @@
 	u32 event;
 	u32 time;
 	u32 data;
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_fw_error {	 /* XXX */
 	unsigned long jiffies;
@@ -1125,7 +1125,7 @@
 	struct ipw_error_elem *elem;
 	struct ipw_event *log;
 	u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
 
 #ifdef CONFIG_IPW2200_PROMISCUOUS
 
@@ -1170,7 +1170,7 @@
 	s8 rt_dbmnoise;
 	u8 rt_antenna;	/* antenna number */
 	u8 payload[0];  /* payload... */
-} __attribute__ ((packed));
+} __packed;
 #endif
 
 struct ipw_priv {
@@ -1957,7 +1957,7 @@
 struct ipw_fixed_rate {
 	__le16 tx_rates;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 #define IPW_INDIRECT_ADDR_MASK (~0x3ul)
 
@@ -1966,14 +1966,14 @@
 	u8 len;
 	u16 reserved;
 	u32 *param;
-} __attribute__ ((packed));	/* XXX */
+} __packed;	/* XXX */
 
 struct cmdlog_host_cmd {
 	u8 cmd;
 	u8 len;
 	__le16 reserved;
 	char param[124];
-} __attribute__ ((packed));
+} __packed;
 
 struct ipw_cmd_log {
 	unsigned long jiffies;
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 284b0e4..4736861 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -154,7 +154,7 @@
 	u8 ctrl;		/* always 0x03 */
 	u8 oui[P80211_OUI_LEN];	/* organizational universal id */
 
-} __attribute__ ((packed));
+} __packed;
 
 #define SNAP_SIZE sizeof(struct libipw_snap_hdr)
 
@@ -323,7 +323,7 @@
 	u8 keys[WEP_KEYS][SCM_KEY_LEN];
 	u8 level;
 	u16 flags;
-} __attribute__ ((packed));
+} __packed;
 
 /*
 
@@ -347,7 +347,7 @@
 	__le16 duration_id;
 	u8 addr1[ETH_ALEN];
 	u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_hdr_2addr {
 	__le16 frame_ctl;
@@ -355,7 +355,7 @@
 	u8 addr1[ETH_ALEN];
 	u8 addr2[ETH_ALEN];
 	u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_hdr_3addr {
 	__le16 frame_ctl;
@@ -365,7 +365,7 @@
 	u8 addr3[ETH_ALEN];
 	__le16 seq_ctl;
 	u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_hdr_4addr {
 	__le16 frame_ctl;
@@ -376,7 +376,7 @@
 	__le16 seq_ctl;
 	u8 addr4[ETH_ALEN];
 	u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_hdr_3addrqos {
 	__le16 frame_ctl;
@@ -387,13 +387,13 @@
 	__le16 seq_ctl;
 	u8 payload[0];
 	__le16 qos_ctl;
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_info_element {
 	u8 id;
 	u8 len;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * These are the data types that can make up management packets
@@ -406,7 +406,7 @@
 	u16 listen_interval;
 	struct {
 		u16 association_id:14, reserved:2;
-	} __attribute__ ((packed));
+	} __packed;
 	u32 time_stamp[2];
 	u16 reason;
 	u16 status;
@@ -419,7 +419,7 @@
 	__le16 status;
 	/* challenge */
 	struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_channel_switch {
 	u8 id;
@@ -427,7 +427,7 @@
 	u8 mode;
 	u8 channel;
 	u8 count;
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_action {
 	struct libipw_hdr_3addr header;
@@ -441,12 +441,12 @@
 		struct libipw_channel_switch channel_switch;
 
 	} format;
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_disassoc {
 	struct libipw_hdr_3addr header;
 	__le16 reason;
-} __attribute__ ((packed));
+} __packed;
 
 /* Alias deauth for disassoc */
 #define libipw_deauth libipw_disassoc
@@ -455,7 +455,7 @@
 	struct libipw_hdr_3addr header;
 	/* SSID, supported rates */
 	struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_probe_response {
 	struct libipw_hdr_3addr header;
@@ -465,7 +465,7 @@
 	/* SSID, supported rates, FH params, DS params,
 	 * CF params, IBSS params, TIM (if beacon), RSN */
 	struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
 
 /* Alias beacon for probe_response */
 #define libipw_beacon libipw_probe_response
@@ -476,7 +476,7 @@
 	__le16 listen_interval;
 	/* SSID, supported rates, RSN */
 	struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_reassoc_request {
 	struct libipw_hdr_3addr header;
@@ -484,7 +484,7 @@
 	__le16 listen_interval;
 	u8 current_ap[ETH_ALEN];
 	struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_assoc_response {
 	struct libipw_hdr_3addr header;
@@ -493,7 +493,7 @@
 	__le16 aid;
 	/* supported rates */
 	struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_txb {
 	u8 nr_frags;
@@ -555,19 +555,19 @@
 	u8 qui_subtype;
 	u8 version;
 	u8 ac_info;
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_qos_ac_parameter {
 	u8 aci_aifsn;
 	u8 ecw_min_max;
 	__le16 tx_op_limit;
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_qos_parameter_info {
 	struct libipw_qos_information_element info_element;
 	u8 reserved;
 	struct libipw_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_qos_parameters {
 	__le16 cw_min[QOS_QUEUE_NUM];
@@ -575,7 +575,7 @@
 	u8 aifs[QOS_QUEUE_NUM];
 	u8 flag[QOS_QUEUE_NUM];
 	__le16 tx_op_limit[QOS_QUEUE_NUM];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_qos_data {
 	struct libipw_qos_parameters parameters;
@@ -588,7 +588,7 @@
 struct libipw_tim_parameters {
 	u8 tim_count;
 	u8 tim_period;
-} __attribute__ ((packed));
+} __packed;
 
 /*******************************************************/
 
@@ -606,7 +606,7 @@
 	__le64 start_time;
 	__le16 duration;
 	u8 map;
-} __attribute__ ((packed));
+} __packed;
 
 enum {				/* libipw_measurement_request.mode */
 	/* Bit 0 is reserved */
@@ -627,7 +627,7 @@
 	u8 channel;
 	__le64 start_time;
 	__le16 duration;
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_measurement_request {
 	struct libipw_info_element ie;
@@ -635,7 +635,7 @@
 	u8 mode;
 	u8 type;
 	struct libipw_measurement_params params[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_measurement_report {
 	struct libipw_info_element ie;
@@ -645,17 +645,17 @@
 	union {
 		struct libipw_basic_report basic[0];
 	} u;
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_tpc_report {
 	u8 transmit_power;
 	u8 link_margin;
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_channel_map {
 	u8 channel;
 	u8 map;
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_ibss_dfs {
 	struct libipw_info_element ie;
@@ -668,14 +668,14 @@
 	u8 mode;
 	u8 channel;
 	u8 count;
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_quiet {
 	u8 count;
 	u8 period;
 	u8 duration;
 	u8 offset;
-} __attribute__ ((packed));
+} __packed;
 
 struct libipw_network {
 	/* These entries are used to identify a unique network */
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index dc8ed15..6491e27 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -30,9 +30,11 @@
 
 config IWLWIFI_DEBUGFS
         bool "iwlagn debugfs support"
-        depends on IWLWIFI && IWLWIFI_DEBUG && MAC80211_DEBUGFS
+        depends on IWLWIFI && MAC80211_DEBUGFS
         ---help---
-	  Enable creation of debugfs files for the iwlwifi drivers.
+	  Enable creation of debugfs files for the iwlwifi drivers. This
+	  is a low-impact option that allows getting insight into the
+	  driver's state at runtime.
 
 config IWLWIFI_DEVICE_TRACING
 	bool "iwlwifi device access tracing"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 7c72353..728bb85 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_IWLWIFI)	+= iwlcore.o
 iwlcore-objs 		:= iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwlcore-objs 		+= iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
+iwlcore-objs 		+= iwl-rx.o iwl-tx.o iwl-sta.o
 iwlcore-objs 		+= iwl-scan.o iwl-led.o
 iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
 iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
@@ -11,7 +11,7 @@
 obj-$(CONFIG_IWLAGN)	+= iwlagn.o
 iwlagn-objs		:= iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
 iwlagn-objs		+= iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
-iwlagn-objs		+= iwl-agn-lib.o
+iwlagn-objs		+= iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
 iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
 
 iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 6be2992..1daf159 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -157,6 +157,10 @@
 			BIT(IWL_CALIB_TX_IQ) 		|
 			BIT(IWL_CALIB_TX_IQ_PERD)	|
 			BIT(IWL_CALIB_BASE_BAND);
+	if (priv->cfg->need_dc_calib)
+		priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
+
+	priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
 
 	return 0;
 }
@@ -213,6 +217,7 @@
 		.set_ct_kill = iwl1000_set_ct_threshold,
 	 },
 	.manage_ibss_station = iwlagn_manage_ibss_station,
+	.update_bcast_station = iwl_update_bcast_station,
 	.debugfs_ops = {
 		.rx_stats_read = iwl_ucode_rx_stats_read,
 		.tx_stats_read = iwl_ucode_tx_stats_read,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
index 6a9c64a..ef0835b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
@@ -28,6 +28,28 @@
 
 #include "iwl-3945-debugfs.h"
 
+
+static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+	int p = 0;
+
+	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+		       le32_to_cpu(priv->_3945.statistics.flag));
+	if (le32_to_cpu(priv->_3945.statistics.flag) &
+			UCODE_STATISTICS_CLEAR_MSK)
+		p += scnprintf(buf + p, bufsz - p,
+			       "\tStatistics have been cleared\n");
+	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+		       (le32_to_cpu(priv->_3945.statistics.flag) &
+			UCODE_STATISTICS_FREQUENCY_MSK)
+			? "2.4 GHz" : "5.2 GHz");
+	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+		       (le32_to_cpu(priv->_3945.statistics.flag) &
+			UCODE_STATISTICS_NARROW_BAND_MSK)
+			? "enabled" : "disabled");
+	return p;
+}
+
 ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
 				    char __user *user_buf,
 				    size_t count, loff_t *ppos)
@@ -70,7 +92,7 @@
 	max_cck = &priv->_3945.max_delta.rx.cck;
 	max_general = &priv->_3945.max_delta.rx.general;
 
-	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+	pos += iwl3945_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
 			 "acumulative       delta         max\n",
 			 "Statistics_Rx - OFDM:");
@@ -331,7 +353,7 @@
 	accum_tx = &priv->_3945.accum_statistics.tx;
 	delta_tx = &priv->_3945.delta_statistics.tx;
 	max_tx = &priv->_3945.max_delta.tx;
-	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+	pos += iwl3945_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
 			 "acumulative       delta         max\n",
 			 "Statistics_Tx:");
@@ -438,7 +460,7 @@
 	accum_div = &priv->_3945.accum_statistics.general.div;
 	delta_div = &priv->_3945.delta_statistics.general.div;
 	max_div = &priv->_3945.max_delta.general.div;
-	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+	pos += iwl3945_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
 			 "acumulative       delta         max\n",
 			 "Statistics_General:");
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
index 042f6bc..2c9ed2b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
@@ -175,13 +175,13 @@
 struct iwl3945_tfd_tb {
 	__le32 addr;
 	__le32 len;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl3945_tfd {
 	__le32 control_flags;
 	struct iwl3945_tfd_tb tbs[4];
 	u8 __pad[28];
-} __attribute__ ((packed));
+} __packed;
 
 
 #endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 91bcb4e..7c731a7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -96,7 +96,7 @@
 	u8 gain_index;		/* index into power (gain) setup table ... */
 	s8 power;		/* ... for this pwr level for this chnl group */
 	u16 v_det;		/* PA output voltage */
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
@@ -117,7 +117,7 @@
 	u8 group_channel;	/* "representative" channel # in this band */
 	s16 temperature;	/* h/w temperature at factory calib this band
 				 * (signed) */
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * Temperature-based Tx-power compensation data, not band-specific.
@@ -131,7 +131,7 @@
 	u32 Tc;
 	u32 Td;
 	u32 Te;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * EEPROM map
@@ -215,7 +215,7 @@
 /* abs.ofs: 512 */
 	struct iwl3945_eeprom_temperature_corr corrections;  /* abs.ofs: 832 */
 	u8 reserved16[172];	/* fill out to full 1024 byte block */
-} __attribute__ ((packed));
+} __packed;
 
 #define IWL3945_EEPROM_IMG_SIZE 1024
 
@@ -274,7 +274,7 @@
  * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
 struct iwl3945_shared {
 	__le32 tx_base_ptr[8];
-} __attribute__ ((packed));
+} __packed;
 
 static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
 {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index c44a303..a07310f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -279,8 +279,8 @@
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		tx_info = &txq->txb[txq->q.read_ptr];
-		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
-		tx_info->skb[0] = NULL;
+		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
+		tx_info->skb = NULL;
 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 	}
 
@@ -315,7 +315,7 @@
 		return;
 	}
 
-	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
+	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
 	ieee80211_tx_info_clear_status(info);
 
 	/* Fill the MRR chain with some info about on-chip retransmissions */
@@ -352,7 +352,7 @@
  *  RX handler implementations
  *
  *****************************************************************************/
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
 /*
  *  based on the assumption of all statistics counter are in DWORD
  *  FIXME: This function is for debugging, do not deal with
@@ -406,6 +406,11 @@
 	unsigned int plcp_msec;
 	unsigned long plcp_received_jiffies;
 
+	if (priv->cfg->plcp_delta_threshold ==
+	    IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
+		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
+		return rc;
+	}
 	memcpy(&current_stat, pkt->u.raw, sizeof(struct
 			iwl3945_notif_statistics));
 	/*
@@ -460,7 +465,7 @@
 	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
 		     (int)sizeof(struct iwl3945_notif_statistics),
 		     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
 	iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
 #endif
 	iwl_recover_from_statistics(priv, pkt);
@@ -475,7 +480,7 @@
 	__le32 *flag = (__le32 *)&pkt->u.raw;
 
 	if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
 		memset(&priv->_3945.accum_statistics, 0,
 			sizeof(struct iwl3945_notif_statistics));
 		memset(&priv->_3945.delta_statistics, 0,
@@ -494,158 +499,6 @@
  * Misc. internal state and helper functions
  *
  ******************************************************************************/
-#ifdef CONFIG_IWLWIFI_DEBUG
-
-/**
- * iwl3945_report_frame - dump frame to syslog during debug sessions
- *
- * You may hack this function to show different aspects of received frames,
- * including selective frame dumps.
- * group100 parameter selects whether to show 1 out of 100 good frames.
- */
-static void _iwl3945_dbg_report_frame(struct iwl_priv *priv,
-		      struct iwl_rx_packet *pkt,
-		      struct ieee80211_hdr *header, int group100)
-{
-	u32 to_us;
-	u32 print_summary = 0;
-	u32 print_dump = 0;	/* set to 1 to dump all frames' contents */
-	u32 hundred = 0;
-	u32 dataframe = 0;
-	__le16 fc;
-	u16 seq_ctl;
-	u16 channel;
-	u16 phy_flags;
-	u16 length;
-	u16 status;
-	u16 bcn_tmr;
-	u32 tsf_low;
-	u64 tsf;
-	u8 rssi;
-	u8 agc;
-	u16 sig_avg;
-	u16 noise_diff;
-	struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
-	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
-	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
-	u8 *data = IWL_RX_DATA(pkt);
-
-	/* MAC header */
-	fc = header->frame_control;
-	seq_ctl = le16_to_cpu(header->seq_ctrl);
-
-	/* metadata */
-	channel = le16_to_cpu(rx_hdr->channel);
-	phy_flags = le16_to_cpu(rx_hdr->phy_flags);
-	length = le16_to_cpu(rx_hdr->len);
-
-	/* end-of-frame status and timestamp */
-	status = le32_to_cpu(rx_end->status);
-	bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
-	tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
-	tsf = le64_to_cpu(rx_end->timestamp);
-
-	/* signal statistics */
-	rssi = rx_stats->rssi;
-	agc = rx_stats->agc;
-	sig_avg = le16_to_cpu(rx_stats->sig_avg);
-	noise_diff = le16_to_cpu(rx_stats->noise_diff);
-
-	to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
-
-	/* if data frame is to us and all is good,
-	 *   (optionally) print summary for only 1 out of every 100 */
-	if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
-	    cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
-		dataframe = 1;
-		if (!group100)
-			print_summary = 1;	/* print each frame */
-		else if (priv->framecnt_to_us < 100) {
-			priv->framecnt_to_us++;
-			print_summary = 0;
-		} else {
-			priv->framecnt_to_us = 0;
-			print_summary = 1;
-			hundred = 1;
-		}
-	} else {
-		/* print summary for all other frames */
-		print_summary = 1;
-	}
-
-	if (print_summary) {
-		char *title;
-		int rate;
-
-		if (hundred)
-			title = "100Frames";
-		else if (ieee80211_has_retry(fc))
-			title = "Retry";
-		else if (ieee80211_is_assoc_resp(fc))
-			title = "AscRsp";
-		else if (ieee80211_is_reassoc_resp(fc))
-			title = "RasRsp";
-		else if (ieee80211_is_probe_resp(fc)) {
-			title = "PrbRsp";
-			print_dump = 1;	/* dump frame contents */
-		} else if (ieee80211_is_beacon(fc)) {
-			title = "Beacon";
-			print_dump = 1;	/* dump frame contents */
-		} else if (ieee80211_is_atim(fc))
-			title = "ATIM";
-		else if (ieee80211_is_auth(fc))
-			title = "Auth";
-		else if (ieee80211_is_deauth(fc))
-			title = "DeAuth";
-		else if (ieee80211_is_disassoc(fc))
-			title = "DisAssoc";
-		else
-			title = "Frame";
-
-		rate = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
-		if (rate == -1)
-			rate = 0;
-		else
-			rate = iwl3945_rates[rate].ieee / 2;
-
-		/* print frame summary.
-		 * MAC addresses show just the last byte (for brevity),
-		 *    but you can hack it to show more, if you'd like to. */
-		if (dataframe)
-			IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
-				     "len=%u, rssi=%d, chnl=%d, rate=%d,\n",
-				     title, le16_to_cpu(fc), header->addr1[5],
-				     length, rssi, channel, rate);
-		else {
-			/* src/dst addresses assume managed mode */
-			IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, "
-				     "src=0x%02x, rssi=%u, tim=%lu usec, "
-				     "phy=0x%02x, chnl=%d\n",
-				     title, le16_to_cpu(fc), header->addr1[5],
-				     header->addr3[5], rssi,
-				     tsf_low - priv->scan_start_tsf,
-				     phy_flags, channel);
-		}
-	}
-	if (print_dump)
-		iwl_print_hex_dump(priv, IWL_DL_RX, data, length);
-}
-
-static void iwl3945_dbg_report_frame(struct iwl_priv *priv,
-		      struct iwl_rx_packet *pkt,
-		      struct ieee80211_hdr *header, int group100)
-{
-	if (iwl_get_debug_level(priv) & IWL_DL_RX)
-		_iwl3945_dbg_report_frame(priv, pkt, header, group100);
-}
-
-#else
-static inline void iwl3945_dbg_report_frame(struct iwl_priv *priv,
-		      struct iwl_rx_packet *pkt,
-		      struct ieee80211_hdr *header, int group100)
-{
-}
-#endif
 
 /* This is necessary only for a number of statistics, see the caller. */
 static int iwl3945_is_network_packet(struct iwl_priv *priv,
@@ -777,8 +630,6 @@
 			      rx_status.signal, rx_status.signal,
 			      rx_status.rate_idx);
 
-	/* Set "1" to report good data frames in groups of 100 */
-	iwl3945_dbg_report_frame(priv, pkt, header, 1);
 	iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
 
 	if (network_packet) {
@@ -850,25 +701,28 @@
 	/* Unmap tx_cmd */
 	if (counter)
 		pci_unmap_single(dev,
-				pci_unmap_addr(&txq->meta[index], mapping),
-				pci_unmap_len(&txq->meta[index], len),
+				dma_unmap_addr(&txq->meta[index], mapping),
+				dma_unmap_len(&txq->meta[index], len),
 				PCI_DMA_TODEVICE);
 
 	/* unmap chunks if any */
 
-	for (i = 1; i < counter; i++) {
+	for (i = 1; i < counter; i++)
 		pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
 			 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
-		if (txq->txb[txq->q.read_ptr].skb[0]) {
-			struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[0];
-			if (txq->txb[txq->q.read_ptr].skb[0]) {
-				/* Can be called from interrupt context */
-				dev_kfree_skb_any(skb);
-				txq->txb[txq->q.read_ptr].skb[0] = NULL;
-			}
+
+	/* free SKB */
+	if (txq->txb) {
+		struct sk_buff *skb;
+
+		skb = txq->txb[txq->q.read_ptr].skb;
+
+		/* can be called from irqs-disabled context */
+		if (skb) {
+			dev_kfree_skb_any(skb);
+			txq->txb[txq->q.read_ptr].skb = NULL;
 		}
 	}
-	return ;
 }
 
 /**
@@ -947,8 +801,7 @@
 		       tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
 }
 
-static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
-			   u16 tx_rate, u8 flags)
+static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
 {
 	unsigned long flags_spin;
 	struct iwl_station_entry *station;
@@ -962,10 +815,9 @@
 	station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
 	station->sta.rate_n_flags = cpu_to_le16(tx_rate);
 	station->sta.mode = STA_CONTROL_MODIFY_MSK;
-
+	iwl_send_add_sta(priv, &station->sta, CMD_ASYNC);
 	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
 
-	iwl_send_add_sta(priv, &station->sta, flags);
 	IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
 			sta_id, tx_rate);
 	return sta_id;
@@ -997,7 +849,7 @@
 
 static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 {
-	iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->dma_addr);
+	iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
 	iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
 	iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
 	iwl_write_direct32(priv, FH39_RCSR_CONFIG(0),
@@ -2473,8 +2325,7 @@
 
 		iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
 				 (priv->band == IEEE80211_BAND_5GHZ) ?
-				 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
-				 CMD_ASYNC);
+				 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
 		iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
 
 		return 0;
@@ -2590,6 +2441,7 @@
 
 	priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
 	priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
+	priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
 
 	return 0;
 }
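
[Editor's note: part of the iwl-3945.c hunk above moves from the pci_unmap_addr()/pci_unmap_len() accessors to the generic dma_unmap_addr()/dma_unmap_len() ones. A hedged sketch of how those accessors pair with the DEFINE_DMA_UNMAP_* helpers; the struct and function below are made up for illustration.]

	#include <linux/dma-mapping.h>

	/* The DEFINE_* macros only reserve storage when the platform
	 * actually needs the address/length kept around for unmap. */
	struct example_meta {
		DEFINE_DMA_UNMAP_ADDR(mapping);
		DEFINE_DMA_UNMAP_LEN(len);
	};

	static void example_save_and_unmap(struct device *dev,
					   struct example_meta *meta,
					   dma_addr_t addr, size_t sz)
	{
		dma_unmap_addr_set(meta, mapping, addr);
		dma_unmap_len_set(meta, len, sz);

		dma_unmap_single(dev, dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len), DMA_TO_DEVICE);
	}
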
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index cd4b61a..9166794 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -787,6 +787,6 @@
 struct iwl4965_scd_bc_tbl {
 	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
 	u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
-} __attribute__ ((packed));
+} __packed;
 
 #endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index d3afdda..1dd3bc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -346,9 +346,19 @@
 {
 	struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
 
-	if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
+	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
+	     iwl_is_associated(priv)) {
 		struct iwl_calib_diff_gain_cmd cmd;
 
+		/* clear data for chain noise calibration algorithm */
+		data->chain_noise_a = 0;
+		data->chain_noise_b = 0;
+		data->chain_noise_c = 0;
+		data->chain_signal_a = 0;
+		data->chain_signal_b = 0;
+		data->chain_signal_c = 0;
+		data->beacon_count = 0;
+
 		memset(&cmd, 0, sizeof(cmd));
 		cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
 		cmd.diff_gain_a = 0;
@@ -419,13 +429,6 @@
 		/* Mark so we run this algo only once! */
 		data->state = IWL_CHAIN_NOISE_CALIBRATED;
 	}
-	data->chain_noise_a = 0;
-	data->chain_noise_b = 0;
-	data->chain_noise_c = 0;
-	data->chain_signal_a = 0;
-	data->chain_signal_b = 0;
-	data->chain_signal_c = 0;
-	data->beacon_count = 0;
 }
 
 static void iwl4965_bg_txpower_work(struct work_struct *work)
@@ -669,6 +672,7 @@
 		priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
 
 	priv->hw_params.sens = &iwl4965_sensitivity;
+	priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
 
 	return 0;
 }
@@ -1441,7 +1445,8 @@
 	return ret;
 }
 
-static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
+static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
+				     struct ieee80211_channel_switch *ch_switch)
 {
 	int rc;
 	u8 band = 0;
@@ -1449,11 +1454,14 @@
 	u8 ctrl_chan_high = 0;
 	struct iwl4965_channel_switch_cmd cmd;
 	const struct iwl_channel_info *ch_info;
-
+	u32 switch_time_in_usec, ucode_switch_time;
+	u16 ch;
+	u32 tsf_low;
+	u8 switch_count;
+	u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
+	struct ieee80211_vif *vif = priv->vif;
 	band = priv->band == IEEE80211_BAND_2GHZ;
 
-	ch_info = iwl_get_channel_info(priv, priv->band, channel);
-
 	is_ht40 = is_ht40_channel(priv->staging_rxon.flags);
 
 	if (is_ht40 &&
@@ -1462,26 +1470,56 @@
 
 	cmd.band = band;
 	cmd.expect_beacon = 0;
-	cmd.channel = cpu_to_le16(channel);
+	ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
+	cmd.channel = cpu_to_le16(ch);
 	cmd.rxon_flags = priv->staging_rxon.flags;
 	cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
-	cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	switch_count = ch_switch->count;
+	tsf_low = ch_switch->timestamp & 0x0ffffffff;
+	/*
+	 * calculate the ucode channel switch time
+	 * adding TSF as one of the factor for when to switch
+	 */
+	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+		    beacon_interval)) {
+			switch_count -= (priv->ucode_beacon_time -
+				tsf_low) / beacon_interval;
+		} else
+			switch_count = 0;
+	}
+	if (switch_count <= 1)
+		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	else {
+		switch_time_in_usec =
+			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+		ucode_switch_time = iwl_usecs_to_beacons(priv,
+							 switch_time_in_usec,
+							 beacon_interval);
+		cmd.switch_time = iwl_add_beacon_time(priv,
+						      priv->ucode_beacon_time,
+						      ucode_switch_time,
+						      beacon_interval);
+	}
+	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+		      cmd.switch_time);
+	ch_info = iwl_get_channel_info(priv, priv->band, ch);
 	if (ch_info)
 		cmd.expect_beacon = is_channel_radar(ch_info);
 	else {
 		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-			priv->active_rxon.channel, channel);
+			priv->active_rxon.channel, ch);
 		return -EFAULT;
 	}
 
-	rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40,
+	rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
 				      ctrl_chan_high, &cmd.tx_power);
 	if (rc) {
 		IWL_DEBUG_11H(priv, "error:%d  fill txpower_tbl\n", rc);
 		return rc;
 	}
 
-	priv->switch_rxon.channel = cpu_to_le16(channel);
+	priv->switch_rxon.channel = cmd.channel;
 	priv->switch_rxon.switch_in_progress = true;
 
 	return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
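
The switch-time arithmetic in the hw_channel_switch hunks above first converts
the mac80211 channel-switch count into the number of beacon intervals still
outstanding from the uCode's point of view, then schedules the switch that many
intervals after priv->ucode_beacon_time. A minimal standalone sketch of that
count adjustment, using a hypothetical helper name and standard C types rather
than the driver's:

#include <stdint.h>

/* Sketch only: all values are assumed to be in the same uCode time units. */
static uint8_t adjust_switch_count(uint32_t ucode_beacon_time, uint32_t tsf_low,
				   uint16_t beacon_interval, uint8_t switch_count)
{
	uint32_t elapsed_beacons;

	/* No adjustment without a beacon interval, or if the uCode beacon
	 * time is not ahead of the TSF low word. */
	if (!beacon_interval || ucode_beacon_time <= tsf_low)
		return switch_count;

	elapsed_beacons = (ucode_beacon_time - tsf_low) / beacon_interval;

	return switch_count > elapsed_beacons ?
	       switch_count - elapsed_beacons : 0;
}

With the count adjusted, the patch either reuses priv->ucode_beacon_time
directly (count <= 1) or adds count * beacon_int * TIME_UNIT microseconds,
converted back into beacon units via iwl_usecs_to_beacons() and
iwl_add_beacon_time().
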
@@ -1542,7 +1580,8 @@
 	u32 R4;
 
 	if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
-		(priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
+	    (priv->_agn.statistics.flag &
+			STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
 		IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
 		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
 		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
@@ -1566,8 +1605,8 @@
 	if (!test_bit(STATUS_TEMPERATURE, &priv->status))
 		vt = sign_extend(R4, 23);
 	else
-		vt = sign_extend(
-			le32_to_cpu(priv->statistics.general.temperature), 23);
+		vt = sign_extend(le32_to_cpu(
+				priv->_agn.statistics.general.temperature), 23);
 
 	IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
 
@@ -1747,6 +1786,7 @@
 {
 	unsigned long flags;
 	u16 ra_tid;
+	int ret;
 
 	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
 	    (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
@@ -1762,7 +1802,9 @@
 	ra_tid = BUILD_RAxTID(sta_id, tid);
 
 	/* Modify device's station table to Tx this TID */
-	iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+	if (ret)
+		return ret;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
@@ -1870,7 +1912,7 @@
 		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
 				   agg->frame_count, agg->start_idx, idx);
 
-		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
+		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
 		info->status.rates[0].count = tx_resp->failure_frame + 1;
 		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
 		info->flags |= iwl_tx_status_to_mac80211(status);
@@ -2026,6 +2068,7 @@
 	int sta_id;
 	int freed;
 	u8 *qc = NULL;
+	unsigned long flags;
 
 	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
 		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
@@ -2035,7 +2078,7 @@
 		return;
 	}
 
-	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
+	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
 	memset(&info->status, 0, sizeof(info->status));
 
 	hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
@@ -2050,10 +2093,10 @@
 		return;
 	}
 
+	spin_lock_irqsave(&priv->sta_lock, flags);
 	if (txq->sched_retry) {
 		const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
 		struct iwl_ht_agg *agg = NULL;
-
 		WARN_ON(!qc);
 
 		agg = &priv->stations[sta_id].tid[tid].agg;
@@ -2110,6 +2153,8 @@
 		iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
 
 	iwl_check_abort_status(priv, tx_resp->frame_count, status);
+
+	spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
 
 static int iwl4965_calc_rssi(struct iwl_priv *priv,
@@ -2235,6 +2280,7 @@
 		.set_ct_kill = iwl4965_set_ct_threshold,
 	},
 	.manage_ibss_station = iwlagn_manage_ibss_station,
+	.update_bcast_station = iwl_update_bcast_station,
 	.debugfs_ops = {
 		.rx_stats_read = iwl_ucode_rx_stats_read,
 		.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -2285,7 +2331,7 @@
 	 * Force use of chains B and C for scan RX on 5 GHz band
 	 * because the device has off-channel reception on chain A.
 	 */
-	.scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+	.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
 };
 
 /* Module firmware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a28af7e..b8f3e20 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -208,6 +208,8 @@
 		BIT(IWL_CALIB_TX_IQ_PERD)	|
 		BIT(IWL_CALIB_BASE_BAND);
 
+	priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+
 	return 0;
 }
 
@@ -247,10 +249,13 @@
 	/* Set initial calibration set */
 	priv->hw_params.sens = &iwl5150_sensitivity;
 	priv->hw_params.calib_init_cfg =
-		BIT(IWL_CALIB_DC)		|
 		BIT(IWL_CALIB_LO)		|
 		BIT(IWL_CALIB_TX_IQ)		|
 		BIT(IWL_CALIB_BASE_BAND);
+	if (priv->cfg->need_dc_calib)
+		priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
+
+	priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
 
 	return 0;
 }
@@ -260,40 +265,76 @@
 	u32 vt = 0;
 	s32 offset =  iwl_temp_calib_to_offset(priv);
 
-	vt = le32_to_cpu(priv->statistics.general.temperature);
+	vt = le32_to_cpu(priv->_agn.statistics.general.temperature);
 	vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
 	/* now vt hold the temperature in Kelvin */
 	priv->temperature = KELVIN_TO_CELSIUS(vt);
 	iwl_tt_handler(priv);
 }
 
-static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
+static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
+				     struct ieee80211_channel_switch *ch_switch)
 {
 	struct iwl5000_channel_switch_cmd cmd;
 	const struct iwl_channel_info *ch_info;
+	u32 switch_time_in_usec, ucode_switch_time;
+	u16 ch;
+	u32 tsf_low;
+	u8 switch_count;
+	u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
+	struct ieee80211_vif *vif = priv->vif;
 	struct iwl_host_cmd hcmd = {
 		.id = REPLY_CHANNEL_SWITCH,
 		.len = sizeof(cmd),
-		.flags = CMD_SIZE_HUGE,
+		.flags = CMD_SYNC,
 		.data = &cmd,
 	};
 
-	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
-		priv->active_rxon.channel, channel);
 	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
-	cmd.channel = cpu_to_le16(channel);
+	ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
+	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
+		priv->active_rxon.channel, ch);
+	cmd.channel = cpu_to_le16(ch);
 	cmd.rxon_flags = priv->staging_rxon.flags;
 	cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
-	cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
-	ch_info = iwl_get_channel_info(priv, priv->band, channel);
+	switch_count = ch_switch->count;
+	tsf_low = ch_switch->timestamp & 0x0ffffffff;
+	/*
+	 * Calculate the uCode channel switch time,
+	 * using the TSF as one of the factors for when to switch.
+	 */
+	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+		    beacon_interval)) {
+			switch_count -= (priv->ucode_beacon_time -
+				tsf_low) / beacon_interval;
+		} else
+			switch_count = 0;
+	}
+	if (switch_count <= 1)
+		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	else {
+		switch_time_in_usec =
+			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+		ucode_switch_time = iwl_usecs_to_beacons(priv,
+							 switch_time_in_usec,
+							 beacon_interval);
+		cmd.switch_time = iwl_add_beacon_time(priv,
+						      priv->ucode_beacon_time,
+						      ucode_switch_time,
+						      beacon_interval);
+	}
+	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+		      cmd.switch_time);
+	ch_info = iwl_get_channel_info(priv, priv->band, ch);
 	if (ch_info)
 		cmd.expect_beacon = is_channel_radar(ch_info);
 	else {
 		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-			priv->active_rxon.channel, channel);
+			priv->active_rxon.channel, ch);
 		return -EFAULT;
 	}
-	priv->switch_rxon.channel = cpu_to_le16(channel);
+	priv->switch_rxon.channel = cmd.channel;
 	priv->switch_rxon.switch_in_progress = true;
 
 	return iwl_send_cmd_sync(priv, &hcmd);
@@ -352,6 +393,7 @@
 		.set_ct_kill = iwl5000_set_ct_threshold,
 	 },
 	.manage_ibss_station = iwlagn_manage_ibss_station,
+	.update_bcast_station = iwl_update_bcast_station,
 	.debugfs_ops = {
 		.rx_stats_read = iwl_ucode_rx_stats_read,
 		.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -414,6 +456,7 @@
 		.set_ct_kill = iwl5150_set_ct_threshold,
 	 },
 	.manage_ibss_station = iwlagn_manage_ibss_station,
+	.update_bcast_station = iwl_update_bcast_station,
 	.debugfs_ops = {
 		.rx_stats_read = iwl_ucode_rx_stats_read,
 		.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -620,6 +663,7 @@
 	.ucode_tracing = true,
 	.sensitivity_calib_by_driver = true,
 	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
 };
 
 struct iwl_cfg iwl5150_abg_cfg = {
@@ -649,6 +693,7 @@
 	.ucode_tracing = true,
 	.sensitivity_calib_by_driver = true,
 	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
 };
 
 MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 9fbf54c..8577664 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -71,6 +71,10 @@
 #define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
 #define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
 
+#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-"
+#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
+#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
+
 
 static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
 {
@@ -80,9 +84,10 @@
 }
 
 /* Indicate calibration version to uCode. */
-static void iwl6050_set_calib_version(struct iwl_priv *priv)
+static void iwl6000_set_calib_version(struct iwl_priv *priv)
 {
-	if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
+	if (priv->cfg->need_dc_calib &&
+	    (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6))
 		iwl_set_bit(priv, CSR_GP_DRIVER_REG,
 				CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
 }
@@ -182,83 +187,77 @@
 		BIT(IWL_CALIB_LO)		|
 		BIT(IWL_CALIB_TX_IQ)		|
 		BIT(IWL_CALIB_BASE_BAND);
+	if (priv->cfg->need_dc_calib)
+		priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
+
+	priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
 
 	return 0;
 }
 
-static int iwl6050_hw_set_hw_params(struct iwl_priv *priv)
-{
-	if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
-	    priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
-		priv->cfg->num_of_queues =
-			priv->cfg->mod_params->num_of_queues;
-
-	priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
-	priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
-	priv->hw_params.scd_bc_tbls_size =
-			priv->cfg->num_of_queues *
-			sizeof(struct iwlagn_scd_bc_tbl);
-	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
-	priv->hw_params.max_stations = IWL5000_STATION_COUNT;
-	priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
-
-	priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
-	priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
-
-	priv->hw_params.max_bsm_size = 0;
-	priv->hw_params.ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
-					BIT(IEEE80211_BAND_5GHZ);
-	priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
-
-	priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
-	priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
-	priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
-	priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
-
-	if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
-		priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
-
-	/* Set initial sensitivity parameters */
-	/* Set initial calibration set */
-	priv->hw_params.sens = &iwl6000_sensitivity;
-	priv->hw_params.calib_init_cfg =
-		BIT(IWL_CALIB_XTAL)		|
-		BIT(IWL_CALIB_DC)		|
-		BIT(IWL_CALIB_LO)		|
-		BIT(IWL_CALIB_TX_IQ)		|
-		BIT(IWL_CALIB_BASE_BAND);
-
-	return 0;
-}
-
-static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
+static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
+				     struct ieee80211_channel_switch *ch_switch)
 {
 	struct iwl6000_channel_switch_cmd cmd;
 	const struct iwl_channel_info *ch_info;
+	u32 switch_time_in_usec, ucode_switch_time;
+	u16 ch;
+	u32 tsf_low;
+	u8 switch_count;
+	u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
+	struct ieee80211_vif *vif = priv->vif;
 	struct iwl_host_cmd hcmd = {
 		.id = REPLY_CHANNEL_SWITCH,
 		.len = sizeof(cmd),
-		.flags = CMD_SIZE_HUGE,
+		.flags = CMD_SYNC,
 		.data = &cmd,
 	};
 
-	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
-		priv->active_rxon.channel, channel);
-
 	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
-	cmd.channel = cpu_to_le16(channel);
+	ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
+	IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
+		      priv->active_rxon.channel, ch);
+	cmd.channel = cpu_to_le16(ch);
 	cmd.rxon_flags = priv->staging_rxon.flags;
 	cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
-	cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
-	ch_info = iwl_get_channel_info(priv, priv->band, channel);
+	switch_count = ch_switch->count;
+	tsf_low = ch_switch->timestamp & 0x0ffffffff;
+	/*
+	 * Calculate the uCode channel switch time,
+	 * using the TSF as one of the factors for when to switch.
+	 */
+	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+		    beacon_interval)) {
+			switch_count -= (priv->ucode_beacon_time -
+				tsf_low) / beacon_interval;
+		} else
+			switch_count = 0;
+	}
+	if (switch_count <= 1)
+		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	else {
+		switch_time_in_usec =
+			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+		ucode_switch_time = iwl_usecs_to_beacons(priv,
+							 switch_time_in_usec,
+							 beacon_interval);
+		cmd.switch_time = iwl_add_beacon_time(priv,
+						      priv->ucode_beacon_time,
+						      ucode_switch_time,
+						      beacon_interval);
+	}
+	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+		      cmd.switch_time);
+	ch_info = iwl_get_channel_info(priv, priv->band, ch);
 	if (ch_info)
 		cmd.expect_beacon = is_channel_radar(ch_info);
 	else {
 		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-			priv->active_rxon.channel, channel);
+			priv->active_rxon.channel, ch);
 		return -EFAULT;
 	}
-	priv->switch_rxon.channel = cpu_to_le16(channel);
+	priv->switch_rxon.channel = cmd.channel;
 	priv->switch_rxon.switch_in_progress = true;
 
 	return iwl_send_cmd_sync(priv, &hcmd);
@@ -316,8 +315,10 @@
 	.temp_ops = {
 		.temperature = iwlagn_temperature,
 		.set_ct_kill = iwl6000_set_ct_threshold,
+		.set_calib_version = iwl6000_set_calib_version,
 	 },
 	.manage_ibss_station = iwlagn_manage_ibss_station,
+	.update_bcast_station = iwl_update_bcast_station,
 	.debugfs_ops = {
 		.rx_stats_read = iwl_ucode_rx_stats_read,
 		.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -335,79 +336,25 @@
 	.led = &iwlagn_led_ops,
 };
 
-static struct iwl_lib_ops iwl6050_lib = {
-	.set_hw_params = iwl6050_hw_set_hw_params,
-	.txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
-	.txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
-	.txq_set_sched = iwlagn_txq_set_sched,
-	.txq_agg_enable = iwlagn_txq_agg_enable,
-	.txq_agg_disable = iwlagn_txq_agg_disable,
-	.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
-	.txq_free_tfd = iwl_hw_txq_free_tfd,
-	.txq_init = iwl_hw_tx_queue_init,
-	.rx_handler_setup = iwlagn_rx_handler_setup,
-	.setup_deferred_work = iwlagn_setup_deferred_work,
-	.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
-	.load_ucode = iwlagn_load_ucode,
-	.dump_nic_event_log = iwl_dump_nic_event_log,
-	.dump_nic_error_log = iwl_dump_nic_error_log,
-	.dump_csr = iwl_dump_csr,
-	.dump_fh = iwl_dump_fh,
-	.init_alive_start = iwlagn_init_alive_start,
-	.alive_notify = iwlagn_alive_notify,
-	.send_tx_power = iwlagn_send_tx_power,
-	.update_chain_flags = iwl_update_chain_flags,
-	.set_channel_switch = iwl6000_hw_channel_switch,
-	.apm_ops = {
-		.init = iwl_apm_init,
-		.stop = iwl_apm_stop,
-		.config = iwl6000_nic_config,
-		.set_pwr_src = iwl_set_pwr_src,
-	},
-	.eeprom_ops = {
-		.regulatory_bands = {
-			EEPROM_REG_BAND_1_CHANNELS,
-			EEPROM_REG_BAND_2_CHANNELS,
-			EEPROM_REG_BAND_3_CHANNELS,
-			EEPROM_REG_BAND_4_CHANNELS,
-			EEPROM_REG_BAND_5_CHANNELS,
-			EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
-			EEPROM_REG_BAND_52_HT40_CHANNELS
-		},
-		.verify_signature  = iwlcore_eeprom_verify_signature,
-		.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
-		.release_semaphore = iwlcore_eeprom_release_semaphore,
-		.calib_version	= iwlagn_eeprom_calib_version,
-		.query_addr = iwlagn_eeprom_query_addr,
-		.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
-	},
-	.post_associate = iwl_post_associate,
-	.isr = iwl_isr_ict,
-	.config_ap = iwl_config_ap,
-	.temp_ops = {
-		.temperature = iwlagn_temperature,
-		.set_ct_kill = iwl6000_set_ct_threshold,
-		.set_calib_version = iwl6050_set_calib_version,
-	 },
-	.manage_ibss_station = iwlagn_manage_ibss_station,
-	.debugfs_ops = {
-		.rx_stats_read = iwl_ucode_rx_stats_read,
-		.tx_stats_read = iwl_ucode_tx_stats_read,
-		.general_stats_read = iwl_ucode_general_stats_read,
-	},
-	.recover_from_tx_stall = iwl_bg_monitor_recover,
-	.check_plcp_health = iwl_good_plcp_health,
-	.check_ack_health = iwl_good_ack_health,
+static void do_not_send_bt_config(struct iwl_priv *priv)
+{
+}
+
+static struct iwl_hcmd_ops iwl6000g2b_hcmd = {
+	.rxon_assoc = iwlagn_send_rxon_assoc,
+	.commit_rxon = iwl_commit_rxon,
+	.set_rxon_chain = iwl_set_rxon_chain,
+	.set_tx_ant = iwlagn_send_tx_ant_config,
+	.send_bt_config = do_not_send_bt_config,
 };
 
-static const struct iwl_ops iwl6050_ops = {
-	.lib = &iwl6050_lib,
-	.hcmd = &iwlagn_hcmd,
+static const struct iwl_ops iwl6000g2b_ops = {
+	.lib = &iwl6000_lib,
+	.hcmd = &iwl6000g2b_hcmd,
 	.utils = &iwlagn_hcmd_utils,
 	.led = &iwlagn_led_ops,
 };
 
-
 struct iwl_cfg iwl6000g2a_2agn_cfg = {
 	.name = "6000 Series 2x2 AGN Gen2a",
 	.fw_name_pre = IWL6000G2A_FW_PRE,
@@ -443,6 +390,293 @@
 	.ucode_tracing = true,
 	.sensitivity_calib_by_driver = true,
 	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
+};
+
+struct iwl_cfg iwl6000g2a_2abg_cfg = {
+	.name = "6000 Series 2x2 ABG Gen2a",
+	.fw_name_pre = IWL6000G2A_FW_PRE,
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,
+	.sku = IWL_SKU_A|IWL_SKU_G,
+	.ops = &iwl6000_ops,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+	.mod_params = &iwlagn_mod_params,
+	.valid_tx_ant = ANT_AB,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
+	.pa_type = IWL_PA_SYSTEM,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.monitor_recover_period = IWL_MONITORING_PERIOD,
+	.max_event_log_size = 512,
+	.sensitivity_calib_by_driver = true,
+	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
+};
+
+struct iwl_cfg iwl6000g2a_2bg_cfg = {
+	.name = "6000 Series 2x2 BG Gen2a",
+	.fw_name_pre = IWL6000G2A_FW_PRE,
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,
+	.sku = IWL_SKU_G,
+	.ops = &iwl6000_ops,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+	.mod_params = &iwlagn_mod_params,
+	.valid_tx_ant = ANT_AB,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
+	.pa_type = IWL_PA_SYSTEM,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.monitor_recover_period = IWL_MONITORING_PERIOD,
+	.max_event_log_size = 512,
+	.sensitivity_calib_by_driver = true,
+	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
+};
+
+struct iwl_cfg iwl6000g2b_2agn_cfg = {
+	.name = "6000 Series 2x2 AGN Gen2b",
+	.fw_name_pre = IWL6000G2B_FW_PRE,
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,
+	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
+	.ops = &iwl6000g2b_ops,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+	.mod_params = &iwlagn_mod_params,
+	.valid_tx_ant = ANT_AB,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
+	.pa_type = IWL_PA_SYSTEM,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.ht_greenfield_support = true,
+	.led_compensation = 51,
+	.use_rts_for_ht = true, /* use rts/cts protection */
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.monitor_recover_period = IWL_MONITORING_PERIOD,
+	.max_event_log_size = 512,
+	.sensitivity_calib_by_driver = true,
+	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
+};
+
+struct iwl_cfg iwl6000g2b_2abg_cfg = {
+	.name = "6000 Series 2x2 ABG Gen2b",
+	.fw_name_pre = IWL6000G2B_FW_PRE,
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,
+	.sku = IWL_SKU_A|IWL_SKU_G,
+	.ops = &iwl6000g2b_ops,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+	.mod_params = &iwlagn_mod_params,
+	.valid_tx_ant = ANT_AB,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
+	.pa_type = IWL_PA_SYSTEM,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.monitor_recover_period = IWL_MONITORING_PERIOD,
+	.max_event_log_size = 512,
+	.sensitivity_calib_by_driver = true,
+	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
+};
+
+struct iwl_cfg iwl6000g2b_2bgn_cfg = {
+	.name = "6000 Series 2x2 BGN Gen2b",
+	.fw_name_pre = IWL6000G2B_FW_PRE,
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,
+	.sku = IWL_SKU_G|IWL_SKU_N,
+	.ops = &iwl6000g2b_ops,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+	.mod_params = &iwlagn_mod_params,
+	.valid_tx_ant = ANT_AB,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
+	.pa_type = IWL_PA_SYSTEM,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.ht_greenfield_support = true,
+	.led_compensation = 51,
+	.use_rts_for_ht = true, /* use rts/cts protection */
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.monitor_recover_period = IWL_MONITORING_PERIOD,
+	.max_event_log_size = 512,
+	.sensitivity_calib_by_driver = true,
+	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
+};
+
+struct iwl_cfg iwl6000g2b_2bg_cfg = {
+	.name = "6000 Series 2x2 BG Gen2b",
+	.fw_name_pre = IWL6000G2B_FW_PRE,
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,
+	.sku = IWL_SKU_G,
+	.ops = &iwl6000g2b_ops,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+	.mod_params = &iwlagn_mod_params,
+	.valid_tx_ant = ANT_AB,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
+	.pa_type = IWL_PA_SYSTEM,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.monitor_recover_period = IWL_MONITORING_PERIOD,
+	.max_event_log_size = 512,
+	.sensitivity_calib_by_driver = true,
+	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
+};
+
+struct iwl_cfg iwl6000g2b_bgn_cfg = {
+	.name = "6000 Series 1x2 BGN Gen2b",
+	.fw_name_pre = IWL6000G2B_FW_PRE,
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,
+	.sku = IWL_SKU_G|IWL_SKU_N,
+	.ops = &iwl6000g2b_ops,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+	.mod_params = &iwlagn_mod_params,
+	.valid_tx_ant = ANT_A,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
+	.pa_type = IWL_PA_SYSTEM,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.ht_greenfield_support = true,
+	.led_compensation = 51,
+	.use_rts_for_ht = true, /* use rts/cts protection */
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.monitor_recover_period = IWL_MONITORING_PERIOD,
+	.max_event_log_size = 512,
+	.sensitivity_calib_by_driver = true,
+	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
+};
+
+struct iwl_cfg iwl6000g2b_bg_cfg = {
+	.name = "6000 Series 1x2 BG Gen2b",
+	.fw_name_pre = IWL6000G2B_FW_PRE,
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,
+	.sku = IWL_SKU_G,
+	.ops = &iwl6000g2b_ops,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+	.mod_params = &iwlagn_mod_params,
+	.valid_tx_ant = ANT_A,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = false,
+	.pa_type = IWL_PA_SYSTEM,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.led_compensation = 51,
+	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.monitor_recover_period = IWL_MONITORING_PERIOD,
+	.max_event_log_size = 512,
+	.sensitivity_calib_by_driver = true,
+	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
 };
 
 /*
@@ -561,7 +795,7 @@
 	.ucode_api_max = IWL6050_UCODE_API_MAX,
 	.ucode_api_min = IWL6050_UCODE_API_MIN,
 	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
-	.ops = &iwl6050_ops,
+	.ops = &iwl6000_ops,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
@@ -590,6 +824,7 @@
 	.ucode_tracing = true,
 	.sensitivity_calib_by_driver = true,
 	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
 };
 
 struct iwl_cfg iwl6050_2abg_cfg = {
@@ -598,7 +833,7 @@
 	.ucode_api_max = IWL6050_UCODE_API_MAX,
 	.ucode_api_min = IWL6050_UCODE_API_MIN,
 	.sku = IWL_SKU_A|IWL_SKU_G,
-	.ops = &iwl6050_ops,
+	.ops = &iwl6000_ops,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
@@ -625,6 +860,7 @@
 	.ucode_tracing = true,
 	.sensitivity_calib_by_driver = true,
 	.chain_noise_calib_by_driver = true,
+	.need_dc_calib = true,
 };
 
 struct iwl_cfg iwl6000_3agn_cfg = {
@@ -667,3 +903,4 @@
 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
similarity index 98%
rename from drivers/net/wireless/iwlwifi/iwl-calib.c
rename to drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 7e8227773..eb052b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -96,17 +96,16 @@
 			hcmd.len = priv->calib_results[i].buf_len;
 			hcmd.data = priv->calib_results[i].buf;
 			ret = iwl_send_cmd_sync(priv, &hcmd);
-			if (ret)
-				goto err;
+			if (ret) {
+				IWL_ERR(priv, "Error %d iteration %d\n",
+					ret, i);
+				break;
+			}
 		}
 	}
 
-	return 0;
-err:
-	IWL_ERR(priv, "Error %d iteration %d\n", ret, i);
 	return ret;
 }
-EXPORT_SYMBOL(iwl_send_calib_results);
 
 int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
 {
@@ -121,7 +120,6 @@
 	memcpy(res->buf, buf, len);
 	return 0;
 }
-EXPORT_SYMBOL(iwl_calib_set);
 
 void iwl_calib_free_results(struct iwl_priv *priv)
 {
@@ -133,7 +131,6 @@
 		priv->calib_results[i].buf_len = 0;
 	}
 }
-EXPORT_SYMBOL(iwl_calib_free_results);
 
 /*****************************************************************************
  * RUNTIME calibrations framework
@@ -533,7 +530,6 @@
 	ret |= iwl_sensitivity_write(priv);
 	IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
 }
-EXPORT_SYMBOL(iwl_init_sensitivity);
 
 void iwl_sensitivity_calibration(struct iwl_priv *priv,
 				    struct iwl_notif_statistics *resp)
@@ -639,7 +635,6 @@
 	iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
 	iwl_sensitivity_write(priv);
 }
-EXPORT_SYMBOL(iwl_sensitivity_calibration);
 
 static inline u8 find_first_chain(u8 mask)
 {
@@ -846,6 +841,13 @@
 		}
 	}
 
+	if (active_chains != priv->hw_params.valid_rx_ant &&
+	    active_chains != priv->chain_noise_data.active_chains)
+		IWL_DEBUG_CALIB(priv,
+				"Detected that not all antennas are connected! "
+				"Connected: %#x, valid: %#x.\n",
+				active_chains, priv->hw_params.valid_rx_ant);
+
 	/* Save for use within RXON, TX, SCAN commands, etc. */
 	priv->chain_noise_data.active_chains = active_chains;
 	IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
@@ -890,8 +892,6 @@
 	data->state = IWL_CHAIN_NOISE_DONE;
 	iwl_power_update_mode(priv, false);
 }
-EXPORT_SYMBOL(iwl_chain_noise_calibration);
-
 
 void iwl_reset_run_time_calib(struct iwl_priv *priv)
 {
@@ -908,5 +908,3 @@
 	 * periodically after association */
 	iwl_send_statistics_request(priv, CMD_ASYNC, true);
 }
-EXPORT_SYMBOL(iwl_reset_run_time_calib);
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index 48c023b..75d6bfc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -28,6 +28,27 @@
 
 #include "iwl-agn-debugfs.h"
 
+static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+	int p = 0;
+
+	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+		       le32_to_cpu(priv->_agn.statistics.flag));
+	if (le32_to_cpu(priv->_agn.statistics.flag) &
+			UCODE_STATISTICS_CLEAR_MSK)
+		p += scnprintf(buf + p, bufsz - p,
+			       "\tStatistics have been cleared\n");
+	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+		       (le32_to_cpu(priv->_agn.statistics.flag) &
+			UCODE_STATISTICS_FREQUENCY_MSK)
+			? "2.4 GHz" : "5.2 GHz");
+	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+		       (le32_to_cpu(priv->_agn.statistics.flag) &
+			UCODE_STATISTICS_NARROW_BAND_MSK)
+			? "enabled" : "disabled");
+	return p;
+}
+
 ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos)
   {
@@ -58,24 +79,24 @@
 	 * the last statistics notification from uCode
 	 * might not reflect the current uCode activity
 	 */
-	ofdm = &priv->statistics.rx.ofdm;
-	cck = &priv->statistics.rx.cck;
-	general = &priv->statistics.rx.general;
-	ht = &priv->statistics.rx.ofdm_ht;
-	accum_ofdm = &priv->accum_statistics.rx.ofdm;
-	accum_cck = &priv->accum_statistics.rx.cck;
-	accum_general = &priv->accum_statistics.rx.general;
-	accum_ht = &priv->accum_statistics.rx.ofdm_ht;
-	delta_ofdm = &priv->delta_statistics.rx.ofdm;
-	delta_cck = &priv->delta_statistics.rx.cck;
-	delta_general = &priv->delta_statistics.rx.general;
-	delta_ht = &priv->delta_statistics.rx.ofdm_ht;
-	max_ofdm = &priv->max_delta.rx.ofdm;
-	max_cck = &priv->max_delta.rx.cck;
-	max_general = &priv->max_delta.rx.general;
-	max_ht = &priv->max_delta.rx.ofdm_ht;
+	ofdm = &priv->_agn.statistics.rx.ofdm;
+	cck = &priv->_agn.statistics.rx.cck;
+	general = &priv->_agn.statistics.rx.general;
+	ht = &priv->_agn.statistics.rx.ofdm_ht;
+	accum_ofdm = &priv->_agn.accum_statistics.rx.ofdm;
+	accum_cck = &priv->_agn.accum_statistics.rx.cck;
+	accum_general = &priv->_agn.accum_statistics.rx.general;
+	accum_ht = &priv->_agn.accum_statistics.rx.ofdm_ht;
+	delta_ofdm = &priv->_agn.delta_statistics.rx.ofdm;
+	delta_cck = &priv->_agn.delta_statistics.rx.cck;
+	delta_general = &priv->_agn.delta_statistics.rx.general;
+	delta_ht = &priv->_agn.delta_statistics.rx.ofdm_ht;
+	max_ofdm = &priv->_agn.max_delta.rx.ofdm;
+	max_cck = &priv->_agn.max_delta.rx.cck;
+	max_general = &priv->_agn.max_delta.rx.general;
+	max_ht = &priv->_agn.max_delta.rx.ofdm_ht;
 
-	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+	pos += iwl_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
 			 "acumulative       delta         max\n",
 			 "Statistics_Rx - OFDM:");
@@ -539,11 +560,11 @@
 	  * the last statistics notification from uCode
 	  * might not reflect the current uCode activity
 	  */
-	tx = &priv->statistics.tx;
-	accum_tx = &priv->accum_statistics.tx;
-	delta_tx = &priv->delta_statistics.tx;
-	max_tx = &priv->max_delta.tx;
-	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+	tx = &priv->_agn.statistics.tx;
+	accum_tx = &priv->_agn.accum_statistics.tx;
+	delta_tx = &priv->_agn.delta_statistics.tx;
+	max_tx = &priv->_agn.max_delta.tx;
+	pos += iwl_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos,  "%-32s     current"
 			 "acumulative       delta         max\n",
 			 "Statistics_Tx:");
@@ -756,19 +777,19 @@
 	  * the last statistics notification from uCode
 	  * might not reflect the current uCode activity
 	  */
-	general = &priv->statistics.general;
-	dbg = &priv->statistics.general.dbg;
-	div = &priv->statistics.general.div;
-	accum_general = &priv->accum_statistics.general;
-	delta_general = &priv->delta_statistics.general;
-	max_general = &priv->max_delta.general;
-	accum_dbg = &priv->accum_statistics.general.dbg;
-	delta_dbg = &priv->delta_statistics.general.dbg;
-	max_dbg = &priv->max_delta.general.dbg;
-	accum_div = &priv->accum_statistics.general.div;
-	delta_div = &priv->delta_statistics.general.div;
-	max_div = &priv->max_delta.general.div;
-	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+	general = &priv->_agn.statistics.general;
+	dbg = &priv->_agn.statistics.general.dbg;
+	div = &priv->_agn.statistics.general.div;
+	accum_general = &priv->_agn.accum_statistics.general;
+	delta_general = &priv->_agn.delta_statistics.general;
+	max_general = &priv->_agn.max_delta.general;
+	accum_dbg = &priv->_agn.accum_statistics.general.dbg;
+	delta_dbg = &priv->_agn.delta_statistics.general.dbg;
+	max_dbg = &priv->_agn.max_delta.general.dbg;
+	accum_div = &priv->_agn.accum_statistics.general.div;
+	delta_div = &priv->_agn.delta_statistics.general.div;
+	max_div = &priv->_agn.max_delta.general.div;
+	pos += iwl_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
 			 "acumulative       delta         max\n",
 			 "Statistics_General:");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 01658cf..f06d1fe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -37,7 +37,7 @@
 #include "iwl-io.h"
 #include "iwl-agn.h"
 
-static int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
+int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
 {
 	int ret = 0;
 	struct iwl5000_rxon_assoc_cmd rxon_assoc;
@@ -84,7 +84,7 @@
 	return ret;
 }
 
-static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
+int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
 {
 	struct iwl_tx_ant_config_cmd tx_ant_cmd = {
 	  .valid = cpu_to_le32(valid_tx_ant),
@@ -176,14 +176,6 @@
 		data->radio_write = 1;
 		data->state = IWL_CHAIN_NOISE_CALIBRATED;
 	}
-
-	data->chain_noise_a = 0;
-	data->chain_noise_b = 0;
-	data->chain_noise_c = 0;
-	data->chain_signal_a = 0;
-	data->chain_signal_b = 0;
-	data->chain_signal_c = 0;
-	data->beacon_count = 0;
 }
 
 static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
@@ -191,10 +183,20 @@
 	struct iwl_chain_noise_data *data = &priv->chain_noise_data;
 	int ret;
 
-	if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
+	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
+	     iwl_is_associated(priv)) {
 		struct iwl_calib_chain_noise_reset_cmd cmd;
-		memset(&cmd, 0, sizeof(cmd));
 
+		/* clear data for chain noise calibration algorithm */
+		data->chain_noise_a = 0;
+		data->chain_noise_b = 0;
+		data->chain_noise_c = 0;
+		data->chain_signal_a = 0;
+		data->chain_signal_b = 0;
+		data->chain_signal_c = 0;
+		data->beacon_count = 0;
+
+		memset(&cmd, 0, sizeof(cmd));
 		cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
 		cmd.hdr.first_group = 0;
 		cmd.hdr.groups_num = 1;
@@ -212,7 +214,7 @@
 static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
 			__le32 *tx_flags)
 {
-	*tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
+	*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
 }
 
 /* Calc max signal level (dBm) among 3 possible receivers */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index f9a3fbb..a52b82c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -112,7 +112,7 @@
  */
 struct iwlagn_scd_bc_tbl {
 	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 
 #endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 0f292a2..5f1e7d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -77,7 +77,7 @@
 		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
 				   agg->frame_count, agg->start_idx, idx);
 
-		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
+		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
 		info->status.rates[0].count = tx_resp->failure_frame + 1;
 		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
 		info->flags |= iwl_tx_status_to_mac80211(status);
@@ -93,6 +93,12 @@
 	} else {
 		/* Two or more frames were attempted; expect block-ack */
 		u64 bitmap = 0;
+
+		/*
+		 * Start is the lowest frame sent. It may not be the first
+		 * frame in the batch; we figure this out dynamically during
+		 * the following loop.
+		 */
 		int start = agg->start_idx;
 
 		/* Construct bit-map of pending frames within Tx window */
@@ -131,25 +137,58 @@
 			IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
 					   i, idx, SEQ_TO_SN(sc));
 
+			/*
+			 * sh -> how many frames ahead of the starting frame is
+			 * the current one?
+			 *
+			 * Note that all frames sent in the batch must be in a
+			 * 64-frame window, so this number should be in [0,63].
+			 * If outside of this window, then we've found a new
+			 * "first" frame in the batch and need to change start.
+			 */
 			sh = idx - start;
-			if (sh > 64) {
-				sh = (start - idx) + 0xff;
+
+			/*
+			 * If >= 64, out of window. start must be at the front
+			 * of the circular buffer, idx must be near the end of
+			 * the buffer, and idx is the new "first" frame. Shift
+			 * the indices around.
+			 */
+			if (sh >= 64) {
+				/* Shift bitmap by start - idx, wrapped */
+				sh = 0x100 - idx + start;
 				bitmap = bitmap << sh;
+				/* Now idx is the new start so sh = 0 */
 				sh = 0;
 				start = idx;
-			} else if (sh < -64)
-				sh  = 0xff - (start - idx);
-			else if (sh < 0) {
+			/*
+			 * If <= -64 then wraps the 256-pkt circular buffer
+			 * (e.g., start = 255 and idx = 0, sh should be 1)
+			 */
+			} else if (sh <= -64) {
+				sh  = 0x100 - start + idx;
+			/*
+			 * If < 0 but > -64, out of window. idx is before start
+			 * but not wrapped. Shift the indices around.
+			 */
+			} else if (sh < 0) {
+				/* Shift by how far start is ahead of idx */
 				sh = start - idx;
-				start = idx;
 				bitmap = bitmap << sh;
+				/* Now idx is the new start so sh = 0 */
+				start = idx;
 				sh = 0;
 			}
+			/* Sequence number start + sh was sent in this batch */
 			bitmap |= 1ULL << sh;
 			IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
 					   start, (unsigned long long)bitmap);
 		}
 
+		/*
+		 * Store the bitmap and possibly the new start, if we wrapped
+		 * the buffer above
+		 */
 		agg->bitmap = bitmap;
 		agg->start_idx = start;
 		IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
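
The shift ("sh") handling commented in the hunk above keeps start and idx
within a 64-frame block-ack window laid over a 256-entry circular buffer. A
small self-contained sketch of the same wrap logic, with a worked example of
the wrap case (hypothetical helper, standard C types, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the window-shift cases above; indices are 0..255. */
static void agg_window_set(int *start, int idx, uint64_t *bitmap)
{
	int sh = idx - *start;

	if (sh >= 64) {
		/* idx wrapped past the end of the buffer: it becomes the new start */
		sh = 0x100 - idx + *start;
		*bitmap <<= sh;
		*start = idx;
		sh = 0;
	} else if (sh <= -64) {
		/* start near the end of the buffer, idx wrapped:
		 * e.g. start = 255, idx = 0 gives sh = 1 */
		sh = 0x100 - *start + idx;
	} else if (sh < 0) {
		/* idx earlier than start without wrapping: idx is the new start */
		sh = *start - idx;
		*bitmap <<= sh;
		*start = idx;
		sh = 0;
	}
	*bitmap |= 1ULL << sh;
}

int main(void)
{
	uint64_t bitmap = 0;
	int start = 255;

	agg_window_set(&start, 255, &bitmap);	/* first frame: bit 0 */
	agg_window_set(&start, 0, &bitmap);	/* wraps, sh = 1: bit 1 */
	printf("start=%d bitmap=0x%llx\n", start,
	       (unsigned long long)bitmap);	/* prints: start=255 bitmap=0x3 */
	return 0;
}
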
@@ -184,6 +223,7 @@
 	int tid;
 	int sta_id;
 	int freed;
+	unsigned long flags;
 
 	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
 		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
@@ -193,15 +233,16 @@
 		return;
 	}
 
-	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
+	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
 	memset(&info->status, 0, sizeof(info->status));
 
 	tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
 	sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
 
+	spin_lock_irqsave(&priv->sta_lock, flags);
 	if (txq->sched_retry) {
 		const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
-		struct iwl_ht_agg *agg = NULL;
+		struct iwl_ht_agg *agg;
 
 		agg = &priv->stations[sta_id].tid[tid].agg;
 
@@ -256,6 +297,7 @@
 	iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
 
 	iwl_check_abort_status(priv, tx_resp->frame_count, status);
+	spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
 
 void iwlagn_rx_handler_setup(struct iwl_priv *priv)
@@ -319,7 +361,8 @@
 void iwlagn_temperature(struct iwl_priv *priv)
 {
 	/* store temperature from statistics (in Celsius) */
-	priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
+	priv->temperature =
+		le32_to_cpu(priv->_agn.statistics.general.temperature);
 	iwl_tt_handler(priv);
 }
 
@@ -444,7 +487,7 @@
 
 	/* Tell device where to find RBD circular buffer in DRAM */
 	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-			   (u32)(rxq->dma_addr >> 8));
+			   (u32)(rxq->bd_dma >> 8));
 
 	/* Tell device where in DRAM to update its Rx status */
 	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
@@ -709,7 +752,7 @@
 	}
 
 	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-			  rxq->dma_addr);
+			  rxq->bd_dma);
 	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
 			  rxq->rb_stts, rxq->rb_stts_dma);
 	rxq->bd = NULL;
@@ -755,132 +798,6 @@
 	return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
 }
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-/**
- * iwlagn_dbg_report_frame - dump frame to syslog during debug sessions
- *
- * You may hack this function to show different aspects of received frames,
- * including selective frame dumps.
- * group100 parameter selects whether to show 1 out of 100 good data frames.
- *    All beacon and probe response frames are printed.
- */
-static void iwlagn_dbg_report_frame(struct iwl_priv *priv,
-		      struct iwl_rx_phy_res *phy_res, u16 length,
-		      struct ieee80211_hdr *header, int group100)
-{
-	u32 to_us;
-	u32 print_summary = 0;
-	u32 print_dump = 0;	/* set to 1 to dump all frames' contents */
-	u32 hundred = 0;
-	u32 dataframe = 0;
-	__le16 fc;
-	u16 seq_ctl;
-	u16 channel;
-	u16 phy_flags;
-	u32 rate_n_flags;
-	u32 tsf_low;
-	int rssi;
-
-	if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
-		return;
-
-	/* MAC header */
-	fc = header->frame_control;
-	seq_ctl = le16_to_cpu(header->seq_ctrl);
-
-	/* metadata */
-	channel = le16_to_cpu(phy_res->channel);
-	phy_flags = le16_to_cpu(phy_res->phy_flags);
-	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
-	/* signal statistics */
-	rssi = iwlagn_calc_rssi(priv, phy_res);
-	tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
-
-	to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
-
-	/* if data frame is to us and all is good,
-	 *   (optionally) print summary for only 1 out of every 100 */
-	if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
-	    cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
-		dataframe = 1;
-		if (!group100)
-			print_summary = 1;	/* print each frame */
-		else if (priv->framecnt_to_us < 100) {
-			priv->framecnt_to_us++;
-			print_summary = 0;
-		} else {
-			priv->framecnt_to_us = 0;
-			print_summary = 1;
-			hundred = 1;
-		}
-	} else {
-		/* print summary for all other frames */
-		print_summary = 1;
-	}
-
-	if (print_summary) {
-		char *title;
-		int rate_idx;
-		u32 bitrate;
-
-		if (hundred)
-			title = "100Frames";
-		else if (ieee80211_has_retry(fc))
-			title = "Retry";
-		else if (ieee80211_is_assoc_resp(fc))
-			title = "AscRsp";
-		else if (ieee80211_is_reassoc_resp(fc))
-			title = "RasRsp";
-		else if (ieee80211_is_probe_resp(fc)) {
-			title = "PrbRsp";
-			print_dump = 1;	/* dump frame contents */
-		} else if (ieee80211_is_beacon(fc)) {
-			title = "Beacon";
-			print_dump = 1;	/* dump frame contents */
-		} else if (ieee80211_is_atim(fc))
-			title = "ATIM";
-		else if (ieee80211_is_auth(fc))
-			title = "Auth";
-		else if (ieee80211_is_deauth(fc))
-			title = "DeAuth";
-		else if (ieee80211_is_disassoc(fc))
-			title = "DisAssoc";
-		else
-			title = "Frame";
-
-		rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
-		if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
-			bitrate = 0;
-			WARN_ON_ONCE(1);
-		} else {
-			bitrate = iwl_rates[rate_idx].ieee / 2;
-		}
-
-		/* print frame summary.
-		 * MAC addresses show just the last byte (for brevity),
-		 *    but you can hack it to show more, if you'd like to. */
-		if (dataframe)
-			IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
-				     "len=%u, rssi=%d, chnl=%d, rate=%u,\n",
-				     title, le16_to_cpu(fc), header->addr1[5],
-				     length, rssi, channel, bitrate);
-		else {
-			/* src/dst addresses assume managed mode */
-			IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
-				     "len=%u, rssi=%d, tim=%lu usec, "
-				     "phy=0x%02x, chnl=%d\n",
-				     title, le16_to_cpu(fc), header->addr1[5],
-				     header->addr3[5], length, rssi,
-				     tsf_low - priv->scan_start_tsf,
-				     phy_flags, channel);
-		}
-	}
-	if (print_dump)
-		iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
-}
-#endif
-
 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
 {
 	u32 decrypt_out = 0;
@@ -988,7 +905,7 @@
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rx_phy_res *phy_res;
 	__le32 rx_pkt_status;
-	struct iwl4965_rx_mpdu_res_start *amsdu;
+	struct iwl_rx_mpdu_res_start *amsdu;
 	u32 len;
 	u32 ampdu_status;
 	u32 rate_n_flags;
@@ -1017,7 +934,7 @@
 			return;
 		}
 		phy_res = &priv->_agn.last_phy_res;
-		amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
+		amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
 		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
 		len = le16_to_cpu(amsdu->byte_count);
 		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
@@ -1060,11 +977,6 @@
 	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
 	rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-	/* Set "1" to report good data frames in groups of 100 */
-	if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
-		iwlagn_dbg_report_frame(priv, phy_res, len, header, 1);
-#endif
 	iwl_dbg_log_rx_data_frame(priv, len, header);
 	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
 		rx_status.signal, (unsigned long long)rx_status.mactime);
@@ -1252,6 +1164,7 @@
 	bool is_active = false;
 	int  chan_mod;
 	u8 active_chains;
+	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
 
 	conf = ieee80211_get_hw_conf(priv->hw);
 
@@ -1403,11 +1316,14 @@
 
 	band = priv->scan_band;
 
-	if (priv->cfg->scan_antennas[band])
-		rx_ant = priv->cfg->scan_antennas[band];
+	if (priv->cfg->scan_rx_antennas[band])
+		rx_ant = priv->cfg->scan_rx_antennas[band];
 
-	priv->scan_tx_ant[band] =
-			iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
+	if (priv->cfg->scan_tx_antennas[band])
+		scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
+
+	priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
+						    scan_tx_antennas);
 	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
 	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
 
@@ -1433,13 +1349,15 @@
 	if (!priv->is_internal_short_scan) {
 		cmd_len = iwl_fill_probe_req(priv,
 					(struct ieee80211_mgmt *)scan->data,
+					vif->addr,
 					priv->scan_request->ie,
 					priv->scan_request->ie_len,
 					IWL_MAX_SCAN_SIZE - sizeof(*scan));
 	} else {
+		/* use bcast addr, will not be transmitted but must be valid */
 		cmd_len = iwl_fill_probe_req(priv,
 					(struct ieee80211_mgmt *)scan->data,
-					NULL, 0,
+					iwl_bcast_addr, NULL, 0,
 					IWL_MAX_SCAN_SIZE - sizeof(*scan));
 
 	}
@@ -1502,3 +1420,18 @@
 	return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
 				  vif->bss_conf.bssid);
 }
+
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+			    int sta_id, int tid, int freed)
+{
+	WARN_ON(!spin_is_locked(&priv->sta_lock));
+
+	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+	else {
+		IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
+			priv->stations[sta_id].tid[tid].tfds_in_queue,
+			freed);
+		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+	}
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index cf4a95b..35c86d2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -313,8 +313,7 @@
 			 */
 			IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
 				tid);
-			ieee80211_stop_tx_ba_session(sta, tid,
-						WLAN_BACK_INITIATOR);
+			ieee80211_stop_tx_ba_session(sta, tid);
 		}
 	} else
 		IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid);
@@ -325,18 +324,11 @@
 			      struct iwl_lq_sta *lq_data,
 			      struct ieee80211_sta *sta)
 {
-	if ((tid < TID_MAX_LOAD_COUNT) &&
-	    !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) {
-		if (priv->cfg->use_rts_for_ht) {
-			/*
-			 * switch to RTS/CTS if it is the prefer protection
-			 * method for HT traffic
-			 */
-			IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
-			priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
-			iwlcore_commit_rxon(priv);
-		}
-	}
+	if (tid < TID_MAX_LOAD_COUNT)
+		rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
+	else
+		IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
+			tid, TID_MAX_LOAD_COUNT);
 }
 
 static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
new file mode 100644
index 0000000..d54edc3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -0,0 +1,284 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-calib.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+
+void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+				struct iwl_rx_mem_buffer *rxb)
+
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_missed_beacon_notif *missed_beacon;
+
+	missed_beacon = &pkt->u.missed_beacon;
+	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+	    priv->missed_beacon_threshold) {
+		IWL_DEBUG_CALIB(priv,
+		    "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+		    le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+		    le32_to_cpu(missed_beacon->total_missed_becons),
+		    le32_to_cpu(missed_beacon->num_recvd_beacons),
+		    le32_to_cpu(missed_beacon->num_expected_beacons));
+		if (!test_bit(STATUS_SCANNING, &priv->status))
+			iwl_init_sensitivity(priv);
+	}
+}
+
+/* Calculate the noise level, based on measurements taken during the network
+ *   silence just before the arriving beacon.  This can be done only when we
+ *   know exactly when to expect beacons, i.e. only while associated. */
+static void iwl_rx_calc_noise(struct iwl_priv *priv)
+{
+	struct statistics_rx_non_phy *rx_info
+				= &(priv->_agn.statistics.rx.general);
+	int num_active_rx = 0;
+	int total_silence = 0;
+	int bcn_silence_a =
+		le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+	int bcn_silence_b =
+		le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+	int bcn_silence_c =
+		le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+	int last_rx_noise;
+
+	if (bcn_silence_a) {
+		total_silence += bcn_silence_a;
+		num_active_rx++;
+	}
+	if (bcn_silence_b) {
+		total_silence += bcn_silence_b;
+		num_active_rx++;
+	}
+	if (bcn_silence_c) {
+		total_silence += bcn_silence_c;
+		num_active_rx++;
+	}
+
+	/* Average among active antennas */
+	if (num_active_rx)
+		last_rx_noise = (total_silence / num_active_rx) - 107;
+	else
+		last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+
+	IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
+			bcn_silence_a, bcn_silence_b, bcn_silence_c,
+			last_rx_noise);
+}
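
/*
 * Illustrative sketch of the averaging done by iwl_rx_calc_noise() above
 * (hypothetical helper and constants, standard C types; not part of this
 * patch).  Chains that reported a non-zero in-band silence RSSI are averaged
 * and mapped to dBm with the same fixed -107 offset the function applies.
 */
#include <stdint.h>

#define NOISE_DBM_OFFSET	107
#define NOISE_NOT_AVAILABLE	(-127)	/* stand-in for IWL_NOISE_MEAS_NOT_AVAILABLE */

static int calc_noise_dbm(uint32_t silence_a, uint32_t silence_b,
			  uint32_t silence_c)
{
	uint32_t total = 0;
	int active = 0;

	if (silence_a) { total += silence_a; active++; }
	if (silence_b) { total += silence_b; active++; }
	if (silence_c) { total += silence_c; active++; }

	/* e.g. chains reporting 85, 90 and 0 give (85 + 90) / 2 - 107 = -20 dBm */
	return active ? (int)(total / active) - NOISE_DBM_OFFSET
		      : NOISE_NOT_AVAILABLE;
}
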
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/*
+ *  Based on the assumption that all statistics counters are 32-bit (DWORD).
+ *  FIXME: this function is for debugging only; it does not handle the
+ *  case of counter roll-over.
+ */
+static void iwl_accumulative_statistics(struct iwl_priv *priv,
+					__le32 *stats)
+{
+	int i;
+	__le32 *prev_stats;
+	u32 *accum_stats;
+	u32 *delta, *max_delta;
+
+	prev_stats = (__le32 *)&priv->_agn.statistics;
+	accum_stats = (u32 *)&priv->_agn.accum_statistics;
+	delta = (u32 *)&priv->_agn.delta_statistics;
+	max_delta = (u32 *)&priv->_agn.max_delta;
+
+	for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
+	     i += sizeof(__le32), stats++, prev_stats++, delta++,
+	     max_delta++, accum_stats++) {
+		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+			*delta = (le32_to_cpu(*stats) -
+				le32_to_cpu(*prev_stats));
+			*accum_stats += *delta;
+			if (*delta > *max_delta)
+				*max_delta = *delta;
+		}
+	}
+
+	/* reset accumulative statistics for "no-counter" type statistics */
+	priv->_agn.accum_statistics.general.temperature =
+		priv->_agn.statistics.general.temperature;
+	priv->_agn.accum_statistics.general.temperature_m =
+		priv->_agn.statistics.general.temperature_m;
+	priv->_agn.accum_statistics.general.ttl_timestamp =
+		priv->_agn.statistics.general.ttl_timestamp;
+	priv->_agn.accum_statistics.tx.tx_power.ant_a =
+		priv->_agn.statistics.tx.tx_power.ant_a;
+	priv->_agn.accum_statistics.tx.tx_power.ant_b =
+		priv->_agn.statistics.tx.tx_power.ant_b;
+	priv->_agn.accum_statistics.tx.tx_power.ant_c =
+		priv->_agn.statistics.tx.tx_power.ant_c;
+}
+#endif
+
+#define REG_RECALIB_PERIOD (60)
+
+/**
+ * iwl_good_plcp_health - check for PLCP errors
+ *
+ * When the PLCP error rate exceeds the threshold, report bad health so
+ * the radio can be reset to recover throughput.
+ */
+bool iwl_good_plcp_health(struct iwl_priv *priv,
+				struct iwl_rx_packet *pkt)
+{
+	bool rc = true;
+	int combined_plcp_delta;
+	unsigned int plcp_msec;
+	unsigned long plcp_received_jiffies;
+
+	if (priv->cfg->plcp_delta_threshold ==
+	    IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
+		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
+		return rc;
+	}
+
+	/*
+	 * check for plcp_err and trigger radio reset if it exceeds
+	 * the plcp error threshold plcp_delta.
+	 */
+	plcp_received_jiffies = jiffies;
+	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+					(long) priv->plcp_jiffies);
+	priv->plcp_jiffies = plcp_received_jiffies;
+	/*
+	 * check to make sure plcp_msec is not 0 to prevent division
+	 * by zero.
+	 */
+	if (plcp_msec) {
+		combined_plcp_delta =
+			(le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
+			le32_to_cpu(priv->_agn.statistics.rx.ofdm.plcp_err)) +
+			(le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
+			le32_to_cpu(priv->_agn.statistics.rx.ofdm_ht.plcp_err));
+
+		if ((combined_plcp_delta > 0) &&
+		    ((combined_plcp_delta * 100) / plcp_msec) >
+			priv->cfg->plcp_delta_threshold) {
+			/*
+			 * if plcp_err exceeds the threshold,
+			 * the following data is printed in csv format:
+			 *    Text: plcp_err exceeded %d,
+			 *    Received ofdm.plcp_err,
+			 *    Current ofdm.plcp_err,
+			 *    Received ofdm_ht.plcp_err,
+			 *    Current ofdm_ht.plcp_err,
+			 *    combined_plcp_delta,
+			 *    plcp_msec
+			 */
+			IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
+				"%u, %u, %u, %u, %d, %u mSecs\n",
+				priv->cfg->plcp_delta_threshold,
+				le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
+				le32_to_cpu(
+				       priv->_agn.statistics.rx.ofdm.plcp_err),
+				le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
+				le32_to_cpu(
+				  priv->_agn.statistics.rx.ofdm_ht.plcp_err),
+				combined_plcp_delta, plcp_msec);
+			rc = false;
+		}
+	}
+	return rc;
+}
+
+void iwl_rx_statistics(struct iwl_priv *priv,
+			      struct iwl_rx_mem_buffer *rxb)
+{
+	int change;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
+		     (int)sizeof(priv->_agn.statistics),
+		     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+
+	change = ((priv->_agn.statistics.general.temperature !=
+		   pkt->u.stats.general.temperature) ||
+		  ((priv->_agn.statistics.flag &
+		    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+		   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
+	iwl_recover_from_statistics(priv, pkt);
+
+	memcpy(&priv->_agn.statistics, &pkt->u.stats,
+	       sizeof(priv->_agn.statistics));
+
+	set_bit(STATUS_STATISTICS, &priv->status);
+
+	/* Reschedule the statistics timer to occur in
+	 * REG_RECALIB_PERIOD seconds to ensure we get a
+	 * thermal update even if the uCode doesn't give
+	 * us one */
+	mod_timer(&priv->statistics_periodic, jiffies +
+		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
+	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
+		iwl_rx_calc_noise(priv);
+		queue_work(priv->workqueue, &priv->run_time_calib_work);
+	}
+	if (priv->cfg->ops->lib->temp_ops.temperature && change)
+		priv->cfg->ops->lib->temp_ops.temperature(priv);
+}
+
+void iwl_reply_statistics(struct iwl_priv *priv,
+			      struct iwl_rx_mem_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		memset(&priv->_agn.accum_statistics, 0,
+			sizeof(struct iwl_notif_statistics));
+		memset(&priv->_agn.delta_statistics, 0,
+			sizeof(struct iwl_notif_statistics));
+		memset(&priv->_agn.max_delta, 0,
+			sizeof(struct iwl_notif_statistics));
+#endif
+		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+	}
+	iwl_rx_statistics(priv, rxb);
+}
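
The two helpers above lean on simple integer arithmetic: iwl_rx_calc_noise() averages the beacon-silence readings of whichever antennas reported anything and shifts the result by a fixed 107 dB offset into dBm, while iwl_good_plcp_health() compares an error rate of (errors * 100 / elapsed msec) against the per-device plcp_delta_threshold. The standalone sketch below replays that arithmetic with hypothetical helper names and sample numbers; it is an illustration, not driver code.

#include <stdbool.h>
#include <stdio.h>

#define NOISE_NOT_AVAILABLE (-127)	/* stand-in for IWL_NOISE_MEAS_NOT_AVAILABLE */

static int average_silence_dbm(unsigned a, unsigned b, unsigned c)
{
	unsigned total = 0, active = 0;

	if (a) { total += a; active++; }
	if (b) { total += b; active++; }
	if (c) { total += c; active++; }

	/* Average over the antennas that reported anything, then shift
	 * into dBm with the fixed -107 offset used above. */
	return active ? (int)(total / active) - 107 : NOISE_NOT_AVAILABLE;
}

static bool plcp_health_ok(unsigned delta_errors, unsigned msec, unsigned threshold)
{
	if (!msec)	/* avoid dividing by zero, as the driver does */
		return true;
	/* errors per 100 ms compared against the configured threshold */
	return (delta_errors * 100) / msec <= threshold;
}

int main(void)
{
	printf("noise: %d dBm\n", average_silence_dbm(92, 0, 88));	/* -> -17 dBm */
	printf("healthy: %d\n", plcp_health_ok(120, 200, 50));		/* rate 60 > 50 -> 0 */
	return 0;
}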
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 7d614c4..2573234e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -233,6 +233,7 @@
 {
 	unsigned long flags;
 	u16 ra_tid;
+	int ret;
 
 	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
 	    (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
@@ -248,7 +249,9 @@
 	ra_tid = BUILD_RAxTID(sta_id, tid);
 
 	/* Modify device's station table to Tx this TID */
-	iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+	if (ret)
+		return ret;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
@@ -469,7 +472,8 @@
 	}
 
 	/* Set up antennas */
-	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+					      priv->hw_params.valid_tx_ant);
 	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
 
 	/* Set the rate in the TX cmd */
@@ -567,10 +571,7 @@
 	hdr_len = ieee80211_hdrlen(fc);
 
 	/* Find index into station table for destination station */
-	if (!info->control.sta)
-		sta_id = priv->hw_params.bcast_sta_id;
-	else
-		sta_id = iwl_sta_id(info->control.sta);
+	sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
 	if (sta_id == IWL_INVALID_STATION) {
 		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
 			       hdr->addr1);
@@ -598,11 +599,17 @@
 	}
 
 	txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
+
+	/* irqs already disabled/saved above when locking priv->lock */
+	spin_lock(&priv->sta_lock);
+
 	if (ieee80211_is_data_qos(fc)) {
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-		if (unlikely(tid >= MAX_TID_COUNT))
+		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+			spin_unlock(&priv->sta_lock);
 			goto drop_unlock;
+		}
 		seq_number = priv->stations[sta_id].tid[tid].seq_number;
 		seq_number &= IEEE80211_SCTL_SEQ;
 		hdr->seq_ctrl = hdr->seq_ctrl &
@@ -620,15 +627,22 @@
 	swq_id = txq->swq_id;
 	q = &txq->q;
 
-	if (unlikely(iwl_queue_space(q) < q->high_mark))
+	if (unlikely(iwl_queue_space(q) < q->high_mark)) {
+		spin_unlock(&priv->sta_lock);
 		goto drop_unlock;
+	}
 
-	if (ieee80211_is_data_qos(fc))
+	if (ieee80211_is_data_qos(fc)) {
 		priv->stations[sta_id].tid[tid].tfds_in_queue++;
+		if (!ieee80211_has_morefrags(fc))
+			priv->stations[sta_id].tid[tid].seq_number = seq_number;
+	}
+
+	spin_unlock(&priv->sta_lock);
 
 	/* Set up driver data for this TFD */
 	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
-	txq->txb[q->write_ptr].skb[0] = skb;
+	txq->txb[q->write_ptr].skb = skb;
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	out_cmd = txq->cmd[q->write_ptr];
@@ -694,8 +708,8 @@
 	txcmd_phys = pci_map_single(priv->pci_dev,
 				    &out_cmd->hdr, len,
 				    PCI_DMA_BIDIRECTIONAL);
-	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
-	pci_unmap_len_set(out_meta, len, len);
+	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+	dma_unmap_len_set(out_meta, len, len);
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
 	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
@@ -703,8 +717,6 @@
 
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		txq->need_update = 1;
-		if (qc)
-			priv->stations[sta_id].tid[tid].seq_number = seq_number;
 	} else {
 		wait_write_ptr = 1;
 		txq->need_update = 0;
@@ -1009,6 +1021,8 @@
 	if (ret)
 		return ret;
 
+	spin_lock_irqsave(&priv->sta_lock, flags);
+	tid_data = &priv->stations[sta_id].tid[tid];
 	if (tid_data->tfds_in_queue == 0) {
 		IWL_DEBUG_HT(priv, "HW queue is empty\n");
 		tid_data->agg.state = IWL_AGG_ON;
@@ -1018,6 +1032,7 @@
 			     tid_data->tfds_in_queue);
 		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
 	}
+	spin_unlock_irqrestore(&priv->sta_lock, flags);
 	return ret;
 }
 
@@ -1040,11 +1055,14 @@
 		return -ENXIO;
 	}
 
+	spin_lock_irqsave(&priv->sta_lock, flags);
+
 	if (priv->stations[sta_id].tid[tid].agg.state ==
 				IWL_EMPTYING_HW_QUEUE_ADDBA) {
 		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+		spin_unlock_irqrestore(&priv->sta_lock, flags);
 		return 0;
 	}
 
@@ -1062,13 +1080,17 @@
 		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
 		priv->stations[sta_id].tid[tid].agg.state =
 				IWL_EMPTYING_HW_QUEUE_DELBA;
+		spin_unlock_irqrestore(&priv->sta_lock, flags);
 		return 0;
 	}
 
 	IWL_DEBUG_HT(priv, "HW queue is empty\n");
 	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	/* do not restore/save irqs */
+	spin_unlock(&priv->sta_lock);
+	spin_lock(&priv->lock);
+
 	/*
 	 * the only reason this call can fail is queue number out of range,
 	 * which can happen if uCode is reloaded and all the station
@@ -1092,6 +1114,8 @@
 	u8 *addr = priv->stations[sta_id].sta.sta.addr;
 	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
 
+	WARN_ON(!spin_is_locked(&priv->sta_lock));
+
 	switch (priv->stations[sta_id].tid[tid].agg.state) {
 	case IWL_EMPTYING_HW_QUEUE_DELBA:
 		/* We are reclaiming the last packet of the */
@@ -1116,6 +1140,7 @@
 		}
 		break;
 	}
+
 	return 0;
 }
 
@@ -1159,12 +1184,12 @@
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		tx_info = &txq->txb[txq->q.read_ptr];
-		iwlagn_tx_status(priv, tx_info->skb[0]);
+		iwlagn_tx_status(priv, tx_info->skb);
 
-		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
 		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
 			nfreed++;
-		tx_info->skb[0] = NULL;
+		tx_info->skb = NULL;
 
 		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
 			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
@@ -1188,7 +1213,7 @@
 	int i, sh, ack;
 	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
 	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-	u64 bitmap;
+	u64 bitmap, sent_bitmap;
 	int successes = 0;
 	struct ieee80211_tx_info *info;
 
@@ -1216,24 +1241,26 @@
 
 	/* check for success or failure according to the
 	 * transmitted bitmap and block-ack bitmap */
-	bitmap &= agg->bitmap;
+	sent_bitmap = bitmap & agg->bitmap;
 
 	/* For each frame attempted in aggregation,
 	 * update driver's record of tx frame's status. */
-	for (i = 0; i < agg->frame_count ; i++) {
-		ack = bitmap & (1ULL << i);
-		successes += !!ack;
+	i = 0;
+	while (sent_bitmap) {
+		ack = sent_bitmap & 1ULL;
+		successes += ack;
 		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
 			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
 			agg->start_idx + i);
+		sent_bitmap >>= 1;
+		++i;
 	}
 
-	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
+	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
 	memset(&info->status, 0, sizeof(info->status));
 	info->flags |= IEEE80211_TX_STAT_ACK;
 	info->flags |= IEEE80211_TX_STAT_AMPDU;
 	info->status.ampdu_ack_len = successes;
-	info->status.ampdu_ack_map = bitmap;
 	info->status.ampdu_len = agg->frame_count;
 	iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
 
@@ -1281,6 +1308,7 @@
 	int index;
 	int sta_id;
 	int tid;
+	unsigned long flags;
 
 	/* "flow" corresponds to Tx queue */
 	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
@@ -1308,7 +1336,7 @@
 	/* Find index just before block-ack window */
 	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
 
-	/* TODO: Need to get this copy more safely - now good for debug */
+	spin_lock_irqsave(&priv->sta_lock, flags);
 
 	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
 			   "sta_id = %d\n",
@@ -1344,4 +1372,6 @@
 
 		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
 	}
+
+	spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
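
In the compressed block-ack path above, the rewritten loop intersects the bitmap of frames the driver transmitted with the bitmap the firmware acknowledged and then walks the result bit by bit, counting successes. A standalone sketch of that walk, using hypothetical sample bitmaps rather than real aggregation state:

#include <stdint.h>
#include <stdio.h>

static unsigned count_acked(uint64_t sent, uint64_t acked)
{
	uint64_t sent_bitmap = sent & acked;	/* frames both sent and ACKed */
	unsigned successes = 0;

	while (sent_bitmap) {
		successes += sent_bitmap & 1ULL;	/* low bit = oldest frame */
		sent_bitmap >>= 1;
	}
	return successes;
}

int main(void)
{
	/* 6 frames sent, block-ack reports bits 0, 1, 3 and 5 acknowledged */
	printf("%u frames ACKed\n", count_acked(0x3fULL, 0x2bULL));	/* -> 4 */
	return 0;
}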
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 637286c..6f77441 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -423,3 +423,126 @@
 
 	return 0;
 }
+
+
+/**
+ * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
+{
+	u32 val;
+	int ret = 0;
+	u32 errcnt = 0;
+	u32 i;
+
+	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IWL_DL_IO is set */
+		iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+			i + IWLAGN_RTC_INST_LOWER_BOUND);
+		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+			ret = -EIO;
+			errcnt++;
+			if (errcnt >= 3)
+				break;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
+ *     looking at all data.
+ */
+static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
+				 u32 len)
+{
+	u32 val;
+	u32 save_len = len;
+	int ret = 0;
+	u32 errcnt;
+
+	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+	iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+			   IWLAGN_RTC_INST_LOWER_BOUND);
+
+	errcnt = 0;
+	for (; len > 0; len -= sizeof(u32), image++) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IWL_DL_IO is set */
+		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+			IWL_ERR(priv, "uCode INST section is invalid at "
+				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
+				  save_len - len, val, le32_to_cpu(*image));
+			ret = -EIO;
+			errcnt++;
+			if (errcnt >= 20)
+				break;
+		}
+	}
+
+	if (!errcnt)
+		IWL_DEBUG_INFO(priv,
+		    "ucode image in INSTRUCTION memory is good\n");
+
+	return ret;
+}
+
+/**
+ * iwl_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+int iwl_verify_ucode(struct iwl_priv *priv)
+{
+	__le32 *image;
+	u32 len;
+	int ret;
+
+	/* Try bootstrap */
+	image = (__le32 *)priv->ucode_boot.v_addr;
+	len = priv->ucode_boot.len;
+	ret = iwlcore_verify_inst_sparse(priv, image, len);
+	if (!ret) {
+		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	/* Try initialize */
+	image = (__le32 *)priv->ucode_init.v_addr;
+	len = priv->ucode_init.len;
+	ret = iwlcore_verify_inst_sparse(priv, image, len);
+	if (!ret) {
+		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	/* Try runtime/protocol */
+	image = (__le32 *)priv->ucode_code.v_addr;
+	len = priv->ucode_code.len;
+	ret = iwlcore_verify_inst_sparse(priv, image, len);
+	if (!ret) {
+		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+	/* Since nothing seems to match, show first several data entries in
+	 * instruction SRAM, so maybe visual inspection will give a clue.
+	 * Selection of bootstrap image (vs. other images) is arbitrary. */
+	image = (__le32 *)priv->ucode_boot.v_addr;
+	len = priv->ucode_boot.len;
+	ret = iwl_verify_inst_full(priv, image, len);
+
+	return ret;
+}
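
iwlcore_verify_inst_sparse() above spot-checks the loaded image by reading back one 32-bit word every 100 bytes and giving up after three mismatches, on the assumption that matching samples make the bytes in between very likely correct. A self-contained sketch of that sampling strategy follows; the device read is simulated and all names are hypothetical, not the driver's I/O API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for reading a word back from device SRAM at a byte offset. */
static uint32_t fake_device_read(const uint32_t *sram, uint32_t byte_offset)
{
	return sram[byte_offset / sizeof(uint32_t)];
}

/* Compare one word every 100 bytes; bail out after a few mismatches. */
static int verify_sparse(const uint32_t *host, const uint32_t *sram, uint32_t len)
{
	uint32_t i, errcnt = 0;
	int ret = 0;

	for (i = 0; i < len; i += 100) {
		if (fake_device_read(sram, i) != host[i / sizeof(uint32_t)]) {
			ret = -1;
			if (++errcnt >= 3)
				break;
		}
	}
	return ret;
}

int main(void)
{
	uint32_t image[256], copy[256];
	uint32_t i;

	for (i = 0; i < 256; i++)
		image[i] = i * 0x01010101u;
	memcpy(copy, image, sizeof(copy));

	printf("clean copy: %d\n", verify_sparse(image, copy, sizeof(copy)));
	copy[25] ^= 0xffffffffu;	/* corrupt a word that the 100-byte stride samples */
	printf("corrupted:  %d\n", verify_sparse(image, copy, sizeof(copy)));
	return 0;
}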
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 24aff65..3368cfd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -120,7 +120,7 @@
 	    (priv->switch_rxon.channel != priv->staging_rxon.channel)) {
 		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
 		      le16_to_cpu(priv->switch_rxon.channel));
-		priv->switch_rxon.switch_in_progress = false;
+		iwl_chswitch_done(priv, false);
 	}
 
 	/* If we don't need to send a full RXON, we can use
@@ -367,7 +367,8 @@
 
 	/* Set up packet rate and flags */
 	rate = iwl_rate_get_lowest_plcp(priv);
-	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+					      priv->hw_params.valid_tx_ant);
 	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
 	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
 		rate_flags |= RATE_MCS_CCK_MSK;
@@ -474,18 +475,25 @@
 	/* Unmap tx_cmd */
 	if (num_tbs)
 		pci_unmap_single(dev,
-				pci_unmap_addr(&txq->meta[index], mapping),
-				pci_unmap_len(&txq->meta[index], len),
+				dma_unmap_addr(&txq->meta[index], mapping),
+				dma_unmap_len(&txq->meta[index], len),
 				PCI_DMA_BIDIRECTIONAL);
 
 	/* Unmap chunks, if any. */
-	for (i = 1; i < num_tbs; i++) {
+	for (i = 1; i < num_tbs; i++)
 		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
 				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
 
-		if (txq->txb) {
-			dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
-			txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
+	/* free SKB */
+	if (txq->txb) {
+		struct sk_buff *skb;
+
+		skb = txq->txb[txq->q.read_ptr].skb;
+
+		/* can be called from irqs-disabled context */
+		if (skb) {
+			dev_kfree_skb_any(skb);
+			txq->txb[txq->q.read_ptr].skb = NULL;
 		}
 	}
 }
@@ -933,6 +941,8 @@
 		fill_rx = 1;
 
 	while (i != r) {
+		int len;
+
 		rxb = rxq->queue[i];
 
 		/* If an RXB doesn't have a Rx queue slot associated with it,
@@ -947,8 +957,9 @@
 			       PCI_DMA_FROMDEVICE);
 		pkt = rxb_addr(rxb);
 
-		trace_iwlwifi_dev_rx(priv, pkt,
-			le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+		len += sizeof(u32); /* account for status word */
+		trace_iwlwifi_dev_rx(priv, pkt, len);
 
 		/* Reclaim a command buffer only if this packet is a response
 		 *   to a (driver-originated) command.
@@ -1450,13 +1461,13 @@
 
 	actual_ack_cnt_delta =
 		le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
-		le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
+		le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt);
 	expected_ack_cnt_delta =
 		le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
-		le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
+		le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt);
 	ba_timeout_delta =
 		le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
-		le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
+		le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout);
 	if ((priv->_agn.agg_tids_count > 0) &&
 	    (expected_ack_cnt_delta > 0) &&
 	    (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
@@ -1466,12 +1477,17 @@
 				" expected_ack_cnt = %d\n",
 				actual_ack_cnt_delta, expected_ack_cnt_delta);
 
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		/*
+		 * This is ifdef'ed on DEBUGFS because otherwise the
+		 * statistics aren't available. If DEBUGFS is set but
+		 * DEBUG is not, these will just compile out.
+		 */
 		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
-				priv->delta_statistics.tx.rx_detected_cnt);
+				priv->_agn.delta_statistics.tx.rx_detected_cnt);
 		IWL_DEBUG_RADIO(priv,
 				"ack_or_ba_timeout_collision delta = %d\n",
-				priv->delta_statistics.tx.
+				priv->_agn.delta_statistics.tx.
 				ack_or_ba_timeout_collision);
 #endif
 		IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
@@ -1694,6 +1710,9 @@
 	size_t inst_size, data_size, init_size, init_data_size, boot_size;
 
 	u32 build;
+
+	u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+	u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
 };
 
 static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
@@ -1871,6 +1890,42 @@
 			capa->max_probe_length =
 				le32_to_cpup((__le32 *)tlv_data);
 			break;
+		case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
+			if (tlv_len != 4)
+				return -EINVAL;
+			pieces->init_evtlog_ptr =
+				le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
+			if (tlv_len != 4)
+				return -EINVAL;
+			pieces->init_evtlog_size =
+				le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
+			if (tlv_len != 4)
+				return -EINVAL;
+			pieces->init_errlog_ptr =
+				le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
+			if (tlv_len != 4)
+				return -EINVAL;
+			pieces->inst_evtlog_ptr =
+				le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
+			if (tlv_len != 4)
+				return -EINVAL;
+			pieces->inst_evtlog_size =
+				le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
+			if (tlv_len != 4)
+				return -EINVAL;
+			pieces->inst_errlog_ptr =
+				le32_to_cpup((__le32 *)tlv_data);
+			break;
 		default:
 			break;
 		}
@@ -2063,6 +2118,26 @@
 			goto err_pci_alloc;
 	}
 
+	/* Now that we can no longer fail, copy information */
+
+	/*
+	 * The (size - 16) / 12 formula is based on the information recorded
+	 * for each event, which is of mode 1 (including timestamp) for all
+	 * new microcodes that include this information.
+	 */
+	priv->_agn.init_evtlog_ptr = pieces.init_evtlog_ptr;
+	if (pieces.init_evtlog_size)
+		priv->_agn.init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
+	else
+		priv->_agn.init_evtlog_size = priv->cfg->max_event_log_size;
+	priv->_agn.init_errlog_ptr = pieces.init_errlog_ptr;
+	priv->_agn.inst_evtlog_ptr = pieces.inst_evtlog_ptr;
+	if (pieces.inst_evtlog_size)
+		priv->_agn.inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
+	else
+		priv->_agn.inst_evtlog_size = priv->cfg->max_event_log_size;
+	priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr;
+
 	/* Copy images into buffers for card's bus-master reads ... */
 
 	/* Runtime instructions (first block of data in file) */
@@ -2195,10 +2270,15 @@
 	u32 blink1, blink2, ilink1, ilink2;
 	u32 pc, hcmd;
 
-	if (priv->ucode_type == UCODE_INIT)
+	if (priv->ucode_type == UCODE_INIT) {
 		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
-	else
+		if (!base)
+			base = priv->_agn.init_errlog_ptr;
+	} else {
 		base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
+		if (!base)
+			base = priv->_agn.inst_errlog_ptr;
+	}
 
 	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
 		IWL_ERR(priv,
@@ -2230,9 +2310,9 @@
 	trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
 				      blink1, blink2, ilink1, ilink2);
 
-	IWL_ERR(priv, "Desc                               Time       "
+	IWL_ERR(priv, "Desc                                  Time       "
 		"data1      data2      line\n");
-	IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
+	IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
 		desc_lookup(desc), desc, time, data1, data2, line);
 	IWL_ERR(priv, "pc      blink1  blink2  ilink1  ilink2  hcmd\n");
 	IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
@@ -2258,10 +2338,16 @@
 
 	if (num_events == 0)
 		return pos;
-	if (priv->ucode_type == UCODE_INIT)
+
+	if (priv->ucode_type == UCODE_INIT) {
 		base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
-	else
+		if (!base)
+			base = priv->_agn.init_evtlog_ptr;
+	} else {
 		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+		if (!base)
+			base = priv->_agn.inst_evtlog_ptr;
+	}
 
 	if (mode == 0)
 		event_size = 2 * sizeof(u32);
@@ -2363,13 +2449,21 @@
 	u32 num_wraps;  /* # times uCode wrapped to top of log */
 	u32 next_entry; /* index of next entry to be written by uCode */
 	u32 size;       /* # entries that we'll print */
+	u32 logsize;
 	int pos = 0;
 	size_t bufsz = 0;
 
-	if (priv->ucode_type == UCODE_INIT)
+	if (priv->ucode_type == UCODE_INIT) {
 		base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
-	else
+		logsize = priv->_agn.init_evtlog_size;
+		if (!base)
+			base = priv->_agn.init_evtlog_ptr;
+	} else {
 		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+		logsize = priv->_agn.inst_evtlog_size;
+		if (!base)
+			base = priv->_agn.inst_evtlog_ptr;
+	}
 
 	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
 		IWL_ERR(priv,
@@ -2384,16 +2478,16 @@
 	num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
 	next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
 
-	if (capacity > priv->cfg->max_event_log_size) {
+	if (capacity > logsize) {
 		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
-			capacity, priv->cfg->max_event_log_size);
-		capacity = priv->cfg->max_event_log_size;
+			capacity, logsize);
+		capacity = logsize;
 	}
 
-	if (next_entry > priv->cfg->max_event_log_size) {
+	if (next_entry > logsize) {
 		IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
-			next_entry, priv->cfg->max_event_log_size);
-		next_entry = priv->cfg->max_event_log_size;
+			next_entry, logsize);
+		next_entry = logsize;
 	}
 
 	size = num_wraps ? capacity : next_entry;
@@ -2518,8 +2612,6 @@
 
 		if (priv->cfg->ops->hcmd->set_rxon_chain)
 			priv->cfg->ops->hcmd->set_rxon_chain(priv);
-
-		memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
 	}
 
 	/* Configure Bluetooth device coexistence support */
@@ -2843,9 +2935,9 @@
 	}
 
 	if (priv->start_calib) {
-		iwl_chain_noise_calibration(priv, &priv->statistics);
+		iwl_chain_noise_calibration(priv, &priv->_agn.statistics);
 
-		iwl_sensitivity_calibration(priv, &priv->statistics);
+		iwl_sensitivity_calibration(priv, &priv->_agn.statistics);
 	}
 
 	mutex_unlock(&priv->mutex);
@@ -2934,20 +3026,16 @@
 	IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
 			vif->bss_conf.aid, vif->bss_conf.beacon_int);
 
-	if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+	if (vif->bss_conf.use_short_preamble)
 		priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
 	else
 		priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
 
 	if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
-		if (vif->bss_conf.assoc_capability &
-					WLAN_CAPABILITY_SHORT_SLOT_TIME)
+		if (vif->bss_conf.use_short_slot)
 			priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
 		else
 			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
-		if (vif->type == NL80211_IFTYPE_ADHOC)
-			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 	}
 
 	iwlcore_commit_rxon(priv);
@@ -3173,8 +3261,7 @@
 
 		priv->staging_rxon.assoc_id = 0;
 
-		if (vif->bss_conf.assoc_capability &
-						WLAN_CAPABILITY_SHORT_PREAMBLE)
+		if (vif->bss_conf.use_short_preamble)
 			priv->staging_rxon.flags |=
 				RXON_FLG_SHORT_PREAMBLE_MSK;
 		else
@@ -3182,17 +3269,12 @@
 				~RXON_FLG_SHORT_PREAMBLE_MSK;
 
 		if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
-			if (vif->bss_conf.assoc_capability &
-						WLAN_CAPABILITY_SHORT_SLOT_TIME)
+			if (vif->bss_conf.use_short_slot)
 				priv->staging_rxon.flags |=
 					RXON_FLG_SHORT_SLOT_MSK;
 			else
 				priv->staging_rxon.flags &=
 					~RXON_FLG_SHORT_SLOT_MSK;
-
-			if (vif->type == NL80211_IFTYPE_ADHOC)
-				priv->staging_rxon.flags &=
-					~RXON_FLG_SHORT_SLOT_MSK;
 		}
 		/* restore RXON assoc */
 		priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
@@ -3238,17 +3320,9 @@
 		return -EOPNOTSUPP;
 	}
 
-	if (sta) {
-		sta_id = iwl_sta_id(sta);
-
-		if (sta_id == IWL_INVALID_STATION) {
-			IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
-					   sta->addr);
-			return -EINVAL;
-		}
-	} else {
-		sta_id = priv->hw_params.bcast_sta_id;
-	}
+	sta_id = iwl_sta_id_or_broadcast(priv, sta);
+	if (sta_id == IWL_INVALID_STATION)
+		return -EINVAL;
 
 	mutex_lock(&priv->mutex);
 	iwl_scan_cancel_timeout(priv, 100);
@@ -3294,13 +3368,32 @@
 	return ret;
 }
 
+/*
+ * switch to RTS/CTS for TX
+ */
+static void iwl_enable_rts_cts(struct iwl_priv *priv)
+{
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
+	if (!test_bit(STATUS_SCANNING, &priv->status)) {
+		IWL_DEBUG_INFO(priv, "use RTS/CTS protection\n");
+		iwlcore_commit_rxon(priv);
+	} else {
+		/* scanning; defer the request until the scan completes */
+		IWL_DEBUG_INFO(priv, "defer setting RTS/CTS protection\n");
+	}
+}
+
 static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
 				struct ieee80211_vif *vif,
 				enum ieee80211_ampdu_mlme_action action,
 				struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
 	struct iwl_priv *priv = hw->priv;
-	int ret;
+	int ret = -EINVAL;
 
 	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
 		     sta->addr, tid);
@@ -3308,17 +3401,19 @@
 	if (!(priv->cfg->sku & IWL_SKU_N))
 		return -EACCES;
 
+	mutex_lock(&priv->mutex);
+
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
 		IWL_DEBUG_HT(priv, "start Rx\n");
-		return iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+		break;
 	case IEEE80211_AMPDU_RX_STOP:
 		IWL_DEBUG_HT(priv, "stop Rx\n");
 		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
 		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-			return 0;
-		else
-			return ret;
+			ret = 0;
+		break;
 	case IEEE80211_AMPDU_TX_START:
 		IWL_DEBUG_HT(priv, "start Tx\n");
 		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
@@ -3327,7 +3422,7 @@
 			IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
 				     priv->_agn.agg_tids_count);
 		}
-		return ret;
+		break;
 	case IEEE80211_AMPDU_TX_STOP:
 		IWL_DEBUG_HT(priv, "stop Tx\n");
 		ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
@@ -3337,18 +3432,22 @@
 				     priv->_agn.agg_tids_count);
 		}
 		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-			return 0;
-		else
-			return ret;
+			ret = 0;
+		break;
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
-		/* do nothing */
-		return -EOPNOTSUPP;
-	default:
-		IWL_DEBUG_HT(priv, "unknown\n");
-		return -EINVAL;
+		if (priv->cfg->use_rts_for_ht) {
+			/*
+			 * switch to RTS/CTS if it is the preferred protection
+			 * method for HT traffic
+			 */
+			iwl_enable_rts_cts(priv);
+		}
+		ret = 0;
 		break;
 	}
-	return 0;
+	mutex_unlock(&priv->mutex);
+
+	return ret;
 }
 
 static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
@@ -3423,6 +3522,98 @@
 	return 0;
 }
 
+static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
+				   struct ieee80211_channel_switch *ch_switch)
+{
+	struct iwl_priv *priv = hw->priv;
+	const struct iwl_channel_info *ch_info;
+	struct ieee80211_conf *conf = &hw->conf;
+	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+	u16 ch;
+	unsigned long flags = 0;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (iwl_is_rfkill(priv))
+		goto out_exit;
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+	    test_bit(STATUS_SCANNING, &priv->status))
+		goto out_exit;
+
+	if (!iwl_is_associated(priv))
+		goto out_exit;
+
+	/* channel switch in progress */
+	if (priv->switch_rxon.switch_in_progress == true)
+		goto out_exit;
+
+	mutex_lock(&priv->mutex);
+	if (priv->cfg->ops->lib->set_channel_switch) {
+
+		ch = ieee80211_frequency_to_channel(
+			ch_switch->channel->center_freq);
+		if (le16_to_cpu(priv->active_rxon.channel) != ch) {
+			ch_info = iwl_get_channel_info(priv,
+						       conf->channel->band,
+						       ch);
+			if (!is_channel_valid(ch_info)) {
+				IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+				goto out;
+			}
+			spin_lock_irqsave(&priv->lock, flags);
+
+			priv->current_ht_config.smps = conf->smps_mode;
+
+			/* Configure HT40 channels */
+			ht_conf->is_ht = conf_is_ht(conf);
+			if (ht_conf->is_ht) {
+				if (conf_is_ht40_minus(conf)) {
+					ht_conf->extension_chan_offset =
+						IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+					ht_conf->is_40mhz = true;
+				} else if (conf_is_ht40_plus(conf)) {
+					ht_conf->extension_chan_offset =
+						IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+					ht_conf->is_40mhz = true;
+				} else {
+					ht_conf->extension_chan_offset =
+						IEEE80211_HT_PARAM_CHA_SEC_NONE;
+					ht_conf->is_40mhz = false;
+				}
+			} else
+				ht_conf->is_40mhz = false;
+
+			/* if we are switching from HT to 2.4, clear flags
+			 * from any HT-related info since 2.4 does not
+			 * support HT */
+			if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
+				priv->staging_rxon.flags = 0;
+
+			iwl_set_rxon_channel(priv, conf->channel);
+			iwl_set_rxon_ht(priv, ht_conf);
+			iwl_set_flags_for_band(priv, conf->channel->band,
+					       priv->vif);
+			spin_unlock_irqrestore(&priv->lock, flags);
+
+			iwl_set_rate(priv);
+			/*
+			 * at this point, staging_rxon has the
+			 * configuration for channel switch
+			 */
+			if (priv->cfg->ops->lib->set_channel_switch(priv,
+								    ch_switch))
+				priv->switch_rxon.switch_in_progress = false;
+		}
+	}
+out:
+	mutex_unlock(&priv->mutex);
+out_exit:
+	if (!priv->switch_rxon.switch_in_progress)
+		ieee80211_chswitch_done(priv->vif, false);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
 /*****************************************************************************
  *
  * driver setup and teardown
@@ -3479,6 +3670,7 @@
 	cancel_delayed_work(&priv->scan_check);
 	cancel_work_sync(&priv->start_internal_scan);
 	cancel_delayed_work(&priv->alive_start);
+	cancel_work_sync(&priv->run_time_calib_work);
 	cancel_work_sync(&priv->beacon_update);
 	del_timer_sync(&priv->statistics_periodic);
 	del_timer_sync(&priv->ucode_trace);
@@ -3594,6 +3786,7 @@
 	.sta_notify = iwl_mac_sta_notify,
 	.sta_add = iwlagn_mac_sta_add,
 	.sta_remove = iwl_mac_sta_remove,
+	.channel_switch = iwl_mac_channel_switch,
 };
 
 static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -3604,6 +3797,7 @@
 	struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
 	unsigned long flags;
 	u16 pci_cmd;
+	u8 perm_addr[ETH_ALEN];
 
 	/************************
 	 * 1. Allocating HW data
@@ -3633,9 +3827,6 @@
 	priv->pci_dev = pdev;
 	priv->inta_mask = CSR_INI_SET_MASK;
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-	atomic_set(&priv->restrict_refcnt, 0);
-#endif
 	if (iwl_alloc_traffic_mem(priv))
 		IWL_ERR(priv, "Not enough memory to generate traffic log\n");
 
@@ -3724,9 +3915,9 @@
 		goto out_free_eeprom;
 
 	/* extract MAC Address */
-	iwl_eeprom_get_mac(priv, priv->mac_addr);
-	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr);
-	SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
+	iwl_eeprom_get_mac(priv, perm_addr);
+	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", perm_addr);
+	SET_IEEE80211_PERM_ADDR(priv->hw, perm_addr);
 
 	/************************
 	 * 5. Setup HW constants
@@ -3993,6 +4184,47 @@
 	{IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1206, iwl6000g2a_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0x1216, iwl6000g2a_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1226, iwl6000g2a_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1207, iwl6000g2a_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2a_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1306, iwl6000g2a_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1307, iwl6000g2a_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1321, iwl6000g2a_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6000g2a_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6000g2a_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6000g2a_2abg_cfg)},
+
+/* 6x00 Series Gen2b */
+	{IWL_PCI_DEVICE(0x008F, 0x5105, iwl6000g2b_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5115, iwl6000g2b_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008F, 0x5125, iwl6000g2b_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008F, 0x5107, iwl6000g2b_bg_cfg)},
+	{IWL_PCI_DEVICE(0x008F, 0x5201, iwl6000g2b_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x008F, 0x5221, iwl6000g2b_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x008F, 0x5206, iwl6000g2b_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x008F, 0x5226, iwl6000g2b_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x008F, 0x5207, iwl6000g2b_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x008A, 0x5301, iwl6000g2b_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008A, 0x5305, iwl6000g2b_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008A, 0x5307, iwl6000g2b_bg_cfg)},
+	{IWL_PCI_DEVICE(0x008A, 0x5321, iwl6000g2b_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008A, 0x5325, iwl6000g2b_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008B, 0x5311, iwl6000g2b_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008B, 0x5315, iwl6000g2b_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5215, iwl6000g2b_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5201, iwl6000g2b_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5205, iwl6000g2b_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5206, iwl6000g2b_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5207, iwl6000g2b_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5221, iwl6000g2b_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5225, iwl6000g2b_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5226, iwl6000g2b_2abg_cfg)},
 
 /* 6x50 WiFi/WiMax Series */
 	{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
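
The firmware-loading hunk above converts the event-log size reported in the TLV into an entry count with (size - 16) / 12, i.e. a 16-byte log header followed by 12-byte mode-1 (timestamped) entries, falling back to the per-config maximum when the TLV is absent. A small sketch of that conversion, with hypothetical names and values:

#include <stdio.h>

/* (size - 16) / 12: 16-byte log header, then 12 bytes per mode-1 entry. */
static unsigned evtlog_entries(unsigned tlv_size, unsigned cfg_max)
{
	return tlv_size ? (tlv_size - 16) / 12 : cfg_max;
}

int main(void)
{
	printf("%u entries\n", evtlog_entries(0, 512));			/* no TLV -> config default */
	printf("%u entries\n", evtlog_entries(16 + 12 * 256, 512));	/* -> 256 */
	return 0;
}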
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 2d74805..be9d298 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -65,6 +65,33 @@
 
 #include "iwl-dev.h"
 
+/* configuration for the _agn devices */
+extern struct iwl_cfg iwl4965_agn_cfg;
+extern struct iwl_cfg iwl5300_agn_cfg;
+extern struct iwl_cfg iwl5100_agn_cfg;
+extern struct iwl_cfg iwl5350_agn_cfg;
+extern struct iwl_cfg iwl5100_bgn_cfg;
+extern struct iwl_cfg iwl5100_abg_cfg;
+extern struct iwl_cfg iwl5150_agn_cfg;
+extern struct iwl_cfg iwl5150_abg_cfg;
+extern struct iwl_cfg iwl6000g2a_2agn_cfg;
+extern struct iwl_cfg iwl6000g2a_2abg_cfg;
+extern struct iwl_cfg iwl6000g2a_2bg_cfg;
+extern struct iwl_cfg iwl6000g2b_bgn_cfg;
+extern struct iwl_cfg iwl6000g2b_bg_cfg;
+extern struct iwl_cfg iwl6000g2b_2agn_cfg;
+extern struct iwl_cfg iwl6000g2b_2abg_cfg;
+extern struct iwl_cfg iwl6000g2b_2bgn_cfg;
+extern struct iwl_cfg iwl6000g2b_2bg_cfg;
+extern struct iwl_cfg iwl6000i_2agn_cfg;
+extern struct iwl_cfg iwl6000i_2abg_cfg;
+extern struct iwl_cfg iwl6000i_2bg_cfg;
+extern struct iwl_cfg iwl6000_3agn_cfg;
+extern struct iwl_cfg iwl6050_2agn_cfg;
+extern struct iwl_cfg iwl6050_2abg_cfg;
+extern struct iwl_cfg iwl1000_bgn_cfg;
+extern struct iwl_cfg iwl1000_bg_cfg;
+
 extern struct iwl_mod_params iwlagn_mod_params;
 extern struct iwl_hcmd_ops iwlagn_hcmd;
 extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
@@ -93,6 +120,8 @@
 int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
 			   u16 ssn_idx, u8 tx_fifo);
 void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+			    int sta_id, int tid, int freed);
 
 /* uCode */
 int iwlagn_load_ucode(struct iwl_priv *priv);
@@ -102,6 +131,7 @@
 			   struct iwl_rx_mem_buffer *rxb);
 void iwlagn_init_alive_start(struct iwl_priv *priv);
 int iwlagn_alive_notify(struct iwl_priv *priv);
+int iwl_verify_ucode(struct iwl_priv *priv);
 
 /* lib */
 void iwl_check_abort_status(struct iwl_priv *priv,
@@ -171,6 +201,16 @@
 	       (status == TX_STATUS_DIRECT_DONE);
 }
 
+/* rx */
+void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+				struct iwl_rx_mem_buffer *rxb);
+bool iwl_good_plcp_health(struct iwl_priv *priv,
+			  struct iwl_rx_packet *pkt);
+void iwl_rx_statistics(struct iwl_priv *priv,
+		       struct iwl_rx_mem_buffer *rxb);
+void iwl_reply_statistics(struct iwl_priv *priv,
+			  struct iwl_rx_mem_buffer *rxb);
+
 /* scan */
 void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
 
@@ -178,4 +218,8 @@
 int iwlagn_manage_ibss_station(struct iwl_priv *priv,
 			       struct ieee80211_vif *vif, bool add);
 
+/* hcmd */
+int iwlagn_send_rxon_assoc(struct iwl_priv *priv);
+int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
+
 #endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 9aab020..acf8e98 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -95,7 +95,7 @@
 
 	/* Multi-Station support */
 	REPLY_ADD_STA = 0x18,
-	REPLY_REMOVE_STA = 0x19,	/* not used */
+	REPLY_REMOVE_STA = 0x19,
 	REPLY_REMOVE_ALL_STA = 0x1a,	/* not used */
 
 	/* Security */
@@ -227,7 +227,7 @@
 
 	/* command or response/notification data follows immediately */
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 
 /**
@@ -247,7 +247,7 @@
 struct iwl3945_tx_power {
 	u8 tx_gain;		/* gain for analog radio */
 	u8 dsp_atten;		/* gain for DSP */
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * struct iwl3945_power_per_rate
@@ -258,7 +258,7 @@
 	u8 rate;		/* plcp */
 	struct iwl3945_tx_power tpc;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * iwlagn rate_n_flags bit fields
@@ -389,7 +389,7 @@
  */
 struct tx_power_dual_stream {
 	__le32 dw;
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * struct iwl4965_tx_power_db
@@ -398,7 +398,7 @@
  */
 struct iwl4965_tx_power_db {
 	struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * Command REPLY_TX_POWER_DBM_CMD = 0x98
@@ -412,7 +412,7 @@
 	u8 flags;
 	s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * Command TX_ANT_CONFIGURATION_CMD = 0x98
@@ -422,7 +422,7 @@
  */
 struct iwl_tx_ant_config_cmd {
 	__le32 valid;
-} __attribute__ ((packed));
+} __packed;
 
 /******************************************************************************
  * (0a)
@@ -478,7 +478,7 @@
 	__le32 therm_r4[2];	/* signed */
 	__le32 tx_atten[5][2];	/* signed MIMO gain comp, 5 freq groups,
 				 * 2 Tx chains */
-} __attribute__ ((packed));
+} __packed;
 
 
 /**
@@ -570,7 +570,7 @@
 	__le32 error_event_table_ptr;	/* SRAM address for error log */
 	__le32 timestamp;
 	__le32 is_valid;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_ERROR = 0x2 (response only, not a command)
@@ -582,7 +582,7 @@
 	__le16 bad_cmd_seq_num;
 	__le32 error_info;
 	__le64 timestamp;
-} __attribute__ ((packed));
+} __packed;
 
 /******************************************************************************
  * (1)
@@ -718,7 +718,7 @@
 	__le32 filter_flags;
 	__le16 channel;
 	__le16 reserved5;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl4965_rxon_cmd {
 	u8 node_addr[6];
@@ -738,7 +738,7 @@
 	__le16 channel;
 	u8 ofdm_ht_single_stream_basic_rates;
 	u8 ofdm_ht_dual_stream_basic_rates;
-} __attribute__ ((packed));
+} __packed;
 
 /* 5000 HW just extend this command */
 struct iwl_rxon_cmd {
@@ -763,7 +763,7 @@
 	u8 reserved5;
 	__le16 acquisition_data;
 	__le16 reserved6;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
@@ -774,7 +774,7 @@
 	u8 ofdm_basic_rates;
 	u8 cck_basic_rates;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl4965_rxon_assoc_cmd {
 	__le32 flags;
@@ -785,7 +785,7 @@
 	u8 ofdm_ht_dual_stream_basic_rates;
 	__le16 rx_chain_select_flags;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl5000_rxon_assoc_cmd {
 	__le32 flags;
@@ -800,7 +800,7 @@
 	__le16 rx_chain_select_flags;
 	__le16 acquisition_data;
 	__le32 reserved3;
-} __attribute__ ((packed));
+} __packed;
 
 #define IWL_CONN_MAX_LISTEN_INTERVAL	10
 #define IWL_MAX_UCODE_BEACON_INTERVAL	4 /* 4096 */
@@ -816,7 +816,7 @@
 	__le32 beacon_init_val;
 	__le16 listen_interval;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
@@ -829,7 +829,7 @@
 	__le32 rxon_filter_flags;
 	__le32 switch_time;
 	struct iwl3945_power_per_rate power[IWL_MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl4965_channel_switch_cmd {
 	u8 band;
@@ -839,7 +839,7 @@
 	__le32 rxon_filter_flags;
 	__le32 switch_time;
 	struct iwl4965_tx_power_db tx_power;
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * struct iwl5000_channel_switch_cmd
@@ -860,7 +860,7 @@
 	__le32 rxon_filter_flags;
 	__le32 switch_time;
 	__le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * struct iwl6000_channel_switch_cmd
@@ -881,7 +881,7 @@
 	__le32 rxon_filter_flags;
 	__le32 switch_time;
 	__le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
@@ -890,7 +890,7 @@
 	__le16 band;
 	__le16 channel;
 	__le32 status;		/* 0 - OK, 1 - fail */
-} __attribute__ ((packed));
+} __packed;
 
 /******************************************************************************
  * (2)
@@ -920,7 +920,7 @@
 	u8 aifsn;
 	u8 reserved1;
 	__le16 edca_txop;
-} __attribute__ ((packed));
+} __packed;
 
 /* QoS flags defines */
 #define QOS_PARAM_FLG_UPDATE_EDCA_MSK	cpu_to_le32(0x01)
@@ -939,7 +939,7 @@
 struct iwl_qosparam_cmd {
 	__le32 qos_flags;
 	struct iwl_ac_qos ac[AC_NUM];
-} __attribute__ ((packed));
+} __packed;
 
 /******************************************************************************
  * (3)
@@ -952,7 +952,6 @@
 
 /* Special, dedicated locations within device's station table */
 #define	IWL_AP_ID		0
-#define IWL_MULTICAST_ID	1
 #define	IWL_STA_ID		2
 #define	IWL3945_BROADCAST_ID	24
 #define IWL3945_STATION_COUNT	25
@@ -1015,7 +1014,7 @@
 	u8 key_offset;
 	u8 reserved2;
 	u8 key[16];		/* 16-byte unicast decryption key */
-} __attribute__ ((packed));
+} __packed;
 
 /* 5000 */
 struct iwl_keyinfo {
@@ -1029,7 +1028,7 @@
 	__le64 tx_secur_seq_cnt;
 	__le64 hw_tkip_mic_rx_key;
 	__le64 hw_tkip_mic_tx_key;
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * struct sta_id_modify
@@ -1049,7 +1048,7 @@
 	u8 sta_id;
 	u8 modify_mask;
 	__le16 reserved2;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_ADD_STA = 0x18 (command)
@@ -1103,7 +1102,7 @@
 	/* Starting Sequence Number for added block-ack support.
 	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
 	__le16 add_immediate_ba_ssn;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl4965_addsta_cmd {
 	u8 mode;		/* 1: modify existing, 0: add new station */
@@ -1140,7 +1139,7 @@
 	__le16 sleep_tx_count;
 
 	__le16 reserved2;
-} __attribute__ ((packed));
+} __packed;
 
 /* 5000 */
 struct iwl_addsta_cmd {
@@ -1178,7 +1177,7 @@
 	__le16 sleep_tx_count;
 
 	__le16 reserved2;
-} __attribute__ ((packed));
+} __packed;
 
 
 #define ADD_STA_SUCCESS_MSK		0x1
@@ -1190,7 +1189,7 @@
  */
 struct iwl_add_sta_resp {
 	u8 status;	/* ADD_STA_* */
-} __attribute__ ((packed));
+} __packed;
 
 #define REM_STA_SUCCESS_MSK              0x1
 /*
@@ -1198,7 +1197,7 @@
  */
 struct iwl_rem_sta_resp {
 	u8 status;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  *  REPLY_REM_STA = 0x19 (command)
@@ -1208,7 +1207,7 @@
 	u8 reserved[3];
 	u8 addr[ETH_ALEN]; /* MAC addr of the first station */
 	u8 reserved2[2];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_WEP_KEY = 0x20
@@ -1220,7 +1219,7 @@
 	u8 key_size;
 	u8 reserved2[3];
 	u8 key[16];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_wep_cmd {
 	u8 num_keys;
@@ -1228,7 +1227,7 @@
 	u8 flags;
 	u8 reserved;
 	struct iwl_wep_key key[0];
-} __attribute__ ((packed));
+} __packed;
 
 #define WEP_KEY_WEP_TYPE 1
 #define WEP_KEYS_MAX 4
@@ -1282,7 +1281,7 @@
 	__le16 sig_avg;
 	__le16 noise_diff;
 	u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl3945_rx_frame_hdr {
 	__le16 channel;
@@ -1291,13 +1290,13 @@
 	u8 rate;
 	__le16 len;
 	u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl3945_rx_frame_end {
 	__le32 status;
 	__le64 timestamp;
 	__le32 beacon_timestamp;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_3945_RX = 0x1b (response only, not a command)
@@ -1311,7 +1310,7 @@
 	struct iwl3945_rx_frame_stats stats;
 	struct iwl3945_rx_frame_hdr hdr;
 	struct iwl3945_rx_frame_end end;
-} __attribute__ ((packed));
+} __packed;
 
 #define IWL39_RX_FRAME_SIZE	(4 + sizeof(struct iwl3945_rx_frame))
 
@@ -1327,7 +1326,7 @@
 	__le16 agc_info;	/* agc code 0:6, agc dB 7:13, reserved 14:15 */
 	u8 rssi_info[6];	/* we use even entries, 0/2/4 for A/B/C rssi */
 	u8 pad[0];
-} __attribute__ ((packed));
+} __packed;
 
 
 #define IWL50_RX_RES_PHY_CNT 8
@@ -1345,7 +1344,7 @@
 
 struct iwl5000_non_cfg_phy {
 	__le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT];  /* up to 8 phy entries */
-} __attribute__ ((packed));
+} __packed;
 
 
 /*
@@ -1365,12 +1364,12 @@
 	__le32 rate_n_flags;	/* RATE_MCS_* */
 	__le16 byte_count;	/* frame's byte-count */
 	__le16 reserved3;
-} __attribute__ ((packed));
+} __packed;
 
-struct iwl4965_rx_mpdu_res_start {
+struct iwl_rx_mpdu_res_start {
 	__le16 byte_count;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 
 /******************************************************************************
@@ -1400,18 +1399,27 @@
 
 /* REPLY_TX Tx flags field */
 
-/* 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
+/*
+ * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
  * before this frame. if CTS-to-self required check
- * RXON_FLG_SELF_CTS_EN status. */
-#define TX_CMD_FLG_RTS_CTS_MSK cpu_to_le32(1 << 0)
+ * RXON_FLG_SELF_CTS_EN status.
+ * unused in 3945/4965, used in 5000 series and after
+ */
+#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
 
-/* 1: Use Request-To-Send protocol before this frame.
- * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */
+/*
+ * 1: Use Request-To-Send protocol before this frame.
+ * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
+ * used in 3945/4965, unused in 5000 series and after
+ */
 #define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
 
-/* 1: Transmit Clear-To-Send to self before this frame.
+/*
+ * 1: Transmit Clear-To-Send to self before this frame.
  * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
- * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. */
+ * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
+ * used in 3945/4965, unused in 5000 series and after
+ */
 #define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
 
 /* 1: Expect ACK from receiving station
@@ -1431,8 +1439,11 @@
  * Set when Txing a block-ack request frame.  Also set TX_CMD_FLG_ACK_MSK. */
 #define TX_CMD_FLG_IMM_BA_RSP_MASK  cpu_to_le32(1 << 6)
 
-/* 1: Frame requires full Tx-Op protection.
- * Set this if either RTS or CTS Tx Flag gets set. */
+/*
+ * 1: Frame requires full Tx-Op protection.
+ * Set this if either RTS or CTS Tx Flag gets set.
+ * used in 3945/4965, unused in 5000 series and after
+ */
 #define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
 
 /* Tx antenna selection field; used only for 3945, reserved (0) for 4965.
@@ -1557,7 +1568,7 @@
 	 */
 	u8 payload[0];
 	struct ieee80211_hdr hdr[0];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_TX = 0x1c (response)
@@ -1569,7 +1580,7 @@
 	u8 rate;
 	__le32 wireless_media_time;
 	__le32 status;		/* TX status */
-} __attribute__ ((packed));
+} __packed;
 
 
 /*
@@ -1581,7 +1592,7 @@
 	u8 try_cnt;		/* Tx attempts */
 	u8 bt_kill_cnt;		/* Tx attempts blocked by Bluetooth device */
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_tx_cmd {
 	/*
@@ -1660,7 +1671,7 @@
 	 */
 	u8 payload[0];
 	struct ieee80211_hdr hdr[0];
-} __attribute__ ((packed));
+} __packed;
 
 /* TX command response is sent after *3945* transmission attempts.
  *
@@ -1826,7 +1837,7 @@
 struct agg_tx_status {
 	__le16 status;
 	__le16 sequence;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl4965_tx_resp {
 	u8 frame_count;		/* 1 no aggregation, >1 aggregation */
@@ -1863,7 +1874,7 @@
 		__le32 status;
 		struct agg_tx_status agg_status[0]; /* for each agg frame */
 	} u;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * definitions for initial rate index field
@@ -1927,7 +1938,7 @@
 	 */
 	struct agg_tx_status status;	/* TX status (in aggregation -
 					 * status of 1st frame) */
-} __attribute__ ((packed));
+} __packed;
 /*
  * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
  *
@@ -1945,7 +1956,7 @@
 	__le64 bitmap;
 	__le16 scd_flow;
 	__le16 scd_ssn;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
@@ -1958,14 +1969,14 @@
 	u8 reserved;
 	__le16 channel;
 	struct iwl3945_power_per_rate power[IWL_MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl4965_txpowertable_cmd {
 	u8 band;		/* 0: 5 GHz, 1: 2.4 GHz */
 	u8 reserved;
 	__le16 channel;
 	struct iwl4965_tx_power_db tx_power;
-} __attribute__ ((packed));
+} __packed;
 
 
 /**
@@ -1987,13 +1998,13 @@
 	__le16 rate_n_flags;
 	u8 try_cnt;
 	u8 next_rate_index;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl3945_rate_scaling_cmd {
 	u8 table_id;
 	u8 reserved[3];
 	struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
 
 
 /*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
@@ -2040,7 +2051,7 @@
 	 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
 	 */
 	u8 start_rate_index[LINK_QUAL_AC_NUM];
-} __attribute__ ((packed));
+} __packed;
 
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000) /* 4 milliseconds */
 #define LINK_QUAL_AGG_TIME_LIMIT_MAX	(65535)
@@ -2081,7 +2092,7 @@
 	u8 agg_frame_cnt_limit;
 
 	__le32 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
@@ -2287,7 +2298,7 @@
 		__le32 rate_n_flags;	/* RATE_MCS_*, IWL_RATE_* */
 	} rs_table[LINK_QUAL_MAX_RETRY_NUM];
 	__le32 reserved2;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * BT configuration enable flags:
@@ -2328,7 +2339,7 @@
 	u8 reserved;
 	__le32 kill_ack_mask;
 	__le32 kill_cts_mask;
-} __attribute__ ((packed));
+} __packed;
 
 /******************************************************************************
  * (6)
@@ -2353,7 +2364,7 @@
 	u8 channel;		/* channel to measure */
 	u8 type;		/* see enum iwl_measure_type */
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
@@ -2372,7 +2383,7 @@
 	__le16 channel_count;	/* minimum 1, maximum 10 */
 	__le16 reserved3;
 	struct iwl_measure_channel channels[10];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
@@ -2383,7 +2394,7 @@
 	__le16 status;		/* 0 - command will be handled
 				 * 1 - cannot handle (conflicts with another
 				 *     measurement) */
-} __attribute__ ((packed));
+} __packed;
 
 enum iwl_measurement_state {
 	IWL_MEASUREMENT_START = 0,
@@ -2406,13 +2417,13 @@
 struct iwl_measurement_histogram {
 	__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 0.8usec counts */
 	__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 1usec counts */
-} __attribute__ ((packed));
+} __packed;
 
 /* clear channel availability counters */
 struct iwl_measurement_cca_counters {
 	__le32 ofdm;
 	__le32 cck;
-} __attribute__ ((packed));
+} __packed;
 
 enum iwl_measure_type {
 	IWL_MEASURE_BASIC = (1 << 0),
@@ -2448,7 +2459,7 @@
 	struct iwl_measurement_histogram histogram;
 	__le32 stop_time;	/* lower 32-bits of TSF */
 	__le32 status;		/* see iwl_measurement_status */
-} __attribute__ ((packed));
+} __packed;
 
 /******************************************************************************
  * (7)
@@ -2504,7 +2515,7 @@
 	__le32 rx_data_timeout;
 	__le32 tx_data_timeout;
 	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_powertable_cmd {
 	__le16 flags;
@@ -2514,7 +2525,7 @@
 	__le32 tx_data_timeout;
 	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
 	__le32 keep_alive_beacons;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
@@ -2527,7 +2538,7 @@
 	__le32 sleep_time;
 	__le32 tsf_low;
 	__le32 bcon_timer;
-} __attribute__ ((packed));
+} __packed;
 
 /* Sleep states.  3945 and 4965 identical. */
 enum {
@@ -2552,14 +2563,14 @@
 #define CARD_STATE_CMD_HALT    0x02	/* Power down permanently */
 struct iwl_card_state_cmd {
 	__le32 status;		/* CARD_STATE_CMD_* request new power state */
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
  */
 struct iwl_card_state_notif {
 	__le32 flags;
-} __attribute__ ((packed));
+} __packed;
 
 #define HW_CARD_DISABLED   0x01
 #define SW_CARD_DISABLED   0x02
@@ -2570,14 +2581,14 @@
 	__le32   reserved;
 	__le32   critical_temperature_M;
 	__le32   critical_temperature_R;
-}  __attribute__ ((packed));
+}  __packed;
 
 /* 1000, and 6x00 */
 struct iwl_ct_kill_throttling_config {
 	__le32   critical_temperature_exit;
 	__le32   reserved;
 	__le32   critical_temperature_enter;
-}  __attribute__ ((packed));
+}  __packed;
 
 /******************************************************************************
  * (8)
@@ -2622,7 +2633,7 @@
 	struct iwl3945_tx_power tpc;
 	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
 	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
-} __attribute__ ((packed));
+} __packed;
 
 /* set number of direct probes u8 type */
 #define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
@@ -2641,7 +2652,7 @@
 	u8 dsp_atten;		/* gain for DSP */
 	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
 	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
-} __attribute__ ((packed));
+} __packed;
 
 /* set number of direct probes __le32 type */
 #define IWL_SCAN_PROBE_MASK(n) 	cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
@@ -2658,7 +2669,7 @@
 	u8 id;
 	u8 len;
 	u8 ssid[32];
-} __attribute__ ((packed));
+} __packed;
 
 #define PROBE_OPTION_MAX_3945		4
 #define PROBE_OPTION_MAX		20
@@ -2764,7 +2775,7 @@
 	 * before requesting another scan.
 	 */
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_scan_cmd {
 	__le16 len;
@@ -2808,7 +2819,7 @@
 	 * before requesting another scan.
 	 */
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 /* Can abort will notify by complete notification with abort status. */
 #define CAN_ABORT_STATUS	cpu_to_le32(0x1)
@@ -2820,7 +2831,7 @@
  */
 struct iwl_scanreq_notification {
 	__le32 status;		/* 1: okay, 2: cannot fulfill request */
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
@@ -2833,7 +2844,7 @@
 	u8 band;
 	u8 reserved[2];
 	__le32 status;
-} __attribute__ ((packed));
+} __packed;
 
 #define  SCAN_OWNER_STATUS 0x1;
 #define  MEASURE_OWNER_STATUS 0x2;
@@ -2849,7 +2860,7 @@
 	__le32 tsf_low;
 	__le32 tsf_high;
 	__le32 statistics[NUMBER_OF_STATISTICS];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
@@ -2861,7 +2872,7 @@
 	u8 last_channel;
 	__le32 tsf_low;
 	__le32 tsf_high;
-} __attribute__ ((packed));
+} __packed;
 
 
 /******************************************************************************
@@ -2879,14 +2890,14 @@
 	__le32 low_tsf;
 	__le32 high_tsf;
 	__le32 ibss_mgr_status;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl4965_beacon_notif {
 	struct iwl4965_tx_resp beacon_notify_hdr;
 	__le32 low_tsf;
 	__le32 high_tsf;
 	__le32 ibss_mgr_status;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
@@ -2898,7 +2909,7 @@
 	u8 tim_size;
 	u8 reserved1;
 	struct ieee80211_hdr frame[0];	/* beacon frame */
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_tx_beacon_cmd {
 	struct iwl_tx_cmd tx;
@@ -2906,7 +2917,7 @@
 	u8 tim_size;
 	u8 reserved1;
 	struct ieee80211_hdr frame[0];	/* beacon frame */
-} __attribute__ ((packed));
+} __packed;
 
 /******************************************************************************
  * (10)
@@ -2932,7 +2943,7 @@
 		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
 		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
 	} failed;
-} __attribute__ ((packed));
+} __packed;
 
 /* statistics command response */
 
@@ -2952,7 +2963,7 @@
 	__le32 rxe_frame_limit_overrun;
 	__le32 sent_ack_cnt;
 	__le32 sent_cts_cnt;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl39_statistics_rx_non_phy {
 	__le32 bogus_cts;	/* CTS received when not expecting CTS */
@@ -2963,13 +2974,13 @@
 				 * filtering process */
 	__le32 non_channel_beacons;	/* beacons with our bss id but not on
 					 * our serving channel */
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl39_statistics_rx {
 	struct iwl39_statistics_rx_phy ofdm;
 	struct iwl39_statistics_rx_phy cck;
 	struct iwl39_statistics_rx_non_phy general;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl39_statistics_tx {
 	__le32 preamble_cnt;
@@ -2981,20 +2992,20 @@
 	__le32 ack_timeout;
 	__le32 expected_ack_cnt;
 	__le32 actual_ack_cnt;
-} __attribute__ ((packed));
+} __packed;
 
 struct statistics_dbg {
 	__le32 burst_check;
 	__le32 burst_count;
 	__le32 reserved[4];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl39_statistics_div {
 	__le32 tx_on_a;
 	__le32 tx_on_b;
 	__le32 exec_time;
 	__le32 probe_time;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl39_statistics_general {
 	__le32 temperature;
@@ -3004,7 +3015,7 @@
 	__le32 slots_idle;
 	__le32 ttl_timestamp;
 	struct iwl39_statistics_div div;
-} __attribute__ ((packed));
+} __packed;
 
 struct statistics_rx_phy {
 	__le32 ina_cnt;
@@ -3027,7 +3038,7 @@
 	__le32 mh_format_err;
 	__le32 re_acq_main_rssi_sum;
 	__le32 reserved3;
-} __attribute__ ((packed));
+} __packed;
 
 struct statistics_rx_ht_phy {
 	__le32 plcp_err;
@@ -3040,7 +3051,7 @@
 	__le32 agg_mpdu_cnt;
 	__le32 agg_cnt;
 	__le32 unsupport_mcs;
-} __attribute__ ((packed));
+} __packed;
 
 #define INTERFERENCE_DATA_AVAILABLE      cpu_to_le32(1)
 
@@ -3075,14 +3086,14 @@
 	__le32 beacon_energy_a;
 	__le32 beacon_energy_b;
 	__le32 beacon_energy_c;
-} __attribute__ ((packed));
+} __packed;
 
 struct statistics_rx {
 	struct statistics_rx_phy ofdm;
 	struct statistics_rx_phy cck;
 	struct statistics_rx_non_phy general;
 	struct statistics_rx_ht_phy ofdm_ht;
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * struct statistics_tx_power - current tx power
@@ -3096,7 +3107,7 @@
 	u8 ant_b;
 	u8 ant_c;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct statistics_tx_non_phy_agg {
 	__le32 ba_timeout;
@@ -3109,7 +3120,7 @@
 	__le32 underrun;
 	__le32 bt_prio_kill;
 	__le32 rx_ba_rsp_cnt;
-} __attribute__ ((packed));
+} __packed;
 
 struct statistics_tx {
 	__le32 preamble_cnt;
@@ -3134,7 +3145,7 @@
 	 */
 	struct statistics_tx_power tx_power;
 	__le32 reserved1;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct statistics_div {
@@ -3144,7 +3155,7 @@
 	__le32 probe_time;
 	__le32 reserved1;
 	__le32 reserved2;
-} __attribute__ ((packed));
+} __packed;
 
 struct statistics_general {
 	__le32 temperature;   /* radio temperature */
@@ -3164,7 +3175,7 @@
 	__le32 num_of_sos_states;
 	__le32 reserved2;
 	__le32 reserved3;
-} __attribute__ ((packed));
+} __packed;
 
 #define UCODE_STATISTICS_CLEAR_MSK		(0x1 << 0)
 #define UCODE_STATISTICS_FREQUENCY_MSK		(0x1 << 1)
@@ -3189,7 +3200,7 @@
 #define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
 struct iwl_statistics_cmd {
 	__le32 configuration_flags;	/* IWL_STATS_CONF_* */
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
@@ -3214,14 +3225,14 @@
 	struct iwl39_statistics_rx rx;
 	struct iwl39_statistics_tx tx;
 	struct iwl39_statistics_general general;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_notif_statistics {
 	__le32 flag;
 	struct statistics_rx rx;
 	struct statistics_tx tx;
 	struct statistics_general general;
-} __attribute__ ((packed));
+} __packed;
 
 
 /*
@@ -3253,7 +3264,7 @@
 	__le32 total_missed_becons;
 	__le32 num_expected_beacons;
 	__le32 num_recvd_beacons;
-} __attribute__ ((packed));
+} __packed;
 
 
 /******************************************************************************
@@ -3455,7 +3466,7 @@
 struct iwl_sensitivity_cmd {
 	__le16 control;			/* always use "1" */
 	__le16 table[HD_TABLE_SIZE];	/* use HD_* as index */
-} __attribute__ ((packed));
+} __packed;
 
 
 /**
@@ -3536,31 +3547,31 @@
 	__le32 send_res;
 	__le32 apply_res;
 	__le32 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_calib_cfg_status_s {
 	struct iwl_calib_cfg_elmnt_s once;
 	struct iwl_calib_cfg_elmnt_s perd;
 	__le32 flags;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_calib_cfg_cmd {
 	struct iwl_calib_cfg_status_s ucd_calib_cfg;
 	struct iwl_calib_cfg_status_s drv_calib_cfg;
 	__le32 reserved1;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_calib_hdr {
 	u8 op_code;
 	u8 first_group;
 	u8 groups_num;
 	u8 data_valid;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_calib_cmd {
 	struct iwl_calib_hdr hdr;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 /* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
 struct iwl_calib_diff_gain_cmd {
@@ -3569,14 +3580,14 @@
 	s8 diff_gain_b;
 	s8 diff_gain_c;
 	u8 reserved1;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_calib_xtal_freq_cmd {
 	struct iwl_calib_hdr hdr;
 	u8 cap_pin1;
 	u8 cap_pin2;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 /* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
 struct iwl_calib_chain_noise_reset_cmd {
@@ -3590,7 +3601,7 @@
 	u8 delta_gain_1;
 	u8 delta_gain_2;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 /******************************************************************************
  * (12)
@@ -3613,7 +3624,7 @@
 	u8 on;			/* # intervals on while blinking;
 				 * "0", regardless of "off", turns LED off */
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * station priority table entries
@@ -3749,7 +3760,7 @@
 	u8 win_medium_prio;
 	u8 reserved;
 	u8 flags;
-} __attribute__ ((packed));
+} __packed;
 
 /* COEX flag masks */
 
@@ -3766,7 +3777,7 @@
 	u8 flags;
 	u8 reserved[3];
 	struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * Coexistence MEDIUM NOTIFICATION
@@ -3795,7 +3806,7 @@
 struct iwl_coex_medium_notification {
 	__le32 status;
 	__le32 events;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * Coexistence EVENT  Command
@@ -3810,11 +3821,11 @@
 	u8 flags;
 	u8 event;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwl_coex_event_resp {
 	__le32 status;
-} __attribute__ ((packed));
+} __packed;
 
 
 /******************************************************************************
@@ -3858,7 +3869,7 @@
 		__le32 status;
 		u8 raw[0];
 	} u;
-} __attribute__ ((packed));
+} __packed;
 
 int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 5bbc529..a56fb46 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -141,13 +141,14 @@
 }
 EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
 
-u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
+u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
 {
 	int i;
 	u8 ind = ant;
+
 	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
 		ind = (ind + 1) < RATE_ANT_NUM ?  ind + 1 : 0;
-		if (priv->hw_params.valid_tx_ant & BIT(ind))
+		if (valid & BIT(ind))
 			return ind;
 	}
 	return ant;
@@ -457,7 +458,7 @@
 		if (!sta_ht_inf->ht_supported)
 			return 0;
 	}
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
 	if (priv->disable_ht40)
 		return 0;
 #endif
@@ -506,11 +507,11 @@
 	}
 
 	beacon_int = iwl_adjust_beacon_interval(beacon_int,
-				priv->hw_params.max_beacon_itrvl * 1024);
+				priv->hw_params.max_beacon_itrvl * TIME_UNIT);
 	priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);
 
 	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
-	interval_tm = beacon_int * 1024;
+	interval_tm = beacon_int * TIME_UNIT;
 	rem = do_div(tsf, interval_tm);
 	priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
 
@@ -932,9 +933,9 @@
 }
 EXPORT_SYMBOL(iwl_set_rxon_channel);
 
-static void iwl_set_flags_for_band(struct iwl_priv *priv,
-				   enum ieee80211_band band,
-				   struct ieee80211_vif *vif)
+void iwl_set_flags_for_band(struct iwl_priv *priv,
+			    enum ieee80211_band band,
+			    struct ieee80211_vif *vif)
 {
 	if (band == IEEE80211_BAND_5GHZ) {
 		priv->staging_rxon.flags &=
@@ -943,19 +944,17 @@
 		priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
 	} else {
 		/* Copied from iwl_post_associate() */
-		if (vif && vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+		if (vif && vif->bss_conf.use_short_slot)
 			priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
 		else
 			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 
-		if (vif && vif->type == NL80211_IFTYPE_ADHOC)
-			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
 		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
 		priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
 		priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
 	}
 }
+EXPORT_SYMBOL(iwl_set_flags_for_band);
 
 /*
  * initialize rxon structure with default values from eeprom
@@ -1021,15 +1020,17 @@
 	/* clear both MIX and PURE40 mode flag */
 	priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
 					RXON_FLG_CHANNEL_MODE_PURE_40);
-	memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
-	memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
+
+	if (vif)
+		memcpy(priv->staging_rxon.node_addr, vif->addr, ETH_ALEN);
+
 	priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
 	priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
 	priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
 }
 EXPORT_SYMBOL(iwl_connection_init_rx_config);
 
-static void iwl_set_rate(struct iwl_priv *priv)
+void iwl_set_rate(struct iwl_priv *priv)
 {
 	const struct ieee80211_supported_band *hw = NULL;
 	struct ieee80211_rate *rate;
@@ -1057,6 +1058,21 @@
 	priv->staging_rxon.ofdm_basic_rates =
 	   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
 }
+EXPORT_SYMBOL(iwl_set_rate);
+
+void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
+{
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	if (priv->switch_rxon.switch_in_progress) {
+		ieee80211_chswitch_done(priv->vif, is_success);
+		mutex_lock(&priv->mutex);
+		priv->switch_rxon.switch_in_progress = false;
+		mutex_unlock(&priv->mutex);
+	}
+}
+EXPORT_SYMBOL(iwl_chswitch_done);
 
 void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
@@ -1071,11 +1087,12 @@
 			priv->staging_rxon.channel = csa->channel;
 			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
 			      le16_to_cpu(csa->channel));
-		} else
+			iwl_chswitch_done(priv, true);
+		} else {
 			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
 			      le16_to_cpu(csa->channel));
-
-		priv->switch_rxon.switch_in_progress = false;
+			iwl_chswitch_done(priv, false);
+		}
 	}
 }
 EXPORT_SYMBOL(iwl_rx_csa);
@@ -1507,130 +1524,6 @@
 }
 EXPORT_SYMBOL(iwl_send_statistics_request);
 
-/**
- * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
- *   using sample data 100 bytes apart.  If these sample points are good,
- *   it's a pretty good bet that everything between them is good, too.
- */
-static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
-	u32 val;
-	int ret = 0;
-	u32 errcnt = 0;
-	u32 i;
-
-	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
-		/* read data comes through single port, auto-incr addr */
-		/* NOTE: Use the debugless read so we don't flood kernel log
-		 * if IWL_DL_IO is set */
-		iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-			i + IWL49_RTC_INST_LOWER_BOUND);
-		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image)) {
-			ret = -EIO;
-			errcnt++;
-			if (errcnt >= 3)
-				break;
-		}
-	}
-
-	return ret;
-}
-
-/**
- * iwlcore_verify_inst_full - verify runtime uCode image in card vs. host,
- *     looking at all data.
- */
-static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
-				 u32 len)
-{
-	u32 val;
-	u32 save_len = len;
-	int ret = 0;
-	u32 errcnt;
-
-	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-	iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-			   IWL49_RTC_INST_LOWER_BOUND);
-
-	errcnt = 0;
-	for (; len > 0; len -= sizeof(u32), image++) {
-		/* read data comes through single port, auto-incr addr */
-		/* NOTE: Use the debugless read so we don't flood kernel log
-		 * if IWL_DL_IO is set */
-		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image)) {
-			IWL_ERR(priv, "uCode INST section is invalid at "
-				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
-				  save_len - len, val, le32_to_cpu(*image));
-			ret = -EIO;
-			errcnt++;
-			if (errcnt >= 20)
-				break;
-		}
-	}
-
-	if (!errcnt)
-		IWL_DEBUG_INFO(priv,
-		    "ucode image in INSTRUCTION memory is good\n");
-
-	return ret;
-}
-
-/**
- * iwl_verify_ucode - determine which instruction image is in SRAM,
- *    and verify its contents
- */
-int iwl_verify_ucode(struct iwl_priv *priv)
-{
-	__le32 *image;
-	u32 len;
-	int ret;
-
-	/* Try bootstrap */
-	image = (__le32 *)priv->ucode_boot.v_addr;
-	len = priv->ucode_boot.len;
-	ret = iwlcore_verify_inst_sparse(priv, image, len);
-	if (!ret) {
-		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	/* Try initialize */
-	image = (__le32 *)priv->ucode_init.v_addr;
-	len = priv->ucode_init.len;
-	ret = iwlcore_verify_inst_sparse(priv, image, len);
-	if (!ret) {
-		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	/* Try runtime/protocol */
-	image = (__le32 *)priv->ucode_code.v_addr;
-	len = priv->ucode_code.len;
-	ret = iwlcore_verify_inst_sparse(priv, image, len);
-	if (!ret) {
-		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
-	/* Since nothing seems to match, show first several data entries in
-	 * instruction SRAM, so maybe visual inspection will give a clue.
-	 * Selection of bootstrap image (vs. other images) is arbitrary. */
-	image = (__le32 *)priv->ucode_boot.v_addr;
-	len = priv->ucode_boot.len;
-	ret = iwl_verify_inst_full(priv, image, len);
-
-	return ret;
-}
-EXPORT_SYMBOL(iwl_verify_ucode);
-
-
 void iwl_rf_kill_ct_config(struct iwl_priv *priv)
 {
 	struct iwl_ct_kill_config cmd;
@@ -2051,8 +1944,6 @@
 	if (priv->cfg->ops->hcmd->set_rxon_chain)
 		priv->cfg->ops->hcmd->set_rxon_chain(priv);
 
-	memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
-
 	return iwlcore_commit_rxon(priv);
 }
 
@@ -2061,7 +1952,8 @@
 	struct iwl_priv *priv = hw->priv;
 	int err = 0;
 
-	IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
+	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+			   vif->type, vif->addr);
 
 	mutex_lock(&priv->mutex);
 
@@ -2079,9 +1971,6 @@
 	priv->vif = vif;
 	priv->iw_mode = vif->type;
 
-	IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
-	memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
-
 	err = iwl_set_mode(priv, vif);
 	if (err)
 		goto out_err;
@@ -2115,6 +2004,11 @@
 	}
 	if (priv->vif == vif) {
 		priv->vif = NULL;
+		if (priv->scan_vif == vif) {
+			ieee80211_scan_completed(priv->hw, true);
+			priv->scan_vif = NULL;
+			priv->scan_request = NULL;
+		}
 		memset(priv->bssid, 0, ETH_ALEN);
 	}
 	mutex_unlock(&priv->mutex);
@@ -2215,22 +2109,10 @@
 
 		iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
 		spin_unlock_irqrestore(&priv->lock, flags);
-		if (iwl_is_associated(priv) &&
-		    (le16_to_cpu(priv->active_rxon.channel) != ch) &&
-		    priv->cfg->ops->lib->set_channel_switch) {
-			iwl_set_rate(priv);
-			/*
-			 * at this point, staging_rxon has the
-			 * configuration for channel switch
-			 */
-			ret = priv->cfg->ops->lib->set_channel_switch(priv,
-				ch);
-			if (!ret) {
-				iwl_print_rx_config_cmd(priv);
-				goto out;
-			}
-			priv->switch_rxon.switch_in_progress = false;
-		}
+
+		if (priv->cfg->ops->lib->update_bcast_station)
+			ret = priv->cfg->ops->lib->update_bcast_station(priv);
+
  set_ch_out:
 		/* The list of supported rates and rate mask can be different
 		 * for each band; since the band may have changed, reset
@@ -2588,7 +2470,7 @@
 EXPORT_SYMBOL(iwl_update_stats);
 #endif
 
-const static char *get_csr_string(int cmd)
+static const char *get_csr_string(int cmd)
 {
 	switch (cmd) {
 		IWL_CMD(CSR_HW_IF_CONFIG_REG);
@@ -2659,7 +2541,7 @@
 }
 EXPORT_SYMBOL(iwl_dump_csr);
 
-const static char *get_fh_string(int cmd)
+static const char *get_fh_string(int cmd)
 {
 	switch (cmd) {
 		IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
@@ -2881,6 +2763,61 @@
 }
 EXPORT_SYMBOL(iwl_bg_monitor_recover);
 
+
+/*
+ * extended beacon time format
+ * time in usec will be changed into a 32-bit value in extended:internal format
+ * the extended part is the number of elapsed beacon intervals
+ * the internal part is the time in usec within one beacon interval
+ */
+u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
+{
+	u32 quot;
+	u32 rem;
+	u32 interval = beacon_interval * TIME_UNIT;
+
+	if (!interval || !usec)
+		return 0;
+
+	quot = (usec / interval) &
+		(iwl_beacon_time_mask_high(priv,
+		priv->hw_params.beacon_time_tsf_bits) >>
+		priv->hw_params.beacon_time_tsf_bits);
+	rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
+				   priv->hw_params.beacon_time_tsf_bits);
+
+	return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(iwl_usecs_to_beacons);
+
+/* base is usually what we get from ucode with each received frame,
+ * i.e. the HW timer counter counting down
+ */
+__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
+			   u32 addon, u32 beacon_interval)
+{
+	u32 base_low = base & iwl_beacon_time_mask_low(priv,
+					priv->hw_params.beacon_time_tsf_bits);
+	u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
+					priv->hw_params.beacon_time_tsf_bits);
+	u32 interval = beacon_interval * TIME_UNIT;
+	u32 res = (base & iwl_beacon_time_mask_high(priv,
+				priv->hw_params.beacon_time_tsf_bits)) +
+				(addon & iwl_beacon_time_mask_high(priv,
+				priv->hw_params.beacon_time_tsf_bits));
+
+	if (base_low > addon_low)
+		res += base_low - addon_low;
+	else if (base_low < addon_low) {
+		res += interval + base_low - addon_low;
+		res += (1 << priv->hw_params.beacon_time_tsf_bits);
+	} else
+		res += (1 << priv->hw_params.beacon_time_tsf_bits);
+
+	return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(iwl_add_beacon_time);
+
 #ifdef CONFIG_PM
 
 int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
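For reference, the extended:internal packing implemented by iwl_usecs_to_beacons() above can be reproduced with a small standalone sketch (illustrative only, not part of the patch; the 22-bit split and the helper name are assumptions, matching the _agn beacon_time_tsf_bits value):

#include <stdint.h>
#include <stdio.h>

#define TSF_BITS   22		/* assumed: _agn keeps 22 interval bits */
#define TIME_UNIT  1024		/* one beacon TU is 1024 usec */

/* hypothetical helper: high bits = beacon count, low bits = usec remainder */
static uint32_t usecs_to_beacon_time(uint32_t usec, uint32_t beacon_interval)
{
	uint32_t interval = beacon_interval * TIME_UNIT;
	uint32_t quot, rem;

	if (!interval || !usec)
		return 0;

	quot = (usec / interval) & ((1u << (32 - TSF_BITS)) - 1);
	rem  = (usec % interval) & ((1u << TSF_BITS) - 1);
	return (quot << TSF_BITS) + rem;
}

int main(void)
{
	/* 3 full 100-TU beacon intervals plus 5000 usec left over */
	uint32_t t = usecs_to_beacon_time(3 * 100 * TIME_UNIT + 5000, 100);

	printf("beacons=%u remainder=%u usec\n",
	       t >> TSF_BITS, t & ((1u << TSF_BITS) - 1));	/* 3, 5000 */
	return 0;
}

iwl_add_beacon_time() then adds two such values by summing the extended parts directly and carrying into the extended field whenever the low (interval) parts wrap.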
@@ -2908,6 +2845,7 @@
 {
 	struct iwl_priv *priv = pci_get_drvdata(pdev);
 	int ret;
+	bool hw_rfkill = false;
 
 	/*
 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -2922,6 +2860,17 @@
 	pci_restore_state(pdev);
 	iwl_enable_interrupts(priv);
 
+	if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+		hw_rfkill = true;
+
+	if (hw_rfkill)
+		set_bit(STATUS_RF_KILL_HW, &priv->status);
+	else
+		clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+	wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
+
 	return 0;
 }
 EXPORT_SYMBOL(iwl_pci_resume);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 31775bd..15930e0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -79,6 +79,8 @@
 	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
 	.driver_data = (kernel_ulong_t)&(cfg)
 
+#define TIME_UNIT		1024
+
 #define IWL_SKU_G       0x1
 #define IWL_SKU_A       0x2
 #define IWL_SKU_N       0x8
@@ -173,7 +175,8 @@
 	void (*dump_nic_error_log)(struct iwl_priv *priv);
 	void (*dump_csr)(struct iwl_priv *priv);
 	int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
-	int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
+	int (*set_channel_switch)(struct iwl_priv *priv,
+				  struct ieee80211_channel_switch *ch_switch);
 	/* power management */
 	struct iwl_apm_ops apm_ops;
 
@@ -193,6 +196,7 @@
 	/* station management */
 	int (*manage_ibss_station)(struct iwl_priv *priv,
 				   struct ieee80211_vif *vif, bool add);
+	int (*update_bcast_station)(struct iwl_priv *priv);
 	/* recover from tx queue stall */
 	void (*recover_from_tx_stall)(unsigned long data);
 	/* check for plcp health */
@@ -325,7 +329,9 @@
 	const bool ucode_tracing;
 	const bool sensitivity_calib_by_driver;
 	const bool chain_noise_calib_by_driver;
-	u8 scan_antennas[IEEE80211_NUM_BANDS];
+	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+	u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
+	const bool need_dc_calib;
 };
 
 /***************************
@@ -343,6 +349,9 @@
 int iwl_full_rxon_required(struct iwl_priv *priv);
 void iwl_set_rxon_chain(struct iwl_priv *priv);
 int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
+void iwl_set_flags_for_band(struct iwl_priv *priv,
+			    enum ieee80211_band band,
+			    struct ieee80211_vif *vif);
 u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 				  enum ieee80211_band band);
 void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
@@ -350,6 +359,7 @@
 			 struct ieee80211_sta_ht_cap *sta_ht_inf);
 void iwl_connection_init_rx_config(struct iwl_priv *priv,
 				   struct ieee80211_vif *vif);
+void iwl_set_rate(struct iwl_priv *priv);
 int iwl_set_decrypted_flag(struct iwl_priv *priv,
 			   struct ieee80211_hdr *hdr,
 			   u32 decrypt_res,
@@ -447,20 +457,11 @@
 int iwl_rx_queue_space(const struct iwl_rx_queue *q);
 void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
 /* Handlers */
-void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
-			       struct iwl_rx_mem_buffer *rxb);
 void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
 					  struct iwl_rx_mem_buffer *rxb);
-bool iwl_good_plcp_health(struct iwl_priv *priv,
-				 struct iwl_rx_packet *pkt);
-bool iwl_good_ack_health(struct iwl_priv *priv,
-				 struct iwl_rx_packet *pkt);
 void iwl_recover_from_statistics(struct iwl_priv *priv,
 				struct iwl_rx_packet *pkt);
-void iwl_rx_statistics(struct iwl_priv *priv,
-			      struct iwl_rx_mem_buffer *rxb);
-void iwl_reply_statistics(struct iwl_priv *priv,
-			  struct iwl_rx_mem_buffer *rxb);
+void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
 void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
 
 /* TX helpers */
@@ -474,8 +475,6 @@
 				 dma_addr_t addr, u16 len, u8 reset, u8 pad);
 int iwl_hw_tx_queue_init(struct iwl_priv *priv,
 			 struct iwl_tx_queue *txq);
-void iwl_free_tfds_in_queue(struct iwl_priv *priv,
-			    int sta_id, int tid, int freed);
 void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 		      int slots_num, u32 txq_id);
@@ -495,7 +494,7 @@
 
 u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
 
-u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx);
+u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
 
 static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
 {
@@ -528,7 +527,7 @@
 void iwl_internal_short_hw_scan(struct iwl_priv *priv);
 int iwl_force_reset(struct iwl_priv *priv, int mode);
 u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
-		       const u8 *ie, int ie_len, int left);
+		       const u8 *ta, const u8 *ie, int ie_len, int left);
 void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
 u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
 			      enum ieee80211_band band,
@@ -595,6 +594,9 @@
 }
 
 void iwl_bg_monitor_recover(unsigned long data);
+u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
+__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
+			   u32 addon, u32 beacon_interval);
 
 #ifdef CONFIG_PM
 int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
@@ -693,7 +695,6 @@
 extern void iwl_send_bt_config(struct iwl_priv *priv);
 extern int iwl_send_statistics_request(struct iwl_priv *priv,
 				       u8 flags, bool clear);
-extern int iwl_verify_ucode(struct iwl_priv *priv);
 extern int iwl_send_lq_cmd(struct iwl_priv *priv,
 		struct iwl_link_quality_cmd *lq, u8 flags, bool init);
 void iwl_apm_stop(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 9659c5d..7d9ffc1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -106,27 +106,6 @@
 	.open = iwl_dbgfs_open_file_generic,                            \
 };
 
-int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
-	int p = 0;
-
-	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
-		       le32_to_cpu(priv->statistics.flag));
-	if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
-		p += scnprintf(buf + p, bufsz - p,
-			       "\tStatistics have been cleared\n");
-	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
-		       (le32_to_cpu(priv->statistics.flag) &
-			UCODE_STATISTICS_FREQUENCY_MSK)
-			? "2.4 GHz" : "5.2 GHz");
-	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
-		       (le32_to_cpu(priv->statistics.flag) &
-			UCODE_STATISTICS_NARROW_BAND_MSK)
-			? "enabled" : "disabled");
-	return p;
-}
-EXPORT_SYMBOL(iwl_dbgfs_statistics_flag);
-
 static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
 						char __user *user_buf,
 						size_t count, loff_t *ppos) {
@@ -330,45 +309,35 @@
 
 	for (i = 0; i < max_sta; i++) {
 		station = &priv->stations[i];
-		if (station->used) {
-			pos += scnprintf(buf + pos, bufsz - pos,
-					"station %d:\ngeneral data:\n", i+1);
-			pos += scnprintf(buf + pos, bufsz - pos, "id: %u\n",
-					station->sta.sta.sta_id);
-			pos += scnprintf(buf + pos, bufsz - pos, "mode: %u\n",
-					station->sta.mode);
-			pos += scnprintf(buf + pos, bufsz - pos,
-					"flags: 0x%x\n",
-					station->sta.station_flags_msk);
-			pos += scnprintf(buf + pos, bufsz - pos, "tid data:\n");
-			pos += scnprintf(buf + pos, bufsz - pos,
-					"seq_num\t\ttxq_id");
-			pos += scnprintf(buf + pos, bufsz - pos,
-					"\tframe_count\twait_for_ba\t");
-			pos += scnprintf(buf + pos, bufsz - pos,
-					"start_idx\tbitmap0\t");
-			pos += scnprintf(buf + pos, bufsz - pos,
-					"bitmap1\trate_n_flags");
-			pos += scnprintf(buf + pos, bufsz - pos, "\n");
+		if (!station->used)
+			continue;
+		pos += scnprintf(buf + pos, bufsz - pos,
+				 "station %d - addr: %pM, flags: %#x\n",
+				 i, station->sta.sta.addr,
+				 station->sta.station_flags_msk);
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"TID\tseq_num\ttxq_id\tframes\ttfds\t");
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"start_idx\tbitmap\t\t\trate_n_flags\n");
 
-			for (j = 0; j < MAX_TID_COUNT; j++) {
+		for (j = 0; j < MAX_TID_COUNT; j++) {
+			pos += scnprintf(buf + pos, bufsz - pos,
+				"%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+				j, station->tid[j].seq_number,
+				station->tid[j].agg.txq_id,
+				station->tid[j].agg.frame_count,
+				station->tid[j].tfds_in_queue,
+				station->tid[j].agg.start_idx,
+				station->tid[j].agg.bitmap,
+				station->tid[j].agg.rate_n_flags);
+
+			if (station->tid[j].agg.wait_for_ba)
 				pos += scnprintf(buf + pos, bufsz - pos,
-						"[%d]:\t\t%u", j,
-						station->tid[j].seq_number);
-				pos += scnprintf(buf + pos, bufsz - pos,
-						"\t%u\t\t%u\t\t%u\t\t",
-						station->tid[j].agg.txq_id,
-						station->tid[j].agg.frame_count,
-						station->tid[j].agg.wait_for_ba);
-				pos += scnprintf(buf + pos, bufsz - pos,
-						"%u\t%llu\t%u",
-						station->tid[j].agg.start_idx,
-						(unsigned long long)station->tid[j].agg.bitmap,
-						station->tid[j].agg.rate_n_flags);
-				pos += scnprintf(buf + pos, bufsz - pos, "\n");
-			}
+						 " - waitforba");
 			pos += scnprintf(buf + pos, bufsz - pos, "\n");
 		}
+
+		pos += scnprintf(buf + pos, bufsz - pos, "\n");
 	}
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
@@ -1049,8 +1018,13 @@
 						rxq->write);
 	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
 						rxq->free_count);
-	pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+	if (rxq->rb_stts) {
+		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
 			 le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF);
+	} else {
+		pos += scnprintf(buf + pos, bufsz - pos,
+					"closed_rb_num: Not Allocated\n");
+	}
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
@@ -1456,10 +1430,10 @@
 		return -EFAULT;
 	if (sscanf(buf, "%d", &plcp) != 1)
 		return -EINVAL;
-	if ((plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
+	if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
 		(plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
 		priv->cfg->plcp_delta_threshold =
-			IWL_MAX_PLCP_ERR_THRESHOLD_DEF;
+			IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
 	else
 		priv->cfg->plcp_delta_threshold = plcp;
 	return count;
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index f3f3473..728752a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -48,25 +48,6 @@
 #include "iwl-power.h"
 #include "iwl-agn-rs.h"
 
-/* configuration for the iwl4965 */
-extern struct iwl_cfg iwl4965_agn_cfg;
-extern struct iwl_cfg iwl5300_agn_cfg;
-extern struct iwl_cfg iwl5100_agn_cfg;
-extern struct iwl_cfg iwl5350_agn_cfg;
-extern struct iwl_cfg iwl5100_bgn_cfg;
-extern struct iwl_cfg iwl5100_abg_cfg;
-extern struct iwl_cfg iwl5150_agn_cfg;
-extern struct iwl_cfg iwl5150_abg_cfg;
-extern struct iwl_cfg iwl6000g2a_2agn_cfg;
-extern struct iwl_cfg iwl6000i_2agn_cfg;
-extern struct iwl_cfg iwl6000i_2abg_cfg;
-extern struct iwl_cfg iwl6000i_2bg_cfg;
-extern struct iwl_cfg iwl6000_3agn_cfg;
-extern struct iwl_cfg iwl6050_2agn_cfg;
-extern struct iwl_cfg iwl6050_2abg_cfg;
-extern struct iwl_cfg iwl1000_bgn_cfg;
-extern struct iwl_cfg iwl1000_bg_cfg;
-
 struct iwl_tx_queue;
 
 /* CT-KILL constants */
@@ -133,8 +114,8 @@
 	 * structure is stored at the end of the shared queue memory. */
 	u32 flags;
 
-	DECLARE_PCI_UNMAP_ADDR(mapping)
-	DECLARE_PCI_UNMAP_LEN(len)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_LEN(len);
 };
 
 /*
@@ -157,11 +138,11 @@
 				* space more than this */
 	int high_mark;         /* high watermark, stop queue if free
 				* space less than this */
-} __attribute__ ((packed));
+} __packed;
 
 /* One for each TFD */
 struct iwl_tx_info {
-	struct sk_buff *skb[IWL_NUM_OF_TBS - 1];
+	struct sk_buff *skb;
 };
 
 /**
@@ -343,8 +324,8 @@
 		struct iwl_tx_cmd tx;
 		struct iwl6000_channel_switch_cmd chswitch;
 		u8 payload[DEF_CMD_PAYLOAD_SIZE];
-	} __attribute__ ((packed)) cmd;
-} __attribute__ ((packed));
+	} __packed cmd;
+} __packed;
 
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
 
@@ -367,7 +348,7 @@
 /**
  * struct iwl_rx_queue - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @dma_addr: bus address of buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
@@ -381,7 +362,7 @@
  */
 struct iwl_rx_queue {
 	__le32 *bd;
-	dma_addr_t dma_addr;
+	dma_addr_t bd_dma;
 	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
 	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
 	u32 read;
@@ -433,7 +414,7 @@
 
 
 struct iwl_tid_data {
-	u16 seq_number;
+	u16 seq_number; /* agn only */
 	u16 tfds_in_queue;
 	struct iwl_ht_agg agg;
 };
@@ -583,6 +564,12 @@
 	IWL_UCODE_TLV_INIT_DATA		= 4,
 	IWL_UCODE_TLV_BOOT		= 5,
 	IWL_UCODE_TLV_PROBE_MAX_LEN	= 6, /* a u32 value */
+	IWL_UCODE_TLV_RUNT_EVTLOG_PTR	= 8,
+	IWL_UCODE_TLV_RUNT_EVTLOG_SIZE	= 9,
+	IWL_UCODE_TLV_RUNT_ERRLOG_PTR	= 10,
+	IWL_UCODE_TLV_INIT_EVTLOG_PTR	= 11,
+	IWL_UCODE_TLV_INIT_EVTLOG_SIZE	= 12,
+	IWL_UCODE_TLV_INIT_ERRLOG_PTR	= 13,
 };
 
 struct iwl_ucode_tlv {
@@ -590,7 +577,7 @@
 	__le16 alternative;	/* see comment */
 	__le32 length;		/* not including type/length fields */
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 #define IWL_TLV_UCODE_MAGIC	0x0a4c5749
 
@@ -675,6 +662,7 @@
  * @sw_crypto: 0 for hw, 1 for sw
  * @max_xxx_size: for ucode uses
  * @ct_kill_threshold: temperature threshold
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
  * @calib_init_cfg: setup initial calibrations for the hw
  * @struct iwl_sensitivity_ranges: range of sensitivity values
  */
@@ -701,6 +689,7 @@
 	u32 ct_kill_threshold; /* value in hw-dependent units */
 	u32 ct_kill_exit_threshold; /* value in hw-dependent units */
 				    /* for 1000, 6000 series and up */
+	u16 beacon_time_tsf_bits;
 	u32 calib_init_cfg;
 	const struct iwl_sensitivity_ranges *sens;
 };
@@ -1047,11 +1036,12 @@
  * This is the threshold value of plcp error rate per 100mSecs.  It is
  * used to set and check for the validity of plcp_delta.
  */
-#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN	(0)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN	(1)
 #define IWL_MAX_PLCP_ERR_THRESHOLD_DEF	(50)
 #define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF	(100)
 #define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF	(200)
 #define IWL_MAX_PLCP_ERR_THRESHOLD_MAX	(255)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE	(0)
 
 #define IWL_DELAY_NEXT_FORCE_RF_RESET  (HZ*3)
 #define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
@@ -1075,6 +1065,20 @@
 	unsigned long last_force_reset_jiffies;
 };
 
+/* extended beacon time format bit shifting */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0  - interval
+ */
+#define IWL3945_EXT_BEACON_TIME_POS	24
+/*
+ * for _agn devices
+ * bits 31:22 - extended
+ * bits 21:0  - interval
+ */
+#define IWLAGN_EXT_BEACON_TIME_POS	22
+
 struct iwl_priv {
 
 	/* ieee device used by generic ieee processing code */
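As a quick sanity check of the bit layouts documented above, the two _POS values yield the following interval/extended masks (standalone snippet for illustration only, not part of the patch):

#include <stdio.h>

#define IWL3945_EXT_BEACON_TIME_POS	24
#define IWLAGN_EXT_BEACON_TIME_POS	22

int main(void)
{
	/* low mask = interval part, high mask = extended (beacon count) part */
	printf("3945: interval %#010x, extended %#010x\n",
	       (1u << IWL3945_EXT_BEACON_TIME_POS) - 1,
	       ~((1u << IWL3945_EXT_BEACON_TIME_POS) - 1));
	printf("agn:  interval %#010x, extended %#010x\n",
	       (1u << IWLAGN_EXT_BEACON_TIME_POS) - 1,
	       ~((1u << IWLAGN_EXT_BEACON_TIME_POS) - 1));
	return 0;	/* 0x00ffffff/0xff000000 and 0x003fffff/0xffc00000 */
}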
@@ -1109,7 +1113,7 @@
 	/* force reset */
 	struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
 
-	/* we allocate array of iwl4965_channel_info for NIC's valid channels.
+	/* we allocate array of iwl_channel_info for NIC's valid channels.
 	 *    Access via channel # using indirect index array */
 	struct iwl_channel_info *channel_info;	/* channel info array */
 	u8 channel_count;	/* # of channels */
@@ -1127,6 +1131,7 @@
 	void *scan_cmd;
 	enum ieee80211_band scan_band;
 	struct cfg80211_scan_request *scan_request;
+	struct ieee80211_vif *scan_vif;
 	bool is_internal_short_scan;
 	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
 	u8 mgmt_tx_ant;
@@ -1174,7 +1179,7 @@
 	struct iwl_switch_rxon switch_rxon;
 
 	/* 1st responses from initialize and runtime uCode images.
-	 * 4965's initialize alive response contains some calibration data. */
+	 * _agn's initialize alive response contains some calibration data. */
 	struct iwl_init_alive_resp card_alive_init;
 	struct iwl_alive_resp card_alive;
 
@@ -1220,18 +1225,12 @@
 	struct iwl_power_mgr power_data;
 	struct iwl_tt_mgmt thermal_throttle;
 
-	struct iwl_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_notif_statistics accum_statistics;
-	struct iwl_notif_statistics delta_statistics;
-	struct iwl_notif_statistics max_delta;
-#endif
-
 	/* context information */
 	u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
-	u8 mac_addr[ETH_ALEN];
 
-	/*station table variables */
+	/* station table variables */
+
+	/* Note: if lock and sta_lock are needed, lock must be acquired first */
 	spinlock_t sta_lock;
 	int num_stations;
 	struct iwl_station_entry stations[IWL_STATION_COUNT];
@@ -1273,7 +1272,7 @@
 			struct delayed_work rfkill_poll;
 
 			struct iwl3945_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
 			struct iwl3945_notif_statistics accum_statistics;
 			struct iwl3945_notif_statistics delta_statistics;
 			struct iwl3945_notif_statistics max_delta;
@@ -1315,6 +1314,16 @@
 			bool last_phy_res_valid;
 
 			struct completion firmware_loading_complete;
+
+			u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+			u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+
+			struct iwl_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+			struct iwl_notif_statistics accum_statistics;
+			struct iwl_notif_statistics delta_statistics;
+			struct iwl_notif_statistics max_delta;
+#endif
 		} _agn;
 #endif
 	};
@@ -1353,9 +1362,7 @@
 	/* debugging info */
 	u32 debug_level; /* per device debugging will override global
 			    iwl_debug_level if set */
-	u32 framecnt_to_us;
-	atomic_t restrict_refcnt;
-	bool disable_ht40;
+#endif /* CONFIG_IWLWIFI_DEBUG */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	/* debugfs */
 	u16 tx_traffic_idx;
@@ -1364,8 +1371,8 @@
 	u8 *rx_traffic;
 	struct dentry *debugfs_dir;
 	u32 dbgfs_sram_offset, dbgfs_sram_len;
+	bool disable_ht40;
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
-#endif /* CONFIG_IWLWIFI_DEBUG */
 
 	struct work_struct txpower_work;
 	u32 disable_sens_cal;
@@ -1419,9 +1426,9 @@
 static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
 							 int txq_id, int idx)
 {
-	if (priv->txq[txq_id].txb[idx].skb[0])
+	if (priv->txq[txq_id].txb[idx].skb)
 		return (struct ieee80211_hdr *)priv->txq[txq_id].
-				txb[idx].skb[0]->data;
+				txb[idx].skb->data;
 	return NULL;
 }
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index ee11452..a45d02e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -629,6 +629,9 @@
 	    calib_ver < priv->cfg->eeprom_calib_ver)
 		goto err;
 
+	IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+		 eeprom_ver, calib_ver);
+
 	return 0;
 err:
 	IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 95aa202..5488006 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -118,7 +118,7 @@
 struct iwl_eeprom_channel {
 	u8 flags;		/* EEPROM_CHANNEL_* flags copied from EEPROM */
 	s8 max_power_avg;	/* max power (dBm) on this chnl, limit 31 */
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * iwl_eeprom_enhanced_txpwr structure
@@ -144,7 +144,7 @@
 	s8 reserved;
 	s8 mimo2_max;
 	s8 mimo3_max;
-} __attribute__ ((packed));
+} __packed;
 
 /* 3945 Specific */
 #define EEPROM_3945_EEPROM_VERSION	(0x2f)
@@ -312,7 +312,7 @@
 	u8 gain_idx;		/* Index into gain table */
 	u8 actual_pow;		/* Measured RF output power, half-dBm */
 	s8 pa_det;		/* Power amp detector level (not used) */
-} __attribute__ ((packed));
+} __packed;
 
 
 /*
@@ -328,7 +328,7 @@
 	struct iwl_eeprom_calib_measure
 		measurements[EEPROM_TX_POWER_TX_CHAINS]
 			[EEPROM_TX_POWER_MEASUREMENTS];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * txpower subband info.
@@ -345,7 +345,7 @@
 	u8 ch_to;	/* channel number of highest channel in subband */
 	struct iwl_eeprom_calib_ch_info ch1;
 	struct iwl_eeprom_calib_ch_info ch2;
-} __attribute__ ((packed));
+} __packed;
 
 
 /*
@@ -374,7 +374,7 @@
 	__le16 voltage;		/* signed */
 	struct iwl_eeprom_calib_subband_info
 		band_info[EEPROM_TX_POWER_BANDS];
-} __attribute__ ((packed));
+} __packed;
 
 
 #define ADDRESS_MSK                 0x0000FFFF
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 113c366..a3fcbb5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -449,7 +449,7 @@
 	__le16 finished_rb_num;
 	__le16 finished_fr_nam;
 	__le32 __unused; /* 3945 only */
-} __attribute__ ((packed));
+} __packed;
 
 
 #define TFD_QUEUE_SIZE_MAX      (256)
@@ -475,7 +475,7 @@
 struct iwl_tfd_tb {
 	__le32 lo;
 	__le16 hi_n_len;
-} __attribute__((packed));
+} __packed;
 
 /**
  * struct iwl_tfd
@@ -510,7 +510,7 @@
 	u8 num_tbs;
 	struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
 	__le32 __pad;
-} __attribute__ ((packed));
+} __packed;
 
 /* Keep Warm Size */
 #define IWL_KW_SIZE 0x1000	/* 4k */
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 3ff6b9d..621abe3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -92,6 +92,11 @@
 static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
 				    struct fw_desc *desc)
 {
+	if (!desc->len) {
+		desc->v_addr = NULL;
+		return -EINVAL;
+	}
+
 	desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
 					  &desc->p_addr, GFP_KERNEL);
 	return (desc->v_addr != NULL) ? 0 : -ENOMEM;
@@ -170,4 +175,26 @@
 	iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
 }
 
+/**
+ * iwl_beacon_time_mask_low - mask of the low-order (interval) bits of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of low-order bits used for the interval part
+ */
+static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
+					   u16 tsf_bits)
+{
+	return (1 << tsf_bits) - 1;
+}
+
+/**
+ * iwl_beacon_time_mask_high - mask of the high-order (extended) bits of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of low-order bits used for the interval part
+ */
+static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
+					    u16 tsf_bits)
+{
+	return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
+
 #endif				/* __iwl_helpers_h__ */
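Taken together, the two helpers above simply partition the 32-bit beacon time into its interval and extended halves. A minimal usage sketch (illustrative only; it assumes a priv whose hw_params.beacon_time_tsf_bits has already been set, and the function name is made up):

/* split a packed beacon time into its two parts and dump them */
static void iwl_dump_beacon_time(struct iwl_priv *priv, u32 btime)
{
	u16 bits = priv->hw_params.beacon_time_tsf_bits;
	u32 low  = btime & iwl_beacon_time_mask_low(priv, bits);
	u32 high = (btime & iwl_beacon_time_mask_high(priv, bits)) >> bits;

	/* high = elapsed beacon intervals, low = usec within the interval */
	printk(KERN_DEBUG "beacon time: %u intervals + %u usec\n", high, low);
}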
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 0a5d7cf..b437f31 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -175,7 +175,7 @@
 	INIT_LIST_HEAD(&rxq->rx_used);
 
 	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
-	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
+	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
 				     GFP_KERNEL);
 	if (!rxq->bd)
 		goto err_bd;
@@ -199,32 +199,12 @@
 
 err_rb:
 	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-			  rxq->dma_addr);
+			  rxq->bd_dma);
 err_bd:
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(iwl_rx_queue_alloc);
 
-void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb)
-
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_missed_beacon_notif *missed_beacon;
-
-	missed_beacon = &pkt->u.missed_beacon;
-	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
-	    priv->missed_beacon_threshold) {
-		IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
-		    le32_to_cpu(missed_beacon->consecutive_missed_beacons),
-		    le32_to_cpu(missed_beacon->total_missed_becons),
-		    le32_to_cpu(missed_beacon->num_recvd_beacons),
-		    le32_to_cpu(missed_beacon->num_expected_beacons));
-		if (!test_bit(STATUS_SCANNING, &priv->status))
-			iwl_init_sensitivity(priv);
-	}
-}
-EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
 
 void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
 					  struct iwl_rx_mem_buffer *rxb)
@@ -243,161 +223,6 @@
 }
 EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
 
-
-
-/* Calculate noise level, based on measurements during network silence just
- *   before arriving beacon.  This measurement can be done only if we know
- *   exactly when to expect beacons, therefore only when we're associated. */
-static void iwl_rx_calc_noise(struct iwl_priv *priv)
-{
-	struct statistics_rx_non_phy *rx_info
-				= &(priv->statistics.rx.general);
-	int num_active_rx = 0;
-	int total_silence = 0;
-	int bcn_silence_a =
-		le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
-	int bcn_silence_b =
-		le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
-	int bcn_silence_c =
-		le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
-	int last_rx_noise;
-
-	if (bcn_silence_a) {
-		total_silence += bcn_silence_a;
-		num_active_rx++;
-	}
-	if (bcn_silence_b) {
-		total_silence += bcn_silence_b;
-		num_active_rx++;
-	}
-	if (bcn_silence_c) {
-		total_silence += bcn_silence_c;
-		num_active_rx++;
-	}
-
-	/* Average among active antennas */
-	if (num_active_rx)
-		last_rx_noise = (total_silence / num_active_rx) - 107;
-	else
-		last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
-	IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
-			bcn_silence_a, bcn_silence_b, bcn_silence_c,
-			last_rx_noise);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-/*
- *  based on the assumption of all statistics counter are in DWORD
- *  FIXME: This function is for debugging, do not deal with
- *  the case of counters roll-over.
- */
-static void iwl_accumulative_statistics(struct iwl_priv *priv,
-					__le32 *stats)
-{
-	int i;
-	__le32 *prev_stats;
-	u32 *accum_stats;
-	u32 *delta, *max_delta;
-
-	prev_stats = (__le32 *)&priv->statistics;
-	accum_stats = (u32 *)&priv->accum_statistics;
-	delta = (u32 *)&priv->delta_statistics;
-	max_delta = (u32 *)&priv->max_delta;
-
-	for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
-	     i += sizeof(__le32), stats++, prev_stats++, delta++,
-	     max_delta++, accum_stats++) {
-		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
-			*delta = (le32_to_cpu(*stats) -
-				le32_to_cpu(*prev_stats));
-			*accum_stats += *delta;
-			if (*delta > *max_delta)
-				*max_delta = *delta;
-		}
-	}
-
-	/* reset accumulative statistics for "no-counter" type statistics */
-	priv->accum_statistics.general.temperature =
-		priv->statistics.general.temperature;
-	priv->accum_statistics.general.temperature_m =
-		priv->statistics.general.temperature_m;
-	priv->accum_statistics.general.ttl_timestamp =
-		priv->statistics.general.ttl_timestamp;
-	priv->accum_statistics.tx.tx_power.ant_a =
-		priv->statistics.tx.tx_power.ant_a;
-	priv->accum_statistics.tx.tx_power.ant_b =
-		priv->statistics.tx.tx_power.ant_b;
-	priv->accum_statistics.tx.tx_power.ant_c =
-		priv->statistics.tx.tx_power.ant_c;
-}
-#endif
-
-#define REG_RECALIB_PERIOD (60)
-
-/**
- * iwl_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-bool iwl_good_plcp_health(struct iwl_priv *priv,
-				struct iwl_rx_packet *pkt)
-{
-	bool rc = true;
-	int combined_plcp_delta;
-	unsigned int plcp_msec;
-	unsigned long plcp_received_jiffies;
-
-	/*
-	 * check for plcp_err and trigger radio reset if it exceeds
-	 * the plcp error threshold plcp_delta.
-	 */
-	plcp_received_jiffies = jiffies;
-	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
-					(long) priv->plcp_jiffies);
-	priv->plcp_jiffies = plcp_received_jiffies;
-	/*
-	 * check to make sure plcp_msec is not 0 to prevent division
-	 * by zero.
-	 */
-	if (plcp_msec) {
-		combined_plcp_delta =
-			(le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
-			le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
-			(le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
-			le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
-
-		if ((combined_plcp_delta > 0) &&
-		    ((combined_plcp_delta * 100) / plcp_msec) >
-			priv->cfg->plcp_delta_threshold) {
-			/*
-			 * if plcp_err exceed the threshold,
-			 * the following data is printed in csv format:
-			 *    Text: plcp_err exceeded %d,
-			 *    Received ofdm.plcp_err,
-			 *    Current ofdm.plcp_err,
-			 *    Received ofdm_ht.plcp_err,
-			 *    Current ofdm_ht.plcp_err,
-			 *    combined_plcp_delta,
-			 *    plcp_msec
-			 */
-			IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
-				"%u, %u, %u, %u, %d, %u mSecs\n",
-				priv->cfg->plcp_delta_threshold,
-				le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
-				le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
-				le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
-				le32_to_cpu(
-				  priv->statistics.rx.ofdm_ht.plcp_err),
-				combined_plcp_delta, plcp_msec);
-			rc = false;
-		}
-	}
-	return rc;
-}
-EXPORT_SYMBOL(iwl_good_plcp_health);
-
 void iwl_recover_from_statistics(struct iwl_priv *priv,
 				struct iwl_rx_packet *pkt)
 {
@@ -431,69 +256,6 @@
 }
 EXPORT_SYMBOL(iwl_recover_from_statistics);
 
-void iwl_rx_statistics(struct iwl_priv *priv,
-			      struct iwl_rx_mem_buffer *rxb)
-{
-	int change;
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-
-	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
-		     (int)sizeof(priv->statistics),
-		     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-
-	change = ((priv->statistics.general.temperature !=
-		   pkt->u.stats.general.temperature) ||
-		  ((priv->statistics.flag &
-		    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
-		   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-	iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
-	iwl_recover_from_statistics(priv, pkt);
-
-	memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
-
-	set_bit(STATUS_STATISTICS, &priv->status);
-
-	/* Reschedule the statistics timer to occur in
-	 * REG_RECALIB_PERIOD seconds to ensure we get a
-	 * thermal update even if the uCode doesn't give
-	 * us one */
-	mod_timer(&priv->statistics_periodic, jiffies +
-		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
-
-	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
-	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
-		iwl_rx_calc_noise(priv);
-		queue_work(priv->workqueue, &priv->run_time_calib_work);
-	}
-	if (priv->cfg->ops->lib->temp_ops.temperature && change)
-		priv->cfg->ops->lib->temp_ops.temperature(priv);
-}
-EXPORT_SYMBOL(iwl_rx_statistics);
-
-void iwl_reply_statistics(struct iwl_priv *priv,
-			      struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUG
-		memset(&priv->accum_statistics, 0,
-			sizeof(struct iwl_notif_statistics));
-		memset(&priv->delta_statistics, 0,
-			sizeof(struct iwl_notif_statistics));
-		memset(&priv->max_delta, 0,
-			sizeof(struct iwl_notif_statistics));
-#endif
-		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
-	}
-	iwl_rx_statistics(priv, rxb);
-}
-EXPORT_SYMBOL(iwl_reply_statistics);
-
 /*
  * returns non-zero if packet should be dropped
  */
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 386c5f9..2a7c399 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -333,7 +333,8 @@
 		goto out_unlock;
 	}
 
-	if (test_bit(STATUS_SCANNING, &priv->status)) {
+	if (test_bit(STATUS_SCANNING, &priv->status) &&
+	    !priv->is_internal_short_scan) {
 		IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
 		ret = -EAGAIN;
 		goto out_unlock;
@@ -348,8 +349,16 @@
 	/* mac80211 will only ask for one band at a time */
 	priv->scan_band = req->channels[0]->band;
 	priv->scan_request = req;
+	priv->scan_vif = vif;
 
-	ret = iwl_scan_initiate(priv, vif);
+	/*
+	 * If an internal scan is in progress, just set
+	 * up the scan_request as per above.
+	 */
+	if (priv->is_internal_short_scan)
+		ret = 0;
+	else
+		ret = iwl_scan_initiate(priv, vif);
 
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 
@@ -438,7 +447,7 @@
  */
 
 u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
-		       const u8 *ies, int ie_len, int left)
+		       const u8 *ta, const u8 *ies, int ie_len, int left)
 {
 	int len = 0;
 	u8 *pos = NULL;
@@ -451,7 +460,7 @@
 
 	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
 	memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
-	memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
+	memcpy(frame->sa, ta, ETH_ALEN);
 	memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
 	frame->seq_ctrl = 0;
 
@@ -514,7 +523,30 @@
 		priv->is_internal_short_scan = false;
 		IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
 		internal = true;
+	} else {
+		priv->scan_request = NULL;
+		priv->scan_vif = NULL;
 	}
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		goto out;
+
+	if (internal && priv->scan_request)
+		iwl_scan_initiate(priv, priv->scan_vif);
+
+	/* Since setting the TXPOWER may have been deferred while
+	 * performing the scan, fire one off */
+	iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+
+	/*
+	 * Since setting the RXON may have been deferred while
+	 * performing the scan, fire one off if needed
+	 */
+	if (memcmp(&priv->active_rxon,
+		   &priv->staging_rxon, sizeof(priv->staging_rxon)))
+		iwlcore_commit_rxon(priv);
+
+ out:
 	mutex_unlock(&priv->mutex);
 
 	/*
@@ -524,15 +556,6 @@
 	 */
 	if (!internal)
 		ieee80211_scan_completed(priv->hw, false);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	/* Since setting the TXPOWER may have been deferred while
-	 * performing the scan, fire one off */
-	mutex_lock(&priv->mutex);
-	iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
-	mutex_unlock(&priv->mutex);
 }
 EXPORT_SYMBOL(iwl_bg_scan_completed);
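
The hunk above makes a mac80211-requested scan coexist with the driver's internal short scan: the request (and vif) are only recorded while the internal scan is running, and iwl_scan_initiate() is called from iwl_bg_scan_completed() once it finishes; deferred TX power and RXON updates are also flushed there under the mutex. A minimal userspace model of that deferral, with hypothetical types and names standing in for the driver's:

#include <stdbool.h>
#include <stdio.h>

struct scan_state {
	bool internal_scan_active;
	const char *pending_request;	/* stands in for priv->scan_request */
};

/* external scan request: only record it while an internal scan owns the HW */
static int request_scan(struct scan_state *s, const char *req)
{
	s->pending_request = req;
	if (s->internal_scan_active)
		return 0;	/* deferred: started from scan_completed() */
	printf("starting scan: %s\n", req);
	return 0;
}

/* scan-completed path: kick off any request deferred above */
static void scan_completed(struct scan_state *s, bool was_internal)
{
	s->internal_scan_active = false;
	if (was_internal && s->pending_request)
		printf("starting deferred scan: %s\n", s->pending_request);
}

int main(void)
{
	struct scan_state s = { .internal_scan_active = true, .pending_request = NULL };

	request_scan(&s, "mac80211 request");	/* recorded, not started */
	scan_completed(&s, true);		/* internal scan done: start it now */
	return 0;
}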
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index af6babe..c4ca0b5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -42,7 +42,7 @@
 	__le64 start_time;
 	__le16 duration;
 	u8 map;
-} __attribute__ ((packed));
+} __packed;
 
 enum {				/* ieee80211_measurement_request.mode */
 	/* Bit 0 is reserved */
@@ -63,13 +63,13 @@
 	u8 channel;
 	__le64 start_time;
 	__le16 duration;
-} __attribute__ ((packed));
+} __packed;
 
 struct ieee80211_info_element {
 	u8 id;
 	u8 len;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct ieee80211_measurement_request {
 	struct ieee80211_info_element ie;
@@ -77,7 +77,7 @@
 	u8 mode;
 	u8 type;
 	struct ieee80211_measurement_params params[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct ieee80211_measurement_report {
 	struct ieee80211_info_element ie;
@@ -87,6 +87,6 @@
 	union {
 		struct ieee80211_basic_report basic[0];
 	} u;
-} __attribute__ ((packed));
+} __packed;
 
 #endif
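
The spectrum header above (and the iwmc3200wifi headers later in this patch) only swap the spelled-out attribute for the kernel's __packed shorthand; the two are equivalent. A standalone illustration, assuming the usual definition of the macro:

#include <stdio.h>

#ifndef __packed
#define __packed __attribute__((packed))
#endif

struct example_report {
	unsigned char id;	/* 1 byte */
	unsigned int duration;	/* would normally be padded to offset 4 */
} __packed;

int main(void)
{
	/* prints 5, not 8: the attribute removes the padding */
	printf("sizeof = %zu\n", sizeof(struct example_report));
	return 0;
}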
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index c27c13f..9511f03 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -30,6 +30,7 @@
 #include <net/mac80211.h>
 #include <linux/etherdevice.h>
 #include <linux/sched.h>
+#include <linux/lockdep.h>
 
 #include "iwl-dev.h"
 #include "iwl-core.h"
@@ -54,18 +55,19 @@
 	}
 }
 
-static void iwl_process_add_sta_resp(struct iwl_priv *priv,
-				     struct iwl_addsta_cmd *addsta,
-				     struct iwl_rx_packet *pkt,
-				     bool sync)
+static int iwl_process_add_sta_resp(struct iwl_priv *priv,
+				    struct iwl_addsta_cmd *addsta,
+				    struct iwl_rx_packet *pkt,
+				    bool sync)
 {
 	u8 sta_id = addsta->sta.sta_id;
 	unsigned long flags;
+	int ret = -EIO;
 
 	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
 			pkt->hdr.flags);
-		return;
+		return ret;
 	}
 
 	IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
@@ -77,6 +79,7 @@
 	case ADD_STA_SUCCESS_MSK:
 		IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
 		iwl_sta_ucode_activate(priv, sta_id);
+		ret = 0;
 		break;
 	case ADD_STA_NO_ROOM_IN_TABLE:
 		IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
@@ -114,6 +117,8 @@
 		       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
 		       addsta->sta.addr);
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+	return ret;
 }
 
 static void iwl_add_sta_callback(struct iwl_priv *priv,
@@ -145,8 +150,10 @@
 
 	if (flags & CMD_ASYNC)
 		cmd.callback = iwl_add_sta_callback;
-	else
+	else {
 		cmd.flags |= CMD_WANT_SKB;
+		might_sleep();
+	}
 
 	cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
 	ret = iwl_send_cmd(priv, &cmd);
@@ -156,7 +163,7 @@
 
 	if (ret == 0) {
 		pkt = (struct iwl_rx_packet *)cmd.reply_page;
-		iwl_process_add_sta_resp(priv, sta, pkt, true);
+		ret = iwl_process_add_sta_resp(priv, sta, pkt, true);
 	}
 	iwl_free_pages(priv, cmd.reply_page);
 
@@ -311,10 +318,10 @@
 				  struct ieee80211_sta_ht_cap *ht_info,
 				  u8 *sta_id_r)
 {
-	struct iwl_station_entry *station;
 	unsigned long flags_spin;
 	int ret = 0;
 	u8 sta_id;
+	struct iwl_addsta_cmd sta_cmd;
 
 	*sta_id_r = 0;
 	spin_lock_irqsave(&priv->sta_lock, flags_spin);
@@ -347,14 +354,15 @@
 	}
 
 	priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
-	station = &priv->stations[sta_id];
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
 	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
 
 	/* Add station to device's station table */
-	ret = iwl_send_add_sta(priv, &station->sta, CMD_SYNC);
+	ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 	if (ret) {
-		IWL_ERR(priv, "Adding station %pM failed.\n", station->sta.sta.addr);
 		spin_lock_irqsave(&priv->sta_lock, flags_spin);
+		IWL_ERR(priv, "Adding station %pM failed.\n",
+			priv->stations[sta_id].sta.sta.addr);
 		priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
 		priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
 		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
@@ -488,7 +496,7 @@
 }
 
 static int iwl_send_remove_station(struct iwl_priv *priv,
-				   struct iwl_station_entry *station)
+				   const u8 *addr, int sta_id)
 {
 	struct iwl_rx_packet *pkt;
 	int ret;
@@ -505,7 +513,7 @@
 
 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
 	rm_sta_cmd.num_sta = 1;
-	memcpy(&rm_sta_cmd.addr, &station->sta.sta.addr , ETH_ALEN);
+	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
 
 	cmd.flags |= CMD_WANT_SKB;
 
@@ -525,7 +533,7 @@
 		switch (pkt->u.rem_sta.status) {
 		case REM_STA_SUCCESS_MSK:
 			spin_lock_irqsave(&priv->sta_lock, flags_spin);
-			iwl_sta_ucode_deactivate(priv, station->sta.sta.sta_id);
+			iwl_sta_ucode_deactivate(priv, sta_id);
 			spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
 			IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
 			break;
@@ -546,7 +554,6 @@
 int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
 		       const u8 *addr)
 {
-	struct iwl_station_entry *station;
 	unsigned long flags;
 
 	if (!iwl_is_ready(priv)) {
@@ -592,10 +599,9 @@
 
 	BUG_ON(priv->num_stations < 0);
 
-	station = &priv->stations[sta_id];
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-	return iwl_send_remove_station(priv, station);
+	return iwl_send_remove_station(priv, addr, sta_id);
 out_err:
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 	return -EINVAL;
@@ -643,11 +649,13 @@
  */
 void iwl_restore_stations(struct iwl_priv *priv)
 {
-	struct iwl_station_entry *station;
+	struct iwl_addsta_cmd sta_cmd;
+	struct iwl_link_quality_cmd lq;
 	unsigned long flags_spin;
 	int i;
 	bool found = false;
 	int ret;
+	bool send_lq;
 
 	if (!iwl_is_ready(priv)) {
 		IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
@@ -669,13 +677,20 @@
 
 	for (i = 0; i < priv->hw_params.max_stations; i++) {
 		if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+			memcpy(&sta_cmd, &priv->stations[i].sta,
+			       sizeof(struct iwl_addsta_cmd));
+			send_lq = false;
+			if (priv->stations[i].lq) {
+				memcpy(&lq, priv->stations[i].lq,
+				       sizeof(struct iwl_link_quality_cmd));
+				send_lq = true;
+			}
 			spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-			station = &priv->stations[i];
-			ret = iwl_send_add_sta(priv, &priv->stations[i].sta, CMD_SYNC);
+			ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 			if (ret) {
-				IWL_ERR(priv, "Adding station %pM failed.\n",
-					station->sta.sta.addr);
 				spin_lock_irqsave(&priv->sta_lock, flags_spin);
+				IWL_ERR(priv, "Adding station %pM failed.\n",
+					priv->stations[i].sta.sta.addr);
 				priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
 				priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
 				spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
@@ -684,8 +699,8 @@
 			 * Rate scaling has already been initialized, send
 			 * current LQ command
 			 */
-			if (station->lq)
-				iwl_send_lq_cmd(priv, station->lq, CMD_SYNC, true);
+			if (send_lq)
+				iwl_send_lq_cmd(priv, &lq, CMD_SYNC, true);
 			spin_lock_irqsave(&priv->sta_lock, flags_spin);
 			priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
 		}
@@ -823,7 +838,9 @@
 {
 	unsigned long flags;
 	__le16 key_flags = 0;
-	int ret;
+	struct iwl_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&priv->mutex);
 
 	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
 
@@ -863,11 +880,10 @@
 	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 
-	ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-	return ret;
+	return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 }
 
 static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
@@ -876,7 +892,9 @@
 {
 	unsigned long flags;
 	__le16 key_flags = 0;
-	int ret;
+	struct iwl_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&priv->mutex);
 
 	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
 	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -911,11 +929,10 @@
 	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 
-	ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-	return ret;
+	return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 }
 
 static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -972,24 +989,16 @@
 	unsigned long flags;
 	int i;
 
-	if (sta) {
-		sta_id = iwl_sta_id(sta);
-
-		if (sta_id == IWL_INVALID_STATION) {
-			IWL_DEBUG_MAC80211(priv, "leave - %pM not initialised.\n",
-					   sta->addr);
-			return;
-		}
-	} else
-		sta_id = priv->hw_params.bcast_sta_id;
-
-
 	if (iwl_scan_cancel(priv)) {
 		/* cancel scan failed, just live w/ bad key and rely
 		   briefly on SW decryption */
 		return;
 	}
 
+	sta_id = iwl_sta_id_or_broadcast(priv, sta);
+	if (sta_id == IWL_INVALID_STATION)
+		return;
+
 	spin_lock_irqsave(&priv->sta_lock, flags);
 
 	priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
@@ -1013,9 +1022,11 @@
 				u8 sta_id)
 {
 	unsigned long flags;
-	int ret = 0;
 	u16 key_flags;
 	u8 keyidx;
+	struct iwl_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&priv->mutex);
 
 	priv->key_mapping_key--;
 
@@ -1062,9 +1073,10 @@
 		spin_unlock_irqrestore(&priv->sta_lock, flags);
 		return 0;
 	}
-	ret =  iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
-	return ret;
+
+	return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 }
 EXPORT_SYMBOL(iwl_remove_dynamic_key);
 
@@ -1073,6 +1085,8 @@
 {
 	int ret;
 
+	lockdep_assert_held(&priv->mutex);
+
 	priv->key_mapping_key++;
 	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
 
@@ -1245,6 +1259,36 @@
 }
 EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station);
 
+/**
+ * iwl_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwlagn. Placed here to have all bcast station management
+ * code together.
+ */
+int iwl_update_bcast_station(struct iwl_priv *priv)
+{
+	unsigned long flags;
+	struct iwl_link_quality_cmd *link_cmd;
+	u8 sta_id = priv->hw_params.bcast_sta_id;
+
+	link_cmd = iwl_sta_alloc_lq(priv, sta_id);
+	if (!link_cmd) {
+		IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&priv->sta_lock, flags);
+	if (priv->stations[sta_id].lq)
+		kfree(priv->stations[sta_id].lq);
+	else
+		IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
+	priv->stations[sta_id].lq = link_cmd;
+	spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iwl_update_bcast_station);
+
 void iwl_dealloc_bcast_station(struct iwl_priv *priv)
 {
 	unsigned long flags;
@@ -1268,18 +1312,22 @@
 /**
  * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
  */
-void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
+int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
 {
 	unsigned long flags;
+	struct iwl_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&priv->mutex);
 
 	/* Remove "disable" flag, to enable Tx for this TID */
 	spin_lock_irqsave(&priv->sta_lock, flags);
 	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
 	priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+	return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 }
 EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid);
 
@@ -1288,6 +1336,9 @@
 {
 	unsigned long flags;
 	int sta_id;
+	struct iwl_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&priv->mutex);
 
 	sta_id = iwl_sta_id(sta);
 	if (sta_id == IWL_INVALID_STATION)
@@ -1299,10 +1350,10 @@
 	priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
 	priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-	return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
-				CMD_ASYNC);
+	return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 }
 EXPORT_SYMBOL(iwl_sta_rx_agg_start);
 
@@ -1311,6 +1362,9 @@
 {
 	unsigned long flags;
 	int sta_id;
+	struct iwl_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&priv->mutex);
 
 	sta_id = iwl_sta_id(sta);
 	if (sta_id == IWL_INVALID_STATION) {
@@ -1323,10 +1377,10 @@
 	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
 	priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-	return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
-					CMD_ASYNC);
+	return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 }
 EXPORT_SYMBOL(iwl_sta_rx_agg_stop);
 
@@ -1340,9 +1394,9 @@
 	priv->stations[sta_id].sta.sta.modify_mask = 0;
 	priv->stations[sta_id].sta.sleep_tx_count = 0;
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
 }
 EXPORT_SYMBOL(iwl_sta_modify_ps_wake);
 
@@ -1357,9 +1411,9 @@
 					STA_MODIFY_SLEEP_TX_COUNT_MSK;
 	priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
 }
 EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index c2a453a..ba95b1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -60,6 +60,7 @@
 void iwl_clear_ucode_stations(struct iwl_priv *priv);
 int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq);
 void iwl_dealloc_bcast_station(struct iwl_priv *priv);
+int iwl_update_bcast_station(struct iwl_priv *priv);
 int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
 int iwl_send_add_sta(struct iwl_priv *priv,
 		     struct iwl_addsta_cmd *sta, u8 flags);
@@ -73,7 +74,7 @@
 		       const u8 *addr);
 int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta);
-void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
+int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
 int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
 			 int tid, u16 ssn);
 int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -107,4 +108,33 @@
 
 	return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
 }
+
+/**
+ * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @priv: iwl priv
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
+					  struct ieee80211_sta *sta)
+{
+	int sta_id;
+
+	if (!sta)
+		return priv->hw_params.bcast_sta_id;
+
+	sta_id = iwl_sta_id(sta);
+
+	/*
+	 * mac80211 should not be passing a partially
+	 * initialised station!
+	 */
+	WARN_ON(sta_id == IWL_INVALID_STATION);
+
+	return sta_id;
+}
 #endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 1ece2ea..a81989c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -77,21 +77,6 @@
 }
 EXPORT_SYMBOL(iwl_txq_update_write_ptr);
 
-
-void iwl_free_tfds_in_queue(struct iwl_priv *priv,
-			    int sta_id, int tid, int freed)
-{
-	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
-		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
-	else {
-		IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
-			priv->stations[sta_id].tid[tid].tfds_in_queue,
-			freed);
-		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
-	}
-}
-EXPORT_SYMBOL(iwl_free_tfds_in_queue);
-
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
  * @txq: Transmit queue to deallocate.
@@ -169,15 +154,15 @@
 		}
 
 		pci_unmap_single(priv->pci_dev,
-				 pci_unmap_addr(&txq->meta[i], mapping),
-				 pci_unmap_len(&txq->meta[i], len),
+				 dma_unmap_addr(&txq->meta[i], mapping),
+				 dma_unmap_len(&txq->meta[i], len),
 				 PCI_DMA_BIDIRECTIONAL);
 	}
 	if (huge) {
 		i = q->n_window;
 		pci_unmap_single(priv->pci_dev,
-				 pci_unmap_addr(&txq->meta[i], mapping),
-				 pci_unmap_len(&txq->meta[i], len),
+				 dma_unmap_addr(&txq->meta[i], mapping),
+				 dma_unmap_len(&txq->meta[i], len),
 				 PCI_DMA_BIDIRECTIONAL);
 	}
 
@@ -287,7 +272,7 @@
 	/* Driver private data, only for Tx (not command) queues,
 	 * not shared with device. */
 	if (id != IWL_CMD_QUEUE_NUM) {
-		txq->txb = kmalloc(sizeof(txq->txb[0]) *
+		txq->txb = kzalloc(sizeof(txq->txb[0]) *
 				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
 		if (!txq->txb) {
 			IWL_ERR(priv, "kmalloc for auxiliary BD "
@@ -531,8 +516,8 @@
 
 	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
 				   fix_size, PCI_DMA_BIDIRECTIONAL);
-	pci_unmap_addr_set(out_meta, mapping, phys_addr);
-	pci_unmap_len_set(out_meta, len, fix_size);
+	dma_unmap_addr_set(out_meta, mapping, phys_addr);
+	dma_unmap_len_set(out_meta, len, fix_size);
 
 	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
 
@@ -626,8 +611,8 @@
 	meta = &txq->meta[cmd_index];
 
 	pci_unmap_single(priv->pci_dev,
-			 pci_unmap_addr(meta, mapping),
-			 pci_unmap_len(meta, len),
+			 dma_unmap_addr(meta, mapping),
+			 dma_unmap_len(meta, len),
 			 PCI_DMA_BIDIRECTIONAL);
 
 	/* Input error checking is done when commands are added to queue. */
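
The iwl-tx.c conversion above replaces the pci_unmap_* helpers with their dma_unmap_* equivalents; these pair with DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() in the structure that records the mapping. A sketch with a hypothetical struct name (the driver's own meta structure is assumed to declare its fields the same way):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

struct example_meta {
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* dma_addr_t when map state is tracked */
	DEFINE_DMA_UNMAP_LEN(len);	/* length of the mapping */
};

static void example_map(struct pci_dev *pdev, struct example_meta *meta,
			void *buf, size_t size)
{
	dma_addr_t phys = pci_map_single(pdev, buf, size, PCI_DMA_BIDIRECTIONAL);

	dma_unmap_addr_set(meta, mapping, phys);
	dma_unmap_len_set(meta, len, size);
}

static void example_unmap(struct pci_dev *pdev, struct example_meta *meta)
{
	pci_unmap_single(pdev,
			 dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);
}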
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index a27872d..8eb3471 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -197,6 +197,7 @@
 static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
 {
 	unsigned long flags;
+	struct iwl_addsta_cmd sta_cmd;
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
 	memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
@@ -205,11 +206,11 @@
 	priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
 	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
 	IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
-	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, 0);
-	return 0;
+	return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 }
 
 static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
@@ -474,10 +475,8 @@
 	u8 unicast;
 	u8 sta_id;
 	u8 tid = 0;
-	u16 seq_number = 0;
 	__le16 fc;
 	u8 wait_write_ptr = 0;
-	u8 *qc = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->lock, flags);
@@ -510,10 +509,7 @@
 	hdr_len = ieee80211_hdrlen(fc);
 
 	/* Find index into station table for destination station */
-	if (!info->control.sta)
-		sta_id = priv->hw_params.bcast_sta_id;
-	else
-		sta_id = iwl_sta_id(info->control.sta);
+	sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
 	if (sta_id == IWL_INVALID_STATION) {
 		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
 			       hdr->addr1);
@@ -523,16 +519,10 @@
 	IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
 
 	if (ieee80211_is_data_qos(fc)) {
-		qc = ieee80211_get_qos_ctl(hdr);
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
 		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
 		if (unlikely(tid >= MAX_TID_COUNT))
 			goto drop;
-		seq_number = priv->stations[sta_id].tid[tid].seq_number &
-				IEEE80211_SCTL_SEQ;
-		hdr->seq_ctrl = cpu_to_le16(seq_number) |
-			(hdr->seq_ctrl &
-				cpu_to_le16(IEEE80211_SCTL_FRAG));
-		seq_number += 0x10;
 	}
 
 	/* Descriptor for chosen Tx queue */
@@ -548,7 +538,7 @@
 
 	/* Set up driver data for this TFD */
 	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
-	txq->txb[q->write_ptr].skb[0] = skb;
+	txq->txb[q->write_ptr].skb = skb;
 
 	/* Init first empty entry in queue's array of Tx/cmd buffers */
 	out_cmd = txq->cmd[idx];
@@ -591,8 +581,6 @@
 
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		txq->need_update = 1;
-		if (qc)
-			priv->stations[sta_id].tid[tid].seq_number = seq_number;
 	} else {
 		wait_write_ptr = 1;
 		txq->need_update = 0;
@@ -631,8 +619,8 @@
 				    len, PCI_DMA_TODEVICE);
 	/* we do not map meta data ... so we can safely access address to
 	 * provide to unmap command*/
-	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
-	pci_unmap_len_set(out_meta, len, len);
+	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+	dma_unmap_len_set(out_meta, len, len);
 
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
@@ -677,55 +665,6 @@
 	return -1;
 }
 
-#define BEACON_TIME_MASK_LOW	0x00FFFFFF
-#define BEACON_TIME_MASK_HIGH	0xFF000000
-#define TIME_UNIT		1024
-
-/*
- * extended beacon time format
- * time in usec will be changed into a 32-bit value in 8:24 format
- * the high 1 byte is the beacon counts
- * the lower 3 bytes is the time in usec within one beacon interval
- */
-
-static u32 iwl3945_usecs_to_beacons(u32 usec, u32 beacon_interval)
-{
-	u32 quot;
-	u32 rem;
-	u32 interval = beacon_interval * 1024;
-
-	if (!interval || !usec)
-		return 0;
-
-	quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
-	rem = (usec % interval) & BEACON_TIME_MASK_LOW;
-
-	return (quot << 24) + rem;
-}
-
-/* base is usually what we get from ucode with each received frame,
- * the same as HW timer counter counting down
- */
-
-static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
-{
-	u32 base_low = base & BEACON_TIME_MASK_LOW;
-	u32 addon_low = addon & BEACON_TIME_MASK_LOW;
-	u32 interval = beacon_interval * TIME_UNIT;
-	u32 res = (base & BEACON_TIME_MASK_HIGH) +
-	    (addon & BEACON_TIME_MASK_HIGH);
-
-	if (base_low > addon_low)
-		res += base_low - addon_low;
-	else if (base_low < addon_low) {
-		res += interval + base_low - addon_low;
-		res += (1 << 24);
-	} else
-		res += (1 << 24);
-
-	return cpu_to_le32(res);
-}
-
 static int iwl3945_get_measurement(struct iwl_priv *priv,
 			       struct ieee80211_measurement_params *params,
 			       u8 type)
@@ -743,8 +682,7 @@
 	int duration = le16_to_cpu(params->duration);
 
 	if (iwl_is_associated(priv))
-		add_time =
-		    iwl3945_usecs_to_beacons(
+		add_time = iwl_usecs_to_beacons(priv,
 			le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
 			le16_to_cpu(priv->rxon_timing.beacon_interval));
 
@@ -759,8 +697,8 @@
 
 	if (iwl_is_associated(priv))
 		spectrum.start_time =
-		    iwl3945_add_beacon_time(priv->_3945.last_beacon_time,
-				add_time,
+			iwl_add_beacon_time(priv,
+				priv->_3945.last_beacon_time, add_time,
 				le16_to_cpu(priv->rxon_timing.beacon_interval));
 	else
 		spectrum.start_time = 0;
@@ -1233,7 +1171,7 @@
 	}
 
 	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-			  rxq->dma_addr);
+			  rxq->bd_dma);
 	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
 			  rxq->rb_stts, rxq->rb_stts_dma);
 	rxq->bd = NULL;
@@ -1314,6 +1252,8 @@
 		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
 
 	while (i != r) {
+		int len;
+
 		rxb = rxq->queue[i];
 
 		/* If an RXB doesn't have a Rx queue slot associated with it,
@@ -1328,8 +1268,9 @@
 			       PCI_DMA_FROMDEVICE);
 		pkt = rxb_addr(rxb);
 
-		trace_iwlwifi_dev_rx(priv, pkt,
-			le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+		len += sizeof(u32); /* account for status word */
+		trace_iwlwifi_dev_rx(priv, pkt, len);
 
 		/* Reclaim a command buffer only if this packet is a response
 		 *   to a (driver-originated) command.
@@ -1483,7 +1424,7 @@
 		    iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
 
 		IWL_ERR(priv,
-			"%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
+			"%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
 			desc_lookup(desc), desc, time, blink1, blink2,
 			ilink1, ilink2, data1);
 		trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
@@ -3022,14 +2963,16 @@
 		scan->tx_cmd.len = cpu_to_le16(
 			iwl_fill_probe_req(priv,
 				(struct ieee80211_mgmt *)scan->data,
+				vif->addr,
 				priv->scan_request->ie,
 				priv->scan_request->ie_len,
 				IWL_MAX_SCAN_SIZE - sizeof(*scan)));
 	} else {
+		/* use bcast addr, will not be transmitted but must be valid */
 		scan->tx_cmd.len = cpu_to_le16(
 			iwl_fill_probe_req(priv,
 				(struct ieee80211_mgmt *)scan->data,
-				NULL, 0,
+				iwl_bcast_addr, NULL, 0,
 				IWL_MAX_SCAN_SIZE - sizeof(*scan)));
 	}
 	/* select Rx antennas */
@@ -3158,19 +3101,16 @@
 	IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
 			vif->bss_conf.aid, vif->bss_conf.beacon_int);
 
-	if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+	if (vif->bss_conf.use_short_preamble)
 		priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
 	else
 		priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
 
 	if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
-		if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+		if (vif->bss_conf.use_short_slot)
 			priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
 		else
 			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
-		if (vif->type == NL80211_IFTYPE_ADHOC)
-			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 	}
 
 	iwlcore_commit_rxon(priv);
@@ -3334,8 +3274,7 @@
 
 		priv->staging_rxon.assoc_id = 0;
 
-		if (vif->bss_conf.assoc_capability &
-					WLAN_CAPABILITY_SHORT_PREAMBLE)
+		if (vif->bss_conf.use_short_preamble)
 			priv->staging_rxon.flags |=
 				RXON_FLG_SHORT_PREAMBLE_MSK;
 		else
@@ -3343,17 +3282,12 @@
 				~RXON_FLG_SHORT_PREAMBLE_MSK;
 
 		if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
-			if (vif->bss_conf.assoc_capability &
-					WLAN_CAPABILITY_SHORT_SLOT_TIME)
+			if (vif->bss_conf.use_short_slot)
 				priv->staging_rxon.flags |=
 					RXON_FLG_SHORT_SLOT_MSK;
 			else
 				priv->staging_rxon.flags &=
 					~RXON_FLG_SHORT_SLOT_MSK;
-
-			if (vif->type == NL80211_IFTYPE_ADHOC)
-				priv->staging_rxon.flags &=
-					~RXON_FLG_SHORT_SLOT_MSK;
 		}
 		/* restore RXON assoc */
 		priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
@@ -3386,17 +3320,9 @@
 	static_key = !iwl_is_associated(priv);
 
 	if (!static_key) {
-		if (!sta) {
-			sta_id = priv->hw_params.bcast_sta_id;
-		} else {
-			sta_id = iwl_sta_id(sta);
-			if (sta_id == IWL_INVALID_STATION) {
-				IWL_DEBUG_MAC80211(priv,
-						   "leave - %pM not in station map.\n",
-						   sta->addr);
-				return -EINVAL;
-			}
-		}
+		sta_id = iwl_sta_id_or_broadcast(priv, sta);
+		if (sta_id == IWL_INVALID_STATION)
+			return -EINVAL;
 	}
 
 	mutex_lock(&priv->mutex);
@@ -4028,9 +3954,6 @@
 	priv->pci_dev = pdev;
 	priv->inta_mask = CSR_INI_SET_MASK;
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-	atomic_set(&priv->restrict_refcnt, 0);
-#endif
 	if (iwl_alloc_traffic_mem(priv))
 		IWL_ERR(priv, "Not enough memory to generate traffic log\n");
 
@@ -4099,9 +4022,8 @@
 	}
 	/* MAC Address location in EEPROM same for 3945/4965 */
 	eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-	memcpy(priv->mac_addr, eeprom->mac_address, ETH_ALEN);
-	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr);
-	SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
+	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
+	SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
 
 	/***********************
 	 * 5. Setup HW Constants
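
The two 3945-local helpers removed above implement the "extended beacon time" encoding described in their comment (high byte = number of whole beacon intervals, low 24 bits = microseconds within the current interval); the driver now uses the shared iwl_usecs_to_beacons()/iwl_add_beacon_time() instead. A standalone demonstration of the encoding, with the constants taken from the deleted code:

#include <stdint.h>
#include <stdio.h>

#define BEACON_TIME_MASK_LOW	0x00FFFFFF
#define BEACON_TIME_MASK_HIGH	0xFF000000
#define TIME_UNIT		1024

static uint32_t usecs_to_beacons(uint32_t usec, uint32_t beacon_interval)
{
	uint32_t interval = beacon_interval * TIME_UNIT;
	uint32_t quot, rem;

	if (!interval || !usec)
		return 0;

	quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
	rem = (usec % interval) & BEACON_TIME_MASK_LOW;

	return (quot << 24) + rem;
}

int main(void)
{
	/* 250000 us with a 100 TU beacon interval: 2 full beacons + 45200 us */
	uint32_t v = usecs_to_beacons(250000, 100);

	printf("beacons=%u, remainder=%u us\n", v >> 24, v & BEACON_TIME_MASK_LOW);
	return 0;
}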
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index 902e95f..6061967 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -670,20 +670,24 @@
 }
 
 static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
-				    enum tx_power_setting type, int dbm)
+				    enum nl80211_tx_power_setting type, int mbm)
 {
 	struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
 	int ret;
 
 	switch (type) {
-	case TX_POWER_AUTOMATIC:
+	case NL80211_TX_POWER_AUTOMATIC:
 		return 0;
-	case TX_POWER_FIXED:
+	case NL80211_TX_POWER_FIXED:
+		if (mbm < 0 || (mbm % 100))
+			return -EOPNOTSUPP;
+
 		if (!test_bit(IWM_STATUS_READY, &iwm->status))
 			return 0;
 
 		ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
-					      CFG_TX_PWR_LIMIT_USR, dbm * 2);
+					      CFG_TX_PWR_LIMIT_USR,
+					      MBM_TO_DBM(mbm) * 2);
 		if (ret < 0)
 			return ret;
 
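
In the cfg80211.c change above, the set_txpower callback now receives mBm (hundredths of a dBm) per the nl80211 API, so the value is validated and converted with MBM_TO_DBM() before being handed to the firmware as before. A small standalone illustration; the macro is restated locally for the example:

#include <stdio.h>

#define MBM_TO_DBM(mbm)	((mbm) / 100)

int main(void)
{
	int mbm = 1500;			/* 15 dBm requested via nl80211 */

	if (mbm < 0 || (mbm % 100))
		return 1;		/* fractional dBm is not supported here */

	/* the driver then feeds dBm * 2 to its firmware config, as above */
	printf("%d mBm -> %d dBm -> %d\n", mbm, MBM_TO_DBM(mbm), MBM_TO_DBM(mbm) * 2);
	return 0;
}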
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 7e16bcf..6421689 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -56,7 +56,7 @@
 
 struct iwm_umac_cmd_reset {
 	__le32 flags;
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_PARAM_TBL_ORD_FIX    0x0
 #define UMAC_PARAM_TBL_ORD_VAR    0x1
@@ -220,37 +220,37 @@
 	__le16 tbl;
 	__le16 key;
 	__le32 value;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_cmd_set_param_var {
 	__le16 tbl;
 	__le16 key;
 	__le16 len;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_cmd_get_param {
 	__le16 tbl;
 	__le16 key;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_cmd_get_param_resp {
 	__le16 tbl;
 	__le16 key;
 	__le16 len;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_cmd_eeprom_proxy_hdr {
 	__le32 type;
 	__le32 offset;
 	__le32 len;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_cmd_eeprom_proxy {
 	struct iwm_umac_cmd_eeprom_proxy_hdr hdr;
 	u8 buf[0];
-} __attribute__ ((packed));
+} __packed;
 
 #define IWM_UMAC_CMD_EEPROM_TYPE_READ       0x1
 #define IWM_UMAC_CMD_EEPROM_TYPE_WRITE      0x2
@@ -267,13 +267,13 @@
 	u8 reserved;
 	u8 flags;
 	__le32 channels_mask;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_cmd_get_channel_list {
 	__le16 count;
 	__le16 reserved;
 	struct iwm_umac_channel_info ch[0];
-} __attribute__ ((packed));
+} __packed;
 
 
 /* UMAC WiFi interface commands */
@@ -304,7 +304,7 @@
 	u8 ssid_len;
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
 	u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_cmd_scan_request {
 	struct iwm_umac_wifi_if hdr;
@@ -314,7 +314,7 @@
 	u8 timeout; /* In seconds */
 	u8 reserved;
 	struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX];
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_CIPHER_TYPE_NONE		0xFF
 #define UMAC_CIPHER_TYPE_USE_GROUPCAST	0x00
@@ -357,7 +357,7 @@
 	u8 ucast_cipher;
 	u8 mcast_cipher;
 	u8 flags;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_ibss {
 	u8 beacon_interval;	/* in millisecond */
@@ -366,7 +366,7 @@
 	u8 band;
 	u8 channel;
 	u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_MODE_BSS	0
 #define UMAC_MODE_IBSS	1
@@ -385,13 +385,13 @@
 	__le16 flags;
 	u8 wireless_mode;
 	u8 bss_num;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_invalidate_profile {
 	struct iwm_umac_wifi_if hdr;
 	u8 reason;
 	u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 /* Encryption key commands */
 struct iwm_umac_key_wep40 {
@@ -400,7 +400,7 @@
 	u8 key[WLAN_KEY_LEN_WEP40];
 	u8 static_key;
 	u8 reserved[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_key_wep104 {
 	struct iwm_umac_wifi_if hdr;
@@ -408,7 +408,7 @@
 	u8 key[WLAN_KEY_LEN_WEP104];
 	u8 static_key;
 	u8 reserved[2];
-} __attribute__ ((packed));
+} __packed;
 
 #define IWM_TKIP_KEY_SIZE 16
 #define IWM_TKIP_MIC_SIZE 8
@@ -420,7 +420,7 @@
 	u8 tkip_key[IWM_TKIP_KEY_SIZE];
 	u8 mic_rx_key[IWM_TKIP_MIC_SIZE];
 	u8 mic_tx_key[IWM_TKIP_MIC_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_key_ccmp {
 	struct iwm_umac_wifi_if hdr;
@@ -428,27 +428,27 @@
 	u8 iv_count[6];
 	u8 reserved[2];
 	u8 key[WLAN_KEY_LEN_CCMP];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_key_remove {
 	struct iwm_umac_wifi_if hdr;
 	struct iwm_umac_key_hdr key_hdr;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_tx_key_id {
 	struct iwm_umac_wifi_if hdr;
 	u8 key_idx;
 	u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_pwr_trigger {
 	struct iwm_umac_wifi_if hdr;
 	__le32 reseved;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_cmd_stats_req {
 	__le32 flags;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_cmd_stop_resume_tx {
 	u8 flags;
@@ -456,7 +456,7 @@
 	__le16 stop_resume_tid_msk;
 	__le16 last_seq_num[IWM_UMAC_TID_NR];
 	u16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 #define IWM_CMD_PMKID_ADD   1
 #define IWM_CMD_PMKID_DEL   2
@@ -468,7 +468,7 @@
 	u8 bssid[ETH_ALEN];
 	__le16 reserved;
 	u8 pmkid[WLAN_PMKID_LEN];
-} __attribute__ ((packed));
+} __packed;
 
 /* LMAC commands */
 int iwm_read_mac(struct iwm_priv *iwm, u8 *mac);
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
index 9531b18..907ac89 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ b/drivers/net/wireless/iwmc3200wifi/hal.c
@@ -54,7 +54,7 @@
 *   LMAC. If you look at LMAC commands you'll see that they
  *   are actually regular iwlwifi target commands encapsulated
  *   into a special UMAC command called UMAC passthrough.
- *   This is due to the fact the the host talks exclusively
+ *   This is due to the fact the host talks exclusively
  *   to the UMAC and so there needs to be a special UMAC
  *   command for talking to the LMAC.
 *   This is how a wifi command is laid out:
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 13266c3..51d7efa 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -162,7 +162,7 @@
 	u8 mac[ETH_ALEN];
 	u8 key_idx;
 	u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_key {
 	struct iwm_umac_key_hdr hdr;
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
index a855a99..5ddcdf8 100644
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -43,7 +43,7 @@
 	u8 id;
 	u8 flags;
 	__le16 seq_num;
-} __attribute__ ((packed));
+} __packed;
 
 /* LMAC commands */
 #define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK  0x1
@@ -54,23 +54,23 @@
 	__le32 send_res; /* 1 for sending back results */
 	__le32 apply_res; /* 1 for applying calibration results to HW */
 	__le32 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_lmac_cal_cfg_status {
 	struct iwm_lmac_cal_cfg_elt init;
 	struct iwm_lmac_cal_cfg_elt periodic;
 	__le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_lmac_cal_cfg_cmd {
 	struct iwm_lmac_cal_cfg_status ucode_cfg;
 	struct iwm_lmac_cal_cfg_status driver_cfg;
 	__le32 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_lmac_cal_cfg_resp {
 	__le32 status;
-} __attribute__ ((packed));
+} __packed;
 
 #define IWM_CARD_STATE_SW_HW_ENABLED	0x00
 #define IWM_CARD_STATE_HW_DISABLED	0x01
@@ -80,7 +80,7 @@
 
 struct iwm_lmac_card_state {
 	__le32 flags;
-} __attribute__ ((packed));
+} __packed;
 
 /**
  * COEX_PRIORITY_TABLE_CMD
@@ -131,7 +131,7 @@
 	u8 win_med_prio;
 	u8 reserved;
 	u8 flags;
-} __attribute__ ((packed));
+} __packed;
 
 #define COEX_FLAGS_STA_TABLE_VALID_MSK		0x1
 #define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK	0x4
@@ -142,7 +142,7 @@
 	u8 flags;
 	u8 reserved[3];
 	struct coex_event sta_prio[COEX_EVENTS_NUM];
-} __attribute__ ((packed));
+} __packed;
 
 /* Coexistence definitions
  *
@@ -192,7 +192,7 @@
 	u32 exit_threshold;
 	u32 reserved;
 	u32 entry_threshold;
-} __attribute__ ((packed));
+} __packed;
 
 
 /* LMAC OP CODES */
@@ -428,7 +428,7 @@
 	u8 first_grp;
 	u8 grp_num;
 	u8 all_data_valid;
-} __attribute__ ((packed));
+} __packed;
 
 #define IWM_LMAC_CALIB_FREQ_GROUPS_NR	7
 #define IWM_CALIB_FREQ_GROUPS_NR	5
@@ -437,20 +437,20 @@
 struct iwm_calib_rxiq_entry {
 	u16 ptam_postdist_ars;
 	u16 ptam_postdist_arc;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_calib_rxiq_group {
 	struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_lmac_calib_rxiq {
 	struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_calib_rxiq {
 	struct iwm_lmac_calib_hdr hdr;
 	struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR];
-} __attribute__ ((packed));
+} __packed;
 
 #define LMAC_STA_ID_SEED	0x0f
 #define LMAC_STA_ID_POS		0
@@ -463,7 +463,7 @@
 	u8 pa_integ_res_A[3];
 	u8 pa_integ_res_B[3];
 	u8 pa_integ_res_C[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_lmac_tx_resp {
 	u8 frame_cnt; /* 1-no aggregation, greater then 1 - aggregation */
@@ -479,6 +479,6 @@
 	u8 ra_tid;
 	__le16 frame_ctl;
 	__le32 status;
-} __attribute__ ((packed));
+} __packed;
 
 #endif
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index e1184de..c02fced 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -321,14 +321,14 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	ticket_node->ticket = kzalloc(sizeof(struct iwm_rx_ticket), GFP_KERNEL);
+	ticket_node->ticket = kmemdup(ticket, sizeof(struct iwm_rx_ticket),
+				      GFP_KERNEL);
 	if (!ticket_node->ticket) {
 		IWM_ERR(iwm, "Couldn't allocate RX ticket\n");
 		kfree(ticket_node);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	memcpy(ticket_node->ticket, ticket, sizeof(struct iwm_rx_ticket));
 	INIT_LIST_HEAD(&ticket_node->node);
 
 	return ticket_node;
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index 0cbba3e..4a137d3 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -42,19 +42,19 @@
 struct iwm_udma_in_hdr {
 	__le32 cmd;
 	__le32 size;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_udma_out_nonwifi_hdr {
 	__le32 cmd;
 	__le32 addr;
 	__le32 op1_sz;
 	__le32 op2;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_udma_out_wifi_hdr {
 	__le32 cmd;
 	__le32 meta_data;
-} __attribute__ ((packed));
+} __packed;
 
 /* Sequence numbering */
 #define UMAC_WIFI_SEQ_NUM_BASE		1
@@ -408,12 +408,12 @@
 	__le16 flags;
 	u8 payload_offset; /* includes: MAC header, pad, IV */
 	u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_rx_mpdu_hdr {
 	__le16 len;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /* UMAC SW WIFI API */
 
@@ -421,31 +421,31 @@
 	u8 cmd;
 	u8 flags;
 	__le16 seq_num;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_fw_cmd_hdr {
 	__le32 meta_data;
 	struct iwm_dev_cmd_hdr cmd;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_wifi_out_hdr {
 	struct iwm_udma_out_wifi_hdr hw_hdr;
 	struct iwm_umac_fw_cmd_hdr sw_hdr;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_nonwifi_out_hdr {
 	struct iwm_udma_out_nonwifi_hdr hw_hdr;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_wifi_in_hdr {
 	struct iwm_udma_in_hdr hw_hdr;
 	struct iwm_umac_fw_cmd_hdr sw_hdr;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_nonwifi_in_hdr {
 	struct iwm_udma_in_hdr hw_hdr;
 	__le32 time_stamp;
-} __attribute__ ((packed));
+} __packed;
 
 #define IWM_UMAC_PAGE_SIZE	0x200
 
@@ -521,7 +521,7 @@
 	u8 status;
 	u8 flags;
 	__le16 buf_size;
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_ROAM_REASON_FIRST_SELECTION	0x1
 #define UMAC_ROAM_REASON_AP_DEAUTH		0x2
@@ -535,7 +535,7 @@
 	__le32 roam_reason;
 	u8 bssid[ETH_ALEN];
 	u8 reserved[2];
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_ASSOC_COMPLETE_SUCCESS		0x0
 #define UMAC_ASSOC_COMPLETE_FAILURE		0x1
@@ -546,7 +546,7 @@
 	u8 bssid[ETH_ALEN];
 	u8 band;
 	u8 channel;
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT	0x0
 #define UMAC_PROFILE_INVALID_ROAM_TIMEOUT	0x1
@@ -556,7 +556,7 @@
 struct iwm_umac_notif_profile_invalidate {
 	struct iwm_umac_notif_wifi_if mlme_hdr;
 	__le32 reason;
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_SCAN_RESULT_SUCCESS  0x0
 #define UMAC_SCAN_RESULT_ABORTED  0x1
@@ -568,7 +568,7 @@
 	__le32 type;
 	__le32 result;
 	u8 seq_num;
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_OPCODE_ADD_MODIFY	0x0
 #define UMAC_OPCODE_REMOVE	0x1
@@ -582,7 +582,7 @@
 	u8 mac_addr[ETH_ALEN];
 	u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */
 	u8 flags;
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_BAND_2GHZ 0
 #define UMAC_BAND_5GHZ 1
@@ -601,7 +601,7 @@
 	s8 rssi;
 	u8 reserved;
 	u8 frame_buf[1];
-} __attribute__ ((packed));
+} __packed;
 
 #define IWM_BSS_REMOVE_INDEX_MSK           0x0fff
 #define IWM_BSS_REMOVE_FLAGS_MSK           0xfc00
@@ -614,13 +614,13 @@
 	struct iwm_umac_notif_wifi_if mlme_hdr;
 	__le32 count;
 	__le16 entries[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_notif_mgt_frame {
 	struct iwm_umac_notif_wifi_if mlme_hdr;
 	__le16 len;
 	u8 frame[1];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_notif_alive {
 	struct iwm_umac_wifi_in_hdr hdr;
@@ -630,13 +630,13 @@
 	__le16 reserved2;
 	__le16 page_grp_count;
 	__le32 page_grp_state[IWM_MACS_OUT_GROUPS];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_notif_init_complete {
 	struct iwm_umac_wifi_in_hdr hdr;
 	__le16 status;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /* error categories */
 enum {
@@ -667,12 +667,12 @@
 	__le32 dbm_buf_end;
 	__le32 dbm_buf_write_ptr;
 	__le32 dbm_buf_cycle_cnt;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_notif_error {
 	struct iwm_umac_wifi_in_hdr hdr;
 	struct iwm_fw_error_hdr err;
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS	0
 #define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED	0xff
@@ -687,20 +687,20 @@
 	struct iwm_umac_wifi_in_hdr hdr;
 	__le32 changes;
 	__le32 grp_info[IWM_MACS_OUT_GROUPS];
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_notif_wifi_status {
 	struct iwm_umac_wifi_in_hdr hdr;
 	__le16 status;
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct iwm_umac_notif_rx_ticket {
 	struct iwm_umac_wifi_in_hdr hdr;
 	u8 num_tickets;
 	u8 reserved[3];
 	struct iwm_rx_ticket tickets[1];
-} __attribute__ ((packed));
+} __packed;
 
 /* Tx/Rx rates window (number of max of last update window per second) */
 #define UMAC_NTF_RATE_SAMPLE_NR	4
@@ -758,7 +758,7 @@
 	__le32 roam_unassoc;
 	__le32 roam_deauth;
 	__le32 roam_ap_loadblance;
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_STOP_TX_FLAG    0x1
 #define UMAC_RESUME_TX_FLAG  0x2
@@ -770,7 +770,7 @@
 	u8 flags; /* UMAC_*_TX_FLAG_* */
 	u8 sta_id;
 	__le16 stop_resume_tid_msk; /* tid bitmask */
-} __attribute__ ((packed));
+} __packed;
 
 #define UMAC_MAX_NUM_PMKIDS 4
 
@@ -779,7 +779,7 @@
 	u8 oid;
 	u8 flags;
 	__le16 buf_size;
-} __attribute__ ((packed));
+} __packed;
 
 #define IWM_SEQ_NUM_HOST_MSK	0x0000
 #define IWM_SEQ_NUM_UMAC_MSK	0x4000
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index 45e870e..f7d01bf 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -1,4 +1,3 @@
-libertas-y += assoc.o
 libertas-y += cfg.o
 libertas-y += cmd.o
 libertas-y += cmdresp.o
@@ -6,9 +5,7 @@
 libertas-y += ethtool.o
 libertas-y += main.o
 libertas-y += rx.o
-libertas-y += scan.o
 libertas-y += tx.o
-libertas-y += wext.o
 libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
 
 usb8xxx-objs += if_usb.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
deleted file mode 100644
index aa06070e..0000000
--- a/drivers/net/wireless/libertas/assoc.c
+++ /dev/null
@@ -1,2264 +0,0 @@
-/* Copyright (C) 2006, Red Hat, Inc. */
-
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/ieee80211.h>
-#include <linux/if_arp.h>
-#include <linux/slab.h>
-#include <net/lib80211.h>
-
-#include "assoc.h"
-#include "decl.h"
-#include "host.h"
-#include "scan.h"
-#include "cmd.h"
-
-static const u8 bssid_any[ETH_ALEN]  __attribute__ ((aligned (2))) =
-	{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-static const u8 bssid_off[ETH_ALEN]  __attribute__ ((aligned (2))) =
-	{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-
-/* The firmware needs the following bits masked out of the beacon-derived
- * capability field when associating/joining to a BSS:
- *  9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused)
- */
-#define CAPINFO_MASK	(~(0xda00))
-
-/**
- * 802.11b/g supported bitrates (in 500Kb/s units)
- */
-u8 lbs_bg_rates[MAX_RATES] =
-    { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
-0x00, 0x00 };
-
-
-static int assoc_helper_wep_keys(struct lbs_private *priv,
-		struct assoc_request *assoc_req);
-
-/**
- *  @brief This function finds common rates between rates and card rates.
- *
- * It will fill common rates in rates as output if found.
- *
- * NOTE: Setting the MSB of the basic rates need to be taken
- *   care, either before or after calling this function
- *
- *  @param priv     A pointer to struct lbs_private structure
- *  @param rates       the buffer which keeps input and output
- *  @param rates_size  the size of rates buffer; new size of buffer on return,
- *                     which will be less than or equal to original rates_size
- *
- *  @return            0 on success, or -1 on error
- */
-static int get_common_rates(struct lbs_private *priv,
-	u8 *rates,
-	u16 *rates_size)
-{
-	int i, j;
-	u8 intersection[MAX_RATES];
-	u16 intersection_size;
-	u16 num_rates = 0;
-
-	intersection_size = min_t(u16, *rates_size, ARRAY_SIZE(intersection));
-
-	/* Allow each rate from 'rates' that is supported by the hardware */
-	for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && lbs_bg_rates[i]; i++) {
-		for (j = 0; j < intersection_size && rates[j]; j++) {
-			if (rates[j] == lbs_bg_rates[i])
-				intersection[num_rates++] = rates[j];
-		}
-	}
-
-	lbs_deb_hex(LBS_DEB_JOIN, "AP rates    ", rates, *rates_size);
-	lbs_deb_hex(LBS_DEB_JOIN, "card rates  ", lbs_bg_rates,
-			ARRAY_SIZE(lbs_bg_rates));
-	lbs_deb_hex(LBS_DEB_JOIN, "common rates", intersection, num_rates);
-	lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
-
-	if (!priv->enablehwauto) {
-		for (i = 0; i < num_rates; i++) {
-			if (intersection[i] == priv->cur_rate)
-				goto done;
-		}
-		lbs_pr_alert("Previously set fixed data rate %#x isn't "
-		       "compatible with the network.\n", priv->cur_rate);
-		return -1;
-	}
-
-done:
-	memset(rates, 0, *rates_size);
-	*rates_size = num_rates;
-	memcpy(rates, intersection, num_rates);
-	return 0;
-}
-
-
-/**
- *  @brief Sets the MSB on basic rates as the firmware requires
- *
- * Scan through an array and set the MSB for basic data rates.
- *
- *  @param rates     buffer of data rates
- *  @param len       size of buffer
- */
-static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
-{
-	int i;
-
-	for (i = 0; i < len; i++) {
-		if (rates[i] == 0x02 || rates[i] == 0x04 ||
-		    rates[i] == 0x0b || rates[i] == 0x16)
-			rates[i] |= 0x80;
-	}
-}
-
-
-static u8 iw_auth_to_ieee_auth(u8 auth)
-{
-	if (auth == IW_AUTH_ALG_OPEN_SYSTEM)
-		return 0x00;
-	else if (auth == IW_AUTH_ALG_SHARED_KEY)
-		return 0x01;
-	else if (auth == IW_AUTH_ALG_LEAP)
-		return 0x80;
-
-	lbs_deb_join("%s: invalid auth alg 0x%X\n", __func__, auth);
-	return 0;
-}
-
-/**
- *  @brief This function prepares the authenticate command.  AUTHENTICATE only
- *  sets the authentication suite for future associations, as the firmware
- *  handles authentication internally during the ASSOCIATE command.
- *
- *  @param priv      A pointer to struct lbs_private structure
- *  @param bssid     The peer BSSID with which to authenticate
- *  @param auth      The authentication mode to use (from wireless.h)
- *
- *  @return         0 or -1
- */
-static int lbs_set_authentication(struct lbs_private *priv, u8 bssid[6], u8 auth)
-{
-	struct cmd_ds_802_11_authenticate cmd;
-	int ret = -1;
-
-	lbs_deb_enter(LBS_DEB_JOIN);
-
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-	memcpy(cmd.bssid, bssid, ETH_ALEN);
-
-	cmd.authtype = iw_auth_to_ieee_auth(auth);
-
-	lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n", bssid, cmd.authtype);
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd);
-
-	lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
-	return ret;
-}
-
-
-int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
-			   struct assoc_request *assoc)
-{
-	struct cmd_ds_802_11_set_wep cmd;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	cmd.action = cpu_to_le16(cmd_action);
-
-	if (cmd_action == CMD_ACT_ADD) {
-		int i;
-
-		/* default tx key index */
-		cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
-					   CMD_WEP_KEY_INDEX_MASK);
-
-		/* Copy key types and material to host command structure */
-		for (i = 0; i < 4; i++) {
-			struct enc_key *pkey = &assoc->wep_keys[i];
-
-			switch (pkey->len) {
-			case KEY_LEN_WEP_40:
-				cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
-				memmove(cmd.keymaterial[i], pkey->key, pkey->len);
-				lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
-				break;
-			case KEY_LEN_WEP_104:
-				cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
-				memmove(cmd.keymaterial[i], pkey->key, pkey->len);
-				lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
-				break;
-			case 0:
-				break;
-			default:
-				lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
-					    i, pkey->len);
-				ret = -1;
-				goto done;
-				break;
-			}
-		}
-	} else if (cmd_action == CMD_ACT_REMOVE) {
-		/* ACT_REMOVE clears _all_ WEP keys */
-
-		/* default tx key index */
-		cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
-					   CMD_WEP_KEY_INDEX_MASK);
-		lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
-	}
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
-done:
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-
-int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
-			      uint16_t *enable)
-{
-	struct cmd_ds_802_11_enable_rsn cmd;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-	cmd.action = cpu_to_le16(cmd_action);
-
-	if (cmd_action == CMD_ACT_GET)
-		cmd.enable = 0;
-	else {
-		if (*enable)
-			cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
-		else
-			cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
-		lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
-	}
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
-	if (!ret && cmd_action == CMD_ACT_GET)
-		*enable = le16_to_cpu(cmd.enable);
-
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-
-static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
-		struct enc_key *key)
-{
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	if (key->flags & KEY_INFO_WPA_ENABLED)
-		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
-	if (key->flags & KEY_INFO_WPA_UNICAST)
-		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
-	if (key->flags & KEY_INFO_WPA_MCAST)
-		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
-
-	keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
-	keyparam->keytypeid = cpu_to_le16(key->type);
-	keyparam->keylen = cpu_to_le16(key->len);
-	memcpy(keyparam->key, key->key, key->len);
-
-	/* Length field doesn't include the {type,length} header */
-	keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
-	lbs_deb_leave(LBS_DEB_CMD);
-}
-
-int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
-				struct assoc_request *assoc)
-{
-	struct cmd_ds_802_11_key_material cmd;
-	int ret = 0;
-	int index = 0;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	cmd.action = cpu_to_le16(cmd_action);
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	if (cmd_action == CMD_ACT_GET) {
-		cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_header) + 2);
-	} else {
-		memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
-
-		if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
-			set_one_wpa_key(&cmd.keyParamSet[index],
-					&assoc->wpa_unicast_key);
-			index++;
-		}
-
-		if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
-			set_one_wpa_key(&cmd.keyParamSet[index],
-					&assoc->wpa_mcast_key);
-			index++;
-		}
-
-		/* The common header and as many keys as we included */
-		cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
-						    keyParamSet[index]));
-	}
-	ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
-	/* Copy the returned key to driver private data */
-	if (!ret && cmd_action == CMD_ACT_GET) {
-		void *buf_ptr = cmd.keyParamSet;
-		void *resp_end = &(&cmd)[1];
-
-		while (buf_ptr < resp_end) {
-			struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
-			struct enc_key *key;
-			uint16_t param_set_len = le16_to_cpu(keyparam->length);
-			uint16_t key_len = le16_to_cpu(keyparam->keylen);
-			uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
-			uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
-			void *end;
-
-			end = (void *)keyparam + sizeof(keyparam->type)
-				+ sizeof(keyparam->length) + param_set_len;
-
-			/* Make sure we don't access past the end of the IEs */
-			if (end > resp_end)
-				break;
-
-			if (key_flags & KEY_INFO_WPA_UNICAST)
-				key = &priv->wpa_unicast_key;
-			else if (key_flags & KEY_INFO_WPA_MCAST)
-				key = &priv->wpa_mcast_key;
-			else
-				break;
-
-			/* Copy returned key into driver */
-			memset(key, 0, sizeof(struct enc_key));
-			if (key_len > sizeof(key->key))
-				break;
-			key->type = key_type;
-			key->flags = key_flags;
-			key->len = key_len;
-			memcpy(key->key, keyparam->key, key->len);
-
-			buf_ptr = end + 1;
-		}
-	}
-
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-
-static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
-{
-/*		Bit  	Rate
-*		15:13 Reserved
-*		12    54 Mbps
-*		11    48 Mbps
-*		10    36 Mbps
-*		9     24 Mbps
-*		8     18 Mbps
-*		7     12 Mbps
-*		6     9 Mbps
-*		5     6 Mbps
-*		4     Reserved
-*		3     11 Mbps
-*		2     5.5 Mbps
-*		1     2 Mbps
-*		0     1 Mbps
-**/
-
-	uint16_t ratemask;
-	int i = lbs_data_rate_to_fw_index(rate);
-	if (lower_rates_ok)
-		ratemask = (0x1fef >> (12 - i));
-	else
-		ratemask = (1 << i);
-	return cpu_to_le16(ratemask);
-}
-
-int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
-				      uint16_t cmd_action)
-{
-	struct cmd_ds_802_11_rate_adapt_rateset cmd;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	if (!priv->cur_rate && !priv->enablehwauto)
-		return -EINVAL;
-
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	cmd.action = cpu_to_le16(cmd_action);
-	cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
-	cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
-	ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
-	if (!ret && cmd_action == CMD_ACT_GET)
-		priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
-
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief Set the data rate
- *
- *  @param priv    	A pointer to struct lbs_private structure
- *  @param rate  	The desired data rate, or 0 to clear a locked rate
- *
- *  @return 	   	0 on success, error on failure
- */
-int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
-{
-	struct cmd_ds_802_11_data_rate cmd;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	if (rate > 0) {
-		cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
-		cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
-		if (cmd.rates[0] == 0) {
-			lbs_deb_cmd("DATA_RATE: invalid requested rate of"
-				" 0x%02X\n", rate);
-			ret = 0;
-			goto out;
-		}
-		lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
-	} else {
-		cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
-		lbs_deb_cmd("DATA_RATE: setting auto\n");
-	}
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
-	if (ret)
-		goto out;
-
-	lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof(cmd));
-
-	/* FIXME: get actual rates FW can do if this command actually returns
-	 * all data rates supported.
-	 */
-	priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
-	lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
-
-out:
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-
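A usage sketch for lbs_set_data_rate() (illustrative; it assumes the rate argument uses the driver's raw 802.11 rate encoding in 500 kbps units, e.g. 0x16 for 11 Mbps):

	/* Lock the TX rate, then return to automatic rate selection. */
	ret = lbs_set_data_rate(priv, 0x16);	/* fixed 11 Mbps (assumed units) */
	if (!ret)
		ret = lbs_set_data_rate(priv, 0);	/* back to auto */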
-
-int lbs_cmd_802_11_rssi(struct lbs_private *priv,
-				struct cmd_ds_command *cmd)
-{
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	cmd->command = cpu_to_le16(CMD_802_11_RSSI);
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) +
-		sizeof(struct cmd_header));
-	cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
-
-	/* reset Beacon SNR/NF/RSSI values */
-	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
-	priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
-	priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
-	priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
-	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
-	priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
-int lbs_ret_802_11_rssi(struct lbs_private *priv,
-				struct cmd_ds_command *resp)
-{
-	struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	/* store the non average value */
-	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
-	priv->NF[TYPE_BEACON][TYPE_NOAVG] =
-		get_unaligned_le16(&rssirsp->noisefloor);
-
-	priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
-	priv->NF[TYPE_BEACON][TYPE_AVG] =
-		get_unaligned_le16(&rssirsp->avgnoisefloor);
-
-	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
-	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
-		     priv->NF[TYPE_BEACON][TYPE_NOAVG]);
-
-	priv->RSSI[TYPE_BEACON][TYPE_AVG] =
-	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
-		     priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
-
-	lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
-	       priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
-	       priv->RSSI[TYPE_BEACON][TYPE_AVG]);
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
-
-int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
-				struct cmd_ds_command *cmd,
-				u16 cmd_action)
-{
-	struct cmd_ds_802_11_beacon_control
-		*bcn_ctrl = &cmd->params.bcn_ctrl;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	cmd->size =
-	    cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
-			     + sizeof(struct cmd_header));
-	cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
-
-	bcn_ctrl->action = cpu_to_le16(cmd_action);
-	bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
-	bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
-int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
-					struct cmd_ds_command *resp)
-{
-	struct cmd_ds_802_11_beacon_control *bcn_ctrl =
-	    &resp->params.bcn_ctrl;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	if (bcn_ctrl->action == CMD_ACT_GET) {
-		priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
-		priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
-	}
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
-
-
-static int lbs_assoc_post(struct lbs_private *priv,
-			  struct cmd_ds_802_11_associate_response *resp)
-{
-	int ret = 0;
-	union iwreq_data wrqu;
-	struct bss_descriptor *bss;
-	u16 status_code;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	if (!priv->in_progress_assoc_req) {
-		lbs_deb_assoc("ASSOC_RESP: no in-progress assoc request\n");
-		ret = -1;
-		goto done;
-	}
-	bss = &priv->in_progress_assoc_req->bss;
-
-	/*
-	 * Older FW versions map the IEEE 802.11 Status Code in the association
-	 * response to the following values returned in resp->statuscode:
-	 *
-	 *    IEEE Status Code                Marvell Status Code
-	 *    0                       ->      0x0000 ASSOC_RESULT_SUCCESS
-	 *    13                      ->      0x0004 ASSOC_RESULT_AUTH_REFUSED
-	 *    14                      ->      0x0004 ASSOC_RESULT_AUTH_REFUSED
-	 *    15                      ->      0x0004 ASSOC_RESULT_AUTH_REFUSED
-	 *    16                      ->      0x0004 ASSOC_RESULT_AUTH_REFUSED
-	 *    others                  ->      0x0003 ASSOC_RESULT_REFUSED
-	 *
-	 * Other response codes:
-	 *    0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused)
-	 *    0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for
-	 *                                    association response from the AP)
-	 */
-
-	status_code = le16_to_cpu(resp->statuscode);
-	if (priv->fwrelease < 0x09000000) {
-		switch (status_code) {
-		case 0x00:
-			break;
-		case 0x01:
-			lbs_deb_assoc("ASSOC_RESP: invalid parameters\n");
-			break;
-		case 0x02:
-			lbs_deb_assoc("ASSOC_RESP: internal timer "
-				"expired while waiting for the AP\n");
-			break;
-		case 0x03:
-			lbs_deb_assoc("ASSOC_RESP: association "
-				"refused by AP\n");
-			break;
-		case 0x04:
-			lbs_deb_assoc("ASSOC_RESP: authentication "
-				"refused by AP\n");
-			break;
-		default:
-			lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x "
-				" unknown\n", status_code);
-			break;
-		}
-	} else {
-		/* v9+ returns the AP's association response */
-		lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x\n", status_code);
-	}
-
-	if (status_code) {
-		lbs_mac_event_disconnected(priv);
-		ret = status_code;
-		goto done;
-	}
-
-	lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_RESP",
-		    (void *) (resp + sizeof (resp->hdr)),
-		    le16_to_cpu(resp->hdr.size) - sizeof (resp->hdr));
-
-	/* Send a Media Connected event, according to the Spec */
-	priv->connect_status = LBS_CONNECTED;
-
-	/* Update current SSID and BSSID */
-	memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
-	priv->curbssparams.ssid_len = bss->ssid_len;
-	memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
-
-	priv->SNR[TYPE_RXPD][TYPE_AVG] = 0;
-	priv->NF[TYPE_RXPD][TYPE_AVG] = 0;
-
-	memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
-	memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
-	priv->nextSNRNF = 0;
-	priv->numSNRNF = 0;
-
-	netif_carrier_on(priv->dev);
-	if (!priv->tx_pending_len)
-		netif_wake_queue(priv->dev);
-
-	memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
-	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-	wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
-
-done:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief This function prepares an association-class command.
- *
- *  @param priv      A pointer to struct lbs_private structure
- *  @param assoc_req The association request describing the BSS to associate
- *                   or reassociate with
- *  @param command   The actual command, either CMD_802_11_ASSOCIATE or
- *                   CMD_802_11_REASSOCIATE
- *
- *  @return         0 or -1
- */
-static int lbs_associate(struct lbs_private *priv,
-			 struct assoc_request *assoc_req,
-			 u16 command)
-{
-	struct cmd_ds_802_11_associate cmd;
-	int ret = 0;
-	struct bss_descriptor *bss = &assoc_req->bss;
-	u8 *pos = &(cmd.iebuf[0]);
-	u16 tmpcap, tmplen, tmpauth;
-	struct mrvl_ie_ssid_param_set *ssid;
-	struct mrvl_ie_ds_param_set *ds;
-	struct mrvl_ie_cf_param_set *cf;
-	struct mrvl_ie_rates_param_set *rates;
-	struct mrvl_ie_rsn_param_set *rsn;
-	struct mrvl_ie_auth_type *auth;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	BUG_ON((command != CMD_802_11_ASSOCIATE) &&
-		(command != CMD_802_11_REASSOCIATE));
-
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.hdr.command = cpu_to_le16(command);
-
-	/* Fill in static fields */
-	memcpy(cmd.bssid, bss->bssid, ETH_ALEN);
-	cmd.listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
-
-	/* Capability info */
-	tmpcap = (bss->capability & CAPINFO_MASK);
-	if (bss->mode == IW_MODE_INFRA)
-		tmpcap |= WLAN_CAPABILITY_ESS;
-	cmd.capability = cpu_to_le16(tmpcap);
-	lbs_deb_assoc("ASSOC_CMD: capability 0x%04x\n", tmpcap);
-
-	/* SSID */
-	ssid = (struct mrvl_ie_ssid_param_set *) pos;
-	ssid->header.type = cpu_to_le16(TLV_TYPE_SSID);
-	tmplen = bss->ssid_len;
-	ssid->header.len = cpu_to_le16(tmplen);
-	memcpy(ssid->ssid, bss->ssid, tmplen);
-	pos += sizeof(ssid->header) + tmplen;
-
-	ds = (struct mrvl_ie_ds_param_set *) pos;
-	ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
-	ds->header.len = cpu_to_le16(1);
-	ds->channel = bss->phy.ds.channel;
-	pos += sizeof(ds->header) + 1;
-
-	cf = (struct mrvl_ie_cf_param_set *) pos;
-	cf->header.type = cpu_to_le16(TLV_TYPE_CF);
-	tmplen = sizeof(*cf) - sizeof (cf->header);
-	cf->header.len = cpu_to_le16(tmplen);
-	/* IE payload should be zeroed, firmware fills it in for us */
-	pos += sizeof(*cf);
-
-	rates = (struct mrvl_ie_rates_param_set *) pos;
-	rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
-	tmplen = min_t(u16, ARRAY_SIZE(bss->rates), MAX_RATES);
-	memcpy(&rates->rates, &bss->rates, tmplen);
-	if (get_common_rates(priv, rates->rates, &tmplen)) {
-		ret = -1;
-		goto done;
-	}
-	pos += sizeof(rates->header) + tmplen;
-	rates->header.len = cpu_to_le16(tmplen);
-	lbs_deb_assoc("ASSOC_CMD: num rates %u\n", tmplen);
-
-	/* Copy the infra. association rates into Current BSS state structure */
-	memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
-	memcpy(&priv->curbssparams.rates, &rates->rates, tmplen);
-
-	/* Set MSB on basic rates as the firmware requires, but _after_
-	 * copying to current bss rates.
-	 */
-	lbs_set_basic_rate_flags(rates->rates, tmplen);
-
-	/* Firmware v9+ indicate authentication suites as a TLV */
-	if (priv->fwrelease >= 0x09000000) {
-		auth = (struct mrvl_ie_auth_type *) pos;
-		auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
-		auth->header.len = cpu_to_le16(2);
-		tmpauth = iw_auth_to_ieee_auth(priv->secinfo.auth_mode);
-		auth->auth = cpu_to_le16(tmpauth);
-		pos += sizeof(auth->header) + 2;
-
-		lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n",
-			bss->bssid, priv->secinfo.auth_mode);
-	}
-
-	/* WPA/WPA2 IEs */
-	if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
-		rsn = (struct mrvl_ie_rsn_param_set *) pos;
-		/* WPA_IE or WPA2_IE */
-		rsn->header.type = cpu_to_le16((u16) assoc_req->wpa_ie[0]);
-		tmplen = (u16) assoc_req->wpa_ie[1];
-		rsn->header.len = cpu_to_le16(tmplen);
-		memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen);
-		lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: WPA/RSN IE", (u8 *) rsn,
-			sizeof(rsn->header) + tmplen);
-		pos += sizeof(rsn->header) + tmplen;
-	}
-
-	cmd.hdr.size = cpu_to_le16((sizeof(cmd) - sizeof(cmd.iebuf)) +
-				   (u16)(pos - (u8 *) &cmd.iebuf));
-
-	/* update curbssparams */
-	priv->channel = bss->phy.ds.channel;
-
-	ret = lbs_cmd_with_response(priv, command, &cmd);
-	if (ret == 0) {
-		ret = lbs_assoc_post(priv,
-			(struct cmd_ds_802_11_associate_response *) &cmd);
-	}
-
-done:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
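The association command body above is a sequence of Marvell {type, len, value} TLVs appended at pos. A minimal sketch of that append pattern, as a hypothetical helper (assuming struct mrvl_ie_header is the two-field type/len header used by the param-set structures above), not part of the driver:

	static u8 *append_mrvl_tlv(u8 *pos, u16 type, const void *val, u16 len)
	{
		struct mrvl_ie_header *hdr = (struct mrvl_ie_header *) pos;

		hdr->type = cpu_to_le16(type);
		hdr->len = cpu_to_le16(len);
		memcpy(pos + sizeof(*hdr), val, len);
		return pos + sizeof(*hdr) + len;	/* next TLV starts here */
	}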
-/**
- *  @brief Associate to a specific BSS discovered in a scan
- *
- *  @param priv      A pointer to struct lbs_private structure
- *  @param assoc_req The association request describing the BSS to associate with
- *
- *  @return          0 on success, non-zero on failure
- */
-static int lbs_try_associate(struct lbs_private *priv,
-	struct assoc_request *assoc_req)
-{
-	int ret;
-	u8 preamble = RADIO_PREAMBLE_LONG;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	/* FW v9 and higher indicate authentication suites as a TLV in the
-	 * association command, not as a separate authentication command.
-	 */
-	if (priv->fwrelease < 0x09000000) {
-		ret = lbs_set_authentication(priv, assoc_req->bss.bssid,
-					     priv->secinfo.auth_mode);
-		if (ret)
-			goto out;
-	}
-
-	/* Use short preamble only when both the BSS and firmware support it */
-	if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
-		preamble = RADIO_PREAMBLE_SHORT;
-
-	ret = lbs_set_radio(priv, preamble, 1);
-	if (ret)
-		goto out;
-
-	ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE);
-	/* If the association fails with current auth mode, let's
-	 * try by changing the auth mode
-	 */
-	if ((priv->authtype_auto) &&
-			(ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) &&
-			(assoc_req->secinfo.wep_enabled) &&
-			(priv->connect_status != LBS_CONNECTED)) {
-		if (priv->secinfo.auth_mode == IW_AUTH_ALG_OPEN_SYSTEM)
-			priv->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
-		else
-			priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
-		if (!assoc_helper_wep_keys(priv, assoc_req))
-			ret = lbs_associate(priv, assoc_req,
-						CMD_802_11_ASSOCIATE);
-	}
-
-	if (ret)
-		ret = -1;
-out:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-static int lbs_adhoc_post(struct lbs_private *priv,
-			  struct cmd_ds_802_11_ad_hoc_result *resp)
-{
-	int ret = 0;
-	u16 command = le16_to_cpu(resp->hdr.command);
-	u16 result = le16_to_cpu(resp->hdr.result);
-	union iwreq_data wrqu;
-	struct bss_descriptor *bss;
-	DECLARE_SSID_BUF(ssid);
-
-	lbs_deb_enter(LBS_DEB_JOIN);
-
-	if (!priv->in_progress_assoc_req) {
-		lbs_deb_join("ADHOC_RESP: no in-progress association "
-			"request\n");
-		ret = -1;
-		goto done;
-	}
-	bss = &priv->in_progress_assoc_req->bss;
-
-	/*
-	 * Join result code 0 --> SUCCESS
-	 */
-	if (result) {
-		lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result);
-		if (priv->connect_status == LBS_CONNECTED)
-			lbs_mac_event_disconnected(priv);
-		ret = -1;
-		goto done;
-	}
-
-	/* Send a Media Connected event, according to the Spec */
-	priv->connect_status = LBS_CONNECTED;
-
-	if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
-		/* Update the created network descriptor with the new BSSID */
-		memcpy(bss->bssid, resp->bssid, ETH_ALEN);
-	}
-
-	/* Set the BSSID from the joined/started descriptor */
-	memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
-
-	/* Set the new SSID to current SSID */
-	memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
-	priv->curbssparams.ssid_len = bss->ssid_len;
-
-	netif_carrier_on(priv->dev);
-	if (!priv->tx_pending_len)
-		netif_wake_queue(priv->dev);
-
-	memset(&wrqu, 0, sizeof(wrqu));
-	memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
-	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-	wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
-
-	lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
-		     print_ssid(ssid, bss->ssid, bss->ssid_len),
-		     priv->curbssparams.bssid,
-		     priv->channel);
-
-done:
-	lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief Join an adhoc network found in a previous scan
- *
- *  @param priv         A pointer to struct lbs_private structure
- *  @param assoc_req    The association request describing the BSS to join
- *
- *  @return             0 on success, error on failure
- */
-static int lbs_adhoc_join(struct lbs_private *priv,
-	struct assoc_request *assoc_req)
-{
-	struct cmd_ds_802_11_ad_hoc_join cmd;
-	struct bss_descriptor *bss = &assoc_req->bss;
-	u8 preamble = RADIO_PREAMBLE_LONG;
-	DECLARE_SSID_BUF(ssid);
-	u16 ratesize = 0;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	lbs_deb_join("current SSID '%s', ssid length %u\n",
-		print_ssid(ssid, priv->curbssparams.ssid,
-		priv->curbssparams.ssid_len),
-		priv->curbssparams.ssid_len);
-	lbs_deb_join("requested ssid '%s', ssid length %u\n",
-		print_ssid(ssid, bss->ssid, bss->ssid_len),
-		bss->ssid_len);
-
-	/* check if the requested SSID is already joined */
-	if (priv->curbssparams.ssid_len &&
-	    !lbs_ssid_cmp(priv->curbssparams.ssid,
-			priv->curbssparams.ssid_len,
-			bss->ssid, bss->ssid_len) &&
-	    (priv->mode == IW_MODE_ADHOC) &&
-	    (priv->connect_status == LBS_CONNECTED)) {
-		union iwreq_data wrqu;
-
-		lbs_deb_join("ADHOC_J_CMD: New ad-hoc SSID is the same as "
-			"current, not attempting to re-join");
-
-		/* Send the re-association event though, because the association
-		 * request really was successful, even if just a null-op.
-		 */
-		memset(&wrqu, 0, sizeof(wrqu));
-		memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid,
-		       ETH_ALEN);
-		wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-		wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
-		goto out;
-	}
-
-	/* Use short preamble only when both the BSS and firmware support it */
-	if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
-		lbs_deb_join("AdhocJoin: Short preamble\n");
-		preamble = RADIO_PREAMBLE_SHORT;
-	}
-
-	ret = lbs_set_radio(priv, preamble, 1);
-	if (ret)
-		goto out;
-
-	lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel);
-	lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
-
-	priv->adhoccreate = 0;
-	priv->channel = bss->channel;
-
-	/* Build the join command */
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	cmd.bss.type = CMD_BSS_TYPE_IBSS;
-	cmd.bss.beaconperiod = cpu_to_le16(bss->beaconperiod);
-
-	memcpy(&cmd.bss.bssid, &bss->bssid, ETH_ALEN);
-	memcpy(&cmd.bss.ssid, &bss->ssid, bss->ssid_len);
-
-	memcpy(&cmd.bss.ds, &bss->phy.ds, sizeof(struct ieee_ie_ds_param_set));
-
-	memcpy(&cmd.bss.ibss, &bss->ss.ibss,
-	       sizeof(struct ieee_ie_ibss_param_set));
-
-	cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
-	lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
-	       bss->capability, CAPINFO_MASK);
-
-	/* information on BSSID descriptor passed to FW */
-	lbs_deb_join("ADHOC_J_CMD: BSSID = %pM, SSID = '%s'\n",
-			cmd.bss.bssid, cmd.bss.ssid);
-
-	/* Only v8 and below support setting these */
-	if (priv->fwrelease < 0x09000000) {
-		/* failtimeout */
-		cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
-		/* probedelay */
-		cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
-	}
-
-	/* Copy Data rates from the rates recorded in scan response */
-	memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
-	ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), ARRAY_SIZE (bss->rates));
-	memcpy(cmd.bss.rates, bss->rates, ratesize);
-	if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
-		lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
-		ret = -1;
-		goto out;
-	}
-
-	/* Copy the ad-hoc creation rates into Current BSS state structure */
-	memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
-	memcpy(&priv->curbssparams.rates, cmd.bss.rates, ratesize);
-
-	/* Set MSB on basic rates as the firmware requires, but _after_
-	 * copying to current bss rates.
-	 */
-	lbs_set_basic_rate_flags(cmd.bss.rates, ratesize);
-
-	cmd.bss.ibss.atimwindow = bss->atimwindow;
-
-	if (assoc_req->secinfo.wep_enabled) {
-		u16 tmp = le16_to_cpu(cmd.bss.capability);
-		tmp |= WLAN_CAPABILITY_PRIVACY;
-		cmd.bss.capability = cpu_to_le16(tmp);
-	}
-
-	if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
-		__le32 local_ps_mode = cpu_to_le32(LBS802_11POWERMODECAM);
-
-		/* wake up first */
-		ret = lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE,
-						   CMD_ACT_SET, 0, 0,
-						   &local_ps_mode);
-		if (ret) {
-			ret = -1;
-			goto out;
-		}
-	}
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
-	if (ret == 0) {
-		ret = lbs_adhoc_post(priv,
-				     (struct cmd_ds_802_11_ad_hoc_result *)&cmd);
-	}
-
-out:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief Start an Adhoc Network
- *
- *  @param priv         A pointer to struct lbs_private structure
- *  @param assoc_req    The association request describing the BSS to start
- *
- *  @return             0 on success, error on failure
- */
-static int lbs_adhoc_start(struct lbs_private *priv,
-	struct assoc_request *assoc_req)
-{
-	struct cmd_ds_802_11_ad_hoc_start cmd;
-	u8 preamble = RADIO_PREAMBLE_SHORT;
-	size_t ratesize = 0;
-	u16 tmpcap = 0;
-	int ret = 0;
-	DECLARE_SSID_BUF(ssid);
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	ret = lbs_set_radio(priv, preamble, 1);
-	if (ret)
-		goto out;
-
-	/* Build the start command */
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	memcpy(cmd.ssid, assoc_req->ssid, assoc_req->ssid_len);
-
-	lbs_deb_join("ADHOC_START: SSID '%s', ssid length %u\n",
-		print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len),
-		assoc_req->ssid_len);
-
-	cmd.bsstype = CMD_BSS_TYPE_IBSS;
-
-	if (priv->beacon_period == 0)
-		priv->beacon_period = MRVDRV_BEACON_INTERVAL;
-	cmd.beaconperiod = cpu_to_le16(priv->beacon_period);
-
-	WARN_ON(!assoc_req->channel);
-
-	/* set Physical parameter set */
-	cmd.ds.header.id = WLAN_EID_DS_PARAMS;
-	cmd.ds.header.len = 1;
-	cmd.ds.channel = assoc_req->channel;
-
-	/* set IBSS parameter set */
-	cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS;
-	cmd.ibss.header.len = 2;
-	cmd.ibss.atimwindow = cpu_to_le16(0);
-
-	/* set capability info */
-	tmpcap = WLAN_CAPABILITY_IBSS;
-	if (assoc_req->secinfo.wep_enabled ||
-	    assoc_req->secinfo.WPAenabled ||
-	    assoc_req->secinfo.WPA2enabled) {
-		lbs_deb_join("ADHOC_START: WEP/WPA enabled, privacy on\n");
-		tmpcap |= WLAN_CAPABILITY_PRIVACY;
-	} else
-		lbs_deb_join("ADHOC_START: WEP disabled, privacy off\n");
-
-	cmd.capability = cpu_to_le16(tmpcap);
-
-	/* Only v8 and below support setting probe delay */
-	if (priv->fwrelease < 0x09000000)
-		cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
-
-	ratesize = min(sizeof(cmd.rates), sizeof(lbs_bg_rates));
-	memcpy(cmd.rates, lbs_bg_rates, ratesize);
-
-	/* Copy the ad-hoc creating rates into Current BSS state structure */
-	memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
-	memcpy(&priv->curbssparams.rates, &cmd.rates, ratesize);
-
-	/* Set MSB on basic rates as the firmware requires, but _after_
-	 * copying to current bss rates.
-	 */
-	lbs_set_basic_rate_flags(cmd.rates, ratesize);
-
-	lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n",
-	       cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]);
-
-	lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n",
-		     assoc_req->channel, assoc_req->band);
-
-	priv->adhoccreate = 1;
-	priv->mode = IW_MODE_ADHOC;
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd);
-	if (ret == 0)
-		ret = lbs_adhoc_post(priv,
-				     (struct cmd_ds_802_11_ad_hoc_result *)&cmd);
-
-out:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief Stop an Ad-Hoc network and exit Ad-Hoc mode
- *
- *  @param priv         A pointer to struct lbs_private structure
- *  @return             0 on success, or an error
- */
-int lbs_adhoc_stop(struct lbs_private *priv)
-{
-	struct cmd_ds_802_11_ad_hoc_stop cmd;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_JOIN);
-
-	memset(&cmd, 0, sizeof (cmd));
-	cmd.hdr.size = cpu_to_le16 (sizeof (cmd));
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
-
-	/* Clean up everything even if there was an error */
-	lbs_mac_event_disconnected(priv);
-
-	lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
-	return ret;
-}
-
-static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
-					struct bss_descriptor *match_bss)
-{
-	if (!secinfo->wep_enabled &&
-	    !secinfo->WPAenabled && !secinfo->WPA2enabled &&
-	    match_bss->wpa_ie[0] != WLAN_EID_GENERIC &&
-	    match_bss->rsn_ie[0] != WLAN_EID_RSN &&
-	    !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
-		return 1;
-	else
-		return 0;
-}
-
-static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
-				       struct bss_descriptor *match_bss)
-{
-	if (secinfo->wep_enabled &&
-	    !secinfo->WPAenabled && !secinfo->WPA2enabled &&
-	    (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
-		return 1;
-	else
-		return 0;
-}
-
-static inline int match_bss_wpa(struct lbs_802_11_security *secinfo,
-				struct bss_descriptor *match_bss)
-{
-	if (!secinfo->wep_enabled && secinfo->WPAenabled &&
-	    (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
-	    /* privacy bit may NOT be set in some APs like LinkSys WRT54G
-	    && (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
-	   )
-		return 1;
-	else
-		return 0;
-}
-
-static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo,
-				 struct bss_descriptor *match_bss)
-{
-	if (!secinfo->wep_enabled && secinfo->WPA2enabled &&
-	    (match_bss->rsn_ie[0] == WLAN_EID_RSN)
-	    /* privacy bit may NOT be set in some APs like LinkSys WRT54G
-	    (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
-	   )
-		return 1;
-	else
-		return 0;
-}
-
-static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo,
-					struct bss_descriptor *match_bss)
-{
-	if (!secinfo->wep_enabled &&
-	    !secinfo->WPAenabled && !secinfo->WPA2enabled &&
-	    (match_bss->wpa_ie[0] != WLAN_EID_GENERIC) &&
-	    (match_bss->rsn_ie[0] != WLAN_EID_RSN) &&
-	    (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
-		return 1;
-	else
-		return 0;
-}
-
-/**
- *  @brief Check if a scanned network is compatible with the driver settings
- *
- *   WEP     WPA     WPA2    ad-hoc  encrypt                      Network
- * enabled enabled  enabled   AES     mode   privacy  WPA  WPA2  Compatible
- *    0       0        0       0      NONE      0      0    0   yes No security
- *    1       0        0       0      NONE      1      0    0   yes Static WEP
- *    0       1        0       0       x        1x     1    x   yes WPA
- *    0       0        1       0       x        1x     x    1   yes WPA2
- *    0       0        0       1      NONE      1      0    0   yes Ad-hoc AES
- *    0       0        0       0     !=NONE     1      0    0   yes Dynamic WEP
- *
- *
- *  @param priv    A pointer to struct lbs_private
- *  @param bss     BSS descriptor from the scan table to check against the
- *                 current driver settings
- *  @param mode    Network mode: Infrastructure or IBSS
- *
- *  @return        1 if the network is compatible, 0 otherwise
- */
-static int is_network_compatible(struct lbs_private *priv,
-				 struct bss_descriptor *bss, uint8_t mode)
-{
-	int matched = 0;
-
-	lbs_deb_enter(LBS_DEB_SCAN);
-
-	if (bss->mode != mode)
-		goto done;
-
-	matched = match_bss_no_security(&priv->secinfo, bss);
-	if (matched)
-		goto done;
-	matched = match_bss_static_wep(&priv->secinfo, bss);
-	if (matched)
-		goto done;
-	matched = match_bss_wpa(&priv->secinfo, bss);
-	if (matched) {
-		lbs_deb_scan("is_network_compatible() WPA: wpa_ie 0x%x "
-			     "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s "
-			     "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0],
-			     priv->secinfo.wep_enabled ? "e" : "d",
-			     priv->secinfo.WPAenabled ? "e" : "d",
-			     priv->secinfo.WPA2enabled ? "e" : "d",
-			     (bss->capability & WLAN_CAPABILITY_PRIVACY));
-		goto done;
-	}
-	matched = match_bss_wpa2(&priv->secinfo, bss);
-	if (matched) {
-		lbs_deb_scan("is_network_compatible() WPA2: wpa_ie 0x%x "
-			     "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s "
-			     "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0],
-			     priv->secinfo.wep_enabled ? "e" : "d",
-			     priv->secinfo.WPAenabled ? "e" : "d",
-			     priv->secinfo.WPA2enabled ? "e" : "d",
-			     (bss->capability & WLAN_CAPABILITY_PRIVACY));
-		goto done;
-	}
-	matched = match_bss_dynamic_wep(&priv->secinfo, bss);
-	if (matched) {
-		lbs_deb_scan("is_network_compatible() dynamic WEP: "
-			     "wpa_ie 0x%x wpa2_ie 0x%x privacy 0x%x\n",
-			     bss->wpa_ie[0], bss->rsn_ie[0],
-			     (bss->capability & WLAN_CAPABILITY_PRIVACY));
-		goto done;
-	}
-
-	/* bss security settings don't match those configured on card */
-	lbs_deb_scan("is_network_compatible() FAILED: wpa_ie 0x%x "
-		     "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s privacy 0x%x\n",
-		     bss->wpa_ie[0], bss->rsn_ie[0],
-		     priv->secinfo.wep_enabled ? "e" : "d",
-		     priv->secinfo.WPAenabled ? "e" : "d",
-		     priv->secinfo.WPA2enabled ? "e" : "d",
-		     (bss->capability & WLAN_CAPABILITY_PRIVACY));
-
-done:
-	lbs_deb_leave_args(LBS_DEB_SCAN, "matched: %d", matched);
-	return matched;
-}
-
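A concrete reading of the matching rules above (illustrative fragment, not driver code):

	/* A beacon advertising the privacy bit but carrying no WPA/RSN IEs is
	 * only compatible when static WEP is configured locally. */
	struct lbs_802_11_security sec = { .wep_enabled = 1 };
	struct bss_descriptor bss = { .capability = WLAN_CAPABILITY_PRIVACY };

	/* match_bss_static_wep(&sec, &bss)  -> 1
	 * match_bss_no_security(&sec, &bss) -> 0 (WEP is enabled locally)
	 * match_bss_dynamic_wep(&sec, &bss) -> 0 (WEP is enabled locally) */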
-/**
- *  @brief This function finds a specific compatible BSSID in the scan list
- *
- *  Used in association code
- *
- *  @param priv  A pointer to struct lbs_private
- *  @param bssid    BSSID to find in the scan list
- *  @param mode     Network mode: Infrastructure or IBSS
- *
- *  @return         Pointer to the matching BSS descriptor, or NULL if not found
- */
-static struct bss_descriptor *lbs_find_bssid_in_list(struct lbs_private *priv,
-					      uint8_t *bssid, uint8_t mode)
-{
-	struct bss_descriptor *iter_bss;
-	struct bss_descriptor *found_bss = NULL;
-
-	lbs_deb_enter(LBS_DEB_SCAN);
-
-	if (!bssid)
-		goto out;
-
-	lbs_deb_hex(LBS_DEB_SCAN, "looking for", bssid, ETH_ALEN);
-
-	/* Look through the scan table for a compatible match.  The loop will
-	 *   continue past a matched bssid that is not compatible in case there
-	 *   is an AP with multiple SSIDs assigned to the same BSSID
-	 */
-	mutex_lock(&priv->lock);
-	list_for_each_entry(iter_bss, &priv->network_list, list) {
-		if (compare_ether_addr(iter_bss->bssid, bssid))
-			continue; /* bssid doesn't match */
-		switch (mode) {
-		case IW_MODE_INFRA:
-		case IW_MODE_ADHOC:
-			if (!is_network_compatible(priv, iter_bss, mode))
-				break;
-			found_bss = iter_bss;
-			break;
-		default:
-			found_bss = iter_bss;
-			break;
-		}
-	}
-	mutex_unlock(&priv->lock);
-
-out:
-	lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss);
-	return found_bss;
-}
-
-/**
- *  @brief This function finds an SSID in the scan list
- *
- *  Used in association code
- *
- *  @param priv     A pointer to struct lbs_private
- *  @param ssid     SSID to find in the list
- *  @param ssid_len Length of the SSID
- *  @param bssid    BSSID to qualify the SSID selection (if provided)
- *  @param mode     Network mode: Infrastructure or IBSS
- *  @param channel  Channel to qualify the SSID selection (if > 0)
- *
- *  @return         Pointer to the best matching BSS descriptor, or NULL if
- *                  none found
- */
-static struct bss_descriptor *lbs_find_ssid_in_list(struct lbs_private *priv,
-					     uint8_t *ssid, uint8_t ssid_len,
-					     uint8_t *bssid, uint8_t mode,
-					     int channel)
-{
-	u32 bestrssi = 0;
-	struct bss_descriptor *iter_bss = NULL;
-	struct bss_descriptor *found_bss = NULL;
-	struct bss_descriptor *tmp_oldest = NULL;
-
-	lbs_deb_enter(LBS_DEB_SCAN);
-
-	mutex_lock(&priv->lock);
-
-	list_for_each_entry(iter_bss, &priv->network_list, list) {
-		if (!tmp_oldest ||
-		    (iter_bss->last_scanned < tmp_oldest->last_scanned))
-			tmp_oldest = iter_bss;
-
-		if (lbs_ssid_cmp(iter_bss->ssid, iter_bss->ssid_len,
-				 ssid, ssid_len) != 0)
-			continue; /* ssid doesn't match */
-		if (bssid && compare_ether_addr(iter_bss->bssid, bssid) != 0)
-			continue; /* bssid doesn't match */
-		if ((channel > 0) && (iter_bss->channel != channel))
-			continue; /* channel doesn't match */
-
-		switch (mode) {
-		case IW_MODE_INFRA:
-		case IW_MODE_ADHOC:
-			if (!is_network_compatible(priv, iter_bss, mode))
-				break;
-
-			if (bssid) {
-				/* Found requested BSSID */
-				found_bss = iter_bss;
-				goto out;
-			}
-
-			if (SCAN_RSSI(iter_bss->rssi) > bestrssi) {
-				bestrssi = SCAN_RSSI(iter_bss->rssi);
-				found_bss = iter_bss;
-			}
-			break;
-		case IW_MODE_AUTO:
-		default:
-			if (SCAN_RSSI(iter_bss->rssi) > bestrssi) {
-				bestrssi = SCAN_RSSI(iter_bss->rssi);
-				found_bss = iter_bss;
-			}
-			break;
-		}
-	}
-
-out:
-	mutex_unlock(&priv->lock);
-	lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss);
-	return found_bss;
-}
-
-static int assoc_helper_essid(struct lbs_private *priv,
-                              struct assoc_request * assoc_req)
-{
-	int ret = 0;
-	struct bss_descriptor * bss;
-	int channel = -1;
-	DECLARE_SSID_BUF(ssid);
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	/* FIXME: take channel into account when picking SSIDs if a channel
-	 * is set.
-	 */
-
-	if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags))
-		channel = assoc_req->channel;
-
-	lbs_deb_assoc("SSID '%s' requested\n",
-	              print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len));
-	if (assoc_req->mode == IW_MODE_INFRA) {
-		lbs_send_specific_ssid_scan(priv, assoc_req->ssid,
-			assoc_req->ssid_len);
-
-		bss = lbs_find_ssid_in_list(priv, assoc_req->ssid,
-				assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel);
-		if (bss != NULL) {
-			memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
-			ret = lbs_try_associate(priv, assoc_req);
-		} else {
-			lbs_deb_assoc("SSID not found; cannot associate\n");
-		}
-	} else if (assoc_req->mode == IW_MODE_ADHOC) {
-		/* Scan for the network, do not save previous results.  Stale
-		 *   scan data will cause us to join a non-existent adhoc network
-		 */
-		lbs_send_specific_ssid_scan(priv, assoc_req->ssid,
-			assoc_req->ssid_len);
-
-		/* Search for the requested SSID in the scan table */
-		bss = lbs_find_ssid_in_list(priv, assoc_req->ssid,
-				assoc_req->ssid_len, NULL, IW_MODE_ADHOC, channel);
-		if (bss != NULL) {
-			lbs_deb_assoc("SSID found, will join\n");
-			memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
-			lbs_adhoc_join(priv, assoc_req);
-		} else {
-			/* else send START command */
-			lbs_deb_assoc("SSID not found, creating adhoc network\n");
-			memcpy(&assoc_req->bss.ssid, &assoc_req->ssid,
-				IEEE80211_MAX_SSID_LEN);
-			assoc_req->bss.ssid_len = assoc_req->ssid_len;
-			lbs_adhoc_start(priv, assoc_req);
-		}
-	}
-
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-
-static int assoc_helper_bssid(struct lbs_private *priv,
-                              struct assoc_request * assoc_req)
-{
-	int ret = 0;
-	struct bss_descriptor * bss;
-
-	lbs_deb_enter_args(LBS_DEB_ASSOC, "BSSID %pM", assoc_req->bssid);
-
-	/* Search for index position in list for requested MAC */
-	bss = lbs_find_bssid_in_list(priv, assoc_req->bssid,
-			    assoc_req->mode);
-	if (bss == NULL) {
-		lbs_deb_assoc("ASSOC: WAP: BSSID %pM not found, "
-			"cannot associate.\n", assoc_req->bssid);
-		goto out;
-	}
-
-	memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
-	if (assoc_req->mode == IW_MODE_INFRA) {
-		ret = lbs_try_associate(priv, assoc_req);
-		lbs_deb_assoc("ASSOC: lbs_try_associate(bssid) returned %d\n",
-			      ret);
-	} else if (assoc_req->mode == IW_MODE_ADHOC) {
-		lbs_adhoc_join(priv, assoc_req);
-	}
-
-out:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-
-static int assoc_helper_associate(struct lbs_private *priv,
-                                  struct assoc_request * assoc_req)
-{
-	int ret = 0, done = 0;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	/* If we're given an 'any' BSSID, try associating based on SSID */
-
-	if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
-		if (compare_ether_addr(bssid_any, assoc_req->bssid) &&
-		    compare_ether_addr(bssid_off, assoc_req->bssid)) {
-			ret = assoc_helper_bssid(priv, assoc_req);
-			done = 1;
-		}
-	}
-
-	if (!done && test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
-		ret = assoc_helper_essid(priv, assoc_req);
-	}
-
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-
-static int assoc_helper_mode(struct lbs_private *priv,
-                             struct assoc_request * assoc_req)
-{
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	if (assoc_req->mode == priv->mode)
-		goto done;
-
-	if (assoc_req->mode == IW_MODE_INFRA) {
-		if (priv->psstate != PS_STATE_FULL_POWER)
-			lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
-		priv->psmode = LBS802_11POWERMODECAM;
-	}
-
-	priv->mode = assoc_req->mode;
-	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE,
-		assoc_req->mode == IW_MODE_ADHOC ? 2 : 1);
-
-done:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-static int assoc_helper_channel(struct lbs_private *priv,
-                                struct assoc_request * assoc_req)
-{
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	ret = lbs_update_channel(priv);
-	if (ret) {
-		lbs_deb_assoc("ASSOC: channel: error getting channel.\n");
-		goto done;
-	}
-
-	if (assoc_req->channel == priv->channel)
-		goto done;
-
-	if (priv->mesh_dev) {
-		/* Change the mesh channel first; 21.p21 firmware won't let
-		   you change the channel otherwise (even though it'll return
-		   an error if you try). */
-		lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP,
-				assoc_req->channel);
-	}
-
-	lbs_deb_assoc("ASSOC: channel: %d -> %d\n",
-		      priv->channel, assoc_req->channel);
-
-	ret = lbs_set_channel(priv, assoc_req->channel);
-	if (ret < 0)
-		lbs_deb_assoc("ASSOC: channel: error setting channel.\n");
-
-	/* FIXME: shouldn't need to grab the channel _again_ after setting
-	 * it since the firmware is supposed to return the new channel, but
-	 * whatever... */
-	ret = lbs_update_channel(priv);
-	if (ret) {
-		lbs_deb_assoc("ASSOC: channel: error getting channel.\n");
-		goto done;
-	}
-
-	if (assoc_req->channel != priv->channel) {
-		lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n",
-		              assoc_req->channel);
-		goto restore_mesh;
-	}
-
-	if (assoc_req->secinfo.wep_enabled &&
-	    (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len ||
-	     assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)) {
-		/* Make sure WEP keys are re-sent to firmware */
-		set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
-	}
-
-	/* Must restart/rejoin adhoc networks after channel change */
- 	set_bit(ASSOC_FLAG_SSID, &assoc_req->flags);
-
- restore_mesh:
-	if (priv->mesh_dev)
-		lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-				priv->channel);
-
- done:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-
-static int assoc_helper_wep_keys(struct lbs_private *priv,
-				 struct assoc_request *assoc_req)
-{
-	int i;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	/* Set or remove WEP keys */
-	if (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len ||
-	    assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)
-		ret = lbs_cmd_802_11_set_wep(priv, CMD_ACT_ADD, assoc_req);
-	else
-		ret = lbs_cmd_802_11_set_wep(priv, CMD_ACT_REMOVE, assoc_req);
-
-	if (ret)
-		goto out;
-
-	/* enable/disable the MAC's WEP packet filter */
-	if (assoc_req->secinfo.wep_enabled)
-		priv->mac_control |= CMD_ACT_MAC_WEP_ENABLE;
-	else
-		priv->mac_control &= ~CMD_ACT_MAC_WEP_ENABLE;
-
-	lbs_set_mac_control(priv);
-
-	mutex_lock(&priv->lock);
-
-	/* Copy WEP keys into priv wep key fields */
-	for (i = 0; i < 4; i++) {
-		memcpy(&priv->wep_keys[i], &assoc_req->wep_keys[i],
-		       sizeof(struct enc_key));
-	}
-	priv->wep_tx_keyidx = assoc_req->wep_tx_keyidx;
-
-	mutex_unlock(&priv->lock);
-
-out:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-static int assoc_helper_secinfo(struct lbs_private *priv,
-                                struct assoc_request * assoc_req)
-{
-	int ret = 0;
-	uint16_t do_wpa;
-	uint16_t rsn = 0;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	memcpy(&priv->secinfo, &assoc_req->secinfo,
-		sizeof(struct lbs_802_11_security));
-
-	lbs_set_mac_control(priv);
-
-	/* If RSN is already enabled, don't try to enable it again, since
-	 * ENABLE_RSN resets internal state machines and will clobber the
-	 * 4-way WPA handshake.
-	 */
-
-	/* Get RSN enabled/disabled */
-	ret = lbs_cmd_802_11_enable_rsn(priv, CMD_ACT_GET, &rsn);
-	if (ret) {
-		lbs_deb_assoc("Failed to get RSN status: %d\n", ret);
-		goto out;
-	}
-
-	/* Don't re-enable RSN if it's already enabled */
-	do_wpa = assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled;
-	if (do_wpa == rsn)
-		goto out;
-
-	/* Set RSN enabled/disabled */
-	ret = lbs_cmd_802_11_enable_rsn(priv, CMD_ACT_SET, &do_wpa);
-
-out:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-
-static int assoc_helper_wpa_keys(struct lbs_private *priv,
-                                 struct assoc_request * assoc_req)
-{
-	int ret = 0;
-	unsigned int flags = assoc_req->flags;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	/* Work around older firmware bug where WPA unicast and multicast
-	 * keys must be set independently.  Seen in SDIO parts with firmware
-	 * version 5.0.11p0.
-	 */
-
-	if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
-		clear_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags);
-		ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req);
-		assoc_req->flags = flags;
-	}
-
-	if (ret)
-		goto out;
-
-	memcpy(&priv->wpa_unicast_key, &assoc_req->wpa_unicast_key,
-			sizeof(struct enc_key));
-
-	if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) {
-		clear_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags);
-
-		ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req);
-		assoc_req->flags = flags;
-
-		memcpy(&priv->wpa_mcast_key, &assoc_req->wpa_mcast_key,
-				sizeof(struct enc_key));
-	}
-
-out:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-
-static int assoc_helper_wpa_ie(struct lbs_private *priv,
-                               struct assoc_request * assoc_req)
-{
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
-		memcpy(&priv->wpa_ie, &assoc_req->wpa_ie, assoc_req->wpa_ie_len);
-		priv->wpa_ie_len = assoc_req->wpa_ie_len;
-	} else {
-		memset(&priv->wpa_ie, 0, MAX_WPA_IE_LEN);
-		priv->wpa_ie_len = 0;
-	}
-
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-
-static int should_deauth_infrastructure(struct lbs_private *priv,
-                                        struct assoc_request * assoc_req)
-{
-	int ret = 0;
-
-	if (priv->connect_status != LBS_CONNECTED)
-		return 0;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-	if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
-		lbs_deb_assoc("Deauthenticating due to new SSID\n");
-		ret = 1;
-		goto out;
-	}
-
-	if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
-		if (priv->secinfo.auth_mode != assoc_req->secinfo.auth_mode) {
-			lbs_deb_assoc("Deauthenticating due to new security\n");
-			ret = 1;
-			goto out;
-		}
-	}
-
-	if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
-		lbs_deb_assoc("Deauthenticating due to new BSSID\n");
-		ret = 1;
-		goto out;
-	}
-
-	if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
-		lbs_deb_assoc("Deauthenticating due to channel switch\n");
-		ret = 1;
-		goto out;
-	}
-
-	/* FIXME: deal with 'auto' mode somehow */
-	if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) {
-		if (assoc_req->mode != IW_MODE_INFRA) {
-			lbs_deb_assoc("Deauthenticating due to leaving "
-				"infra mode\n");
-			ret = 1;
-			goto out;
-		}
-	}
-
-out:
-	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
-	return ret;
-}
-
-
-static int should_stop_adhoc(struct lbs_private *priv,
-                             struct assoc_request * assoc_req)
-{
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	if (priv->connect_status != LBS_CONNECTED)
-		return 0;
-
-	if (lbs_ssid_cmp(priv->curbssparams.ssid,
-	                      priv->curbssparams.ssid_len,
-	                      assoc_req->ssid, assoc_req->ssid_len) != 0)
-		return 1;
-
-	/* FIXME: deal with 'auto' mode somehow */
-	if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) {
-		if (assoc_req->mode != IW_MODE_ADHOC)
-			return 1;
-	}
-
-	if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
-		if (assoc_req->channel != priv->channel)
-			return 1;
-	}
-
-	lbs_deb_leave(LBS_DEB_ASSOC);
-	return 0;
-}
-
-
-/**
- *  @brief This function finds the best SSID in the Scan List
- *
- *  Search the scan table for the best SSID that also matches the current
- *   adapter network preference (infrastructure or adhoc)
- *
- *  @param priv     A pointer to struct lbs_private
- *  @param mode     Network mode: Infrastructure or IBSS
- *
- *  @return         Pointer to the best matching BSS descriptor, or NULL if
- *                  none found
- */
-static struct bss_descriptor *lbs_find_best_ssid_in_list(
-	struct lbs_private *priv, uint8_t mode)
-{
-	uint8_t bestrssi = 0;
-	struct bss_descriptor *iter_bss;
-	struct bss_descriptor *best_bss = NULL;
-
-	lbs_deb_enter(LBS_DEB_SCAN);
-
-	mutex_lock(&priv->lock);
-
-	list_for_each_entry(iter_bss, &priv->network_list, list) {
-		switch (mode) {
-		case IW_MODE_INFRA:
-		case IW_MODE_ADHOC:
-			if (!is_network_compatible(priv, iter_bss, mode))
-				break;
-			if (SCAN_RSSI(iter_bss->rssi) <= bestrssi)
-				break;
-			bestrssi = SCAN_RSSI(iter_bss->rssi);
-			best_bss = iter_bss;
-			break;
-		case IW_MODE_AUTO:
-		default:
-			if (SCAN_RSSI(iter_bss->rssi) <= bestrssi)
-				break;
-			bestrssi = SCAN_RSSI(iter_bss->rssi);
-			best_bss = iter_bss;
-			break;
-		}
-	}
-
-	mutex_unlock(&priv->lock);
-	lbs_deb_leave_args(LBS_DEB_SCAN, "best_bss %p", best_bss);
-	return best_bss;
-}
-
-/**
- *  @brief Find the best AP
- *
- *  Used from association worker.
- *
- *  @param priv           A pointer to struct lbs_private structure
- *  @param out_ssid       Buffer that receives the SSID of the best network
- *  @param out_ssid_len   Receives the length of that SSID
- *  @param preferred_mode Preferred network mode (infrastructure or IBSS)
- *  @param out_mode       Receives the mode of the selected network
- *
- *  @return               0 on success, -1 if no suitable network was found
- */
-static int lbs_find_best_network_ssid(struct lbs_private *priv,
-	uint8_t *out_ssid, uint8_t *out_ssid_len, uint8_t preferred_mode,
-	uint8_t *out_mode)
-{
-	int ret = -1;
-	struct bss_descriptor *found;
-
-	lbs_deb_enter(LBS_DEB_SCAN);
-
-	priv->scan_ssid_len = 0;
-	lbs_scan_networks(priv, 1);
-	if (priv->surpriseremoved)
-		goto out;
-
-	found = lbs_find_best_ssid_in_list(priv, preferred_mode);
-	if (found && (found->ssid_len > 0)) {
-		memcpy(out_ssid, &found->ssid, IEEE80211_MAX_SSID_LEN);
-		*out_ssid_len = found->ssid_len;
-		*out_mode = found->mode;
-		ret = 0;
-	}
-
-out:
-	lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
-	return ret;
-}
-
-
-void lbs_association_worker(struct work_struct *work)
-{
-	struct lbs_private *priv = container_of(work, struct lbs_private,
-		assoc_work.work);
-	struct assoc_request * assoc_req = NULL;
-	int ret = 0;
-	int find_any_ssid = 0;
-	DECLARE_SSID_BUF(ssid);
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-
-	mutex_lock(&priv->lock);
-	assoc_req = priv->pending_assoc_req;
-	priv->pending_assoc_req = NULL;
-	priv->in_progress_assoc_req = assoc_req;
-	mutex_unlock(&priv->lock);
-
-	if (!assoc_req)
-		goto done;
-
-	lbs_deb_assoc(
-		"Association Request:\n"
-		"    flags:     0x%08lx\n"
-		"    SSID:      '%s'\n"
-		"    chann:     %d\n"
-		"    band:      %d\n"
-		"    mode:      %d\n"
-		"    BSSID:     %pM\n"
-		"    secinfo:  %s%s%s\n"
-		"    auth_mode: %d\n",
-		assoc_req->flags,
-		print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len),
-		assoc_req->channel, assoc_req->band, assoc_req->mode,
-		assoc_req->bssid,
-		assoc_req->secinfo.WPAenabled ? " WPA" : "",
-		assoc_req->secinfo.WPA2enabled ? " WPA2" : "",
-		assoc_req->secinfo.wep_enabled ? " WEP" : "",
-		assoc_req->secinfo.auth_mode);
-
-	/* If 'any' SSID was specified, find an SSID to associate with */
-	if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags) &&
-	    !assoc_req->ssid_len)
-		find_any_ssid = 1;
-
-	/* But don't use 'any' SSID if there's a valid locked BSSID to use */
-	if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
-		if (compare_ether_addr(assoc_req->bssid, bssid_any) &&
-		    compare_ether_addr(assoc_req->bssid, bssid_off))
-			find_any_ssid = 0;
-	}
-
-	if (find_any_ssid) {
-		u8 new_mode = assoc_req->mode;
-
-		ret = lbs_find_best_network_ssid(priv, assoc_req->ssid,
-				&assoc_req->ssid_len, assoc_req->mode, &new_mode);
-		if (ret) {
-			lbs_deb_assoc("Could not find best network\n");
-			ret = -ENETUNREACH;
-			goto out;
-		}
-
-		/* Ensure we switch to the mode of the AP */
-		if (assoc_req->mode == IW_MODE_AUTO) {
-			set_bit(ASSOC_FLAG_MODE, &assoc_req->flags);
-			assoc_req->mode = new_mode;
-		}
-	}
-
-	/*
-	 * Check if the attributes being changed require deauthentication
-	 * from the currently associated infrastructure access point.
-	 */
-	if (priv->mode == IW_MODE_INFRA) {
-		if (should_deauth_infrastructure(priv, assoc_req)) {
-			ret = lbs_cmd_80211_deauthenticate(priv,
-							   priv->curbssparams.bssid,
-							   WLAN_REASON_DEAUTH_LEAVING);
-			if (ret) {
-				lbs_deb_assoc("Deauthentication due to new "
-					"configuration request failed: %d\n",
-					ret);
-			}
-		}
-	} else if (priv->mode == IW_MODE_ADHOC) {
-		if (should_stop_adhoc(priv, assoc_req)) {
-			ret = lbs_adhoc_stop(priv);
-			if (ret) {
-				lbs_deb_assoc("Teardown of AdHoc network due to "
-					"new configuration request failed: %d\n",
-					ret);
-			}
-
-		}
-	}
-
-	/* Send the various configuration bits to the firmware */
-	if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) {
-		ret = assoc_helper_mode(priv, assoc_req);
-		if (ret)
-			goto out;
-	}
-
-	if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
-		ret = assoc_helper_channel(priv, assoc_req);
-		if (ret)
-			goto out;
-	}
-
-	if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
-		ret = assoc_helper_secinfo(priv, assoc_req);
-		if (ret)
-			goto out;
-	}
-
-	if (test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) {
-		ret = assoc_helper_wpa_ie(priv, assoc_req);
-		if (ret)
-			goto out;
-	}
-
-	/*
-	 * v10 FW wants WPA keys to be set/cleared before WEP key operations,
-	 * otherwise it will fail to correctly associate to WEP networks.
-	 * Other firmware versions don't appear to care.
-	 */
-	if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags) ||
-	    test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
-		ret = assoc_helper_wpa_keys(priv, assoc_req);
-		if (ret)
-			goto out;
-	}
-
-	if (test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags) ||
-	    test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
-		ret = assoc_helper_wep_keys(priv, assoc_req);
-		if (ret)
-			goto out;
-	}
-
-
-	/* SSID/BSSID should be the _last_ config option set, because they
-	 * trigger the association attempt.
-	 */
-	if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags) ||
-	    test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
-		int success = 1;
-
-		ret = assoc_helper_associate(priv, assoc_req);
-		if (ret) {
-			lbs_deb_assoc("ASSOC: association unsuccessful: %d\n",
-				ret);
-			success = 0;
-		}
-
-		if (priv->connect_status != LBS_CONNECTED) {
-			lbs_deb_assoc("ASSOC: association unsuccessful, "
-				"not connected\n");
-			success = 0;
-		}
-
-		if (success) {
-			lbs_deb_assoc("associated to %pM\n",
-				priv->curbssparams.bssid);
-			lbs_prepare_and_send_command(priv,
-				CMD_802_11_RSSI,
-				0, CMD_OPTION_WAITFORRSP, 0, NULL);
-		} else {
-			ret = -1;
-		}
-	}
-
-out:
-	if (ret) {
-		lbs_deb_assoc("ASSOC: reconfiguration attempt unsuccessful: %d\n",
-			ret);
-	}
-
-	mutex_lock(&priv->lock);
-	priv->in_progress_assoc_req = NULL;
-	mutex_unlock(&priv->lock);
-	kfree(assoc_req);
-
-done:
-	lbs_deb_leave(LBS_DEB_ASSOC);
-}
-
-
-/*
- * Caller MUST hold any necessary locks
- */
-struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
-{
-	struct assoc_request * assoc_req;
-
-	lbs_deb_enter(LBS_DEB_ASSOC);
-	if (!priv->pending_assoc_req) {
-		priv->pending_assoc_req = kzalloc(sizeof(struct assoc_request),
-		                                     GFP_KERNEL);
-		if (!priv->pending_assoc_req) {
-			lbs_pr_info("Not enough memory to allocate association"
-				" request!\n");
-			return NULL;
-		}
-	}
-
-	/* Copy current configuration attributes to the association request,
-	 * but don't overwrite any that are already set.
-	 */
-	assoc_req = priv->pending_assoc_req;
-	if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
-		memcpy(&assoc_req->ssid, &priv->curbssparams.ssid,
-		       IEEE80211_MAX_SSID_LEN);
-		assoc_req->ssid_len = priv->curbssparams.ssid_len;
-	}
-
-	if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags))
-		assoc_req->channel = priv->channel;
-
-	if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags))
-		assoc_req->band = priv->curbssparams.band;
-
-	if (!test_bit(ASSOC_FLAG_MODE, &assoc_req->flags))
-		assoc_req->mode = priv->mode;
-
-	if (!test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
-		memcpy(&assoc_req->bssid, priv->curbssparams.bssid,
-			ETH_ALEN);
-	}
-
-	if (!test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags)) {
-		int i;
-		for (i = 0; i < 4; i++) {
-			memcpy(&assoc_req->wep_keys[i], &priv->wep_keys[i],
-				sizeof(struct enc_key));
-		}
-	}
-
-	if (!test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags))
-		assoc_req->wep_tx_keyidx = priv->wep_tx_keyidx;
-
-	if (!test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) {
-		memcpy(&assoc_req->wpa_mcast_key, &priv->wpa_mcast_key,
-			sizeof(struct enc_key));
-	}
-
-	if (!test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
-		memcpy(&assoc_req->wpa_unicast_key, &priv->wpa_unicast_key,
-			sizeof(struct enc_key));
-	}
-
-	if (!test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
-		memcpy(&assoc_req->secinfo, &priv->secinfo,
-			sizeof(struct lbs_802_11_security));
-	}
-
-	if (!test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) {
-		memcpy(&assoc_req->wpa_ie, &priv->wpa_ie,
-			MAX_WPA_IE_LEN);
-		assoc_req->wpa_ie_len = priv->wpa_ie_len;
-	}
-
-	lbs_deb_leave(LBS_DEB_ASSOC);
-	return assoc_req;
-}
-
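A sketch of the caller pattern for lbs_get_association_request() (hypothetical; priv->work_thread and the 500 ms delay are assumptions here, and the driver's real ioctl handlers may differ in detail):

	struct assoc_request *assoc_req;

	mutex_lock(&priv->lock);
	assoc_req = lbs_get_association_request(priv);
	if (assoc_req) {
		assoc_req->channel = 6;		/* new desired channel */
		set_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags);
	}
	mutex_unlock(&priv->lock);

	/* Let lbs_association_worker() pick up the pending request */
	cancel_delayed_work(&priv->assoc_work);
	queue_delayed_work(priv->work_thread, &priv->assoc_work,
			   msecs_to_jiffies(500));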
-
-/**
- *  @brief Deauthenticate from a specific BSS
- *
- *  @param priv        A pointer to struct lbs_private structure
- *  @param bssid       The specific BSS to deauthenticate from
- *  @param reason      The 802.11 sec. 7.3.1.7 Reason Code for deauthenticating
- *
- *  @return            0 on success, error on failure
- */
-int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, u8 bssid[ETH_ALEN],
-				 u16 reason)
-{
-	struct cmd_ds_802_11_deauthenticate cmd;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_JOIN);
-
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-	memcpy(cmd.macaddr, &bssid[0], ETH_ALEN);
-	cmd.reasoncode = cpu_to_le16(reason);
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd);
-
-	/* Clean up everything even if there was an error; can't assume that
-	 * we're still authenticated to the AP after trying to deauth.
-	 */
-	lbs_mac_event_disconnected(priv);
-
-	lbs_deb_leave(LBS_DEB_JOIN);
-	return ret;
-}
-
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
deleted file mode 100644
index 40621b7..0000000
--- a/drivers/net/wireless/libertas/assoc.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* Copyright (C) 2006, Red Hat, Inc. */
-
-#ifndef _LBS_ASSOC_H_
-#define _LBS_ASSOC_H_
-
-
-#include "defs.h"
-#include "host.h"
-
-
-struct lbs_private;
-
-/*
- * In theory, the IE is limited to the IE length, 255,
- * but in practice 64 bytes are enough.
- */
-#define MAX_WPA_IE_LEN 64
-
-
-
-struct lbs_802_11_security {
-	u8 WPAenabled;
-	u8 WPA2enabled;
-	u8 wep_enabled;
-	u8 auth_mode;
-	u32 key_mgmt;
-};
-
-/** Current Basic Service Set State Structure */
-struct current_bss_params {
-	/** bssid */
-	u8 bssid[ETH_ALEN];
-	/** ssid */
-	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
-	u8 ssid_len;
-
-	/** band */
-	u8 band;
-	/** channel is directly in priv->channel */
-	/** zero-terminated array of supported data rates */
-	u8 rates[MAX_RATES + 1];
-};
-
-/**
- *  @brief Structure used to store information for each beacon/probe response
- */
-struct bss_descriptor {
-	u8 bssid[ETH_ALEN];
-
-	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
-	u8 ssid_len;
-
-	u16 capability;
-	u32 rssi;
-	u32 channel;
-	u16 beaconperiod;
-	__le16 atimwindow;
-
-	/* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
-	u8 mode;
-
-	/* zero-terminated array of supported data rates */
-	u8 rates[MAX_RATES + 1];
-
-	unsigned long last_scanned;
-
-	union ieee_phy_param_set phy;
-	union ieee_ss_param_set ss;
-
-	u8 wpa_ie[MAX_WPA_IE_LEN];
-	size_t wpa_ie_len;
-	u8 rsn_ie[MAX_WPA_IE_LEN];
-	size_t rsn_ie_len;
-
-	u8 mesh;
-
-	struct list_head list;
-};
-
-/** Association request
- *
- * Encapsulates all the options that describe a specific association request
- * or configuration of the wireless card's radio, mode, and security settings.
- */
-struct assoc_request {
-#define ASSOC_FLAG_SSID			1
-#define ASSOC_FLAG_CHANNEL		2
-#define ASSOC_FLAG_BAND			3
-#define ASSOC_FLAG_MODE			4
-#define ASSOC_FLAG_BSSID		5
-#define ASSOC_FLAG_WEP_KEYS		6
-#define ASSOC_FLAG_WEP_TX_KEYIDX	7
-#define ASSOC_FLAG_WPA_MCAST_KEY	8
-#define ASSOC_FLAG_WPA_UCAST_KEY	9
-#define ASSOC_FLAG_SECINFO		10
-#define ASSOC_FLAG_WPA_IE		11
-	unsigned long flags;
-
-	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
-	u8 ssid_len;
-	u8 channel;
-	u8 band;
-	u8 mode;
-	u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
-
-	/** WEP keys */
-	struct enc_key wep_keys[4];
-	u16 wep_tx_keyidx;
-
-	/** WPA keys */
-	struct enc_key wpa_mcast_key;
-	struct enc_key wpa_unicast_key;
-
-	struct lbs_802_11_security secinfo;
-
-	/** WPA Information Elements*/
-	u8 wpa_ie[MAX_WPA_IE_LEN];
-	u8 wpa_ie_len;
-
-	/* BSS to associate with for infrastructure of Ad-Hoc join */
-	struct bss_descriptor bss;
-};
-
-
-extern u8 lbs_bg_rates[MAX_RATES];
-
-void lbs_association_worker(struct work_struct *work);
-struct assoc_request *lbs_get_association_request(struct lbs_private *priv);
-
-int lbs_adhoc_stop(struct lbs_private *priv);
-
-int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
-				 u8 bssid[ETH_ALEN], u16 reason);
-
-int lbs_cmd_802_11_rssi(struct lbs_private *priv,
-				struct cmd_ds_command *cmd);
-int lbs_ret_802_11_rssi(struct lbs_private *priv,
-				struct cmd_ds_command *resp);
-
-int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
-				struct cmd_ds_command *cmd,
-				u16 cmd_action);
-int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
-					struct cmd_ds_command *resp);
-
-int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
-			   struct assoc_request *assoc);
-
-int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
-			      uint16_t *enable);
-
-int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
-				struct assoc_request *assoc);
-
-#endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 9d5d3cc..f36cc97 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -7,8 +7,12 @@
  */
 
 #include <linux/slab.h>
+#include <linux/if_arp.h>
+#include <linux/ieee80211.h>
 #include <net/cfg80211.h>
+#include <asm/unaligned.h>
 
+#include "decl.h"
 #include "cfg.h"
 #include "cmd.h"
 
@@ -39,26 +43,27 @@
 	CHAN2G(14, 2484, 0),
 };
 
-#define RATETAB_ENT(_rate, _rateid, _flags) { \
-	.bitrate  = (_rate),                  \
-	.hw_value = (_rateid),                \
-	.flags    = (_flags),                 \
+#define RATETAB_ENT(_rate, _hw_value, _flags) { \
+	.bitrate  = (_rate),                    \
+	.hw_value = (_hw_value),                \
+	.flags    = (_flags),                   \
 }
 
 
+/* Table 6 in section 3.2.1.1 */
 static struct ieee80211_rate lbs_rates[] = {
-	RATETAB_ENT(10,  0x1,   0),
-	RATETAB_ENT(20,  0x2,   0),
-	RATETAB_ENT(55,  0x4,   0),
-	RATETAB_ENT(110, 0x8,   0),
-	RATETAB_ENT(60,  0x10,  0),
-	RATETAB_ENT(90,  0x20,  0),
-	RATETAB_ENT(120, 0x40,  0),
-	RATETAB_ENT(180, 0x80,  0),
-	RATETAB_ENT(240, 0x100, 0),
-	RATETAB_ENT(360, 0x200, 0),
-	RATETAB_ENT(480, 0x400, 0),
-	RATETAB_ENT(540, 0x800, 0),
+	RATETAB_ENT(10,  0,  0),
+	RATETAB_ENT(20,  1,  0),
+	RATETAB_ENT(55,  2,  0),
+	RATETAB_ENT(110, 3,  0),
+	RATETAB_ENT(60,  9,  0),
+	RATETAB_ENT(90,  6,  0),
+	RATETAB_ENT(120, 7,  0),
+	RATETAB_ENT(180, 8,  0),
+	RATETAB_ENT(240, 9,  0),
+	RATETAB_ENT(360, 10, 0),
+	RATETAB_ENT(480, 11, 0),
+	RATETAB_ENT(540, 12, 0),
 };
 
 static struct ieee80211_supported_band lbs_band_2ghz = {
@@ -76,22 +81,639 @@
 	WLAN_CIPHER_SUITE_CCMP,
 };
 
+/* Time to stay on the channel */
+#define LBS_DWELL_PASSIVE 100
+#define LBS_DWELL_ACTIVE  40
 
 
+/***************************************************************************
+ * Misc utility functions
+ */
+
+/*
+ * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1
+ * in the firmware spec
+ */
+static int lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
+{
+	int ret = -ENOTSUPP;
+
+	switch (auth_type) {
+	case NL80211_AUTHTYPE_OPEN_SYSTEM:
+	case NL80211_AUTHTYPE_SHARED_KEY:
+		ret = auth_type;
+		break;
+	case NL80211_AUTHTYPE_AUTOMATIC:
+		ret = NL80211_AUTHTYPE_OPEN_SYSTEM;
+		break;
+	case NL80211_AUTHTYPE_NETWORK_EAP:
+		ret = 0x80;
+		break;
+	default:
+		/* silence compiler */
+		break;
+	}
+	return ret;
+}
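+
+/*
+ * For reference only, a rough sketch of the resulting mapping (assuming the
+ * usual nl80211 enum values, OPEN_SYSTEM = 0 and SHARED_KEY = 1):
+ *
+ *   NL80211_AUTHTYPE_OPEN_SYSTEM  -> 0x00
+ *   NL80211_AUTHTYPE_SHARED_KEY   -> 0x01
+ *   NL80211_AUTHTYPE_AUTOMATIC    -> 0x00 (open system)
+ *   NL80211_AUTHTYPE_NETWORK_EAP  -> 0x80
+ *   anything else                 -> -ENOTSUPP
+ */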
+
+
+/* Various firmware commands need the list of supported rates, but with
+   the high bit set for basic rates */
+static int lbs_add_rates(u8 *rates)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) {
+		u8 rate = lbs_rates[i].bitrate / 5;
+		if (rate == 0x02 || rate == 0x04 ||
+		    rate == 0x0b || rate == 0x16)
+			rate |= 0x80;
+		rates[i] = rate;
+	}
+	return ARRAY_SIZE(lbs_rates);
+}
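+
+/*
+ * Illustration only (derived from lbs_rates above, not extra driver code):
+ * lbs_add_rates() fills the caller's buffer with
+ *   82 84 8b 96 0c 12 18 24 30 48 60 6c
+ * i.e. 1/2/5.5/11 Mb/s with bit 7 set as basic rates, followed by the plain
+ * OFDM rates 6/9/12/18/24/36/48/54 Mb/s, and returns 12.
+ */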
+
+
+/***************************************************************************
+ * TLV utility functions
+ *
+ * TLVs are Marvell specific. They are very similar to IEs, they have the
+ * same structure: type, length, data*. The only difference: for IEs, the
+ * type and length are u8, but for TLVs they're __le16.
+ */
+
+
+/*
+ * Add ssid TLV
+ */
+#define LBS_MAX_SSID_TLV_SIZE			\
+	(sizeof(struct mrvl_ie_header)		\
+	 + IEEE80211_MAX_SSID_LEN)
+
+static int lbs_add_ssid_tlv(u8 *tlv, const u8 *ssid, int ssid_len)
+{
+	struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv;
+
+	/*
+	 * TLV-ID SSID  00 00
+	 * length       06 00
+	 * ssid         4d 4e 54 45 53 54
+	 */
+	ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID);
+	ssid_tlv->header.len = cpu_to_le16(ssid_len);
+	memcpy(ssid_tlv->ssid, ssid, ssid_len);
+	return sizeof(ssid_tlv->header) + ssid_len;
+}
+
+
+/*
+ * Add channel list TLV (section 8.4.2)
+ *
+ * Actual channel data comes from priv->wdev->wiphy->channels.
+ */
+#define LBS_MAX_CHANNEL_LIST_TLV_SIZE					\
+	(sizeof(struct mrvl_ie_header)					\
+	 + (LBS_SCAN_BEFORE_NAP * sizeof(struct chanscanparamset)))
+
+static int lbs_add_channel_list_tlv(struct lbs_private *priv, u8 *tlv,
+				    int last_channel, int active_scan)
+{
+	int chanscanparamsize = sizeof(struct chanscanparamset) *
+		(last_channel - priv->scan_channel);
+
+	struct mrvl_ie_header *header = (void *) tlv;
+
+	/*
+	 * TLV-ID CHANLIST  01 01
+	 * length           0e 00
+	 * channel          00 01 00 00 00 64 00
+	 *   radio type     00
+	 *   channel           01
+	 *   scan type            00
+	 *   min scan time           00 00
+	 *   max scan time                 64 00
+	 * channel 2        00 02 00 00 00 64 00
+	 *
+	 */
+
+	header->type = cpu_to_le16(TLV_TYPE_CHANLIST);
+	header->len  = cpu_to_le16(chanscanparamsize);
+	tlv += sizeof(struct mrvl_ie_header);
+
+	/* lbs_deb_scan("scan: channels %d to %d\n", priv->scan_channel,
+		     last_channel); */
+	memset(tlv, 0, chanscanparamsize);
+
+	while (priv->scan_channel < last_channel) {
+		struct chanscanparamset *param = (void *) tlv;
+
+		param->radiotype = CMD_SCAN_RADIO_TYPE_BG;
+		param->channumber =
+			priv->scan_req->channels[priv->scan_channel]->hw_value;
+		if (active_scan) {
+			param->maxscantime = cpu_to_le16(LBS_DWELL_ACTIVE);
+		} else {
+			param->chanscanmode.passivescan = 1;
+			param->maxscantime = cpu_to_le16(LBS_DWELL_PASSIVE);
+		}
+		tlv += sizeof(struct chanscanparamset);
+		priv->scan_channel++;
+	}
+	return sizeof(struct mrvl_ie_header) + chanscanparamsize;
+}
+
+
+/*
+ * Add rates TLV
+ *
+ * The rates are in lbs_bg_rates[], but for the 802.11b
+ * rates the high bit is set. We add this TLV only because
+ * there's a firmware which otherwise doesn't report all
+ * APs in range.
+ */
+#define LBS_MAX_RATES_TLV_SIZE			\
+	(sizeof(struct mrvl_ie_header)		\
+	 + (ARRAY_SIZE(lbs_rates)))
+
+/* Adds a TLV with all rates the hardware supports */
+static int lbs_add_supported_rates_tlv(u8 *tlv)
+{
+	size_t i;
+	struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv;
+
+	/*
+	 * TLV-ID RATES  01 00
+	 * length        0e 00
+	 * rates         82 84 8b 96 0c 12 18 24 30 48 60 6c
+	 */
+	rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
+	tlv += sizeof(rate_tlv->header);
+	i = lbs_add_rates(tlv);
+	tlv += i;
+	rate_tlv->header.len = cpu_to_le16(i);
+	return sizeof(rate_tlv->header) + i;
+}
+
+
+/*
+ * Adds a TLV with all rates the hardware *and* BSS supports.
+ */
+static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss)
+{
+	struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv;
+	const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
+	int n;
+
+	/*
+	 * 01 00                   TLV_TYPE_RATES
+	 * 04 00                   len
+	 * 82 84 8b 96             rates
+	 */
+	rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
+	tlv += sizeof(rate_tlv->header);
+
+	if (!rates_eid) {
+		/* Fallback: add basic 802.11b rates */
+		*tlv++ = 0x82;
+		*tlv++ = 0x84;
+		*tlv++ = 0x8b;
+		*tlv++ = 0x96;
+		n = 4;
+	} else {
+		int hw, ap;
+		u8 ap_max = rates_eid[1];
+		n = 0;
+		for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
+			u8 hw_rate = lbs_rates[hw].bitrate / 5;
+			for (ap = 0; ap < ap_max; ap++) {
+				if (hw_rate == (rates_eid[ap+2] & 0x7f)) {
+					*tlv++ = rates_eid[ap+2];
+					n++;
+				}
+			}
+		}
+	}
+
+	rate_tlv->header.len = cpu_to_le16(n);
+	return sizeof(rate_tlv->header) + n;
+}
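+
+/*
+ * Worked example (hypothetical AP, not taken from a real trace): if the
+ * AP's Supported Rates IE is  01 06 82 84 8b 96 0c 12  (1/2/5.5/11/6/9
+ * Mb/s), every one of those rates is also in lbs_rates, so the loop above
+ * emits exactly those six rate bytes after the TLV header and n becomes 6.
+ */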
+
+
+/*
+ * Add auth type TLV.
+ *
+ * This is only needed for newer firmware (V9 and up).
+ */
+#define LBS_MAX_AUTH_TYPE_TLV_SIZE \
+	sizeof(struct mrvl_ie_auth_type)
+
+static int lbs_add_auth_type_tlv(u8 *tlv, enum nl80211_auth_type auth_type)
+{
+	struct mrvl_ie_auth_type *auth = (void *) tlv;
+
+	/*
+	 * 1f 01  TLV_TYPE_AUTH_TYPE
+	 * 01 00  len
+	 * 01     auth type
+	 */
+	auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
+	auth->header.len = cpu_to_le16(sizeof(*auth)-sizeof(auth->header));
+	auth->auth = cpu_to_le16(lbs_auth_to_authtype(auth_type));
+	return sizeof(*auth);
+}
+
+
+/*
+ * Add channel (phy ds) TLV
+ */
+#define LBS_MAX_CHANNEL_TLV_SIZE \
+	sizeof(struct mrvl_ie_header)
+
+static int lbs_add_channel_tlv(u8 *tlv, u8 channel)
+{
+	struct mrvl_ie_ds_param_set *ds = (void *) tlv;
+
+	/*
+	 * 03 00  TLV_TYPE_PHY_DS
+	 * 01 00  len
+	 * 06     channel
+	 */
+	ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
+	ds->header.len = cpu_to_le16(sizeof(*ds)-sizeof(ds->header));
+	ds->channel = channel;
+	return sizeof(*ds);
+}
+
+
+/*
+ * Add (empty) CF param TLV of the form:
+ */
+#define LBS_MAX_CF_PARAM_TLV_SIZE		\
+	sizeof(struct mrvl_ie_header)
+
+static int lbs_add_cf_param_tlv(u8 *tlv)
+{
+	struct mrvl_ie_cf_param_set *cf = (void *)tlv;
+
+	/*
+	 * 04 00  TLV_TYPE_CF
+	 * 06 00  len
+	 * 00     cfpcnt
+	 * 00     cfpperiod
+	 * 00 00  cfpmaxduration
+	 * 00 00  cfpdurationremaining
+	 */
+	cf->header.type = cpu_to_le16(TLV_TYPE_CF);
+	cf->header.len = cpu_to_le16(sizeof(*cf)-sizeof(cf->header));
+	return sizeof(*cf);
+}
+
+/*
+ * Add WPA TLV
+ */
+#define LBS_MAX_WPA_TLV_SIZE			\
+	(sizeof(struct mrvl_ie_header)		\
+	 + 128 /* TODO: I guessed the size */)
+
+static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
+{
+	size_t tlv_len;
+
+	/*
+	 * We just need to convert an IE to a TLV. IEs use u8 for the header,
+	 *   u8      type
+	 *   u8      len
+	 *   u8[]    data
+	 * but TLVs use __le16 instead:
+	 *   __le16  type
+	 *   __le16  len
+	 *   u8[]    data
+	 */
+	*tlv++ = *ie++;
+	*tlv++ = 0;
+	tlv_len = *tlv++ = *ie++;
+	*tlv++ = 0;
+	while (tlv_len--)
+		*tlv++ = *ie++;
+	/* the TLV is two bytes larger than the IE */
+	return ie_len + 2;
+}
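+
+/*
+ * Illustration (made-up WPA IE, only to show the conversion): the IE
+ *   dd 16 00 50 f2 01 01 00 ...   (type dd, length 0x16, then data)
+ * becomes the TLV
+ *   dd 00 16 00 00 50 f2 01 01 00 ...
+ * i.e. type and length are each widened to little-endian __le16, so the
+ * result is ie_len + 2 bytes long.
+ */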
+
+/***************************************************************************
+ * Set Channel
+ */
+
 static int lbs_cfg_set_channel(struct wiphy *wiphy,
 	struct net_device *netdev,
-	struct ieee80211_channel *chan,
+	struct ieee80211_channel *channel,
 	enum nl80211_channel_type channel_type)
 {
 	struct lbs_private *priv = wiphy_priv(wiphy);
 	int ret = -ENOTSUPP;
 
-	lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d", chan->center_freq, channel_type);
+	lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
+			   channel->center_freq, channel_type);
 
 	if (channel_type != NL80211_CHAN_NO_HT)
 		goto out;
 
-	ret = lbs_set_channel(priv, chan->hw_value);
+	ret = lbs_set_channel(priv, channel->hw_value);
+
+ out:
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+
+
+/***************************************************************************
+ * Scanning
+ */
+
+/*
+ * While scanning, the firmware doesn't send a NULL data packet with the
+ * power-save bit set to the AP. So we cannot stay away from our current
+ * channel for too long, otherwise we lose data. Therefore we take a "nap"
+ * every few channels while scanning.
+ */
+#define LBS_SCAN_BEFORE_NAP 4
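+
+/*
+ * For example (numbers follow from lbs_scan_worker() below, not from the
+ * firmware spec): a 14-channel scan request is chopped into chunks of
+ * 4+4+4+2 channels, and the worker re-queues itself with a 300 ms delay
+ * between chunks so that normal traffic can flow in between.
+ */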
+
+
+/*
+ * When the firmware reports back a scan result, it gives us a "u8 rssi",
+ * which isn't really an RSSI, as it becomes larger when moving away from
+ * the AP. Anyway, we need to convert that into mBm.
+ */
+#define LBS_SCAN_RSSI_TO_MBM(rssi) \
+	((-(int)rssi + 3)*100)
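+
+/*
+ * Worked example: a firmware value of 55 yields (-55 + 3) * 100 =
+ * -5200 mBm, which cfg80211 reports as -52 dBm.
+ */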
+
+static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
+	struct cmd_header *resp)
+{
+	struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
+	int bsssize;
+	const u8 *pos;
+	u16 nr_sets;
+	const u8 *tsfdesc;
+	int tsfsize;
+	int i;
+	int ret = -EILSEQ;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	bsssize = get_unaligned_le16(&scanresp->bssdescriptsize);
+	nr_sets = scanresp->nr_sets;
+
+	/*
+	 * The general layout of the scan response is described in chapter
+	 * 5.7.1. Basically we have a common part, then any number of BSS
+	 * descriptor sections. Finally we have a section with the same number
+	 * of TSFs.
+	 *
+	 * cmd_ds_802_11_scan_rsp
+	 *   cmd_header
+	 *   pos_size
+	 *   nr_sets
+	 *   bssdesc 1
+	 *     bssid
+	 *     rssi
+	 *     timestamp
+	 *     intvl
+	 *     capa
+	 *     IEs
+	 *   bssdesc 2
+	 *   bssdesc n
+	 *   MrvlIEtypes_TsfFimestamp_t
+	 *     TSF for BSS 1
+	 *     TSF for BSS 2
+	 *     TSF for BSS n
+	 */
+
+	pos = scanresp->bssdesc_and_tlvbuffer;
+
+	tsfdesc = pos + bsssize;
+	tsfsize = 4 + 8 * scanresp->nr_sets;
+
+	/* Validity check: we expect a Marvell-Local TLV */
+	i = get_unaligned_le16(tsfdesc);
+	tsfdesc += 2;
+	if (i != TLV_TYPE_TSFTIMESTAMP)
+		goto done;
+	/* Validity check: the TLV holds TSF values with 8 bytes each, so
+	 * the size in the TLV must match the nr_sets value */
+	i = get_unaligned_le16(tsfdesc);
+	tsfdesc += 2;
+	if (i / 8 != scanresp->nr_sets)
+		goto done;
+
+	for (i = 0; i < scanresp->nr_sets; i++) {
+		const u8 *bssid;
+		const u8 *ie;
+		int left;
+		int ielen;
+		int rssi;
+		u16 intvl;
+		u16 capa;
+		int chan_no = -1;
+		const u8 *ssid = NULL;
+		u8 ssid_len = 0;
+		DECLARE_SSID_BUF(ssid_buf);
+
+		int len = get_unaligned_le16(pos);
+		pos += 2;
+
+		/* BSSID */
+		bssid = pos;
+		pos += ETH_ALEN;
+		/* RSSI */
+		rssi = *pos++;
+		/* Packet time stamp */
+		pos += 8;
+		/* Beacon interval */
+		intvl = get_unaligned_le16(pos);
+		pos += 2;
+		/* Capabilities */
+		capa = get_unaligned_le16(pos);
+		pos += 2;
+
+		/* To find out the channel, we must parse the IEs */
+		ie = pos;
+		/* 6+1+8+2+2: size of BSSID, RSSI, time stamp, beacon
+		   interval, capabilities */
+		ielen = left = len - (6 + 1 + 8 + 2 + 2);
+		while (left >= 2) {
+			u8 id, elen;
+			id = *pos++;
+			elen = *pos++;
+			left -= 2;
+			if (elen > left || elen == 0)
+				goto done;
+			if (id == WLAN_EID_DS_PARAMS)
+				chan_no = *pos;
+			if (id == WLAN_EID_SSID) {
+				ssid = pos;
+				ssid_len = elen;
+			}
+			left -= elen;
+			pos += elen;
+		}
+
+		/* No channel, no luck */
+		if (chan_no != -1) {
+			struct wiphy *wiphy = priv->wdev->wiphy;
+			int freq = ieee80211_channel_to_frequency(chan_no);
+			struct ieee80211_channel *channel =
+				ieee80211_get_channel(wiphy, freq);
+
+			lbs_deb_scan("scan: %pM, capa %04x, chan %2d, %s, "
+				     "%d dBm\n",
+				     bssid, capa, chan_no,
+				     print_ssid(ssid_buf, ssid, ssid_len),
+				     LBS_SCAN_RSSI_TO_MBM(rssi)/100);
+
+			if (channel &&
+			    !(channel->flags & IEEE80211_CHAN_DISABLED))
+				cfg80211_inform_bss(wiphy, channel,
+					bssid, le64_to_cpu(*(__le64 *)tsfdesc),
+					capa, intvl, ie, ielen,
+					LBS_SCAN_RSSI_TO_MBM(rssi),
+					GFP_KERNEL);
+		}
+		tsfdesc += 8;
+	}
+	ret = 0;
+
+ done:
+	lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
+	return ret;
+}
+
+
+/*
+ * Our scan command contains TLVs, consisting of an SSID TLV, a channel list
+ * TLV and a rates TLV. Determine their maximum combined size:
+ */
+#define LBS_SCAN_MAX_CMD_SIZE			\
+	(sizeof(struct cmd_ds_802_11_scan)	\
+	 + LBS_MAX_SSID_TLV_SIZE		\
+	 + LBS_MAX_CHANNEL_LIST_TLV_SIZE	\
+	 + LBS_MAX_RATES_TLV_SIZE)
+
+/*
+ * Assumes priv->scan_req is initialized and valid
+ * Assumes priv->scan_channel is initialized
+ */
+static void lbs_scan_worker(struct work_struct *work)
+{
+	struct lbs_private *priv =
+		container_of(work, struct lbs_private, scan_work.work);
+	struct cmd_ds_802_11_scan *scan_cmd;
+	u8 *tlv; /* pointer into our current, growing TLV storage area */
+	int last_channel;
+	int running, carrier;
+
+	lbs_deb_enter(LBS_DEB_SCAN);
+
+	scan_cmd = kzalloc(LBS_SCAN_MAX_CMD_SIZE, GFP_KERNEL);
+	if (scan_cmd == NULL)
+		goto out_no_scan_cmd;
+
+	/* prepare fixed part of scan command */
+	scan_cmd->bsstype = CMD_BSS_TYPE_ANY;
+
+	/* stop network while we're away from our main channel */
+	running = !netif_queue_stopped(priv->dev);
+	carrier = netif_carrier_ok(priv->dev);
+	if (running)
+		netif_stop_queue(priv->dev);
+	if (carrier)
+		netif_carrier_off(priv->dev);
+
+	/* prepare fixed part of scan command */
+	tlv = scan_cmd->tlvbuffer;
+
+	/* add SSID TLV */
+	if (priv->scan_req->n_ssids)
+		tlv += lbs_add_ssid_tlv(tlv,
+					priv->scan_req->ssids[0].ssid,
+					priv->scan_req->ssids[0].ssid_len);
+
+	/* add channel TLVs */
+	last_channel = priv->scan_channel + LBS_SCAN_BEFORE_NAP;
+	if (last_channel > priv->scan_req->n_channels)
+		last_channel = priv->scan_req->n_channels;
+	tlv += lbs_add_channel_list_tlv(priv, tlv, last_channel,
+		priv->scan_req->n_ssids);
+
+	/* add rates TLV */
+	tlv += lbs_add_supported_rates_tlv(tlv);
+
+	if (priv->scan_channel < priv->scan_req->n_channels) {
+		cancel_delayed_work(&priv->scan_work);
+		queue_delayed_work(priv->work_thread, &priv->scan_work,
+			msecs_to_jiffies(300));
+	}
+
+	/* This is the final data we are about to send */
+	scan_cmd->hdr.size = cpu_to_le16(tlv - (u8 *)scan_cmd);
+	lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd,
+		    sizeof(*scan_cmd));
+	lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer,
+		    tlv - scan_cmd->tlvbuffer);
+
+	__lbs_cmd(priv, CMD_802_11_SCAN, &scan_cmd->hdr,
+		le16_to_cpu(scan_cmd->hdr.size),
+		lbs_ret_scan, 0);
+
+	if (priv->scan_channel >= priv->scan_req->n_channels) {
+		/* Mark scan done */
+		cfg80211_scan_done(priv->scan_req, false);
+		priv->scan_req = NULL;
+	}
+
+	/* Restart network */
+	if (carrier)
+		netif_carrier_on(priv->dev);
+	if (running && !priv->tx_pending_len)
+		netif_wake_queue(priv->dev);
+
+	kfree(scan_cmd);
+
+ out_no_scan_cmd:
+	lbs_deb_leave(LBS_DEB_SCAN);
+}
+
+
+static int lbs_cfg_scan(struct wiphy *wiphy,
+	struct net_device *dev,
+	struct cfg80211_scan_request *request)
+{
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	if (priv->scan_req || delayed_work_pending(&priv->scan_work)) {
+		/* old scan request not yet processed */
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n",
+		request->n_ssids, request->n_channels, request->ie_len);
+
+	priv->scan_channel = 0;
+	queue_delayed_work(priv->work_thread, &priv->scan_work,
+		msecs_to_jiffies(50));
+
+	if (priv->surpriseremoved)
+		ret = -EIO;
+
+	priv->scan_req = request;
 
  out:
 	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
@@ -101,8 +723,1228 @@
 
 
 
+/***************************************************************************
+ * Events
+ */
+
+void lbs_send_disconnect_notification(struct lbs_private *priv)
+{
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	cfg80211_disconnected(priv->dev,
+		0,
+		NULL, 0,
+		GFP_KERNEL);
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+}
+
+void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
+{
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	cfg80211_michael_mic_failure(priv->dev,
+		priv->assoc_bss,
+		event == MACREG_INT_CODE_MIC_ERR_MULTICAST ?
+			NL80211_KEYTYPE_GROUP :
+			NL80211_KEYTYPE_PAIRWISE,
+		-1,
+		NULL,
+		GFP_KERNEL);
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+}
+
+
+
+
+/***************************************************************************
+ * Connect/disconnect
+ */
+
+
+/*
+ * This removes all WEP keys
+ */
+static int lbs_remove_wep_keys(struct lbs_private *priv)
+{
+	struct cmd_ds_802_11_set_wep cmd;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.keyindex = cpu_to_le16(priv->wep_tx_key);
+	cmd.action = cpu_to_le16(CMD_ACT_REMOVE);
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+	return ret;
+}
+
+/*
+ * Set WEP keys
+ */
+static int lbs_set_wep_keys(struct lbs_private *priv)
+{
+	struct cmd_ds_802_11_set_wep cmd;
+	int i;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	/*
+	 * command         13 00
+	 * size            50 00
+	 * sequence        xx xx
+	 * result          00 00
+	 * action          02 00     ACT_ADD
+	 * transmit key    00 00
+	 * type for key 1  01        WEP40
+	 * type for key 2  00
+	 * type for key 3  00
+	 * type for key 4  00
+	 * key 1           39 39 39 39 39 00 00 00
+	 *                 00 00 00 00 00 00 00 00
+	 * key 2           00 00 00 00 00 00 00 00
+	 *                 00 00 00 00 00 00 00 00
+	 * key 3           00 00 00 00 00 00 00 00
+	 *                 00 00 00 00 00 00 00 00
+	 * key 4           00 00 00 00 00 00 00 00
+	 */
+	if (priv->wep_key_len[0] || priv->wep_key_len[1] ||
+	    priv->wep_key_len[2] || priv->wep_key_len[3]) {
+		/* Only set wep keys if we have at least one of them */
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+		cmd.keyindex = cpu_to_le16(priv->wep_tx_key);
+		cmd.action = cpu_to_le16(CMD_ACT_ADD);
+
+		for (i = 0; i < 4; i++) {
+			switch (priv->wep_key_len[i]) {
+			case WLAN_KEY_LEN_WEP40:
+				cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
+				break;
+			case WLAN_KEY_LEN_WEP104:
+				cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
+				break;
+			default:
+				cmd.keytype[i] = 0;
+				break;
+			}
+			memcpy(cmd.keymaterial[i], priv->wep_key[i],
+			       priv->wep_key_len[i]);
+		}
+
+		ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
+	} else {
+		/* Otherwise remove all wep keys */
+		ret = lbs_remove_wep_keys(priv);
+	}
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+	return ret;
+}
+
+
+/*
+ * Enable/Disable RSN status
+ */
+static int lbs_enable_rsn(struct lbs_private *priv, int enable)
+{
+	struct cmd_ds_802_11_enable_rsn cmd;
+	int ret;
+
+	lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", enable);
+
+	/*
+	 * cmd       2f 00
+	 * size      0c 00
+	 * sequence  xx xx
+	 * result    00 00
+	 * action    01 00    ACT_SET
+	 * enable    01 00
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+	cmd.enable = cpu_to_le16(enable);
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+	return ret;
+}
+
+
+/*
+ * Set WPA/WPA2 key material
+ */
+
+/* like "struct cmd_ds_802_11_key_material", but with cmd_header. Once we
+ * get rid of WEXT, this should go into host.h */
+
+struct cmd_key_material {
+	struct cmd_header hdr;
+
+	__le16 action;
+	struct MrvlIEtype_keyParamSet param;
+} __attribute__ ((packed));
+
+static int lbs_set_key_material(struct lbs_private *priv,
+				int key_type,
+				int key_info,
+				u8 *key, u16 key_len)
+{
+	struct cmd_key_material cmd;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	/*
+	 * Example for WPA (TKIP):
+	 *
+	 * cmd       5e 00
+	 * size      34 00
+	 * sequence  xx xx
+	 * result    00 00
+	 * action    01 00
+	 * TLV type  00 01    key param
+	 * length    00 26
+	 * key type  01 00    TKIP
+	 * key info  06 00    UNICAST | ENABLED
+	 * key len   20 00
+	 * key       32 bytes
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+	cmd.param.type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
+	cmd.param.length = cpu_to_le16(sizeof(cmd.param) - 4);
+	cmd.param.keytypeid = cpu_to_le16(key_type);
+	cmd.param.keyinfo = cpu_to_le16(key_info);
+	cmd.param.keylen = cpu_to_le16(key_len);
+	if (key && key_len)
+		memcpy(cmd.param.key, key, key_len);
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+	return ret;
+}
+
+
+/*
+ * Sets the auth type (open, shared, etc) in the firmware. The name
+ * CMD_802_11_AUTHENTICATE is misleading: this firmware command
+ * doesn't send an authentication frame at all, it just
+ * stores the auth_type.
+ */
+static int lbs_set_authtype(struct lbs_private *priv,
+			    struct cfg80211_connect_params *sme)
+{
+	struct cmd_ds_802_11_authenticate cmd;
+	int ret;
+
+	lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", sme->auth_type);
+
+	/*
+	 * cmd        11 00
+	 * size       19 00
+	 * sequence   xx xx
+	 * result     00 00
+	 * BSS id     00 13 19 80 da 30
+	 * auth type  00
+	 * reserved   00 00 00 00 00 00 00 00 00 00
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	if (sme->bssid)
+		memcpy(cmd.bssid, sme->bssid, ETH_ALEN);
+	/* convert auth_type */
+	ret = lbs_auth_to_authtype(sme->auth_type);
+	if (ret < 0)
+		goto done;
+
+	cmd.authtype = ret;
+	ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd);
+
+ done:
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+
+/*
+ * Create association request
+ */
+#define LBS_ASSOC_MAX_CMD_SIZE                     \
+	(sizeof(struct cmd_ds_802_11_associate)    \
+	 - 512 /* cmd_ds_802_11_associate.iebuf */ \
+	 + LBS_MAX_SSID_TLV_SIZE                   \
+	 + LBS_MAX_CHANNEL_TLV_SIZE                \
+	 + LBS_MAX_CF_PARAM_TLV_SIZE               \
+	 + LBS_MAX_AUTH_TYPE_TLV_SIZE              \
+	 + LBS_MAX_WPA_TLV_SIZE)
+
+static int lbs_associate(struct lbs_private *priv,
+		struct cfg80211_bss *bss,
+		struct cfg80211_connect_params *sme)
+{
+	struct cmd_ds_802_11_associate_response *resp;
+	struct cmd_ds_802_11_associate *cmd = kzalloc(LBS_ASSOC_MAX_CMD_SIZE,
+						      GFP_KERNEL);
+	const u8 *ssid_eid;
+	size_t len, resp_ie_len;
+	int status;
+	int ret;
+	u8 *pos = &(cmd->iebuf[0]);
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	if (!cmd) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	/*
+	 * cmd              50 00
+	 * length           34 00
+	 * sequence         xx xx
+	 * result           00 00
+	 * BSS id           00 13 19 80 da 30
+	 * capabilities     11 00
+	 * listen interval  0a 00
+	 * beacon interval  00 00
+	 * DTIM period      00
+	 * TLVs             xx   (up to 512 bytes)
+	 */
+	cmd->hdr.command = cpu_to_le16(CMD_802_11_ASSOCIATE);
+
+	/* Fill in static fields */
+	memcpy(cmd->bssid, bss->bssid, ETH_ALEN);
+	cmd->listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
+	cmd->capability = cpu_to_le16(bss->capability);
+
+	/* add SSID TLV */
+	ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+	if (ssid_eid)
+		pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_eid[1]);
+	else
+		lbs_deb_assoc("no SSID\n");
+
+	/* add DS param TLV */
+	if (bss->channel)
+		pos += lbs_add_channel_tlv(pos, bss->channel->hw_value);
+	else
+		lbs_deb_assoc("no channel\n");
+
+	/* add (empty) CF param TLV */
+	pos += lbs_add_cf_param_tlv(pos);
+
+	/* add rates TLV */
+	pos += lbs_add_common_rates_tlv(pos, bss);
+
+	/* add auth type TLV */
+	if (priv->fwrelease >= 0x09000000)
+		pos += lbs_add_auth_type_tlv(pos, sme->auth_type);
+
+	/* add WPA/WPA2 TLV */
+	if (sme->ie && sme->ie_len)
+		pos += lbs_add_wpa_tlv(pos, sme->ie, sme->ie_len);
+
+	len = (sizeof(*cmd) - sizeof(cmd->iebuf)) +
+		(u16)(pos - (u8 *) &cmd->iebuf);
+	cmd->hdr.size = cpu_to_le16(len);
+
+	/* store for later use */
+	memcpy(priv->assoc_bss, bss->bssid, ETH_ALEN);
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_ASSOCIATE, cmd);
+	if (ret)
+		goto done;
+
+
+	/* generate connect message to cfg80211 */
+
+	resp = (void *) cmd; /* recast for easier field access */
+	status = le16_to_cpu(resp->statuscode);
+
+	/* Convert status codes of old firmware */
+	if (priv->fwrelease < 0x09000000)
+		switch (status) {
+		case 0:
+			break;
+		case 1:
+			lbs_deb_assoc("invalid association parameters\n");
+			status = WLAN_STATUS_CAPS_UNSUPPORTED;
+			break;
+		case 2:
+			lbs_deb_assoc("timer expired while waiting for AP\n");
+			status = WLAN_STATUS_AUTH_TIMEOUT;
+			break;
+		case 3:
+			lbs_deb_assoc("association refused by AP\n");
+			status = WLAN_STATUS_ASSOC_DENIED_UNSPEC;
+			break;
+		case 4:
+			lbs_deb_assoc("authentication refused by AP\n");
+			status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
+			break;
+		default:
+			lbs_deb_assoc("association failure %d\n", status);
+			status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+	}
+
+	lbs_deb_assoc("status %d, capability 0x%04x\n", status,
+		      le16_to_cpu(resp->capability));
+
+	resp_ie_len = le16_to_cpu(resp->hdr.size)
+		- sizeof(resp->hdr)
+		- 6;
+	cfg80211_connect_result(priv->dev,
+				priv->assoc_bss,
+				sme->ie, sme->ie_len,
+				resp->iebuf, resp_ie_len,
+				status,
+				GFP_KERNEL);
+
+	if (status == 0) {
+		/* TODO: get rid of priv->connect_status */
+		priv->connect_status = LBS_CONNECTED;
+		netif_carrier_on(priv->dev);
+		if (!priv->tx_pending_len)
+			netif_tx_wake_all_queues(priv->dev);
+	}
+
+
+done:
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+
+
+static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
+			   struct cfg80211_connect_params *sme)
+{
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	struct cfg80211_bss *bss = NULL;
+	int ret = 0;
+	u8 preamble = RADIO_PREAMBLE_SHORT;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	if (sme->bssid) {
+		bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
+			sme->ssid, sme->ssid_len,
+			WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+	} else {
+		/*
+		 * Here we have an impedance mismatch. The firmware command
+		 * CMD_802_11_ASSOCIATE always needs a BSSID, it cannot
+		 * connect otherwise. However, for the connect-API of
+		 * cfg80211 the bssid is purely optional. We don't get one,
+		 * unless the user specifies one on the "iw" command line.
+		 *
+		 * If we didn't get one, we could initiate a scan and look
+		 * for the best matching cfg80211_bss entry.
+		 *
+		 * Or, better yet, net/wireless/sme.c gets rewritten into
+		 * something more generally useful.
+		 */
+		lbs_pr_err("TODO: no BSS specified\n");
+		ret = -ENOTSUPP;
+		goto done;
+	}
+
+
+	if (!bss) {
+		lbs_pr_err("associate: bss %pM not in scan results\n",
+			   sme->bssid);
+		ret = -ENOENT;
+		goto done;
+	}
+	lbs_deb_assoc("trying %pM", sme->bssid);
+	lbs_deb_assoc("cipher 0x%x, key index %d, key len %d\n",
+		      sme->crypto.cipher_group,
+		      sme->key_idx, sme->key_len);
+
+	/* As this is a new connection, clear locally stored WEP keys */
+	priv->wep_tx_key = 0;
+	memset(priv->wep_key, 0, sizeof(priv->wep_key));
+	memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len));
+
+	/* set/remove WEP keys */
+	switch (sme->crypto.cipher_group) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		/* Store provided WEP keys in priv-> */
+		priv->wep_tx_key = sme->key_idx;
+		priv->wep_key_len[sme->key_idx] = sme->key_len;
+		memcpy(priv->wep_key[sme->key_idx], sme->key, sme->key_len);
+		/* Set WEP keys and WEP mode */
+		lbs_set_wep_keys(priv);
+		priv->mac_control |= CMD_ACT_MAC_WEP_ENABLE;
+		lbs_set_mac_control(priv);
+		/* No RSN mode for WEP */
+		lbs_enable_rsn(priv, 0);
+		break;
+	case 0: /* there's no WLAN_CIPHER_SUITE_NONE definition */
+		/*
+		 * If we have neither WEP, nor WPA, nor WPA2, we remove
+		 * all keys just like in the WPA/WPA2 setup; we just
+		 * don't set RSN.
+		 *
+		 * Therefore: fall through
+		 */
+	case WLAN_CIPHER_SUITE_TKIP:
+	case WLAN_CIPHER_SUITE_CCMP:
+		/* Remove WEP keys and WEP mode */
+		lbs_remove_wep_keys(priv);
+		priv->mac_control &= ~CMD_ACT_MAC_WEP_ENABLE;
+		lbs_set_mac_control(priv);
+
+		/* clear the WPA/WPA2 keys */
+		lbs_set_key_material(priv,
+			KEY_TYPE_ID_WEP, /* doesn't matter */
+			KEY_INFO_WPA_UNICAST,
+			NULL, 0);
+		lbs_set_key_material(priv,
+			KEY_TYPE_ID_WEP, /* doesn't matter */
+			KEY_INFO_WPA_MCAST,
+			NULL, 0);
+		/* RSN mode for WPA/WPA2 */
+		lbs_enable_rsn(priv, sme->crypto.cipher_group != 0);
+		break;
+	default:
+		lbs_pr_err("unsupported cipher group 0x%x\n",
+			   sme->crypto.cipher_group);
+		ret = -ENOTSUPP;
+		goto done;
+	}
+
+	lbs_set_authtype(priv, sme);
+	lbs_set_radio(priv, preamble, 1);
+
+	/* Do the actual association */
+	lbs_associate(priv, bss, sme);
+
+ done:
+	if (bss)
+		cfg80211_put_bss(bss);
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code)
+{
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	struct cmd_ds_802_11_deauthenticate cmd;
+
+	lbs_deb_enter_args(LBS_DEB_CFG80211, "reason_code %d", reason_code);
+
+	/* store for lbs_cfg_ret_disconnect() */
+	priv->disassoc_reason = reason_code;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	/* Mildly ugly to use a locally stored copy of my own BSSID ... */
+	memcpy(cmd.macaddr, &priv->assoc_bss, ETH_ALEN);
+	cmd.reasoncode = cpu_to_le16(reason_code);
+
+	if (lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd))
+		return -EFAULT;
+
+	cfg80211_disconnected(priv->dev,
+			priv->disassoc_reason,
+			NULL, 0,
+			GFP_KERNEL);
+	priv->connect_status = LBS_DISCONNECTED;
+
+	return 0;
+}
+
+
+static int lbs_cfg_set_default_key(struct wiphy *wiphy,
+				   struct net_device *netdev,
+				   u8 key_index)
+{
+	struct lbs_private *priv = wiphy_priv(wiphy);
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	if (key_index != priv->wep_tx_key) {
+		lbs_deb_assoc("set_default_key: to %d\n", key_index);
+		priv->wep_tx_key = key_index;
+		lbs_set_wep_keys(priv);
+	}
+
+	return 0;
+}
+
+
+static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
+			   u8 idx, const u8 *mac_addr,
+			   struct key_params *params)
+{
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	u16 key_info;
+	u16 key_type;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	lbs_deb_assoc("add_key: cipher 0x%x, mac_addr %pM\n",
+		      params->cipher, mac_addr);
+	lbs_deb_assoc("add_key: key index %d, key len %d\n",
+		      idx, params->key_len);
+	if (params->key_len)
+		lbs_deb_hex(LBS_DEB_CFG80211, "KEY",
+			    params->key, params->key_len);
+
+	lbs_deb_assoc("add_key: seq len %d\n", params->seq_len);
+	if (params->seq_len)
+		lbs_deb_hex(LBS_DEB_CFG80211, "SEQ",
+			    params->seq, params->seq_len);
+
+	switch (params->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		/* actually compare if something has changed ... */
+		if ((priv->wep_key_len[idx] != params->key_len) ||
+			memcmp(priv->wep_key[idx],
+			       params->key, params->key_len) != 0) {
+			priv->wep_key_len[idx] = params->key_len;
+			memcpy(priv->wep_key[idx],
+			       params->key, params->key_len);
+			lbs_set_wep_keys(priv);
+		}
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+	case WLAN_CIPHER_SUITE_CCMP:
+		key_info = KEY_INFO_WPA_ENABLED | ((idx == 0)
+						   ? KEY_INFO_WPA_UNICAST
+						   : KEY_INFO_WPA_MCAST);
+		key_type = (params->cipher == WLAN_CIPHER_SUITE_TKIP)
+			? KEY_TYPE_ID_TKIP
+			: KEY_TYPE_ID_AES;
+		lbs_set_key_material(priv,
+				     key_type,
+				     key_info,
+				     params->key, params->key_len);
+		break;
+	default:
+		lbs_pr_err("unhandled cipher 0x%x\n", params->cipher);
+		ret = -ENOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+
+
+static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
+			   u8 key_index, const u8 *mac_addr)
+{
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	lbs_deb_assoc("del_key: key_idx %d, mac_addr %pM\n",
+		      key_index, mac_addr);
+
+#ifdef TODO
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	/*
+	 * I think we can keep this a NO-OP, because:
+
+	 * - we clear all keys whenever we do lbs_cfg_connect() anyway
+	 * - neither "iw" nor "wpa_supplicant" will call this during
+	 *   an ongoing connection
+	 * - TODO: but I have to check if this is still true when
+	 *   I set the AP to periodic re-keying
+	 * - we haven't kzalloc()ed anything when adding a key in
+	 *   lbs_cfg_connect() or lbs_cfg_add_key().
+	 *
+	 * This means lbs_cfg_del_key() is only called at disconnect time,
+	 * where we'd just waste time deleting a key that is not going
+	 * to be used anyway.
+	 */
+	if (key_index < 3 && priv->wep_key_len[key_index]) {
+		priv->wep_key_len[key_index] = 0;
+		lbs_set_wep_keys(priv);
+	}
+#endif
+
+	return 0;
+}
+
+
+
+/***************************************************************************
+ * Monitor mode
+ */
+
+/* like "struct cmd_ds_802_11_monitor_mode", but with cmd_header. Once we
+ * get rid of WEXT, this should go into host.h */
+struct cmd_monitor_mode {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 mode;
+} __attribute__ ((packed));
+
+static int lbs_enable_monitor_mode(struct lbs_private *priv, int mode)
+{
+	struct cmd_monitor_mode cmd;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	/*
+	 * cmd       98 00
+	 * size      0c 00
+	 * sequence  xx xx
+	 * result    00 00
+	 * action    01 00    ACT_SET
+	 * enable    01 00
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+	cmd.mode = cpu_to_le16(mode);
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_MONITOR_MODE, &cmd);
+
+	if (ret == 0)
+		priv->dev->type = ARPHRD_IEEE80211_RADIOTAP;
+	else
+		priv->dev->type = ARPHRD_ETHER;
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+	return ret;
+}
+
+
+
+
+
+
+/***************************************************************************
+ * Get station
+ */
+
+/*
+ * Fills in signal and noise; returns 0 on success or an error code.
+ */
+
+/* like "struct cmd_ds_802_11_rssi", but with cmd_header. Once we get rid
+ * of WEXT, this should go into host.h */
+struct cmd_rssi {
+	struct cmd_header hdr;
+
+	__le16 n_or_snr;
+	__le16 nf;
+	__le16 avg_snr;
+	__le16 avg_nf;
+} __attribute__ ((packed));
+
+static int lbs_get_signal(struct lbs_private *priv, s8 *signal, s8 *noise)
+{
+	struct cmd_rssi cmd;
+	int ret;
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.n_or_snr = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
+	ret = lbs_cmd_with_response(priv, CMD_802_11_RSSI, &cmd);
+
+	if (ret == 0) {
+		*signal = CAL_RSSI(le16_to_cpu(cmd.n_or_snr),
+				le16_to_cpu(cmd.nf));
+		*noise  = CAL_NF(le16_to_cpu(cmd.nf));
+	}
+	return ret;
+}
+
+
+static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
+			      u8 *mac, struct station_info *sinfo)
+{
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	s8 signal, noise;
+	int ret;
+	size_t i;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	sinfo->filled |= STATION_INFO_TX_BYTES |
+			 STATION_INFO_TX_PACKETS |
+			 STATION_INFO_RX_BYTES |
+			 STATION_INFO_RX_PACKETS;
+	sinfo->tx_bytes = priv->dev->stats.tx_bytes;
+	sinfo->tx_packets = priv->dev->stats.tx_packets;
+	sinfo->rx_bytes = priv->dev->stats.rx_bytes;
+	sinfo->rx_packets = priv->dev->stats.rx_packets;
+
+	/* Get current RSSI */
+	ret = lbs_get_signal(priv, &signal, &noise);
+	if (ret == 0) {
+		sinfo->signal = signal;
+		sinfo->filled |= STATION_INFO_SIGNAL;
+	}
+
+	/* Convert priv->cur_rate from hw_value to NL80211 value */
+	for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) {
+		if (priv->cur_rate == lbs_rates[i].hw_value) {
+			sinfo->txrate.legacy = lbs_rates[i].bitrate;
+			sinfo->filled |= STATION_INFO_TX_BITRATE;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+
+
+
+/***************************************************************************
+ * "Site survey", here just current channel and noise level
+ */
+
+static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
+	int idx, struct survey_info *survey)
+{
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	s8 signal, noise;
+	int ret;
+
+	if (idx != 0)
+		return -ENOENT;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	survey->channel = ieee80211_get_channel(wiphy,
+		ieee80211_channel_to_frequency(priv->channel));
+
+	ret = lbs_get_signal(priv, &signal, &noise);
+	if (ret == 0) {
+		survey->filled = SURVEY_INFO_NOISE_DBM;
+		survey->noise = noise;
+	}
+
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+
+
+
+/***************************************************************************
+ * Change interface
+ */
+
+static int lbs_change_intf(struct wiphy *wiphy, struct net_device *dev,
+	enum nl80211_iftype type, u32 *flags,
+	       struct vif_params *params)
+{
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	switch (type) {
+	case NL80211_IFTYPE_MONITOR:
+		ret = lbs_enable_monitor_mode(priv, 1);
+		break;
+	case NL80211_IFTYPE_STATION:
+		if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
+			ret = lbs_enable_monitor_mode(priv, 0);
+		if (!ret)
+			ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 1);
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
+			ret = lbs_enable_monitor_mode(priv, 0);
+		if (!ret)
+			ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 2);
+		break;
+	default:
+		ret = -ENOTSUPP;
+	}
+
+	if (!ret)
+		priv->wdev->iftype = type;
+
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+
+
+/***************************************************************************
+ * IBSS (Ad-Hoc)
+ */
+
+/* The firmware needs the following bits masked out of the beacon-derived
+ * capability field when associating/joining to a BSS:
+ *  9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused)
+ */
+#define CAPINFO_MASK (~(0xda00))
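+
+/*
+ * Sanity check of the mask (simple arithmetic, no new driver behaviour):
+ * 0xda00 = bit 9 + bit 11 + bit 12 + bit 14 + bit 15
+ *        = 0x0200 + 0x0800 + 0x1000 + 0x4000 + 0x8000,
+ * so ANDing with CAPINFO_MASK clears exactly the bits listed above.
+ */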
+
+
+static void lbs_join_post(struct lbs_private *priv,
+			  struct cfg80211_ibss_params *params,
+			  u8 *bssid, u16 capability)
+{
+	u8 fake_ie[2 + IEEE80211_MAX_SSID_LEN + /* ssid */
+		   2 + 4 +                      /* basic rates */
+		   2 + 1 +                      /* DS parameter */
+		   2 + 2 +                      /* atim */
+		   2 + 8];                      /* extended rates */
+	u8 *fake = fake_ie;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	/*
+	 * For cfg80211_inform_bss, we'll need a fake IE, as we can't get
+	 * the real IE from the firmware. So we fabricate a fake IE based on
+	 * what the firmware actually sends (sniffed with wireshark).
+	 */
+	/* Fake SSID IE */
+	*fake++ = WLAN_EID_SSID;
+	*fake++ = params->ssid_len;
+	memcpy(fake, params->ssid, params->ssid_len);
+	fake += params->ssid_len;
+	/* Fake supported basic rates IE */
+	*fake++ = WLAN_EID_SUPP_RATES;
+	*fake++ = 4;
+	*fake++ = 0x82;
+	*fake++ = 0x84;
+	*fake++ = 0x8b;
+	*fake++ = 0x96;
+	/* Fake DS channel IE */
+	*fake++ = WLAN_EID_DS_PARAMS;
+	*fake++ = 1;
+	*fake++ = params->channel->hw_value;
+	/* Fake IBSS params IE */
+	*fake++ = WLAN_EID_IBSS_PARAMS;
+	*fake++ = 2;
+	*fake++ = 0; /* ATIM=0 */
+	*fake++ = 0;
+	/* Fake extended rates IE. TODO: don't add this for 802.11b-only
+	 * networks, but I don't know how this could be checked */
+	*fake++ = WLAN_EID_EXT_SUPP_RATES;
+	*fake++ = 8;
+	*fake++ = 0x0c;
+	*fake++ = 0x12;
+	*fake++ = 0x18;
+	*fake++ = 0x24;
+	*fake++ = 0x30;
+	*fake++ = 0x48;
+	*fake++ = 0x60;
+	*fake++ = 0x6c;
+	lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);
+
+	cfg80211_inform_bss(priv->wdev->wiphy,
+			    params->channel,
+			    bssid,
+			    0,
+			    capability,
+			    params->beacon_interval,
+			    fake_ie, fake - fake_ie,
+			    0, GFP_KERNEL);
+
+	memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
+	priv->wdev->ssid_len = params->ssid_len;
+
+	cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL);
+
+	/* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */
+	priv->connect_status = LBS_CONNECTED;
+	netif_carrier_on(priv->dev);
+	if (!priv->tx_pending_len)
+		netif_wake_queue(priv->dev);
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+}
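+
+/*
+ * Example of the fake IE buffer built above, for a hypothetical IBSS with
+ * SSID "TEST" on channel 11 (EID numbers are the standard 802.11 ones):
+ *
+ *   00 04 54 45 53 54               SSID "TEST"
+ *   01 04 82 84 8b 96               basic rates 1/2/5.5/11 Mb/s
+ *   03 01 0b                        DS parameter set, channel 11
+ *   06 02 00 00                     IBSS parameter set, ATIM window 0
+ *   32 08 0c 12 18 24 30 48 60 6c   extended rates 6..54 Mb/s
+ */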
+
+static int lbs_ibss_join_existing(struct lbs_private *priv,
+	struct cfg80211_ibss_params *params,
+	struct cfg80211_bss *bss)
+{
+	const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
+	struct cmd_ds_802_11_ad_hoc_join cmd;
+	u8 preamble = RADIO_PREAMBLE_SHORT;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	/* TODO: set preamble based on scan result */
+	ret = lbs_set_radio(priv, preamble, 1);
+	if (ret)
+		goto out;
+
+	/*
+	 * Example CMD_802_11_AD_HOC_JOIN command:
+	 *
+	 * command         2c 00         CMD_802_11_AD_HOC_JOIN
+	 * size            65 00
+	 * sequence        xx xx
+	 * result          00 00
+	 * bssid           02 27 27 97 2f 96
+	 * ssid            49 42 53 53 00 00 00 00
+	 *                 00 00 00 00 00 00 00 00
+	 *                 00 00 00 00 00 00 00 00
+	 *                 00 00 00 00 00 00 00 00
+	 * type            02            CMD_BSS_TYPE_IBSS
+	 * beacon period   64 00
+	 * dtim period     00
+	 * timestamp       00 00 00 00 00 00 00 00
+	 * localtime       00 00 00 00 00 00 00 00
+	 * IE DS           03
+	 * IE DS len       01
+	 * IE DS channel   01
+	 * reserved        00 00 00 00
+	 * IE IBSS         06
+	 * IE IBSS len     02
+	 * IE IBSS atim    00 00
+	 * reserved        00 00 00 00
+	 * capability      02 00
+	 * rates           82 84 8b 96 0c 12 18 24 30 48 60 6c 00
+	 * fail timeout    ff 00
+	 * probe delay     00 00
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+	memcpy(cmd.bss.bssid, bss->bssid, ETH_ALEN);
+	memcpy(cmd.bss.ssid, params->ssid, params->ssid_len);
+	cmd.bss.type = CMD_BSS_TYPE_IBSS;
+	cmd.bss.beaconperiod = cpu_to_le16(params->beacon_interval);
+	cmd.bss.ds.header.id = WLAN_EID_DS_PARAMS;
+	cmd.bss.ds.header.len = 1;
+	cmd.bss.ds.channel = params->channel->hw_value;
+	cmd.bss.ibss.header.id = WLAN_EID_IBSS_PARAMS;
+	cmd.bss.ibss.header.len = 2;
+	cmd.bss.ibss.atimwindow = 0;
+	cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
+
+	/* set rates to the intersection of our rates and the rates in the
+	   bss */
+	if (!rates_eid) {
+		lbs_add_rates(cmd.bss.rates);
+	} else {
+		int hw, i;
+		u8 rates_max = rates_eid[1];
+		u8 *rates = cmd.bss.rates;
+		for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
+			u8 hw_rate = lbs_rates[hw].bitrate / 5;
+			for (i = 0; i < rates_max; i++) {
+				if (hw_rate == (rates_eid[i+2] & 0x7f)) {
+					u8 rate = rates_eid[i+2];
+					if (rate == 0x02 || rate == 0x04 ||
+					    rate == 0x0b || rate == 0x16)
+						rate |= 0x80;
+					*rates++ = rate;
+				}
+			}
+		}
+	}
+
+	/* Only v8 and below support setting this */
+	if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) {
+		cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
+		cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
+	}
+	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
+	if (ret)
+		goto out;
+
+	/*
+	 * This is a sample response to CMD_802_11_AD_HOC_JOIN:
+	 *
+	 * response        2c 80
+	 * size            09 00
+	 * sequence        xx xx
+	 * result          00 00
+	 * reserved        00
+	 */
+	lbs_join_post(priv, params, bss->bssid, bss->capability);
+
+ out:
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+
+
+static int lbs_ibss_start_new(struct lbs_private *priv,
+	struct cfg80211_ibss_params *params)
+{
+	struct cmd_ds_802_11_ad_hoc_start cmd;
+	struct cmd_ds_802_11_ad_hoc_result *resp =
+		(struct cmd_ds_802_11_ad_hoc_result *) &cmd;
+	u8 preamble = RADIO_PREAMBLE_SHORT;
+	int ret = 0;
+	u16 capability;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	ret = lbs_set_radio(priv, preamble, 1);
+	if (ret)
+		goto out;
+
+	/*
+	 * Example CMD_802_11_AD_HOC_START command:
+	 *
+	 * command         2b 00         CMD_802_11_AD_HOC_START
+	 * size            b1 00
+	 * sequence        xx xx
+	 * result          00 00
+	 * ssid            54 45 53 54 00 00 00 00
+	 *                 00 00 00 00 00 00 00 00
+	 *                 00 00 00 00 00 00 00 00
+	 *                 00 00 00 00 00 00 00 00
+	 * bss type        02
+	 * beacon period   64 00
+	 * dtim period     00
+	 * IE IBSS         06
+	 * IE IBSS len     02
+	 * IE IBSS atim    00 00
+	 * reserved        00 00 00 00
+	 * IE DS           03
+	 * IE DS len       01
+	 * IE DS channel   01
+	 * reserved        00 00 00 00
+	 * probe delay     00 00
+	 * capability      02 00
+	 * rates           82 84 8b 96   (basic rates have bit 7 set)
+	 *                 0c 12 18 24 30 48 60 6c
+	 * padding         100 bytes
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	memcpy(cmd.ssid, params->ssid, params->ssid_len);
+	cmd.bsstype = CMD_BSS_TYPE_IBSS;
+	cmd.beaconperiod = cpu_to_le16(params->beacon_interval);
+	cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS;
+	cmd.ibss.header.len = 2;
+	cmd.ibss.atimwindow = 0;
+	cmd.ds.header.id = WLAN_EID_DS_PARAMS;
+	cmd.ds.header.len = 1;
+	cmd.ds.channel = params->channel->hw_value;
+	/* Only v8 and below support setting probe delay */
+	if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8)
+		cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
+	/* TODO: mix in WLAN_CAPABILITY_PRIVACY */
+	capability = WLAN_CAPABILITY_IBSS;
+	cmd.capability = cpu_to_le16(capability);
+	lbs_add_rates(cmd.rates);
+
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd);
+	if (ret)
+		goto out;
+
+	/*
+	 * This is a sample response to CMD_802_11_AD_HOC_START:
+	 *
+	 * response        2b 80
+	 * size            14 00
+	 * sequence        xx xx
+	 * result          00 00
+	 * reserved        00
+	 * bssid           02 2b 7b 0f 86 0e
+	 */
+	lbs_join_post(priv, params, resp->bssid, capability);
+
+ out:
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+
+static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+		struct cfg80211_ibss_params *params)
+{
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	int ret = 0;
+	struct cfg80211_bss *bss;
+	DECLARE_SSID_BUF(ssid_buf);
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	if (!params->channel) {
+		ret = -ENOTSUPP;
+		goto out;
+	}
+
+	ret = lbs_set_channel(priv, params->channel->hw_value);
+	if (ret)
+		goto out;
+
+	/* Search if someone is beaconing. This assumes that the
+	 * bss list is populated already */
+	bss = cfg80211_get_bss(wiphy, params->channel, params->bssid,
+		params->ssid, params->ssid_len,
+		WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+
+	if (bss) {
+		ret = lbs_ibss_join_existing(priv, params, bss);
+		cfg80211_put_bss(bss);
+	} else
+		ret = lbs_ibss_start_new(priv, params);
+
+
+ out:
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+
+static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	struct cmd_ds_802_11_ad_hoc_stop cmd;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
+
+	/* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */
+	lbs_mac_event_disconnected(priv);
+
+	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+	return ret;
+}
+
+
+
+
+/***************************************************************************
+ * Initialization
+ */
+
 static struct cfg80211_ops lbs_cfg80211_ops = {
 	.set_channel = lbs_cfg_set_channel,
+	.scan = lbs_cfg_scan,
+	.connect = lbs_cfg_connect,
+	.disconnect = lbs_cfg_disconnect,
+	.add_key = lbs_cfg_add_key,
+	.del_key = lbs_cfg_del_key,
+	.set_default_key = lbs_cfg_set_default_key,
+	.get_station = lbs_cfg_get_station,
+	.dump_survey = lbs_get_survey,
+	.change_virtual_intf = lbs_change_intf,
+	.join_ibss = lbs_join_ibss,
+	.leave_ibss = lbs_leave_ibss,
 };
 
 
@@ -142,6 +1984,36 @@
 }
 
 
+static void lbs_cfg_set_regulatory_hint(struct lbs_private *priv)
+{
+	struct region_code_mapping {
+		const char *cn;
+		int code;
+	};
+
+	/* Section 5.17.2 */
+	static struct region_code_mapping regmap[] = {
+		{"US ", 0x10}, /* US FCC */
+		{"CA ", 0x20}, /* Canada */
+		{"EU ", 0x30}, /* ETSI   */
+		{"ES ", 0x31}, /* Spain  */
+		{"FR ", 0x32}, /* France */
+		{"JP ", 0x40}, /* Japan  */
+	};
+	size_t i;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	for (i = 0; i < ARRAY_SIZE(regmap); i++)
+		if (regmap[i].code == priv->regioncode) {
+			regulatory_hint(priv->wdev->wiphy, regmap[i].cn);
+			break;
+		}
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+}
+
+
 /*
  * This function gets called after lbs_setup_firmware() determined the
  * firmware capabilities. So we can set up the wiphy according to our
@@ -157,10 +2029,12 @@
 	wdev->wiphy->max_scan_ssids = 1;
 	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 
-	/* TODO: BIT(NL80211_IFTYPE_ADHOC); */
-	wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+	wdev->wiphy->interface_modes =
+			BIT(NL80211_IFTYPE_STATION) |
+			BIT(NL80211_IFTYPE_ADHOC);
+	if (lbs_rtap_supported(priv))
+		wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
 
-	/* TODO: honor priv->regioncode */
 	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
 
 	/*
@@ -169,6 +2043,7 @@
 	 */
 	wdev->wiphy->cipher_suites = cipher_suites;
 	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+	wdev->wiphy->reg_notifier = lbs_reg_notifier;
 
 	ret = wiphy_register(wdev->wiphy);
 	if (ret < 0)
@@ -180,10 +2055,129 @@
 	if (ret)
 		lbs_pr_err("cannot register network device\n");
 
+	INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
+
+	lbs_cfg_set_regulatory_hint(priv);
+
 	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
 	return ret;
 }
 
+/**
+ *  @brief This function sets DOMAIN INFO to FW
+ *  @param priv       pointer to struct lbs_private
+ *  @return          0 on success, -1 on failure
+ */
+static int lbs_11d_set_domain_info(struct lbs_private *priv)
+{
+	int ret;
+
+	ret = lbs_prepare_and_send_command(priv, CMD_802_11D_DOMAIN_INFO,
+			CMD_ACT_SET,
+			CMD_OPTION_WAITFORRSP, 0, NULL);
+	if (ret)
+		lbs_deb_11d("failed to download domain info\n");
+
+	return ret;
+}
+
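+/*
+ * The loop below collapses runs of consecutive enabled channels with the
+ * same max_power into 802.11d triplets. As a rough, hypothetical example:
+ * if channels 1-11 are enabled at 20 dBm and 12-13 at 16 dBm, the result
+ * is two triplets, {first_channel 1, num_channels 11, max_power 20} and
+ * {first_channel 12, num_channels 2, max_power 16}.
+ */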
+static void lbs_send_domain_info_cmd_fw(struct wiphy *wiphy,
+					struct regulatory_request *request)
+{
+	u8   no_of_triplet = 0;
+	u8   no_of_parsed_chan = 0;
+	u8   first_channel = 0, next_chan = 0, max_pwr = 0;
+	u8   i, flag = 0;
+	enum ieee80211_band band;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *ch;
+	struct lbs_private *priv = wiphy_priv(wiphy);
+	struct lbs_802_11d_domain_reg *domain_info = &priv->domain_reg;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_CFG80211);
+
+	/* Set country code */
+	domain_info->country_code[0] = request->alpha2[0];
+	domain_info->country_code[1] = request->alpha2[1];
+	domain_info->country_code[2] = ' ';
+
+	for (band = 0; band < IEEE80211_NUM_BANDS ; band++) {
+
+		if (!wiphy->bands[band])
+			continue;
+
+		sband = wiphy->bands[band];
+
+		for (i = 0; i < sband->n_channels ; i++) {
+			ch = &sband->channels[i];
+			if (ch->flags & IEEE80211_CHAN_DISABLED)
+				continue;
+
+			if (!flag) {
+				flag = 1;
+				next_chan = first_channel = (u32) ch->hw_value;
+				max_pwr = ch->max_power;
+				no_of_parsed_chan = 1;
+				continue;
+			}
+
+			if (ch->hw_value == next_chan + 1 &&
+					ch->max_power == max_pwr) {
+				next_chan++;
+				no_of_parsed_chan++;
+			} else {
+				domain_info->triplet[no_of_triplet]
+					.chans.first_channel = first_channel;
+				domain_info->triplet[no_of_triplet]
+					.chans.num_channels = no_of_parsed_chan;
+				domain_info->triplet[no_of_triplet]
+					.chans.max_power = max_pwr;
+				no_of_triplet++;
+				/* Restart the run at the current channel so
+				 * that it isn't silently dropped from the
+				 * domain info */
+				next_chan = first_channel = (u32) ch->hw_value;
+				max_pwr = ch->max_power;
+				no_of_parsed_chan = 1;
+			}
+		}
+		if (flag) {
+			domain_info->triplet[no_of_triplet]
+				.chans.first_channel = first_channel;
+			domain_info->triplet[no_of_triplet]
+				.chans.num_channels = no_of_parsed_chan;
+			domain_info->triplet[no_of_triplet]
+				.chans.max_power = max_pwr;
+			no_of_triplet++;
+		}
+	}
+
+	domain_info->no_triplet = no_of_triplet;
+
+	/* Set domain info */
+	ret = lbs_11d_set_domain_info(priv);
+	if (ret)
+		lbs_pr_err("11D: error setting domain info in FW\n");
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+}
+
+int lbs_reg_notifier(struct wiphy *wiphy,
+		struct regulatory_request *request)
+{
+	lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain "
+			"callback for domain %c%c\n", request->alpha2[0],
+			request->alpha2[1]);
+
+	lbs_send_domain_info_cmd_fw(wiphy, request);
+
+	lbs_deb_leave(LBS_DEB_CFG80211);
+
+	return 0;
+}
+
+void lbs_scan_deinit(struct lbs_private *priv)
+{
+	lbs_deb_enter(LBS_DEB_CFG80211);
+	cancel_delayed_work_sync(&priv->scan_work);
+}
+
 
 void lbs_cfg_free(struct lbs_private *priv)
 {
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
index e09a193..756fb98 100644
--- a/drivers/net/wireless/libertas/cfg.h
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -1,16 +1,27 @@
 #ifndef __LBS_CFG80211_H__
 #define __LBS_CFG80211_H__
 
-#include "dev.h"
+struct device;
+struct lbs_private;
+struct regulatory_request;
+struct wiphy;
 
 struct wireless_dev *lbs_cfg_alloc(struct device *dev);
 int lbs_cfg_register(struct lbs_private *priv);
 void lbs_cfg_free(struct lbs_private *priv);
 
-int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
-	u8 ssid_len);
-int lbs_scan_networks(struct lbs_private *priv, int full_scan);
-void lbs_cfg_scan_worker(struct work_struct *work);
+int lbs_reg_notifier(struct wiphy *wiphy,
+		struct regulatory_request *request);
 
+/* All of these are TODOs: */
+#define lbs_cmd_802_11_rssi(priv, cmdptr) (0)
+#define lbs_ret_802_11_rssi(priv, resp) (0)
+#define lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action) (0)
+#define lbs_ret_802_11_bcn_ctrl(priv, resp) (0)
+
+void lbs_send_disconnect_notification(struct lbs_private *priv);
+void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
+
+void lbs_scan_deinit(struct lbs_private *priv);
 
 #endif
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index cdb9b96..6c8a9d9 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -7,13 +7,8 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 
-#include "host.h"
 #include "decl.h"
-#include "defs.h"
-#include "dev.h"
-#include "assoc.h"
-#include "wext.h"
-#include "scan.h"
+#include "cfg.h"
 #include "cmd.h"
 
 
@@ -70,6 +65,8 @@
 	switch (cmd) {
 	case CMD_802_11_RSSI:
 		return 1;
+	case CMD_802_11_HOST_SLEEP_CFG:
+		return 1;
 	default:
 		break;
 	}
@@ -175,16 +172,28 @@
 	if (priv->mesh_dev)
 		memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN);
 
-	if (lbs_set_regiontable(priv, priv->regioncode, 0)) {
-		ret = -1;
-		goto out;
-	}
-
 out:
 	lbs_deb_leave(LBS_DEB_CMD);
 	return ret;
 }
 
+static int lbs_ret_host_sleep_cfg(struct lbs_private *priv, unsigned long dummy,
+			struct cmd_header *resp)
+{
+	lbs_deb_enter(LBS_DEB_CMD);
+	if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+		priv->is_host_sleep_configured = 0;
+		if (priv->psstate == PS_STATE_FULL_POWER) {
+			priv->is_host_sleep_activated = 0;
+			wake_up_interruptible(&priv->host_sleep_q);
+		}
+	} else {
+		priv->is_host_sleep_configured = 1;
+	}
+	lbs_deb_leave(LBS_DEB_CMD);
+	return 0;
+}
+
 int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
 		struct wol_config *p_wol_config)
 {
@@ -202,12 +211,11 @@
 	else
 		cmd_config.wol_conf.action = CMD_ACT_ACTION_NONE;
 
-	ret = lbs_cmd_with_response(priv, CMD_802_11_HOST_SLEEP_CFG, &cmd_config);
+	ret = __lbs_cmd(priv, CMD_802_11_HOST_SLEEP_CFG, &cmd_config.hdr,
+			le16_to_cpu(cmd_config.hdr.size),
+			lbs_ret_host_sleep_cfg, 0);
 	if (!ret) {
-		if (criteria) {
-			lbs_deb_cmd("Set WOL criteria to %x\n", criteria);
-			priv->wol_criteria = criteria;
-		} else
+		if (p_wol_config)
 			memcpy((uint8_t *) p_wol_config,
 					(uint8_t *)&cmd_config.wol_conf,
 					sizeof(struct wol_config));
@@ -712,6 +720,10 @@
 		}
 	}
 
+	if (le16_to_cpu(cmdnode->cmdbuf->command) ==
+			CMD_802_11_WAKEUP_CONFIRM)
+		addtail = 0;
+
 	spin_lock_irqsave(&priv->driver_lock, flags);
 
 	if (addtail)
@@ -887,6 +899,66 @@
 }
 
 /**
+ *  @brief This function implements command CMD_802_11D_DOMAIN_INFO
+ *  @param priv       pointer to struct lbs_private
+ *  @param cmd        pointer to cmd buffer
+ *  @param cmdoption  cmd action (CMD_ACT_GET or CMD_ACT_SET)
+ *  @return           0
+ */
+int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
+				 struct cmd_ds_command *cmd,
+				 u16 cmdoption)
+{
+	struct cmd_ds_802_11d_domain_info *pdomaininfo =
+	    &cmd->params.domaininfo;
+	struct mrvl_ie_domain_param_set *domain = &pdomaininfo->domain;
+	u8 nr_triplet = priv->domain_reg.no_triplet;
+
+	lbs_deb_enter(LBS_DEB_11D);
+
+	lbs_deb_11d("nr_triplet=%x\n", nr_triplet);
+
+	pdomaininfo->action = cpu_to_le16(cmdoption);
+	if (cmdoption == CMD_ACT_GET) {
+		cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
+					sizeof(struct cmd_header));
+		lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd,
+			le16_to_cpu(cmd->size));
+		goto done;
+	}
+
+	domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN);
+	memcpy(domain->countrycode, priv->domain_reg.country_code,
+	       sizeof(domain->countrycode));
+
+	domain->header.len = cpu_to_le16(nr_triplet
+				* sizeof(struct ieee80211_country_ie_triplet)
+				+ sizeof(domain->countrycode));
+
+	if (nr_triplet) {
+		memcpy(domain->triplet, priv->domain_reg.triplet,
+				nr_triplet *
+				sizeof(struct ieee80211_country_ie_triplet));
+
+		cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
+					     le16_to_cpu(domain->header.len) +
+					     sizeof(struct mrvl_ie_header) +
+					     sizeof(struct cmd_header));
+	} else {
+		cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
+					sizeof(struct cmd_header));
+	}
+
+	lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd,
+			le16_to_cpu(cmd->size));
+
+done:
+	lbs_deb_leave(LBS_DEB_11D);
+	return 0;
+}
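
For orientation, the size set above is the action field plus the Marvell domain TLV (header, country code, triplets) plus the generic command header. A sketch of the arithmetic, with the byte sizes hard-coded purely for illustration (2-byte action, 4-byte mrvl_ie_header, 3-byte country code, 3 bytes per country-IE triplet, 8-byte cmd_header, matching the structures used here):

#include <stdio.h>

/* Illustrative recomputation of the CMD_802_11D_DOMAIN_INFO size. */
static unsigned int domain_info_cmd_size(unsigned int nr_triplet)
{
	if (!nr_triplet)
		return 2 + 8;			/* action + cmd_header     */

	return 2 +				/* action                  */
	       4 +				/* TLV header (type + len) */
	       3 + nr_triplet * 3 +		/* country code + triplets */
	       8;				/* cmd_header              */
}

int main(void)
{
	printf("%u %u\n", domain_info_cmd_size(0), domain_info_cmd_size(3));
	/* prints "10 26" */
	return 0;
}
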
+
+/**
  *  @brief This function prepare the command before send to firmware.
  *
  *  @param priv		A pointer to struct lbs_private structure
@@ -984,6 +1056,11 @@
 		ret = 0;
 		goto done;
 
+	case CMD_802_11D_DOMAIN_INFO:
+		cmdptr->command = cpu_to_le16(cmd_no);
+		ret = lbs_cmd_802_11d_domain_info(priv, cmdptr, cmd_action);
+		break;
+
 	case CMD_802_11_TPC_CFG:
 		cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG);
 		cmdptr->size =
@@ -1303,6 +1380,15 @@
 		 * check if in power save mode, if yes, put the device back
 		 * to PS mode
 		 */
+#ifdef TODO
+		/*
+		 * This was the old code for libertas+wext. Someone that
+		 * understands this beast should re-code it in a sane way.
+		 *
+		 * I actually don't understand why this is related to WPA
+		 * and to connection status, shouldn't powering should be
+		 * independ of such things?
+		 */
 		if ((priv->psmode != LBS802_11POWERMODECAM) &&
 		    (priv->psstate == PS_STATE_FULL_POWER) &&
 		    ((priv->connect_status == LBS_CONNECTED) ||
@@ -1324,6 +1410,7 @@
 				lbs_ps_sleep(priv, 0);
 			}
 		}
+#endif
 	}
 
 	ret = 0;
@@ -1353,6 +1440,11 @@
 	/* We don't get a response on the sleep-confirmation */
 	priv->dnld_sent = DNLD_RES_RECEIVED;
 
+	if (priv->is_host_sleep_configured) {
+		priv->is_host_sleep_activated = 1;
+		wake_up_interruptible(&priv->host_sleep_q);
+	}
+
 	/* If nothing to do, go back to sleep (?) */
 	if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx])
 		priv->psstate = PS_STATE_SLEEP;
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 88f7131..a0d9482 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -5,18 +5,11 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/sched.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
 #include <asm/unaligned.h>
-#include <net/iw_handler.h>
+#include <net/cfg80211.h>
 
-#include "host.h"
-#include "decl.h"
+#include "cfg.h"
 #include "cmd.h"
-#include "defs.h"
-#include "dev.h"
-#include "assoc.h"
-#include "wext.h"
 
 /**
  *  @brief This function handles disconnect event. it
@@ -38,7 +31,9 @@
 	 * It causes problem in the Supplicant
 	 */
 	msleep_interruptible(1000);
-	lbs_send_disconnect_notification(priv);
+
+	if (priv->wdev->iftype == NL80211_IFTYPE_STATION)
+		lbs_send_disconnect_notification(priv);
 
 	/* report disconnect to upper layer */
 	netif_stop_queue(priv->dev);
@@ -49,23 +44,8 @@
 	priv->currenttxskb = NULL;
 	priv->tx_pending_len = 0;
 
-	/* reset SNR/NF/RSSI values */
-	memset(priv->SNR, 0x00, sizeof(priv->SNR));
-	memset(priv->NF, 0x00, sizeof(priv->NF));
-	memset(priv->RSSI, 0x00, sizeof(priv->RSSI));
-	memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
-	memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
-	priv->nextSNRNF = 0;
-	priv->numSNRNF = 0;
 	priv->connect_status = LBS_DISCONNECTED;
 
-	/* Clear out associated SSID and BSSID since connection is
-	 * no longer valid.
-	 */
-	memset(&priv->curbssparams.bssid, 0, ETH_ALEN);
-	memset(&priv->curbssparams.ssid, 0, IEEE80211_MAX_SSID_LEN);
-	priv->curbssparams.ssid_len = 0;
-
 	if (priv->psstate != PS_STATE_FULL_POWER) {
 		/* make firmware to exit PS mode */
 		lbs_deb_cmd("disconnected, so exit PS mode\n");
@@ -117,6 +97,52 @@
 	return ret;
 }
 
+/**
+ *  @brief This function parses the 802.11d domain info response from the firmware
+ *  @param resp    pointer to command response buffer
+ *  @return        0 on success, -1 on error
+ */
+static int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
+{
+	struct cmd_ds_802_11d_domain_info *domaininfo =
+			&resp->params.domaininforesp;
+	struct mrvl_ie_domain_param_set *domain = &domaininfo->domain;
+	u16 action = le16_to_cpu(domaininfo->action);
+	s16 ret = 0;
+	u8 nr_triplet = 0;
+
+	lbs_deb_enter(LBS_DEB_11D);
+
+	lbs_deb_hex(LBS_DEB_11D, "domain info resp", (u8 *) resp,
+			(int)le16_to_cpu(resp->size));
+
+	nr_triplet = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) /
+		sizeof(struct ieee80211_country_ie_triplet);
+
+	lbs_deb_11d("domain info resp: nr_triplet %d\n", nr_triplet);
+
+	if (nr_triplet > MRVDRV_MAX_TRIPLET_802_11D) {
+		lbs_deb_11d("invalid number of triplets returned!!\n");
+		return -1;
+	}
+
+	switch (action) {
+	case CMD_ACT_SET:	/*Proc set action */
+		break;
+
+	case CMD_ACT_GET:
+		break;
+	default:
+		lbs_deb_11d("invalid action:%d\n", domaininfo->action);
+		ret = -1;
+		break;
+	}
+
+	lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
+	return ret;
+}
+
 static inline int handle_cmd_response(struct lbs_private *priv,
 				      struct cmd_header *cmd_response)
 {
@@ -150,6 +176,10 @@
 		ret = lbs_ret_802_11_rssi(priv, resp);
 		break;
 
+	case CMD_RET(CMD_802_11D_DOMAIN_INFO):
+		ret = lbs_ret_802_11d_domain_info(resp);
+		break;
+
 	case CMD_RET(CMD_802_11_TPC_CFG):
 		spin_lock_irqsave(&priv->driver_lock, flags);
 		memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg,
@@ -261,7 +291,7 @@
 			 * ad-hoc mode. It takes place in
 			 * lbs_execute_next_command().
 			 */
-			if (priv->mode == IW_MODE_ADHOC &&
+			if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR &&
 			    action == CMD_SUBCMD_ENTER_PS)
 				priv->psmode = LBS802_11POWERMODECAM;
 		} else if (action == CMD_SUBCMD_ENTER_PS) {
@@ -341,32 +371,10 @@
 	return ret;
 }
 
-static int lbs_send_confirmwake(struct lbs_private *priv)
-{
-	struct cmd_header cmd;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_HOST);
-
-	cmd.command = cpu_to_le16(CMD_802_11_WAKEUP_CONFIRM);
-	cmd.size = cpu_to_le16(sizeof(cmd));
-	cmd.seqnum = cpu_to_le16(++priv->seqnum);
-	cmd.result = 0;
-
-	lbs_deb_hex(LBS_DEB_HOST, "wake confirm", (u8 *) &cmd,
-		sizeof(cmd));
-
-	ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) &cmd, sizeof(cmd));
-	if (ret)
-		lbs_pr_alert("SEND_WAKEC_CMD: Host to Card failed for Confirm Wake\n");
-
-	lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret);
-	return ret;
-}
-
 int lbs_process_event(struct lbs_private *priv, u32 event)
 {
 	int ret = 0;
+	struct cmd_header cmd;
 
 	lbs_deb_enter(LBS_DEB_CMD);
 
@@ -410,7 +418,10 @@
 		if (priv->reset_deep_sleep_wakeup)
 			priv->reset_deep_sleep_wakeup(priv);
 		priv->is_deep_sleep = 0;
-		lbs_send_confirmwake(priv);
+		lbs_cmd_async(priv, CMD_802_11_WAKEUP_CONFIRM, &cmd,
+				sizeof(cmd));
+		priv->is_host_sleep_activated = 0;
+		wake_up_interruptible(&priv->host_sleep_q);
 		break;
 
 	case MACREG_INT_CODE_DEEP_SLEEP_AWAKE:
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index de2caac..1736746 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -1,18 +1,13 @@
-#include <linux/module.h>
 #include <linux/dcache.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/slab.h>
-#include <net/iw_handler.h>
-#include <net/lib80211.h>
 
-#include "dev.h"
 #include "decl.h"
-#include "host.h"
-#include "debugfs.h"
 #include "cmd.h"
+#include "debugfs.h"
 
 static struct dentry *lbs_dir;
 static char *szStates[] = {
@@ -60,51 +55,6 @@
 	return res;
 }
 
-
-static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
-				  size_t count, loff_t *ppos)
-{
-	struct lbs_private *priv = file->private_data;
-	size_t pos = 0;
-	int numscansdone = 0, res;
-	unsigned long addr = get_zeroed_page(GFP_KERNEL);
-	char *buf = (char *)addr;
-	DECLARE_SSID_BUF(ssid);
-	struct bss_descriptor * iter_bss;
-	if (!buf)
-		return -ENOMEM;
-
-	pos += snprintf(buf+pos, len-pos,
-		"# | ch  | rssi |       bssid       |   cap    | Qual | SSID\n");
-
-	mutex_lock(&priv->lock);
-	list_for_each_entry (iter_bss, &priv->network_list, list) {
-		u16 ibss = (iter_bss->capability & WLAN_CAPABILITY_IBSS);
-		u16 privacy = (iter_bss->capability & WLAN_CAPABILITY_PRIVACY);
-		u16 spectrum_mgmt = (iter_bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT);
-
-		pos += snprintf(buf+pos, len-pos, "%02u| %03d | %04d | %pM |",
-			numscansdone, iter_bss->channel, iter_bss->rssi,
-			iter_bss->bssid);
-		pos += snprintf(buf+pos, len-pos, " %04x-", iter_bss->capability);
-		pos += snprintf(buf+pos, len-pos, "%c%c%c |",
-				ibss ? 'A' : 'I', privacy ? 'P' : ' ',
-				spectrum_mgmt ? 'S' : ' ');
-		pos += snprintf(buf+pos, len-pos, " %04d |", SCAN_RSSI(iter_bss->rssi));
-		pos += snprintf(buf+pos, len-pos, " %s\n",
-		                print_ssid(ssid, iter_bss->ssid,
-					   iter_bss->ssid_len));
-
-		numscansdone++;
-	}
-	mutex_unlock(&priv->lock);
-
-	res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
-
-	free_page(addr);
-	return res;
-}
-
 static ssize_t lbs_sleepparams_write(struct file *file,
 				const char __user *user_buf, size_t count,
 				loff_t *ppos)
@@ -723,8 +673,6 @@
 
 static const struct lbs_debugfs_files debugfs_files[] = {
 	{ "info", 0444, FOPS(lbs_dev_info, write_file_dummy), },
-	{ "getscantable", 0444, FOPS(lbs_getscantable,
-					write_file_dummy), },
 	{ "sleepparams", 0644, FOPS(lbs_sleepparams_read,
 				lbs_sleepparams_write), },
 };
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 709ffca..ba5438a 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -1,3 +1,4 @@
+
 /**
   *  This file contains declaration referring to
   *  functions defined in other source files
@@ -12,6 +13,7 @@
 struct lbs_private;
 struct sk_buff;
 struct net_device;
+struct cmd_ds_command;
 
 
 /* ethtool.c */
@@ -34,11 +36,13 @@
 void lbs_stop_card(struct lbs_private *priv);
 void lbs_host_to_card_done(struct lbs_private *priv);
 
+int lbs_rtap_supported(struct lbs_private *priv);
+
 int lbs_set_mac_address(struct net_device *dev, void *addr);
 void lbs_set_multicast_list(struct net_device *dev);
 
 int lbs_suspend(struct lbs_private *priv);
-void lbs_resume(struct lbs_private *priv);
+int lbs_resume(struct lbs_private *priv);
 
 void lbs_queue_event(struct lbs_private *priv, u32 event);
 void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
@@ -49,5 +53,9 @@
 u32 lbs_fw_index_to_data_rate(u8 index);
 u8 lbs_data_rate_to_fw_index(u32 rate);
 
+int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
+		struct cmd_ds_command *cmd, u16 cmdoption);
+
+int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp);
 
 #endif
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index a54880e..4536d9c 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -7,8 +7,8 @@
 #define _LBS_DEV_H_
 
 #include "mesh.h"
-#include "scan.h"
-#include "assoc.h"
+#include "defs.h"
+#include "host.h"
 
 #include <linux/kfifo.h>
 
@@ -29,7 +29,6 @@
 	/* Basic networking */
 	struct net_device *dev;
 	u32 connect_status;
-	int infra_open;
 	struct work_struct mcast_work;
 	u32 nr_of_multicastmacaddr;
 	u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
@@ -37,6 +36,9 @@
 	/* CFG80211 */
 	struct wireless_dev *wdev;
 	bool wiphy_registered;
+	struct cfg80211_scan_request *scan_req;
+	u8 assoc_bss[ETH_ALEN];
+	u8 disassoc_reason;
 
 	/* Mesh */
 	struct net_device *mesh_dev; /* Virtual device */
@@ -49,10 +51,6 @@
 	u8 mesh_ssid_len;
 #endif
 
-	/* Monitor mode */
-	struct net_device *rtap_net_dev;
-	u32 monitormode;
-
 	/* Debugfs */
 	struct dentry *debugfs_dir;
 	struct dentry *debugfs_debug;
@@ -62,6 +60,9 @@
 	struct dentry *regs_dir;
 	struct dentry *debugfs_regs_files[6];
 
+	/** 11D and domain regulatory data */
+	struct lbs_802_11d_domain_reg domain_reg;
+
 	/* Hardware debugging */
 	u32 mac_offset;
 	u32 bbp_offset;
@@ -75,6 +76,7 @@
 
 	/* Deep sleep */
 	int is_deep_sleep;
+	int deep_sleep_required;
 	int is_auto_deep_sleep_enabled;
 	int wakeup_dev_required;
 	int is_activity_detected;
@@ -82,6 +84,11 @@
 	wait_queue_head_t ds_awake_q;
 	struct timer_list auto_deepsleep_timer;
 
+	/* Host sleep*/
+	int is_host_sleep_configured;
+	int is_host_sleep_activated;
+	wait_queue_head_t host_sleep_q;
+
 	/* Hardware access */
 	void *card;
 	u8 fw_ready;
@@ -127,14 +134,10 @@
 	struct workqueue_struct *work_thread;
 
 	/** Encryption stuff */
-	struct lbs_802_11_security secinfo;
-	struct enc_key wpa_mcast_key;
-	struct enc_key wpa_unicast_key;
-	u8 wpa_ie[MAX_WPA_IE_LEN];
-	u8 wpa_ie_len;
-	u16 wep_tx_keyidx;
-	struct enc_key wep_keys[4];
 	u8 authtype_auto;
+	u8 wep_tx_key;
+	u8 wep_key[4][WLAN_KEY_LEN_WEP104];
+	u8 wep_key_len[4];
 
 	/* Wake On LAN */
 	uint32_t wol_criteria;
@@ -155,6 +158,7 @@
 	/* NIC/link operation characteristics */
 	u16 mac_control;
 	u8 radio_on;
+	u8 cur_rate;
 	u8 channel;
 	s16 txpower_cur;
 	s16 txpower_min;
@@ -163,42 +167,6 @@
 	/** Scanning */
 	struct delayed_work scan_work;
 	int scan_channel;
-	/* remember which channel was scanned last, != 0 if currently scanning */
-	u8 scan_ssid[IEEE80211_MAX_SSID_LEN + 1];
-	u8 scan_ssid_len;
-
-	/* Associating */
-	struct delayed_work assoc_work;
-	struct current_bss_params curbssparams;
-	u8 mode;
-	struct list_head network_list;
-	struct list_head network_free_list;
-	struct bss_descriptor *networks;
-	struct assoc_request * pending_assoc_req;
-	struct assoc_request * in_progress_assoc_req;
-	uint16_t enablehwauto;
-
-	/* ADHOC */
-	u16 beacon_period;
-	u8 beacon_enable;
-	u8 adhoccreate;
-
-	/* WEXT */
-	char name[DEV_NAME_LEN];
-	u8 nodename[16];
-	struct iw_statistics wstats;
-	u8 cur_rate;
-#define	MAX_REGION_CHANNEL_NUM	2
-	struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
-
-	/** Requested Signal Strength*/
-	u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG];
-	u16 NF[MAX_TYPE_B][MAX_TYPE_AVG];
-	u8 RSSI[MAX_TYPE_B][MAX_TYPE_AVG];
-	u8 rawSNR[DEFAULT_DATA_AVG_FACTOR];
-	u8 rawNF[DEFAULT_DATA_AVG_FACTOR];
-	u16 nextSNRNF;
-	u16 numSNRNF;
 };
 
 extern struct cmd_confirm_sleep confirm_sleep;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 3804a58..50193aa 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -2,13 +2,8 @@
 #include <linux/ethtool.h>
 #include <linux/delay.h>
 
-#include "host.h"
 #include "decl.h"
-#include "defs.h"
-#include "dev.h"
-#include "wext.h"
 #include "cmd.h"
-#include "mesh.h"
 
 
 static void lbs_ethtool_get_drvinfo(struct net_device *dev,
@@ -69,14 +64,11 @@
 {
 	struct lbs_private *priv = dev->ml_priv;
 
-	if (priv->wol_criteria == 0xffffffff) {
-		/* Interface driver didn't configure wake */
-		wol->supported = wol->wolopts = 0;
-		return;
-	}
-
 	wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY;
 
+	if (priv->wol_criteria == EHS_REMOVE_WAKEUP)
+		return;
+
 	if (priv->wol_criteria & EHS_WAKE_ON_UNICAST_DATA)
 		wol->wolopts |= WAKE_UCAST;
 	if (priv->wol_criteria & EHS_WAKE_ON_MULTICAST_DATA)
@@ -91,23 +83,22 @@
 			       struct ethtool_wolinfo *wol)
 {
 	struct lbs_private *priv = dev->ml_priv;
-	uint32_t criteria = 0;
 
 	if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY))
 		return -EOPNOTSUPP;
 
+	priv->wol_criteria = 0;
 	if (wol->wolopts & WAKE_UCAST)
-		criteria |= EHS_WAKE_ON_UNICAST_DATA;
+		priv->wol_criteria |= EHS_WAKE_ON_UNICAST_DATA;
 	if (wol->wolopts & WAKE_MCAST)
-		criteria |= EHS_WAKE_ON_MULTICAST_DATA;
+		priv->wol_criteria |= EHS_WAKE_ON_MULTICAST_DATA;
 	if (wol->wolopts & WAKE_BCAST)
-		criteria |= EHS_WAKE_ON_BROADCAST_DATA;
+		priv->wol_criteria |= EHS_WAKE_ON_BROADCAST_DATA;
 	if (wol->wolopts & WAKE_PHY)
-		criteria |= EHS_WAKE_ON_MAC_EVENT;
+		priv->wol_criteria |= EHS_WAKE_ON_MAC_EVENT;
 	if (wol->wolopts == 0)
-		criteria |= EHS_REMOVE_WAKEUP;
-
-	return lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL);
+		priv->wol_criteria |= EHS_REMOVE_WAKEUP;
+	return 0;
 }
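
With this change the driver only records the requested criteria here; the actual host-sleep command is sent to the firmware later, from the suspend path (see lbs_suspend() further below). From userspace the criteria are still set through the standard ethtool WOL interface, roughly equivalent to "ethtool -s wlan0 wol u"; a minimal sketch using the SIOCETHTOOL ioctl directly (the interface name wlan0 is just an example):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL,
				       .wolopts = WAKE_UCAST };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);	/* example name */
	ifr.ifr_data = (char *)&wol;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)	/* ends up in set_wol() */
		perror("SIOCETHTOOL");
	close(fd);
	return 0;
}
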
 
 const struct ethtool_ops lbs_ethtool_ops = {
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 3809c0b..db8e209 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -326,7 +326,7 @@
 	u8 pktdelay_2ms;
 	/* reserved */
 	u8 reserved1;
-} __attribute__ ((packed));
+} __packed;
 
 /* RxPD Descriptor */
 struct rxpd {
@@ -339,8 +339,8 @@
 			u8 bss_type;
 			/* BSS number */
 			u8 bss_num;
-		} __attribute__ ((packed)) bss;
-	} __attribute__ ((packed)) u;
+		} __packed bss;
+	} __packed u;
 
 	/* SNR */
 	u8 snr;
@@ -366,14 +366,14 @@
 	/* Pkt Priority */
 	u8 priority;
 	u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_header {
 	__le16 command;
 	__le16 size;
 	__le16 seqnum;
 	__le16 result;
-} __attribute__ ((packed));
+} __packed;
 
 /* Generic structure to hold all key types. */
 struct enc_key {
@@ -387,6 +387,30 @@
 struct lbs_offset_value {
 	u32 offset;
 	u32 value;
+} __packed;
+
+#define MRVDRV_MAX_TRIPLET_802_11D              83
+
+#define COUNTRY_CODE_LEN                        3
+
+struct mrvl_ie_domain_param_set {
+	struct mrvl_ie_header header;
+
+	u8 countrycode[COUNTRY_CODE_LEN];
+	struct ieee80211_country_ie_triplet triplet[1];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11d_domain_info {
+	__le16 action;
+	struct mrvl_ie_domain_param_set domain;
+} __attribute__ ((packed));
+
+struct lbs_802_11d_domain_reg {
+	/** Country code */
+	u8 country_code[COUNTRY_CODE_LEN];
+	/** Number of triplets */
+	u8 no_triplet;
+	struct ieee80211_country_ie_triplet triplet[MRVDRV_MAX_TRIPLET_802_11D];
 } __attribute__ ((packed));
 
 /*
@@ -426,7 +450,7 @@
 
 	/*FW/HW capability */
 	__le32 fwcapinfo;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_subscribe_event {
 	struct cmd_header hdr;
@@ -440,7 +464,7 @@
 	 * bump this up a bit.
 	 */
 	uint8_t tlv[128];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * This scan handle Country Information IE(802.11d compliant)
@@ -452,7 +476,7 @@
 	uint8_t bsstype;
 	uint8_t bssid[ETH_ALEN];
 	uint8_t tlvbuffer[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_scan_rsp {
 	struct cmd_header hdr;
@@ -460,7 +484,7 @@
 	__le16 bssdescriptsize;
 	uint8_t nr_sets;
 	uint8_t bssdesc_and_tlvbuffer[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_get_log {
 	struct cmd_header hdr;
@@ -478,20 +502,20 @@
 	__le32 fcserror;
 	__le32 txframe;
 	__le32 wepundecryptable;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_mac_control {
 	struct cmd_header hdr;
 	__le16 action;
 	u16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_mac_multicast_adr {
 	struct cmd_header hdr;
 	__le16 action;
 	__le16 nr_of_adrs;
 	u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_authenticate {
 	struct cmd_header hdr;
@@ -499,14 +523,14 @@
 	u8 bssid[ETH_ALEN];
 	u8 authtype;
 	u8 reserved[10];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_deauthenticate {
 	struct cmd_header hdr;
 
 	u8 macaddr[ETH_ALEN];
 	__le16 reasoncode;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_associate {
 	struct cmd_header hdr;
@@ -517,7 +541,7 @@
 	__le16 bcnperiod;
 	u8 dtimperiod;
 	u8 iebuf[512];    /* Enough for required and most optional IEs */
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_associate_response {
 	struct cmd_header hdr;
@@ -526,7 +550,7 @@
 	__le16 statuscode;
 	__le16 aid;
 	u8 iebuf[512];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_set_wep {
 	struct cmd_header hdr;
@@ -540,7 +564,7 @@
 	/* 40, 128bit or TXWEP */
 	uint8_t keytype[4];
 	uint8_t keymaterial[4][16];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_snmp_mib {
 	struct cmd_header hdr;
@@ -549,40 +573,40 @@
 	__le16 oid;
 	__le16 bufsize;
 	u8 value[128];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_mac_reg_access {
 	__le16 action;
 	__le16 offset;
 	__le32 value;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_bbp_reg_access {
 	__le16 action;
 	__le16 offset;
 	u8 value;
 	u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_rf_reg_access {
 	__le16 action;
 	__le16 offset;
 	u8 value;
 	u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_radio_control {
 	struct cmd_header hdr;
 
 	__le16 action;
 	__le16 control;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_beacon_control {
 	__le16 action;
 	__le16 beacon_enable;
 	__le16 beacon_period;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_sleep_params {
 	struct cmd_header hdr;
@@ -607,7 +631,7 @@
 
 	/* reserved field, should be set to zero */
 	__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_rf_channel {
 	struct cmd_header hdr;
@@ -617,7 +641,7 @@
 	__le16 rftype;      /* unused */
 	__le16 reserved;    /* unused */
 	u8 channellist[32]; /* unused */
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_rssi {
 	/* weighting factor */
@@ -626,21 +650,21 @@
 	__le16 reserved_0;
 	__le16 reserved_1;
 	__le16 reserved_2;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_rssi_rsp {
 	__le16 SNR;
 	__le16 noisefloor;
 	__le16 avgSNR;
 	__le16 avgnoisefloor;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_mac_address {
 	struct cmd_header hdr;
 
 	__le16 action;
 	u8 macadd[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_rf_tx_power {
 	struct cmd_header hdr;
@@ -649,26 +673,26 @@
 	__le16 curlevel;
 	s8 maxlevel;
 	s8 minlevel;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_monitor_mode {
 	__le16 action;
 	__le16 mode;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_set_boot2_ver {
 	struct cmd_header hdr;
 
 	__le16 action;
 	__le16 version;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_fw_wake_method {
 	struct cmd_header hdr;
 
 	__le16 action;
 	__le16 method;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_ps_mode {
 	__le16 action;
@@ -676,7 +700,7 @@
 	__le16 multipledtim;
 	__le16 reserved;
 	__le16 locallisteninterval;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_confirm_sleep {
 	struct cmd_header hdr;
@@ -686,7 +710,7 @@
 	__le16 multipledtim;
 	__le16 reserved;
 	__le16 locallisteninterval;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_data_rate {
 	struct cmd_header hdr;
@@ -694,14 +718,14 @@
 	__le16 action;
 	__le16 reserved;
 	u8 rates[MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_rate_adapt_rateset {
 	struct cmd_header hdr;
 	__le16 action;
 	__le16 enablehwauto;
 	__le16 bitmap;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_ad_hoc_start {
 	struct cmd_header hdr;
@@ -718,14 +742,14 @@
 	__le16 capability;
 	u8 rates[MAX_RATES];
 	u8 tlv_memory_size_pad[100];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_ad_hoc_result {
 	struct cmd_header hdr;
 
 	u8 pad[3];
 	u8 bssid[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
 
 struct adhoc_bssdesc {
 	u8 bssid[ETH_ALEN];
@@ -746,7 +770,7 @@
 	 * Adhoc join command and will cause a binary layout mismatch with
 	 * the firmware
 	 */
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_ad_hoc_join {
 	struct cmd_header hdr;
@@ -754,18 +778,18 @@
 	struct adhoc_bssdesc bss;
 	__le16 failtimeout;   /* Reserved on v9 and later */
 	__le16 probedelay;    /* Reserved on v9 and later */
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_ad_hoc_stop {
 	struct cmd_header hdr;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_enable_rsn {
 	struct cmd_header hdr;
 
 	__le16 action;
 	__le16 enable;
-} __attribute__ ((packed));
+} __packed;
 
 struct MrvlIEtype_keyParamSet {
 	/* type ID */
@@ -785,7 +809,7 @@
 
 	/* key material of size keylen */
 	u8 key[32];
-} __attribute__ ((packed));
+} __packed;
 
 #define MAX_WOL_RULES 		16
 
@@ -797,7 +821,7 @@
 	__le16 reserve;
 	__be32 sig_mask;
 	__be32 signature;
-} __attribute__ ((packed));
+} __packed;
 
 struct wol_config {
 	uint8_t action;
@@ -805,7 +829,7 @@
 	uint8_t no_rules_in_cmd;
 	uint8_t result;
 	struct host_wol_rule rule[MAX_WOL_RULES];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_host_sleep {
 	struct cmd_header hdr;
@@ -813,7 +837,7 @@
 	uint8_t gpio;
 	uint16_t gap;
 	struct wol_config wol_conf;
-} __attribute__ ((packed));
+} __packed;
 
 
 
@@ -822,7 +846,7 @@
 
 	__le16 action;
 	struct MrvlIEtype_keyParamSet keyParamSet[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_eeprom_access {
 	struct cmd_header hdr;
@@ -832,7 +856,7 @@
 	/* firmware says it returns a maximum of 20 bytes */
 #define LBS_EEPROM_READ_LEN 20
 	u8 value[LBS_EEPROM_READ_LEN];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_tpc_cfg {
 	struct cmd_header hdr;
@@ -843,7 +867,7 @@
 	int8_t P1;
 	int8_t P2;
 	uint8_t usesnr;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct cmd_ds_802_11_pa_cfg {
@@ -854,14 +878,14 @@
 	int8_t P0;
 	int8_t P1;
 	int8_t P2;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct cmd_ds_802_11_led_ctrl {
 	__le16 action;
 	__le16 numled;
 	u8 data[256];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_802_11_afc {
 	__le16 afc_auto;
@@ -875,22 +899,22 @@
 			__le16 carrier_offset; /* signed */
 		};
 	};
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_tx_rate_query {
 	__le16 txrate;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_get_tsf {
 	__le64 tsfvalue;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_bt_access {
 	__le16 action;
 	__le32 id;
 	u8 addr1[ETH_ALEN];
 	u8 addr2[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_fwt_access {
 	__le16 action;
@@ -910,7 +934,7 @@
 	__le32 snr;
 	__le32 references;
 	u8 prec[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_mesh_config {
 	struct cmd_header hdr;
@@ -920,14 +944,14 @@
 	__le16 type;
 	__le16 length;
 	u8 data[128];	/* last position reserved */
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_mesh_access {
 	struct cmd_header hdr;
 
 	__le16 action;
 	__le32 data[32];	/* last position reserved */
-} __attribute__ ((packed));
+} __packed;
 
 /* Number of stats counters returned by the firmware */
 #define MESH_STATS_NUM 8
@@ -949,6 +973,9 @@
 		struct cmd_ds_bbp_reg_access bbpreg;
 		struct cmd_ds_rf_reg_access rfreg;
 
+		struct cmd_ds_802_11d_domain_info domaininfo;
+		struct cmd_ds_802_11d_domain_info domaininforesp;
+
 		struct cmd_ds_802_11_tpc_cfg tpccfg;
 		struct cmd_ds_802_11_afc afc;
 		struct cmd_ds_802_11_led_ctrl ledgpio;
@@ -957,6 +984,5 @@
 		struct cmd_ds_fwt_access fwt;
 		struct cmd_ds_802_11_beacon_control bcn_ctrl;
 	} params;
-} __attribute__ ((packed));
-
+} __packed;
 #endif
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 64dd345..6e71346 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1182,11 +1182,69 @@
 	lbs_deb_leave(LBS_DEB_SDIO);
 }
 
+static int if_sdio_suspend(struct device *dev)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+	int ret;
+	struct if_sdio_card *card = sdio_get_drvdata(func);
+
+	mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
+
+	lbs_pr_info("%s: suspend: PM flags = 0x%x\n",
+						sdio_func_id(func), flags);
+
+	/* If we aren't being asked to wake on anything, we should bail out
+	 * and let the SD stack power down the card.
+	 */
+	if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+		lbs_pr_info("Suspend without wake params -- "
+						"powering down card.");
+		return -ENOSYS;
+	}
+
+	if (!(flags & MMC_PM_KEEP_POWER)) {
+		lbs_pr_err("%s: cannot remain alive while host is suspended\n",
+			sdio_func_id(func));
+		return -ENOSYS;
+	}
+
+	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+	if (ret)
+		return ret;
+
+	ret = lbs_suspend(card->priv);
+	if (ret)
+		return ret;
+
+	return sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+}
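
In practice this handler runs when the system suspends; once WOL criteria have been configured (see the ethtool sketch earlier), a minimal userspace trigger is writing "mem" to /sys/power/state (sketch only; assumes suspend support and sufficient privileges):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/power/state", "w");

	if (!f) {
		perror("/sys/power/state");
		return 1;
	}
	/* suspend to RAM; the SDIO suspend callback above runs on the way down */
	if (fputs("mem", f) == EOF)
		perror("write");
	fclose(f);
	return 0;
}
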
+
+static int if_sdio_resume(struct device *dev)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+	struct if_sdio_card *card = sdio_get_drvdata(func);
+	int ret;
+
+	lbs_pr_info("%s: resume: we're back\n", sdio_func_id(func));
+
+	ret = lbs_resume(card->priv);
+
+	return ret;
+}
+
+static const struct dev_pm_ops if_sdio_pm_ops = {
+	.suspend	= if_sdio_suspend,
+	.resume		= if_sdio_resume,
+};
+
 static struct sdio_driver if_sdio_driver = {
 	.name		= "libertas_sdio",
 	.id_table	= if_sdio_ids,
 	.probe		= if_sdio_probe,
 	.remove		= if_sdio_remove,
+	.drv = {
+		.pm = &if_sdio_pm_ops,
+	},
 };
 
 /*******************************************************************/
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index f41594c..3678e53 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -613,16 +613,14 @@
 		return;
 	}
 
-	syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
+	syncfwheader = kmemdup(skb->data + IPFIELD_ALIGN_OFFSET,
+			       sizeof(struct fwsyncheader), GFP_ATOMIC);
 	if (!syncfwheader) {
 		lbs_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
 		kfree_skb(skb);
 		return;
 	}
 
-	memcpy(syncfwheader, skb->data + IPFIELD_ALIGN_OFFSET,
-	       sizeof(struct fwsyncheader));
-
 	if (!syncfwheader->cmd) {
 		lbs_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
 		lbs_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
@@ -1043,6 +1041,12 @@
 	if (priv->psstate != PS_STATE_FULL_POWER)
 		return -1;
 
+	if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+		lbs_pr_info("Suspend attempt without "
+						"configuring wake params!\n");
+		return -ENOSYS;
+	}
+
 	ret = lbs_suspend(priv);
 	if (ret)
 		goto out;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index d9b8ee1..b519fc7 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -11,20 +11,14 @@
 #include <linux/if_arp.h>
 #include <linux/kthread.h>
 #include <linux/kfifo.h>
-#include <linux/stddef.h>
-#include <linux/ieee80211.h>
 #include <linux/slab.h>
-#include <net/iw_handler.h>
 #include <net/cfg80211.h>
 
 #include "host.h"
 #include "decl.h"
 #include "dev.h"
-#include "wext.h"
 #include "cfg.h"
 #include "debugfs.h"
-#include "scan.h"
-#include "assoc.h"
 #include "cmd.h"
 
 #define DRIVER_RELEASE_VERSION "323.p0"
@@ -96,72 +90,6 @@
 }
 
 
-static int lbs_add_rtap(struct lbs_private *priv);
-static void lbs_remove_rtap(struct lbs_private *priv);
-
-
-/**
- * Get function for sysfs attribute rtap
- */
-static ssize_t lbs_rtap_get(struct device *dev,
-		struct device_attribute *attr, char * buf)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	return snprintf(buf, 5, "0x%X\n", priv->monitormode);
-}
-
-/**
- *  Set function for sysfs attribute rtap
- */
-static ssize_t lbs_rtap_set(struct device *dev,
-		struct device_attribute *attr, const char * buf, size_t count)
-{
-	int monitor_mode;
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-
-	sscanf(buf, "%x", &monitor_mode);
-	if (monitor_mode) {
-		if (priv->monitormode == monitor_mode)
-			return strlen(buf);
-		if (!priv->monitormode) {
-			if (priv->infra_open || lbs_mesh_open(priv))
-				return -EBUSY;
-			if (priv->mode == IW_MODE_INFRA)
-				lbs_cmd_80211_deauthenticate(priv,
-							     priv->curbssparams.bssid,
-							     WLAN_REASON_DEAUTH_LEAVING);
-			else if (priv->mode == IW_MODE_ADHOC)
-				lbs_adhoc_stop(priv);
-			lbs_add_rtap(priv);
-		}
-		priv->monitormode = monitor_mode;
-	} else {
-		if (!priv->monitormode)
-			return strlen(buf);
-		priv->monitormode = 0;
-		lbs_remove_rtap(priv);
-
-		if (priv->currenttxskb) {
-			dev_kfree_skb_any(priv->currenttxskb);
-			priv->currenttxskb = NULL;
-		}
-
-		/* Wake queues, command thread, etc. */
-		lbs_host_to_card_done(priv);
-	}
-
-	lbs_prepare_and_send_command(priv,
-			CMD_802_11_MONITOR_MODE, CMD_ACT_SET,
-			CMD_OPTION_WAITFORRSP, 0, &priv->monitormode);
-	return strlen(buf);
-}
-
-/**
- * lbs_rtap attribute to be exported per ethX interface
- * through sysfs (/sys/class/net/ethX/lbs_rtap)
- */
-static DEVICE_ATTR(lbs_rtap, 0644, lbs_rtap_get, lbs_rtap_set );
-
 /**
  *  @brief This function opens the ethX interface
  *
@@ -177,13 +105,6 @@
 
 	spin_lock_irq(&priv->driver_lock);
 
-	if (priv->monitormode) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	priv->infra_open = 1;
-
 	if (priv->connect_status == LBS_CONNECTED)
 		netif_carrier_on(dev);
 	else
@@ -191,7 +112,6 @@
 
 	if (!priv->tx_pending_len)
 		netif_wake_queue(dev);
- out:
 
 	spin_unlock_irq(&priv->driver_lock);
 	lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
@@ -211,7 +131,6 @@
 	lbs_deb_enter(LBS_DEB_NET);
 
 	spin_lock_irq(&priv->driver_lock);
-	priv->infra_open = 0;
 	netif_stop_queue(dev);
 	spin_unlock_irq(&priv->driver_lock);
 
@@ -625,16 +544,13 @@
 	return 0;
 }
 
-static int lbs_suspend_callback(struct lbs_private *priv, unsigned long dummy,
-				struct cmd_header *cmd)
+static int lbs_ret_host_sleep_activate(struct lbs_private *priv,
+		unsigned long dummy,
+		struct cmd_header *cmd)
 {
 	lbs_deb_enter(LBS_DEB_FW);
-
-	netif_device_detach(priv->dev);
-	if (priv->mesh_dev)
-		netif_device_detach(priv->mesh_dev);
-
-	priv->fw_ready = 0;
+	priv->is_host_sleep_activated = 1;
+	wake_up_interruptible(&priv->host_sleep_q);
 	lbs_deb_leave(LBS_DEB_FW);
 	return 0;
 }
@@ -646,39 +562,65 @@
 
 	lbs_deb_enter(LBS_DEB_FW);
 
-	if (priv->wol_criteria == 0xffffffff) {
-		lbs_pr_info("Suspend attempt without configuring wake params!\n");
-		return -EINVAL;
+	if (priv->is_deep_sleep) {
+		ret = lbs_set_deep_sleep(priv, 0);
+		if (ret) {
+			lbs_pr_err("deep sleep cancellation failed: %d\n", ret);
+			return ret;
+		}
+		priv->deep_sleep_required = 1;
 	}
 
 	memset(&cmd, 0, sizeof(cmd));
+	ret = lbs_host_sleep_cfg(priv, priv->wol_criteria,
+						(struct wol_config *)NULL);
+	if (ret) {
+		lbs_pr_info("Host sleep configuration failed: %d\n", ret);
+		return ret;
+	}
+	if (priv->psstate == PS_STATE_FULL_POWER) {
+		ret = __lbs_cmd(priv, CMD_802_11_HOST_SLEEP_ACTIVATE, &cmd,
+				sizeof(cmd), lbs_ret_host_sleep_activate, 0);
+		if (ret)
+			lbs_pr_info("HOST_SLEEP_ACTIVATE failed: %d\n", ret);
+	}
 
-	ret = __lbs_cmd(priv, CMD_802_11_HOST_SLEEP_ACTIVATE, &cmd,
-			sizeof(cmd), lbs_suspend_callback, 0);
-	if (ret)
-		lbs_pr_info("HOST_SLEEP_ACTIVATE failed: %d\n", ret);
+	if (!wait_event_interruptible_timeout(priv->host_sleep_q,
+				priv->is_host_sleep_activated, (10 * HZ))) {
+		lbs_pr_err("host_sleep_q: timer expired\n");
+		ret = -1;
+	}
+	netif_device_detach(priv->dev);
+	if (priv->mesh_dev)
+		netif_device_detach(priv->mesh_dev);
 
 	lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(lbs_suspend);
 
-void lbs_resume(struct lbs_private *priv)
+int lbs_resume(struct lbs_private *priv)
 {
+	int ret;
+	uint32_t criteria = EHS_REMOVE_WAKEUP;
+
 	lbs_deb_enter(LBS_DEB_FW);
 
-	priv->fw_ready = 1;
-
-	/* Firmware doesn't seem to give us RX packets any more
-	   until we send it some command. Might as well update */
-	lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
-				     0, 0, NULL);
+	ret = lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL);
 
 	netif_device_attach(priv->dev);
 	if (priv->mesh_dev)
 		netif_device_attach(priv->mesh_dev);
 
-	lbs_deb_leave(LBS_DEB_FW);
+	if (priv->deep_sleep_required) {
+		priv->deep_sleep_required = 0;
+		ret = lbs_set_deep_sleep(priv, 1);
+		if (ret)
+			lbs_pr_err("deep sleep activation failed: %d\n", ret);
+	}
+
+	lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(lbs_resume);
 
@@ -710,6 +652,9 @@
 		priv->txpower_max = maxlevel;
 	}
 
+	/* Send cmd to FW to enable 11D function */
+	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
+
 	lbs_set_mac_control(priv);
 done:
 	lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
@@ -799,45 +744,27 @@
 
 static int lbs_init_adapter(struct lbs_private *priv)
 {
-	size_t bufsize;
-	int i, ret = 0;
+	int ret;
 
 	lbs_deb_enter(LBS_DEB_MAIN);
 
-	/* Allocate buffer to store the BSSID list */
-	bufsize = MAX_NETWORK_COUNT * sizeof(struct bss_descriptor);
-	priv->networks = kzalloc(bufsize, GFP_KERNEL);
-	if (!priv->networks) {
-		lbs_pr_err("Out of memory allocating beacons\n");
-		ret = -1;
-		goto out;
-	}
-
-	/* Initialize scan result lists */
-	INIT_LIST_HEAD(&priv->network_free_list);
-	INIT_LIST_HEAD(&priv->network_list);
-	for (i = 0; i < MAX_NETWORK_COUNT; i++) {
-		list_add_tail(&priv->networks[i].list,
-			      &priv->network_free_list);
-	}
-
 	memset(priv->current_addr, 0xff, ETH_ALEN);
 
 	priv->connect_status = LBS_DISCONNECTED;
-	priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
-	priv->mode = IW_MODE_INFRA;
 	priv->channel = DEFAULT_AD_HOC_CHANNEL;
 	priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
 	priv->radio_on = 1;
-	priv->enablehwauto = 1;
 	priv->psmode = LBS802_11POWERMODECAM;
 	priv->psstate = PS_STATE_FULL_POWER;
 	priv->is_deep_sleep = 0;
 	priv->is_auto_deep_sleep_enabled = 0;
+	priv->deep_sleep_required = 0;
 	priv->wakeup_dev_required = 0;
 	init_waitqueue_head(&priv->ds_awake_q);
 	priv->authtype_auto = 1;
-
+	priv->is_host_sleep_configured = 0;
+	priv->is_host_sleep_activated = 0;
+	init_waitqueue_head(&priv->host_sleep_q);
 	mutex_init(&priv->lock);
 
 	setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
@@ -881,8 +808,6 @@
 	kfifo_free(&priv->event_fifo);
 	del_timer(&priv->command_timer);
 	del_timer(&priv->auto_deepsleep_timer);
-	kfree(priv->networks);
-	priv->networks = NULL;
 
 	lbs_deb_leave(LBS_DEB_MAIN);
 }
@@ -919,7 +844,7 @@
 		lbs_pr_err("cfg80211 init failed\n");
 		goto done;
 	}
-	/* TODO? */
+
 	wdev->iftype = NL80211_IFTYPE_STATION;
 	priv = wdev_priv(wdev);
 	priv->wdev = wdev;
@@ -929,7 +854,6 @@
 		goto err_wdev;
 	}
 
-	//TODO? dev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
 	dev = alloc_netdev(0, "wlan%d", ether_setup);
 	if (!dev) {
 		dev_err(dmdev, "no memory for network device instance\n");
@@ -945,20 +869,10 @@
  	dev->netdev_ops = &lbs_netdev_ops;
 	dev->watchdog_timeo = 5 * HZ;
 	dev->ethtool_ops = &lbs_ethtool_ops;
-#ifdef	WIRELESS_EXT
-	dev->wireless_handlers = &lbs_handler_def;
-#endif
 	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
 
-
-	// TODO: kzalloc + iwm_init_default_profile(iwm, iwm->umac_profile); ??
-
-
 	priv->card = card;
-	priv->infra_open = 0;
 
-
-	priv->rtap_net_dev = NULL;
 	strcpy(dev->name, "wlan%d");
 
 	lbs_deb_thread("Starting main thread...\n");
@@ -970,12 +884,11 @@
 	}
 
 	priv->work_thread = create_singlethread_workqueue("lbs_worker");
-	INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
-	INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
 	INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
 
 	priv->wol_criteria = 0xffffffff;
 	priv->wol_gpio = 0xff;
+	priv->wol_gap = 20;
 
 	goto done;
 
@@ -1004,12 +917,10 @@
 	lbs_deb_enter(LBS_DEB_MAIN);
 
 	lbs_remove_mesh(priv);
-	lbs_remove_rtap(priv);
+	lbs_scan_deinit(priv);
 
 	dev = priv->dev;
 
-	cancel_delayed_work_sync(&priv->scan_work);
-	cancel_delayed_work_sync(&priv->assoc_work);
 	cancel_work_sync(&priv->mcast_work);
 
 	/* worker thread destruction blocks on the in-flight command which
@@ -1024,13 +935,15 @@
 		lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
 	}
 
-	lbs_send_disconnect_notification(priv);
-
 	if (priv->is_deep_sleep) {
 		priv->is_deep_sleep = 0;
 		wake_up_interruptible(&priv->ds_awake_q);
 	}
 
+	priv->is_host_sleep_configured = 0;
+	priv->is_host_sleep_activated = 0;
+	wake_up_interruptible(&priv->host_sleep_q);
+
 	/* Stop the thread servicing the interrupts */
 	priv->surpriseremoved = 1;
 	kthread_stop(priv->main_thread);
@@ -1046,7 +959,7 @@
 EXPORT_SYMBOL_GPL(lbs_remove_card);
 
 
-static int lbs_rtap_supported(struct lbs_private *priv)
+int lbs_rtap_supported(struct lbs_private *priv)
 {
 	if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
 		return 1;
@@ -1078,16 +991,6 @@
 
 	lbs_init_mesh(priv);
 
-	/*
-	 * While rtap isn't related to mesh, only mesh-enabled
-	 * firmware implements the rtap functionality via
-	 * CMD_802_11_MONITOR_MODE.
-	 */
-	if (lbs_rtap_supported(priv)) {
-		if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
-			lbs_pr_err("cannot register lbs_rtap attribute\n");
-	}
-
 	lbs_debugfs_init_one(priv, dev);
 
 	lbs_pr_info("%s: Marvell WLAN 802.11 adapter\n", dev->name);
@@ -1119,9 +1022,6 @@
 	lbs_debugfs_remove_one(priv);
 	lbs_deinit_mesh(priv);
 
-	if (lbs_rtap_supported(priv))
-		device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
-
 	/* Delete the timeout of the currently processing command */
 	del_timer_sync(&priv->command_timer);
 	del_timer_sync(&priv->auto_deepsleep_timer);
@@ -1208,87 +1108,6 @@
 	lbs_deb_leave(LBS_DEB_MAIN);
 }
 
-/*
- * rtap interface support fuctions
- */
-
-static int lbs_rtap_open(struct net_device *dev)
-{
-	/* Yes, _stop_ the queue. Because we don't support injection */
-	lbs_deb_enter(LBS_DEB_MAIN);
-	netif_carrier_off(dev);
-	netif_stop_queue(dev);
-	lbs_deb_leave(LBS_DEB_LEAVE);
-	return 0;
-}
-
-static int lbs_rtap_stop(struct net_device *dev)
-{
-	lbs_deb_enter(LBS_DEB_MAIN);
-	lbs_deb_leave(LBS_DEB_MAIN);
-	return 0;
-}
-
-static netdev_tx_t lbs_rtap_hard_start_xmit(struct sk_buff *skb,
-					    struct net_device *dev)
-{
-	netif_stop_queue(dev);
-	return NETDEV_TX_BUSY;
-}
-
-static void lbs_remove_rtap(struct lbs_private *priv)
-{
-	lbs_deb_enter(LBS_DEB_MAIN);
-	if (priv->rtap_net_dev == NULL)
-		goto out;
-	unregister_netdev(priv->rtap_net_dev);
-	free_netdev(priv->rtap_net_dev);
-	priv->rtap_net_dev = NULL;
-out:
-	lbs_deb_leave(LBS_DEB_MAIN);
-}
-
-static const struct net_device_ops rtap_netdev_ops = {
-	.ndo_open = lbs_rtap_open,
-	.ndo_stop = lbs_rtap_stop,
-	.ndo_start_xmit = lbs_rtap_hard_start_xmit,
-};
-
-static int lbs_add_rtap(struct lbs_private *priv)
-{
-	int ret = 0;
-	struct net_device *rtap_dev;
-
-	lbs_deb_enter(LBS_DEB_MAIN);
-	if (priv->rtap_net_dev) {
-		ret = -EPERM;
-		goto out;
-	}
-
-	rtap_dev = alloc_netdev(0, "rtap%d", ether_setup);
-	if (rtap_dev == NULL) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	memcpy(rtap_dev->dev_addr, priv->current_addr, ETH_ALEN);
-	rtap_dev->type = ARPHRD_IEEE80211_RADIOTAP;
-	rtap_dev->netdev_ops = &rtap_netdev_ops;
-	rtap_dev->ml_priv = priv;
-	SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent);
-
-	ret = register_netdev(rtap_dev);
-	if (ret) {
-		free_netdev(rtap_dev);
-		goto out;
-	}
-	priv->rtap_net_dev = rtap_dev;
-
-out:
-	lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
-	return ret;
-}
-
 module_init(lbs_init_module);
 module_exit(lbs_exit_module);
 
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index e385af1..bc5bc13 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -5,6 +5,7 @@
 #include <linux/if_arp.h>
 #include <linux/kthread.h>
 #include <linux/kfifo.h>
+#include <net/cfg80211.h>
 
 #include "mesh.h"
 #include "decl.h"
@@ -314,7 +315,7 @@
 
 	spin_lock_irq(&priv->driver_lock);
 
-	if (priv->monitormode) {
+	if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -369,9 +370,6 @@
 
 	SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
 
-#ifdef	WIRELESS_EXT
-	mesh_dev->wireless_handlers = &mesh_handler_def;
-#endif
 	mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
 	/* Register virtual mesh interface */
 	ret = register_netdev(mesh_dev);
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index e257330..84ea248 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -70,11 +70,6 @@
 void lbs_persist_config_remove(struct net_device *net);
 
 
-/* WEXT handler */
-
-extern struct iw_handler_def mesh_handler_def;
-
-
 /* Ethtool statistics */
 
 struct ethtool_stats;
diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h
index d16b264..b3c8ea6 100644
--- a/drivers/net/wireless/libertas/radiotap.h
+++ b/drivers/net/wireless/libertas/radiotap.h
@@ -6,7 +6,7 @@
 	u8 txpower;
 	u8 rts_retries;
 	u8 data_retries;
-} __attribute__ ((packed));
+} __packed;
 
 #define TX_RADIOTAP_PRESENT (				\
 	(1 << IEEE80211_RADIOTAP_RATE) |		\
@@ -34,7 +34,7 @@
 	u8 flags;
 	u8 rate;
 	u8 antsignal;
-} __attribute__ ((packed));
+} __packed;
 
 #define RX_RADIOTAP_PRESENT (			\
 	(1 << IEEE80211_RADIOTAP_FLAGS) |	\
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 7a377f5..a4d0bca 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -4,18 +4,19 @@
 #include <linux/etherdevice.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <net/cfg80211.h>
 
+#include "defs.h"
 #include "host.h"
 #include "radiotap.h"
 #include "decl.h"
 #include "dev.h"
-#include "wext.h"
 
 struct eth803hdr {
 	u8 dest_addr[6];
 	u8 src_addr[6];
 	u16 h803_len;
-} __attribute__ ((packed));
+} __packed;
 
 struct rfc1042hdr {
 	u8 llc_dsap;
@@ -23,114 +24,22 @@
 	u8 llc_ctrl;
 	u8 snap_oui[3];
 	u16 snap_type;
-} __attribute__ ((packed));
+} __packed;
 
 struct rxpackethdr {
 	struct eth803hdr eth803_hdr;
 	struct rfc1042hdr rfc1042_hdr;
-} __attribute__ ((packed));
+} __packed;
 
 struct rx80211packethdr {
 	struct rxpd rx_pd;
 	void *eth80211_hdr;
-} __attribute__ ((packed));
+} __packed;
 
 static int process_rxed_802_11_packet(struct lbs_private *priv,
 	struct sk_buff *skb);
 
 /**
- *  @brief	This function computes the avgSNR .
- *
- *  @param	priv	A pointer to struct lbs_private structure
- *  @return	avgSNR
- */
-static u8 lbs_getavgsnr(struct lbs_private *priv)
-{
-	u8 i;
-	u16 temp = 0;
-	if (priv->numSNRNF == 0)
-		return 0;
-	for (i = 0; i < priv->numSNRNF; i++)
-		temp += priv->rawSNR[i];
-	return (u8) (temp / priv->numSNRNF);
-
-}
-
-/**
- *  @brief	This function computes the AvgNF
- *
- *  @param	priv	A pointer to struct lbs_private structure
- *  @return	AvgNF
- */
-static u8 lbs_getavgnf(struct lbs_private *priv)
-{
-	u8 i;
-	u16 temp = 0;
-	if (priv->numSNRNF == 0)
-		return 0;
-	for (i = 0; i < priv->numSNRNF; i++)
-		temp += priv->rawNF[i];
-	return (u8) (temp / priv->numSNRNF);
-
-}
-
-/**
- *  @brief	This function save the raw SNR/NF to our internel buffer
- *
- *  @param	priv	A pointer to struct lbs_private structure
- *  @param	prxpd	A pointer to rxpd structure of received packet
- *  @return	n/a
- */
-static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
-{
-	if (priv->numSNRNF < DEFAULT_DATA_AVG_FACTOR)
-		priv->numSNRNF++;
-	priv->rawSNR[priv->nextSNRNF] = p_rx_pd->snr;
-	priv->rawNF[priv->nextSNRNF] = p_rx_pd->nf;
-	priv->nextSNRNF++;
-	if (priv->nextSNRNF >= DEFAULT_DATA_AVG_FACTOR)
-		priv->nextSNRNF = 0;
-}
-
-/**
- *  @brief	This function computes the RSSI in received packet.
- *
- *  @param	priv	A pointer to struct lbs_private structure
- *  @param	prxpd	A pointer to rxpd structure of received packet
- *  @return	n/a
- */
-static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
-{
-
-	lbs_deb_enter(LBS_DEB_RX);
-
-	lbs_deb_rx("rxpd: SNR %d, NF %d\n", p_rx_pd->snr, p_rx_pd->nf);
-	lbs_deb_rx("before computing SNR: SNR-avg = %d, NF-avg = %d\n",
-	       priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE,
-	       priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE);
-
-	priv->SNR[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->snr;
-	priv->NF[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->nf;
-	lbs_save_rawSNRNF(priv, p_rx_pd);
-
-	priv->SNR[TYPE_RXPD][TYPE_AVG] = lbs_getavgsnr(priv) * AVG_SCALE;
-	priv->NF[TYPE_RXPD][TYPE_AVG] = lbs_getavgnf(priv) * AVG_SCALE;
-	lbs_deb_rx("after computing SNR: SNR-avg = %d, NF-avg = %d\n",
-	       priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE,
-	       priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE);
-
-	priv->RSSI[TYPE_RXPD][TYPE_NOAVG] =
-	    CAL_RSSI(priv->SNR[TYPE_RXPD][TYPE_NOAVG],
-		     priv->NF[TYPE_RXPD][TYPE_NOAVG]);
-
-	priv->RSSI[TYPE_RXPD][TYPE_AVG] =
-	    CAL_RSSI(priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE,
-		     priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE);
-
-	lbs_deb_leave(LBS_DEB_RX);
-}
-
-/**
  *  @brief This function processes received packet and forwards it
  *  to kernel/upper layer
  *
@@ -154,7 +63,7 @@
 
 	skb->ip_summed = CHECKSUM_NONE;
 
-	if (priv->monitormode)
+	if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
 		return process_rxed_802_11_packet(priv, skb);
 
 	p_rx_pd = (struct rxpd *) skb->data;
@@ -225,13 +134,7 @@
 	 */
 	skb_pull(skb, hdrchop);
 
-	/* Take the data rate from the rxpd structure
-	 * only if the rate is auto
-	 */
-	if (priv->enablehwauto)
-		priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate);
-
-	lbs_compute_rssi(priv, p_rx_pd);
+	priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate);
 
 	lbs_deb_rx("rx data: size of actual packet %d\n", skb->len);
 	dev->stats.rx_bytes += skb->len;
@@ -352,20 +255,18 @@
 	pradiotap_hdr = (void *)skb_push(skb, sizeof(struct rx_radiotap_hdr));
 	memcpy(pradiotap_hdr, &radiotap_hdr, sizeof(struct rx_radiotap_hdr));
 
-	/* Take the data rate from the rxpd structure
-	 * only if the rate is auto
-	 */
-	if (priv->enablehwauto)
-		priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate);
-
-	lbs_compute_rssi(priv, prxpd);
+	priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate);
 
 	lbs_deb_rx("rx data: size of actual packet %d\n", skb->len);
 	dev->stats.rx_bytes += skb->len;
 	dev->stats.rx_packets++;
 
-	skb->protocol = eth_type_trans(skb, priv->rtap_net_dev);
-	netif_rx(skb);
+	skb->protocol = eth_type_trans(skb, priv->dev);
+
+	if (in_interrupt())
+		netif_rx(skb);
+	else
+		netif_rx_ni(skb);
 
 	ret = 0;
 
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
deleted file mode 100644
index 24cd54b..0000000
--- a/drivers/net/wireless/libertas/scan.c
+++ /dev/null
@@ -1,1354 +0,0 @@
-/**
-  * Functions implementing wlan scan IOCTL and firmware command APIs
-  *
-  * IOCTL handlers as well as command preperation and response routines
-  *  for sending scan commands to the firmware.
-  */
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-#include <asm/unaligned.h>
-#include <net/lib80211.h>
-
-#include "host.h"
-#include "dev.h"
-#include "scan.h"
-#include "assoc.h"
-#include "wext.h"
-#include "cmd.h"
-
-//! Approximate amount of data needed to pass a scan result back to iwlist
-#define MAX_SCAN_CELL_SIZE  (IW_EV_ADDR_LEN             \
-                             + IEEE80211_MAX_SSID_LEN   \
-                             + IW_EV_UINT_LEN           \
-                             + IW_EV_FREQ_LEN           \
-                             + IW_EV_QUAL_LEN           \
-                             + IEEE80211_MAX_SSID_LEN   \
-                             + IW_EV_PARAM_LEN          \
-                             + 40)	/* 40 for WPAIE */
-
-//! Memory needed to store a max sized channel List TLV for a firmware scan
-#define CHAN_TLV_MAX_SIZE  (sizeof(struct mrvl_ie_header)    \
-                            + (MRVDRV_MAX_CHANNELS_PER_SCAN     \
-                               * sizeof(struct chanscanparamset)))
-
-//! Memory needed to store a max number/size SSID TLV for a firmware scan
-#define SSID_TLV_MAX_SIZE  (1 * sizeof(struct mrvl_ie_ssid_param_set))
-
-//! Maximum memory needed for a cmd_ds_802_11_scan with all TLVs at max
-#define MAX_SCAN_CFG_ALLOC (sizeof(struct cmd_ds_802_11_scan)	\
-                            + CHAN_TLV_MAX_SIZE + SSID_TLV_MAX_SIZE)
-
-//! The maximum number of channels the firmware can scan per command
-#define MRVDRV_MAX_CHANNELS_PER_SCAN   14
-
-/**
- * @brief Number of channels to scan per firmware scan command issuance.
- *
- *  Number restricted to prevent hitting the limit on the amount of scan data
- *  returned in a single firmware scan command.
- */
-#define MRVDRV_CHANNELS_PER_SCAN_CMD   4
-
-//! Scan time specified in the channel TLV for each channel for passive scans
-#define MRVDRV_PASSIVE_SCAN_CHAN_TIME  100
-
-//! Scan time specified in the channel TLV for each channel for active scans
-#define MRVDRV_ACTIVE_SCAN_CHAN_TIME   100
-
-#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
-
-static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
-			      struct cmd_header *resp);
-
-/*********************************************************************/
-/*                                                                   */
-/*  Misc helper functions                                            */
-/*                                                                   */
-/*********************************************************************/
-
-/**
- *  @brief Unsets the MSB on basic rates
- *
- * Scan through an array and unset the MSB for basic data rates.
- *
- *  @param rates     buffer of data rates
- *  @param len       size of buffer
- */
-static void lbs_unset_basic_rate_flags(u8 *rates, size_t len)
-{
-	int i;
-
-	for (i = 0; i < len; i++)
-		rates[i] &= 0x7f;
-}
-
-
-static inline void clear_bss_descriptor(struct bss_descriptor *bss)
-{
-	/* Don't blow away ->list, just BSS data */
-	memset(bss, 0, offsetof(struct bss_descriptor, list));
-}
-
-/**
- *  @brief Compare two SSIDs
- *
- *  @param ssid1    A pointer to ssid to compare
- *  @param ssid2    A pointer to ssid to compare
- *
- *  @return         0 if the SSIDs match, non-zero otherwise
- */
-int lbs_ssid_cmp(uint8_t *ssid1, uint8_t ssid1_len, uint8_t *ssid2,
-		 uint8_t ssid2_len)
-{
-	if (ssid1_len != ssid2_len)
-		return -1;
-
-	return memcmp(ssid1, ssid2, ssid1_len);
-}
-
-static inline int is_same_network(struct bss_descriptor *src,
-				  struct bss_descriptor *dst)
-{
-	/* A network is only a duplicate if the channel, BSSID, and ESSID
-	 * all match.  We treat all <hidden> with the same BSSID and channel
-	 * as one network */
-	return ((src->ssid_len == dst->ssid_len) &&
-		(src->channel == dst->channel) &&
-		!compare_ether_addr(src->bssid, dst->bssid) &&
-		!memcmp(src->ssid, dst->ssid, src->ssid_len));
-}
-
-
-
-/*********************************************************************/
-/*                                                                   */
-/* Region channel support                                            */
-/*                                                                   */
-/*********************************************************************/
-
-#define LBS_TX_PWR_DEFAULT		20	/*100mW */
-#define LBS_TX_PWR_US_DEFAULT		20	/*100mW */
-#define LBS_TX_PWR_JP_DEFAULT		16	/*50mW */
-#define LBS_TX_PWR_FR_DEFAULT		20	/*100mW */
-#define LBS_TX_PWR_EMEA_DEFAULT	20	/*100mW */
-
-/* Format { channel, frequency (MHz), maxtxpower } */
-/* band: 'B/G', region: USA FCC/Canada IC */
-static struct chan_freq_power channel_freq_power_US_BG[] = {
-	{1, 2412, LBS_TX_PWR_US_DEFAULT},
-	{2, 2417, LBS_TX_PWR_US_DEFAULT},
-	{3, 2422, LBS_TX_PWR_US_DEFAULT},
-	{4, 2427, LBS_TX_PWR_US_DEFAULT},
-	{5, 2432, LBS_TX_PWR_US_DEFAULT},
-	{6, 2437, LBS_TX_PWR_US_DEFAULT},
-	{7, 2442, LBS_TX_PWR_US_DEFAULT},
-	{8, 2447, LBS_TX_PWR_US_DEFAULT},
-	{9, 2452, LBS_TX_PWR_US_DEFAULT},
-	{10, 2457, LBS_TX_PWR_US_DEFAULT},
-	{11, 2462, LBS_TX_PWR_US_DEFAULT}
-};
-
-/* band: 'B/G', region: Europe ETSI */
-static struct chan_freq_power channel_freq_power_EU_BG[] = {
-	{1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
-	{2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
-	{3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
-	{4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
-	{5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
-	{6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
-	{7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
-	{8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
-	{9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
-	{10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
-	{11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
-	{12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
-	{13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
-};
-
-/* band: 'B/G', region: Spain */
-static struct chan_freq_power channel_freq_power_SPN_BG[] = {
-	{10, 2457, LBS_TX_PWR_DEFAULT},
-	{11, 2462, LBS_TX_PWR_DEFAULT}
-};
-
-/* band: 'B/G', region: France */
-static struct chan_freq_power channel_freq_power_FR_BG[] = {
-	{10, 2457, LBS_TX_PWR_FR_DEFAULT},
-	{11, 2462, LBS_TX_PWR_FR_DEFAULT},
-	{12, 2467, LBS_TX_PWR_FR_DEFAULT},
-	{13, 2472, LBS_TX_PWR_FR_DEFAULT}
-};
-
-/* band: 'B/G', region: Japan */
-static struct chan_freq_power channel_freq_power_JPN_BG[] = {
-	{1, 2412, LBS_TX_PWR_JP_DEFAULT},
-	{2, 2417, LBS_TX_PWR_JP_DEFAULT},
-	{3, 2422, LBS_TX_PWR_JP_DEFAULT},
-	{4, 2427, LBS_TX_PWR_JP_DEFAULT},
-	{5, 2432, LBS_TX_PWR_JP_DEFAULT},
-	{6, 2437, LBS_TX_PWR_JP_DEFAULT},
-	{7, 2442, LBS_TX_PWR_JP_DEFAULT},
-	{8, 2447, LBS_TX_PWR_JP_DEFAULT},
-	{9, 2452, LBS_TX_PWR_JP_DEFAULT},
-	{10, 2457, LBS_TX_PWR_JP_DEFAULT},
-	{11, 2462, LBS_TX_PWR_JP_DEFAULT},
-	{12, 2467, LBS_TX_PWR_JP_DEFAULT},
-	{13, 2472, LBS_TX_PWR_JP_DEFAULT},
-	{14, 2484, LBS_TX_PWR_JP_DEFAULT}
-};
-
-/**
- * the structure for channel, frequency and power
- */
-struct region_cfp_table {
-	u8 region;
-	struct chan_freq_power *cfp_BG;
-	int cfp_no_BG;
-};
-
-/**
- * the structure for the mapping between region and CFP
- */
-static struct region_cfp_table region_cfp_table[] = {
-	{0x10,			/*US FCC */
-	 channel_freq_power_US_BG,
-	 ARRAY_SIZE(channel_freq_power_US_BG),
-	 }
-	,
-	{0x20,			/*CANADA IC */
-	 channel_freq_power_US_BG,
-	 ARRAY_SIZE(channel_freq_power_US_BG),
-	 }
-	,
-	{0x30, /*EU*/ channel_freq_power_EU_BG,
-	 ARRAY_SIZE(channel_freq_power_EU_BG),
-	 }
-	,
-	{0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
-	 ARRAY_SIZE(channel_freq_power_SPN_BG),
-	 }
-	,
-	{0x32, /*FRANCE*/ channel_freq_power_FR_BG,
-	 ARRAY_SIZE(channel_freq_power_FR_BG),
-	 }
-	,
-	{0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
-	 ARRAY_SIZE(channel_freq_power_JPN_BG),
-	 }
-	,
-/*Add new region here */
-};
-
-/**
- *  @brief This function finds the CFP in
- *  region_cfp_table based on region and band parameter.
- *
- *  @param region  The region code
- *  @param band	   The band
- *  @param cfp_no  A pointer to CFP number
- *  @return 	   A pointer to CFP
- */
-static struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
-{
-	int i, end;
-
-	lbs_deb_enter(LBS_DEB_MAIN);
-
-	end = ARRAY_SIZE(region_cfp_table);
-
-	for (i = 0; i < end ; i++) {
-		lbs_deb_main("region_cfp_table[i].region=%d\n",
-			region_cfp_table[i].region);
-		if (region_cfp_table[i].region == region) {
-			*cfp_no = region_cfp_table[i].cfp_no_BG;
-			lbs_deb_leave(LBS_DEB_MAIN);
-			return region_cfp_table[i].cfp_BG;
-		}
-	}
-
-	lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
-	return NULL;
-}
-
-int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
-{
-	int ret = 0;
-	int i = 0;
-
-	struct chan_freq_power *cfp;
-	int cfp_no;
-
-	lbs_deb_enter(LBS_DEB_MAIN);
-
-	memset(priv->region_channel, 0, sizeof(priv->region_channel));
-
-	cfp = lbs_get_region_cfp_table(region, &cfp_no);
-	if (cfp != NULL) {
-		priv->region_channel[i].nrcfp = cfp_no;
-		priv->region_channel[i].CFP = cfp;
-	} else {
-		lbs_deb_main("wrong region code %#x in band B/G\n",
-		       region);
-		ret = -1;
-		goto out;
-	}
-	priv->region_channel[i].valid = 1;
-	priv->region_channel[i].region = region;
-	priv->region_channel[i].band = band;
-	i++;
-out:
-	lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
-	return ret;
-}
-
-
-
-
-/*********************************************************************/
-/*                                                                   */
-/*  Main scanning support                                            */
-/*                                                                   */
-/*********************************************************************/
-
-/**
- *  @brief Create a channel list for the driver to scan based on region info
- *
- *  Only used from lbs_scan_setup_scan_config()
- *
- *  Use the driver region/band information to construct a comprehensive list
- *    of channels to scan.  This routine is used for any scan that is not
- *    provided a specific channel list to scan.
- *
- *  @param priv          A pointer to struct lbs_private structure
- *  @param scanchanlist  Output parameter: resulting channel list to scan
- *
- *  @return              void
- */
-static int lbs_scan_create_channel_list(struct lbs_private *priv,
-					struct chanscanparamset *scanchanlist)
-{
-	struct region_channel *scanregion;
-	struct chan_freq_power *cfp;
-	int rgnidx;
-	int chanidx;
-	int nextchan;
-	uint8_t scantype;
-
-	chanidx = 0;
-
-	/* Set the default scan type to the user specified type, will later
-	 *   be changed to passive on a per channel basis if restricted by
-	 *   regulatory requirements (11d or 11h)
-	 */
-	scantype = CMD_SCAN_TYPE_ACTIVE;
-
-	for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) {
-		if (!priv->region_channel[rgnidx].valid)
-			continue;
-		scanregion = &priv->region_channel[rgnidx];
-
-		for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) {
-			struct chanscanparamset *chan = &scanchanlist[chanidx];
-
-			cfp = scanregion->CFP + nextchan;
-
-			if (scanregion->band == BAND_B || scanregion->band == BAND_G)
-				chan->radiotype = CMD_SCAN_RADIO_TYPE_BG;
-
-			if (scantype == CMD_SCAN_TYPE_PASSIVE) {
-				chan->maxscantime = cpu_to_le16(MRVDRV_PASSIVE_SCAN_CHAN_TIME);
-				chan->chanscanmode.passivescan = 1;
-			} else {
-				chan->maxscantime = cpu_to_le16(MRVDRV_ACTIVE_SCAN_CHAN_TIME);
-				chan->chanscanmode.passivescan = 0;
-			}
-
-			chan->channumber = cfp->channel;
-		}
-	}
-	return chanidx;
-}
-
-/*
- * Add SSID TLV of the form:
- *
- * TLV-ID SSID     00 00
- * length          06 00
- * ssid            4d 4e 54 45 53 54
- */
-static int lbs_scan_add_ssid_tlv(struct lbs_private *priv, u8 *tlv)
-{
-	struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv;
-
-	ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID);
-	ssid_tlv->header.len = cpu_to_le16(priv->scan_ssid_len);
-	memcpy(ssid_tlv->ssid, priv->scan_ssid, priv->scan_ssid_len);
-	return sizeof(ssid_tlv->header) + priv->scan_ssid_len;
-}
-
-/*
- * Add CHANLIST TLV of the form
- *
- * TLV-ID CHANLIST 01 01
- * length          5b 00
- * channel 1       00 01 00 00 00 64 00
- *   radio type    00
- *   channel          01
- *   scan type           00
- *   min scan time          00 00
- *   max scan time                64 00
- * channel 2       00 02 00 00 00 64 00
- * channel 3       00 03 00 00 00 64 00
- * channel 4       00 04 00 00 00 64 00
- * channel 5       00 05 00 00 00 64 00
- * channel 6       00 06 00 00 00 64 00
- * channel 7       00 07 00 00 00 64 00
- * channel 8       00 08 00 00 00 64 00
- * channel 9       00 09 00 00 00 64 00
- * channel 10      00 0a 00 00 00 64 00
- * channel 11      00 0b 00 00 00 64 00
- * channel 12      00 0c 00 00 00 64 00
- * channel 13      00 0d 00 00 00 64 00
- *
- */
-static int lbs_scan_add_chanlist_tlv(uint8_t *tlv,
-				     struct chanscanparamset *chan_list,
-				     int chan_count)
-{
-	size_t size = sizeof(struct chanscanparamset) *chan_count;
-	struct mrvl_ie_chanlist_param_set *chan_tlv = (void *)tlv;
-
-	chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
-	memcpy(chan_tlv->chanscanparam, chan_list, size);
-	chan_tlv->header.len = cpu_to_le16(size);
-	return sizeof(chan_tlv->header) + size;
-}
-
-/*
- * Add RATES TLV of the form
- *
- * TLV-ID RATES    01 00
- * length          0e 00
- * rates           82 84 8b 96 0c 12 18 24 30 48 60 6c
- *
- * The rates are in lbs_bg_rates[], but for the 802.11b
- * rates the high bit isn't set.
- */
-static int lbs_scan_add_rates_tlv(uint8_t *tlv)
-{
-	int i;
-	struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv;
-
-	rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
-	tlv += sizeof(rate_tlv->header);
-	for (i = 0; i < MAX_RATES; i++) {
-		*tlv = lbs_bg_rates[i];
-		if (*tlv == 0)
-			break;
-		/* This code makes sure that the 802.11b rates (1 MBit/s, 2
-		   MBit/s, 5.5 MBit/s and 11 MBit/s) get the high bit set.
-		   Note that the values are MBit/s * 2, to mark them as
-		   basic rates so that the firmware likes it better */
-		if (*tlv == 0x02 || *tlv == 0x04 ||
-		    *tlv == 0x0b || *tlv == 0x16)
-			*tlv |= 0x80;
-		tlv++;
-	}
-	rate_tlv->header.len = cpu_to_le16(i);
-	return sizeof(rate_tlv->header) + i;
-}
-
-/*
- * Generate the CMD_802_11_SCAN command with the proper tlv
- * for a bunch of channels.
- */
-static int lbs_do_scan(struct lbs_private *priv, uint8_t bsstype,
-		       struct chanscanparamset *chan_list, int chan_count)
-{
-	int ret = -ENOMEM;
-	struct cmd_ds_802_11_scan *scan_cmd;
-	uint8_t *tlv;	/* pointer into our current, growing TLV storage area */
-
-	lbs_deb_enter_args(LBS_DEB_SCAN, "bsstype %d, chanlist[].chan %d, chan_count %d",
-		bsstype, chan_list ? chan_list[0].channumber : -1,
-		chan_count);
-
-	/* create the fixed part for scan command */
-	scan_cmd = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL);
-	if (scan_cmd == NULL)
-		goto out;
-
-	tlv = scan_cmd->tlvbuffer;
-	/* TODO: do we need to scan for a specific BSSID?
-	memcpy(scan_cmd->bssid, priv->scan_bssid, ETH_ALEN); */
-	scan_cmd->bsstype = bsstype;
-
-	/* add TLVs */
-	if (priv->scan_ssid_len)
-		tlv += lbs_scan_add_ssid_tlv(priv, tlv);
-	if (chan_list && chan_count)
-		tlv += lbs_scan_add_chanlist_tlv(tlv, chan_list, chan_count);
-	tlv += lbs_scan_add_rates_tlv(tlv);
-
-	/* This is the final data we are about to send */
-	scan_cmd->hdr.size = cpu_to_le16(tlv - (uint8_t *)scan_cmd);
-	lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd,
-		    sizeof(*scan_cmd));
-	lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer,
-		    tlv - scan_cmd->tlvbuffer);
-
-	ret = __lbs_cmd(priv, CMD_802_11_SCAN, &scan_cmd->hdr,
-			le16_to_cpu(scan_cmd->hdr.size),
-			lbs_ret_80211_scan, 0);
-
-out:
-	kfree(scan_cmd);
-	lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief Internal function used to start a scan based on an input config
- *
- *  Use the input user scan configuration information when provided in
- *    order to send the appropriate scan commands to firmware to populate or
- *    update the internal driver scan table
- *
- *  @param priv          A pointer to struct lbs_private structure
- *  @param full_scan     Do a full-scan (blocking)
- *
- *  @return              0 or < 0 if error
- */
-int lbs_scan_networks(struct lbs_private *priv, int full_scan)
-{
-	int ret = -ENOMEM;
-	struct chanscanparamset *chan_list;
-	struct chanscanparamset *curr_chans;
-	int chan_count;
-	uint8_t bsstype = CMD_BSS_TYPE_ANY;
-	int numchannels = MRVDRV_CHANNELS_PER_SCAN_CMD;
-	union iwreq_data wrqu;
-#ifdef CONFIG_LIBERTAS_DEBUG
-	struct bss_descriptor *iter;
-	int i = 0;
-	DECLARE_SSID_BUF(ssid);
-#endif
-
-	lbs_deb_enter_args(LBS_DEB_SCAN, "full_scan %d", full_scan);
-
-	/* Cancel any outstanding partial scans if this scan
-	 * is a full scan.
-	 */
-	if (full_scan && delayed_work_pending(&priv->scan_work))
-		cancel_delayed_work(&priv->scan_work);
-
-	/* User-specified bsstype or channel list
-	TODO: this can be implemented if some user-space application
-	need the feature. Formerly, it was accessible from debugfs,
-	but then nowhere used.
-	if (user_cfg) {
-		if (user_cfg->bsstype)
-		bsstype = user_cfg->bsstype;
-	} */
-
-	lbs_deb_scan("numchannels %d, bsstype %d\n", numchannels, bsstype);
-
-	/* Create list of channels to scan */
-	chan_list = kzalloc(sizeof(struct chanscanparamset) *
-			    LBS_IOCTL_USER_SCAN_CHAN_MAX, GFP_KERNEL);
-	if (!chan_list) {
-		lbs_pr_alert("SCAN: chan_list empty\n");
-		goto out;
-	}
-
-	/* We want to scan all channels */
-	chan_count = lbs_scan_create_channel_list(priv, chan_list);
-
-	netif_stop_queue(priv->dev);
-	if (priv->mesh_dev)
-		netif_stop_queue(priv->mesh_dev);
-
-	/* Prepare to continue an interrupted scan */
-	lbs_deb_scan("chan_count %d, scan_channel %d\n",
-		     chan_count, priv->scan_channel);
-	curr_chans = chan_list;
-	/* advance channel list by already-scanned-channels */
-	if (priv->scan_channel > 0) {
-		curr_chans += priv->scan_channel;
-		chan_count -= priv->scan_channel;
-	}
-
-	/* Send scan command(s)
-	 * numchannels contains the number of channels we should maximally scan
-	 * chan_count is the total number of channels to scan
-	 */
-
-	while (chan_count) {
-		int to_scan = min(numchannels, chan_count);
-		lbs_deb_scan("scanning %d of %d channels\n",
-			     to_scan, chan_count);
-		ret = lbs_do_scan(priv, bsstype, curr_chans,
-				  to_scan);
-		if (ret) {
-			lbs_pr_err("SCAN_CMD failed\n");
-			goto out2;
-		}
-		curr_chans += to_scan;
-		chan_count -= to_scan;
-
-		/* somehow schedule the next part of the scan */
-		if (chan_count && !full_scan &&
-		    !priv->surpriseremoved) {
-			/* -1 marks just that we're currently scanning */
-			if (priv->scan_channel < 0)
-				priv->scan_channel = to_scan;
-			else
-				priv->scan_channel += to_scan;
-			cancel_delayed_work(&priv->scan_work);
-			queue_delayed_work(priv->work_thread, &priv->scan_work,
-					   msecs_to_jiffies(300));
-			/* skip over GIWSCAN event */
-			goto out;
-		}
-
-	}
-	memset(&wrqu, 0, sizeof(union iwreq_data));
-	wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL);
-
-#ifdef CONFIG_LIBERTAS_DEBUG
-	/* Dump the scan table */
-	mutex_lock(&priv->lock);
-	lbs_deb_scan("scan table:\n");
-	list_for_each_entry(iter, &priv->network_list, list)
-		lbs_deb_scan("%02d: BSSID %pM, RSSI %d, SSID '%s'\n",
-			     i++, iter->bssid, iter->rssi,
-			     print_ssid(ssid, iter->ssid, iter->ssid_len));
-	mutex_unlock(&priv->lock);
-#endif
-
-out2:
-	priv->scan_channel = 0;
-
-out:
-	if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
-		netif_wake_queue(priv->dev);
-
-	if (priv->mesh_dev && lbs_mesh_connected(priv) &&
-	    !priv->tx_pending_len)
-		netif_wake_queue(priv->mesh_dev);
-
-	kfree(chan_list);
-
-	lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
-	return ret;
-}
-
-void lbs_scan_worker(struct work_struct *work)
-{
-	struct lbs_private *priv =
-		container_of(work, struct lbs_private, scan_work.work);
-
-	lbs_deb_enter(LBS_DEB_SCAN);
-	lbs_scan_networks(priv, 0);
-	lbs_deb_leave(LBS_DEB_SCAN);
-}
-
-
-/*********************************************************************/
-/*                                                                   */
-/*  Result interpretation                                            */
-/*                                                                   */
-/*********************************************************************/
-
-/**
- *  @brief Interpret a BSS scan response returned from the firmware
- *
- *  Parse the various fixed fields and IEs passed back for a BSS probe
- *  response or beacon from the scan command.  Record information as needed
- *  in the scan table struct bss_descriptor for that entry.
- *
- *  @param bss  Output parameter: Pointer to the BSS Entry
- *
- *  @return             0 or -1
- */
-static int lbs_process_bss(struct bss_descriptor *bss,
-			   uint8_t **pbeaconinfo, int *bytesleft)
-{
-	struct ieee_ie_fh_param_set *fh;
-	struct ieee_ie_ds_param_set *ds;
-	struct ieee_ie_cf_param_set *cf;
-	struct ieee_ie_ibss_param_set *ibss;
-	DECLARE_SSID_BUF(ssid);
-	uint8_t *pos, *end, *p;
-	uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
-	uint16_t beaconsize = 0;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_SCAN);
-
-	if (*bytesleft >= sizeof(beaconsize)) {
-		/* Extract & convert beacon size from the command buffer */
-		beaconsize = get_unaligned_le16(*pbeaconinfo);
-		*bytesleft -= sizeof(beaconsize);
-		*pbeaconinfo += sizeof(beaconsize);
-	}
-
-	if (beaconsize == 0 || beaconsize > *bytesleft) {
-		*pbeaconinfo += *bytesleft;
-		*bytesleft = 0;
-		ret = -1;
-		goto done;
-	}
-
-	/* Initialize the current working beacon pointer for this BSS iteration */
-	pos = *pbeaconinfo;
-	end = pos + beaconsize;
-
-	/* Advance the return beacon pointer past the current beacon */
-	*pbeaconinfo += beaconsize;
-	*bytesleft -= beaconsize;
-
-	memcpy(bss->bssid, pos, ETH_ALEN);
-	lbs_deb_scan("process_bss: BSSID %pM\n", bss->bssid);
-	pos += ETH_ALEN;
-
-	if ((end - pos) < 12) {
-		lbs_deb_scan("process_bss: Not enough bytes left\n");
-		ret = -1;
-		goto done;
-	}
-
-	/*
-	 * next 4 fields are RSSI, time stamp, beacon interval,
-	 *   and capability information
-	 */
-
-	/* RSSI is 1 byte long */
-	bss->rssi = *pos;
-	lbs_deb_scan("process_bss: RSSI %d\n", *pos);
-	pos++;
-
-	/* time stamp is 8 bytes long */
-	pos += 8;
-
-	/* beacon interval is 2 bytes long */
-	bss->beaconperiod = get_unaligned_le16(pos);
-	pos += 2;
-
-	/* capability information is 2 bytes long */
-	bss->capability = get_unaligned_le16(pos);
-	lbs_deb_scan("process_bss: capabilities 0x%04x\n", bss->capability);
-	pos += 2;
-
-	if (bss->capability & WLAN_CAPABILITY_PRIVACY)
-		lbs_deb_scan("process_bss: WEP enabled\n");
-	if (bss->capability & WLAN_CAPABILITY_IBSS)
-		bss->mode = IW_MODE_ADHOC;
-	else
-		bss->mode = IW_MODE_INFRA;
-
-	/* rest of the current buffer are IE's */
-	lbs_deb_scan("process_bss: IE len %zd\n", end - pos);
-	lbs_deb_hex(LBS_DEB_SCAN, "process_bss: IE info", pos, end - pos);
-
-	/* process variable IE */
-	while (pos <= end - 2) {
-		if (pos + pos[1] > end) {
-			lbs_deb_scan("process_bss: error in processing IE, "
-				     "bytes left < IE length\n");
-			break;
-		}
-
-		switch (pos[0]) {
-		case WLAN_EID_SSID:
-			bss->ssid_len = min_t(int, IEEE80211_MAX_SSID_LEN, pos[1]);
-			memcpy(bss->ssid, pos + 2, bss->ssid_len);
-			lbs_deb_scan("got SSID IE: '%s', len %u\n",
-			             print_ssid(ssid, bss->ssid, bss->ssid_len),
-			             bss->ssid_len);
-			break;
-
-		case WLAN_EID_SUPP_RATES:
-			n_basic_rates = min_t(uint8_t, MAX_RATES, pos[1]);
-			memcpy(bss->rates, pos + 2, n_basic_rates);
-			got_basic_rates = 1;
-			lbs_deb_scan("got RATES IE\n");
-			break;
-
-		case WLAN_EID_FH_PARAMS:
-			fh = (struct ieee_ie_fh_param_set *) pos;
-			memcpy(&bss->phy.fh, fh, sizeof(*fh));
-			lbs_deb_scan("got FH IE\n");
-			break;
-
-		case WLAN_EID_DS_PARAMS:
-			ds = (struct ieee_ie_ds_param_set *) pos;
-			bss->channel = ds->channel;
-			memcpy(&bss->phy.ds, ds, sizeof(*ds));
-			lbs_deb_scan("got DS IE, channel %d\n", bss->channel);
-			break;
-
-		case WLAN_EID_CF_PARAMS:
-			cf = (struct ieee_ie_cf_param_set *) pos;
-			memcpy(&bss->ss.cf, cf, sizeof(*cf));
-			lbs_deb_scan("got CF IE\n");
-			break;
-
-		case WLAN_EID_IBSS_PARAMS:
-			ibss = (struct ieee_ie_ibss_param_set *) pos;
-			bss->atimwindow = ibss->atimwindow;
-			memcpy(&bss->ss.ibss, ibss, sizeof(*ibss));
-			lbs_deb_scan("got IBSS IE\n");
-			break;
-
-		case WLAN_EID_EXT_SUPP_RATES:
-			/* only process extended supported rate if data rate is
-			 * already found. Data rate IE should come before
-			 * extended supported rate IE
-			 */
-			lbs_deb_scan("got RATESEX IE\n");
-			if (!got_basic_rates) {
-				lbs_deb_scan("... but ignoring it\n");
-				break;
-			}
-
-			n_ex_rates = pos[1];
-			if (n_basic_rates + n_ex_rates > MAX_RATES)
-				n_ex_rates = MAX_RATES - n_basic_rates;
-
-			p = bss->rates + n_basic_rates;
-			memcpy(p, pos + 2, n_ex_rates);
-			break;
-
-		case WLAN_EID_GENERIC:
-			if (pos[1] >= 4 &&
-			    pos[2] == 0x00 && pos[3] == 0x50 &&
-			    pos[4] == 0xf2 && pos[5] == 0x01) {
-				bss->wpa_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN);
-				memcpy(bss->wpa_ie, pos, bss->wpa_ie_len);
-				lbs_deb_scan("got WPA IE\n");
-				lbs_deb_hex(LBS_DEB_SCAN, "WPA IE", bss->wpa_ie,
-					    bss->wpa_ie_len);
-			} else if (pos[1] >= MARVELL_MESH_IE_LENGTH &&
-				   pos[2] == 0x00 && pos[3] == 0x50 &&
-				   pos[4] == 0x43 && pos[5] == 0x04) {
-				lbs_deb_scan("got mesh IE\n");
-				bss->mesh = 1;
-			} else {
-				lbs_deb_scan("got generic IE: %02x:%02x:%02x:%02x, len %d\n",
-					pos[2], pos[3],
-					pos[4], pos[5],
-					pos[1]);
-			}
-			break;
-
-		case WLAN_EID_RSN:
-			lbs_deb_scan("got RSN IE\n");
-			bss->rsn_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN);
-			memcpy(bss->rsn_ie, pos, bss->rsn_ie_len);
-			lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE",
-				    bss->rsn_ie, bss->rsn_ie_len);
-			break;
-
-		default:
-			lbs_deb_scan("got IE 0x%04x, len %d\n",
-				     pos[0], pos[1]);
-			break;
-		}
-
-		pos += pos[1] + 2;
-	}
-
-	/* Timestamp */
-	bss->last_scanned = jiffies;
-	lbs_unset_basic_rate_flags(bss->rates, sizeof(bss->rates));
-
-	ret = 0;
-
-done:
-	lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief Send a scan command for all available channels filtered on a spec
- *
- *  Used in association code and from debugfs
- *
- *  @param priv             A pointer to struct lbs_private structure
- *  @param ssid             A pointer to the SSID to scan for
- *  @param ssid_len         Length of the SSID
- *
- *  @return                0-success, otherwise fail
- */
-int lbs_send_specific_ssid_scan(struct lbs_private *priv, uint8_t *ssid,
-				uint8_t ssid_len)
-{
-	DECLARE_SSID_BUF(ssid_buf);
-	int ret = 0;
-
-	lbs_deb_enter_args(LBS_DEB_SCAN, "SSID '%s'\n",
-			   print_ssid(ssid_buf, ssid, ssid_len));
-
-	if (!ssid_len)
-		goto out;
-
-	memcpy(priv->scan_ssid, ssid, ssid_len);
-	priv->scan_ssid_len = ssid_len;
-
-	lbs_scan_networks(priv, 1);
-	if (priv->surpriseremoved) {
-		ret = -1;
-		goto out;
-	}
-
-out:
-	lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
-	return ret;
-}
-
-
-
-
-/*********************************************************************/
-/*                                                                   */
-/*  Support for Wireless Extensions                                  */
-/*                                                                   */
-/*********************************************************************/
-
-
-#define MAX_CUSTOM_LEN 64
-
-static inline char *lbs_translate_scan(struct lbs_private *priv,
-					    struct iw_request_info *info,
-					    char *start, char *stop,
-					    struct bss_descriptor *bss)
-{
-	struct chan_freq_power *cfp;
-	char *current_val;	/* For rates */
-	struct iw_event iwe;	/* Temporary buffer */
-	int j;
-#define PERFECT_RSSI ((uint8_t)50)
-#define WORST_RSSI   ((uint8_t)0)
-#define RSSI_DIFF    ((uint8_t)(PERFECT_RSSI - WORST_RSSI))
-	uint8_t rssi;
-
-	lbs_deb_enter(LBS_DEB_SCAN);
-
-	cfp = lbs_find_cfp_by_band_and_channel(priv, 0, bss->channel);
-	if (!cfp) {
-		lbs_deb_scan("Invalid channel number %d\n", bss->channel);
-		start = NULL;
-		goto out;
-	}
-
-	/* First entry *MUST* be the BSSID */
-	iwe.cmd = SIOCGIWAP;
-	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
-	memcpy(iwe.u.ap_addr.sa_data, &bss->bssid, ETH_ALEN);
-	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
-
-	/* SSID */
-	iwe.cmd = SIOCGIWESSID;
-	iwe.u.data.flags = 1;
-	iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IEEE80211_MAX_SSID_LEN);
-	start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid);
-
-	/* Mode */
-	iwe.cmd = SIOCGIWMODE;
-	iwe.u.mode = bss->mode;
-	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
-
-	/* Frequency */
-	iwe.cmd = SIOCGIWFREQ;
-	iwe.u.freq.m = (long)cfp->freq * 100000;
-	iwe.u.freq.e = 1;
-	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
-
-	/* Add quality statistics */
-	iwe.cmd = IWEVQUAL;
-	iwe.u.qual.updated = IW_QUAL_ALL_UPDATED;
-	iwe.u.qual.level = SCAN_RSSI(bss->rssi);
-
-	rssi = iwe.u.qual.level - MRVDRV_NF_DEFAULT_SCAN_VALUE;
-	iwe.u.qual.qual =
-		(100 * RSSI_DIFF * RSSI_DIFF - (PERFECT_RSSI - rssi) *
-		 (15 * (RSSI_DIFF) + 62 * (PERFECT_RSSI - rssi))) /
-		(RSSI_DIFF * RSSI_DIFF);
-	if (iwe.u.qual.qual > 100)
-		iwe.u.qual.qual = 100;
-
-	if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) {
-		iwe.u.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE;
-	} else {
-		iwe.u.qual.noise = CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]);
-	}
-
-	/* Locally created ad-hoc BSSs won't have beacons if this is the
-	 * only station in the adhoc network; so get signal strength
-	 * from receive statistics.
-	 */
-	if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate
-	    && !lbs_ssid_cmp(priv->curbssparams.ssid,
-			     priv->curbssparams.ssid_len,
-			     bss->ssid, bss->ssid_len)) {
-		int snr, nf;
-		snr = priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE;
-		nf = priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE;
-		iwe.u.qual.level = CAL_RSSI(snr, nf);
-	}
-	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
-
-	/* Add encryption capability */
-	iwe.cmd = SIOCGIWENCODE;
-	if (bss->capability & WLAN_CAPABILITY_PRIVACY) {
-		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
-	} else {
-		iwe.u.data.flags = IW_ENCODE_DISABLED;
-	}
-	iwe.u.data.length = 0;
-	start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid);
-
-	current_val = start + iwe_stream_lcp_len(info);
-
-	iwe.cmd = SIOCGIWRATE;
-	iwe.u.bitrate.fixed = 0;
-	iwe.u.bitrate.disabled = 0;
-	iwe.u.bitrate.value = 0;
-
-	for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) {
-		/* Bit rate given in 500 kb/s units */
-		iwe.u.bitrate.value = bss->rates[j] * 500000;
-		current_val = iwe_stream_add_value(info, start, current_val,
-						   stop, &iwe, IW_EV_PARAM_LEN);
-	}
-	if ((bss->mode == IW_MODE_ADHOC) && priv->adhoccreate
-	    && !lbs_ssid_cmp(priv->curbssparams.ssid,
-			     priv->curbssparams.ssid_len,
-			     bss->ssid, bss->ssid_len)) {
-		iwe.u.bitrate.value = 22 * 500000;
-		current_val = iwe_stream_add_value(info, start, current_val,
-						   stop, &iwe, IW_EV_PARAM_LEN);
-	}
-	/* Check if we added any event */
-	if ((current_val - start) > iwe_stream_lcp_len(info))
-		start = current_val;
-
-	memset(&iwe, 0, sizeof(iwe));
-	if (bss->wpa_ie_len) {
-		char buf[MAX_WPA_IE_LEN];
-		memcpy(buf, bss->wpa_ie, bss->wpa_ie_len);
-		iwe.cmd = IWEVGENIE;
-		iwe.u.data.length = bss->wpa_ie_len;
-		start = iwe_stream_add_point(info, start, stop, &iwe, buf);
-	}
-
-	memset(&iwe, 0, sizeof(iwe));
-	if (bss->rsn_ie_len) {
-		char buf[MAX_WPA_IE_LEN];
-		memcpy(buf, bss->rsn_ie, bss->rsn_ie_len);
-		iwe.cmd = IWEVGENIE;
-		iwe.u.data.length = bss->rsn_ie_len;
-		start = iwe_stream_add_point(info, start, stop, &iwe, buf);
-	}
-
-	if (bss->mesh) {
-		char custom[MAX_CUSTOM_LEN];
-		char *p = custom;
-
-		iwe.cmd = IWEVCUSTOM;
-		p += snprintf(p, MAX_CUSTOM_LEN, "mesh-type: olpc");
-		iwe.u.data.length = p - custom;
-		if (iwe.u.data.length)
-			start = iwe_stream_add_point(info, start, stop,
-						     &iwe, custom);
-	}
-
-out:
-	lbs_deb_leave_args(LBS_DEB_SCAN, "start %p", start);
-	return start;
-}
-
-
-/**
- *  @brief Handle Scan Network ioctl
- *
- *  @param dev          A pointer to net_device structure
- *  @param info         A pointer to iw_request_info structure
- *  @param vwrq         A pointer to iw_param structure
- *  @param extra        A pointer to extra data buf
- *
- *  @return             0 --success, otherwise fail
- */
-int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
-		 union iwreq_data *wrqu, char *extra)
-{
-	DECLARE_SSID_BUF(ssid);
-	struct lbs_private *priv = dev->ml_priv;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (!priv->radio_on) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (!netif_running(dev)) {
-		ret = -ENETDOWN;
-		goto out;
-	}
-
-	/* mac80211 does this:
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (sdata->type != IEEE80211_IF_TYPE_xxx) {
-		ret = -EOPNOTSUPP;
-		goto out;
-	}
-	*/
-
-	if (wrqu->data.length == sizeof(struct iw_scan_req) &&
-	    wrqu->data.flags & IW_SCAN_THIS_ESSID) {
-		struct iw_scan_req *req = (struct iw_scan_req *)extra;
-		priv->scan_ssid_len = req->essid_len;
-		memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
-		lbs_deb_wext("set_scan, essid '%s'\n",
-			print_ssid(ssid, priv->scan_ssid, priv->scan_ssid_len));
-	} else {
-		priv->scan_ssid_len = 0;
-	}
-
-	if (!delayed_work_pending(&priv->scan_work))
-		queue_delayed_work(priv->work_thread, &priv->scan_work,
-				   msecs_to_jiffies(50));
-	/* set marker that currently a scan is taking place */
-	priv->scan_channel = -1;
-
-	if (priv->surpriseremoved)
-		ret = -EIO;
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-
-/**
- *  @brief  Handle Retrieve scan table ioctl
- *
- *  @param dev          A pointer to net_device structure
- *  @param info         A pointer to iw_request_info structure
- *  @param dwrq         A pointer to iw_point structure
- *  @param extra        A pointer to extra data buf
- *
- *  @return             0 --success, otherwise fail
- */
-int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
-		 struct iw_point *dwrq, char *extra)
-{
-#define SCAN_ITEM_SIZE 128
-	struct lbs_private *priv = dev->ml_priv;
-	int err = 0;
-	char *ev = extra;
-	char *stop = ev + dwrq->length;
-	struct bss_descriptor *iter_bss;
-	struct bss_descriptor *safe;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	/* iwlist should wait until the current scan is finished */
-	if (priv->scan_channel)
-		return -EAGAIN;
-
-	/* Update RSSI if current BSS is a locally created ad-hoc BSS */
-	if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) {
-		err = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
-				CMD_OPTION_WAITFORRSP, 0, NULL);
-		if (err)
-			goto out;
-	}
-
-	mutex_lock(&priv->lock);
-	list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
-		char *next_ev;
-		unsigned long stale_time;
-
-		if (stop - ev < SCAN_ITEM_SIZE) {
-			err = -E2BIG;
-			break;
-		}
-
-		/* For mesh device, list only mesh networks */
-		if (dev == priv->mesh_dev && !iter_bss->mesh)
-			continue;
-
-		/* Prune an old scan result */
-		stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE;
-		if (time_after(jiffies, stale_time)) {
-			list_move_tail(&iter_bss->list, &priv->network_free_list);
-			clear_bss_descriptor(iter_bss);
-			continue;
-		}
-
-		/* Translate to WE format this entry */
-		next_ev = lbs_translate_scan(priv, info, ev, stop, iter_bss);
-		if (next_ev == NULL)
-			continue;
-		ev = next_ev;
-	}
-	mutex_unlock(&priv->lock);
-
-	dwrq->length = (ev - extra);
-	dwrq->flags = 0;
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err);
-	return err;
-}
-
-
-
-
-/*********************************************************************/
-/*                                                                   */
-/*  Command execution                                                */
-/*                                                                   */
-/*********************************************************************/
-
-
-/**
- *  @brief This function handles the command response of scan
- *
- *  Called from handle_cmd_response() in cmdrespc.
- *
- *   The response buffer for the scan command has the following
- *      memory layout:
- *
- *     .-----------------------------------------------------------.
- *     |  header (4 * sizeof(u16)):  Standard command response hdr |
- *     .-----------------------------------------------------------.
- *     |  bufsize (u16) : sizeof the BSS Description data          |
- *     .-----------------------------------------------------------.
- *     |  NumOfSet (u8) : Number of BSS Descs returned             |
- *     .-----------------------------------------------------------.
- *     |  BSSDescription data (variable, size given in bufsize)    |
- *     .-----------------------------------------------------------.
- *     |  TLV data (variable, size calculated using header->size,  |
- *     |            bufsize and sizeof the fixed fields above)     |
- *     .-----------------------------------------------------------.
- *
- *  @param priv    A pointer to struct lbs_private structure
- *  @param resp    A pointer to cmd_ds_command
- *
- *  @return        0 or -1
- */
-static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
-			      struct cmd_header *resp)
-{
-	struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
-	struct bss_descriptor *iter_bss;
-	struct bss_descriptor *safe;
-	uint8_t *bssinfo;
-	uint16_t scanrespsize;
-	int bytesleft;
-	int idx;
-	int tlvbufsize;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_SCAN);
-
-	/* Prune old entries from scan table */
-	list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
-		unsigned long stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE;
-		if (time_before(jiffies, stale_time))
-			continue;
-		list_move_tail (&iter_bss->list, &priv->network_free_list);
-		clear_bss_descriptor(iter_bss);
-	}
-
-	if (scanresp->nr_sets > MAX_NETWORK_COUNT) {
-		lbs_deb_scan("SCAN_RESP: too many scan results (%d, max %d)\n",
-			     scanresp->nr_sets, MAX_NETWORK_COUNT);
-		ret = -1;
-		goto done;
-	}
-
-	bytesleft = get_unaligned_le16(&scanresp->bssdescriptsize);
-	lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft);
-
-	scanrespsize = le16_to_cpu(resp->size);
-	lbs_deb_scan("SCAN_RESP: scan results %d\n", scanresp->nr_sets);
-
-	bssinfo = scanresp->bssdesc_and_tlvbuffer;
-
-	/* The size of the TLV buffer is equal to the entire command response
-	 *   size (scanrespsize) minus the fixed fields (sizeof()'s), the
- *   BSS Descriptions (bssdescriptsize as bytesLeft) and the command
-	 *   response header (sizeof(struct cmd_header))
-	 */
-	tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize)
-				     + sizeof(scanresp->nr_sets)
-				     + sizeof(struct cmd_header));
-
-	/*
-	 *  Process each scan response returned (scanresp->nr_sets). Save
-	 *    the information in the newbssentry and then insert into the
-	 *    driver scan table either as an update to an existing entry
-	 *    or as an addition at the end of the table
-	 */
-	for (idx = 0; idx < scanresp->nr_sets && bytesleft; idx++) {
-		struct bss_descriptor new;
-		struct bss_descriptor *found = NULL;
-		struct bss_descriptor *oldest = NULL;
-
-		/* Process the data fields and IEs returned for this BSS */
-		memset(&new, 0, sizeof (struct bss_descriptor));
-		if (lbs_process_bss(&new, &bssinfo, &bytesleft) != 0) {
-			/* error parsing the scan response, skipped */
-			lbs_deb_scan("SCAN_RESP: process_bss returned ERROR\n");
-			continue;
-		}
-
-		/* Try to find this bss in the scan table */
-		list_for_each_entry (iter_bss, &priv->network_list, list) {
-			if (is_same_network(iter_bss, &new)) {
-				found = iter_bss;
-				break;
-			}
-
-			if ((oldest == NULL) ||
-			    (iter_bss->last_scanned < oldest->last_scanned))
-				oldest = iter_bss;
-		}
-
-		if (found) {
-			/* found, clear it */
-			clear_bss_descriptor(found);
-		} else if (!list_empty(&priv->network_free_list)) {
-			/* Pull one from the free list */
-			found = list_entry(priv->network_free_list.next,
-					   struct bss_descriptor, list);
-			list_move_tail(&found->list, &priv->network_list);
-		} else if (oldest) {
-			/* If there are no more slots, expire the oldest */
-			found = oldest;
-			clear_bss_descriptor(found);
-			list_move_tail(&found->list, &priv->network_list);
-		} else {
-			continue;
-		}
-
-		lbs_deb_scan("SCAN_RESP: BSSID %pM\n", new.bssid);
-
-		/* Copy the locally created newbssentry to the scan table */
-		memcpy(found, &new, offsetof(struct bss_descriptor, list));
-	}
-
-	ret = 0;
-
-done:
-	lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
-	return ret;
-}
diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h
deleted file mode 100644
index 8fb1706..0000000
--- a/drivers/net/wireless/libertas/scan.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
-  * Interface for the wlan network scan routines
-  *
-  * Driver interface functions and type declarations for the scan module
-  * implemented in scan.c.
-  */
-#ifndef _LBS_SCAN_H
-#define _LBS_SCAN_H
-
-#include <net/iw_handler.h>
-
-struct lbs_private;
-
-#define MAX_NETWORK_COUNT 128
-
-/** Chan-freq-TxPower mapping table*/
-struct chan_freq_power {
-	/** channel Number		*/
-	u16 channel;
-	/** frequency of this channel	*/
-	u32 freq;
-	/** Max allowed Tx power level	*/
-	u16 maxtxpower;
-	/** TRUE: channel unsupported;  FALSE: supported */
-	u8 unsupported;
-};
-
-/** region-band mapping table*/
-struct region_channel {
-	/** TRUE if this entry is valid		     */
-	u8 valid;
-	/** region code for US, Japan ...	     */
-	u8 region;
-	/** band B/G/A, used for BAND_CONFIG cmd	     */
-	u8 band;
-	/** Actual No. of elements in the array below */
-	u8 nrcfp;
-	/** chan-freq-txpower mapping table*/
-	struct chan_freq_power *CFP;
-};
-
-/**
- *  @brief Maximum number of channels that can be sent in a setuserscan ioctl
- */
-#define LBS_IOCTL_USER_SCAN_CHAN_MAX  50
-
-int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len);
-
-int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
-
-int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
-				u8 ssid_len);
-
-int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
-			 struct iw_point *dwrq, char *extra);
-int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
-			 union iwreq_data *wrqu, char *extra);
-
-int lbs_scan_networks(struct lbs_private *priv, int full_scan);
-
-void lbs_scan_worker(struct work_struct *work);
-
-#endif
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index a9bf658..411a3bb 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -4,13 +4,13 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/sched.h>
+#include <net/cfg80211.h>
 
 #include "host.h"
 #include "radiotap.h"
 #include "decl.h"
 #include "defs.h"
 #include "dev.h"
-#include "wext.h"
 
 /**
  *  @brief This function converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE
@@ -111,7 +111,7 @@
 	p802x_hdr = skb->data;
 	pkt_len = skb->len;
 
-	if (dev == priv->rtap_net_dev) {
+	if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
 		struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data;
 
 		/* set txpd fields from the radiotap header */
@@ -147,7 +147,7 @@
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
 
-	if (priv->monitormode) {
+	if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
 		/* Keep the skb to echo it back once Tx feedback is
 		   received from FW */
 		skb_orphan(skb);
@@ -158,6 +158,7 @@
  free:
 		dev_kfree_skb_any(skb);
 	}
+
  unlock:
 	spin_unlock_irqrestore(&priv->driver_lock, flags);
 	wake_up(&priv->waitq);
@@ -179,7 +180,8 @@
 {
 	struct tx_radiotap_hdr *radiotap_hdr;
 
-	if (!priv->monitormode || priv->currenttxskb == NULL)
+	if (priv->wdev->iftype != NL80211_IFTYPE_MONITOR ||
+	    priv->currenttxskb == NULL)
 		return;
 
 	radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data;
@@ -188,7 +190,7 @@
 		(1 + priv->txretrycount - try_count) : 0;
 
 	priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb,
-						      priv->rtap_net_dev);
+						      priv->dev);
 	netif_rx(priv->currenttxskb);
 
 	priv->currenttxskb = NULL;
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 3e72c86..462fbb4 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -11,7 +11,7 @@
 struct ieee_ie_header {
 	u8 id;
 	u8 len;
-} __attribute__ ((packed));
+} __packed;
 
 struct ieee_ie_cf_param_set {
 	struct ieee_ie_header header;
@@ -20,19 +20,19 @@
 	u8 cfpperiod;
 	__le16 cfpmaxduration;
 	__le16 cfpdurationremaining;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct ieee_ie_ibss_param_set {
 	struct ieee_ie_header header;
 
 	__le16 atimwindow;
-} __attribute__ ((packed));
+} __packed;
 
 union ieee_ss_param_set {
 	struct ieee_ie_cf_param_set cf;
 	struct ieee_ie_ibss_param_set ibss;
-} __attribute__ ((packed));
+} __packed;
 
 struct ieee_ie_fh_param_set {
 	struct ieee_ie_header header;
@@ -41,18 +41,18 @@
 	u8 hopset;
 	u8 hoppattern;
 	u8 hopindex;
-} __attribute__ ((packed));
+} __packed;
 
 struct ieee_ie_ds_param_set {
 	struct ieee_ie_header header;
 
 	u8 channel;
-} __attribute__ ((packed));
+} __packed;
 
 union ieee_phy_param_set {
 	struct ieee_ie_fh_param_set fh;
 	struct ieee_ie_ds_param_set ds;
-} __attribute__ ((packed));
+} __packed;
 
 /** TLV  type ID definition */
 #define PROPRIETARY_TLV_BASE_ID		0x0100
@@ -100,28 +100,28 @@
 struct mrvl_ie_header {
 	__le16 type;
 	__le16 len;
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_data {
 	struct mrvl_ie_header header;
 	u8 Data[1];
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_rates_param_set {
 	struct mrvl_ie_header header;
 	u8 rates[1];
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_ssid_param_set {
 	struct mrvl_ie_header header;
 	u8 ssid[1];
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_wildcard_ssid_param_set {
 	struct mrvl_ie_header header;
 	u8 MaxSsidlength;
 	u8 ssid[1];
-} __attribute__ ((packed));
+} __packed;
 
 struct chanscanmode {
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -133,7 +133,7 @@
 	u8 disablechanfilt:1;
 	u8 reserved_2_7:6;
 #endif
-} __attribute__ ((packed));
+} __packed;
 
 struct chanscanparamset {
 	u8 radiotype;
@@ -141,12 +141,12 @@
 	struct chanscanmode chanscanmode;
 	__le16 minscantime;
 	__le16 maxscantime;
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_chanlist_param_set {
 	struct mrvl_ie_header header;
 	struct chanscanparamset chanscanparam[1];
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_cf_param_set {
 	struct mrvl_ie_header header;
@@ -154,86 +154,86 @@
 	u8 cfpperiod;
 	__le16 cfpmaxduration;
 	__le16 cfpdurationremaining;
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_ds_param_set {
 	struct mrvl_ie_header header;
 	u8 channel;
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_rsn_param_set {
 	struct mrvl_ie_header header;
 	u8 rsnie[1];
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_tsf_timestamp {
 	struct mrvl_ie_header header;
 	__le64 tsftable[1];
-} __attribute__ ((packed));
+} __packed;
 
 /* v9 and later firmware only */
 struct mrvl_ie_auth_type {
 	struct mrvl_ie_header header;
 	__le16 auth;
-} __attribute__ ((packed));
+} __packed;
 
 /**  Local Power capability */
 struct mrvl_ie_power_capability {
 	struct mrvl_ie_header header;
 	s8 minpower;
 	s8 maxpower;
-} __attribute__ ((packed));
+} __packed;
 
 /* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */
 struct mrvl_ie_thresholds {
 	struct mrvl_ie_header header;
 	u8 value;
 	u8 freq;
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_beacons_missed {
 	struct mrvl_ie_header header;
 	u8 beaconmissed;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_num_probes {
 	struct mrvl_ie_header header;
 	__le16 numprobes;
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_bcast_probe {
 	struct mrvl_ie_header header;
 	__le16 bcastprobe;
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_num_ssid_probe {
 	struct mrvl_ie_header header;
 	__le16 numssidprobe;
-} __attribute__ ((packed));
+} __packed;
 
 struct led_pin {
 	u8 led;
 	u8 pin;
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_ie_ledgpio {
 	struct mrvl_ie_header header;
 	struct led_pin ledpin[1];
-} __attribute__ ((packed));
+} __packed;
 
 struct led_bhv {
 	uint8_t	firmwarestate;
 	uint8_t	led;
 	uint8_t	ledstate;
 	uint8_t	ledarg;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct mrvl_ie_ledbhv {
 	struct mrvl_ie_header header;
 	struct led_bhv ledbhv[1];
-} __attribute__ ((packed));
+} __packed;
 
 /* Meant to be packed as the value member of a struct ieee80211_info_element.
  * Note that the len member of the ieee80211_info_element varies depending on
@@ -248,12 +248,12 @@
 	uint8_t mesh_capability;
 	uint8_t mesh_id_len;
 	uint8_t mesh_id[IEEE80211_MAX_SSID_LEN];
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_meshie {
 	u8 id, len;
 	struct mrvl_meshie_val val;
-} __attribute__ ((packed));
+} __packed;
 
 struct mrvl_mesh_defaults {
 	__le32 bootflag;
@@ -261,6 +261,6 @@
 	uint8_t reserved;
 	__le16 channel;
 	struct mrvl_meshie meshie;
-} __attribute__ ((packed));
+} __packed;
 
 #endif
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
deleted file mode 100644
index f96a960..0000000
--- a/drivers/net/wireless/libertas/wext.c
+++ /dev/null
@@ -1,2353 +0,0 @@
-/**
-  * This file contains ioctl functions
-  */
-#include <linux/ctype.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/if.h>
-#include <linux/if_arp.h>
-#include <linux/wireless.h>
-#include <linux/bitops.h>
-
-#include <net/lib80211.h>
-#include <net/iw_handler.h>
-
-#include "host.h"
-#include "radiotap.h"
-#include "decl.h"
-#include "defs.h"
-#include "dev.h"
-#include "wext.h"
-#include "scan.h"
-#include "assoc.h"
-#include "cmd.h"
-
-
-static inline void lbs_postpone_association_work(struct lbs_private *priv)
-{
-	if (priv->surpriseremoved)
-		return;
-	cancel_delayed_work(&priv->assoc_work);
-	queue_delayed_work(priv->work_thread, &priv->assoc_work, HZ / 2);
-}
-
-static inline void lbs_do_association_work(struct lbs_private *priv)
-{
-	if (priv->surpriseremoved)
-		return;
-	cancel_delayed_work(&priv->assoc_work);
-	queue_delayed_work(priv->work_thread, &priv->assoc_work, 0);
-}
-
-static inline void lbs_cancel_association_work(struct lbs_private *priv)
-{
-	cancel_delayed_work(&priv->assoc_work);
-	kfree(priv->pending_assoc_req);
-	priv->pending_assoc_req = NULL;
-}
-
-void lbs_send_disconnect_notification(struct lbs_private *priv)
-{
-	union iwreq_data wrqu;
-
-	memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
-	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-	wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
-}
-
-static void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
-{
-	union iwreq_data iwrq;
-	u8 buf[50];
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	memset(&iwrq, 0, sizeof(union iwreq_data));
-	memset(buf, 0, sizeof(buf));
-
-	snprintf(buf, sizeof(buf) - 1, "%s", str);
-
-	iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
-
-	/* Send Event to upper layer */
-	lbs_deb_wext("event indication string %s\n", (char *)buf);
-	lbs_deb_wext("event indication length %d\n", iwrq.data.length);
-	lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
-
-	wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-}
-
-/**
- *  @brief This function handles MIC failure event.
- *
- *  @param priv    A pointer to struct lbs_private structure
- *  @param event   the event id
- *  @return 	   n/a
- */
-void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
-{
-	char buf[50];
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	memset(buf, 0, sizeof(buf));
-
-	sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
-
-	if (event == MACREG_INT_CODE_MIC_ERR_UNICAST)
-		strcat(buf, "unicast ");
-	else
-		strcat(buf, "multicast ");
-
-	lbs_send_iwevcustom_event(priv, buf);
-	lbs_deb_leave(LBS_DEB_CMD);
-}
-
-/**
- *  @brief Find the channel frequency power info with specific channel
- *
- *  @param priv 	A pointer to struct lbs_private structure
- *  @param band		it can be BAND_A, BAND_G or BAND_B
- *  @param channel      the channel for looking
- *  @return		A pointer to the struct chan_freq_power structure, or NULL if not found.
- */
-struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
-	struct lbs_private *priv,
-	u8 band,
-	u16 channel)
-{
-	struct chan_freq_power *cfp = NULL;
-	struct region_channel *rc;
-	int i, j;
-
-	for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
-		rc = &priv->region_channel[j];
-
-		if (!rc->valid || !rc->CFP)
-			continue;
-		if (rc->band != band)
-			continue;
-		for (i = 0; i < rc->nrcfp; i++) {
-			if (rc->CFP[i].channel == channel) {
-				cfp = &rc->CFP[i];
-				break;
-			}
-		}
-	}
-
-	if (!cfp && channel)
-		lbs_deb_wext("lbs_find_cfp_by_band_and_channel: can't find "
-		       "cfp by band %d / channel %d\n", band, channel);
-
-	return cfp;
-}
-
-/**
- *  @brief Find the channel frequency power info with specific frequency
- *
- *  @param priv 	A pointer to struct lbs_private structure
- *  @param band		it can be BAND_A, BAND_G or BAND_B
- *  @param freq	        the frequency for looking
- *  @return		A pointer to the struct chan_freq_power structure, or NULL if not found.
- */
-static struct chan_freq_power *find_cfp_by_band_and_freq(
-	struct lbs_private *priv,
-	u8 band,
-	u32 freq)
-{
-	struct chan_freq_power *cfp = NULL;
-	struct region_channel *rc;
-	int i, j;
-
-	for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
-		rc = &priv->region_channel[j];
-
-		if (!rc->valid || !rc->CFP)
-			continue;
-		if (rc->band != band)
-			continue;
-		for (i = 0; i < rc->nrcfp; i++) {
-			if (rc->CFP[i].freq == freq) {
-				cfp = &rc->CFP[i];
-				break;
-			}
-		}
-	}
-
-	if (!cfp && freq)
-		lbs_deb_wext("find_cfp_by_band_and_freql: can't find cfp by "
-		       "band %d / freq %d\n", band, freq);
-
-	return cfp;
-}
-
-/**
- *  @brief Copy active data rates based on adapter mode and status
- *
- *  @param priv              A pointer to struct lbs_private structure
- *  @param rate		        The buf to return the active rates
- */
-static void copy_active_data_rates(struct lbs_private *priv, u8 *rates)
-{
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if ((priv->connect_status != LBS_CONNECTED) &&
-		!lbs_mesh_connected(priv))
-		memcpy(rates, lbs_bg_rates, MAX_RATES);
-	else
-		memcpy(rates, priv->curbssparams.rates, MAX_RATES);
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-}
-
-static int lbs_get_name(struct net_device *dev, struct iw_request_info *info,
-			 char *cwrq, char *extra)
-{
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	/* We could add support for 802.11n here as needed. Jean II */
-	snprintf(cwrq, IFNAMSIZ, "IEEE 802.11b/g");
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info,
-			 struct iw_freq *fwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	struct chan_freq_power *cfp;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	cfp = lbs_find_cfp_by_band_and_channel(priv, 0,
-					   priv->channel);
-
-	if (!cfp) {
-		if (priv->channel)
-			lbs_deb_wext("invalid channel %d\n",
-			       priv->channel);
-		return -EINVAL;
-	}
-
-	fwrq->m = (long)cfp->freq * 100000;
-	fwrq->e = 1;
-
-	lbs_deb_wext("freq %u\n", fwrq->m);
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-static int lbs_get_wap(struct net_device *dev, struct iw_request_info *info,
-			struct sockaddr *awrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (priv->connect_status == LBS_CONNECTED) {
-		memcpy(awrq->sa_data, priv->curbssparams.bssid, ETH_ALEN);
-	} else {
-		memset(awrq->sa_data, 0, ETH_ALEN);
-	}
-	awrq->sa_family = ARPHRD_ETHER;
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-static int lbs_set_nick(struct net_device *dev, struct iw_request_info *info,
-			 struct iw_point *dwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	/*
-	 * Check the size of the string
-	 */
-
-	if (dwrq->length > 16) {
-		return -E2BIG;
-	}
-
-	mutex_lock(&priv->lock);
-	memset(priv->nodename, 0, sizeof(priv->nodename));
-	memcpy(priv->nodename, extra, dwrq->length);
-	mutex_unlock(&priv->lock);
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
-			 struct iw_point *dwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	dwrq->length = strlen(priv->nodename);
-	memcpy(extra, priv->nodename, dwrq->length);
-	extra[dwrq->length] = '\0';
-
-	dwrq->flags = 1;	/* active */
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-#ifdef CONFIG_LIBERTAS_MESH
-static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
-			 struct iw_point *dwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	/* Use nickname to indicate that mesh is on */
-
-	if (lbs_mesh_connected(priv)) {
-		strncpy(extra, "Mesh", 12);
-		extra[12] = '\0';
-		dwrq->length = strlen(extra);
-	}
-
-	else {
-		extra[0] = '\0';
-		dwrq->length = 0;
-	}
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-#endif
-
-static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
-			struct iw_param *vwrq, char *extra)
-{
-	int ret = 0;
-	struct lbs_private *priv = dev->ml_priv;
-	u32 val = vwrq->value;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (vwrq->disabled)
-		val = MRVDRV_RTS_MAX_VALUE;
-
-	if (val > MRVDRV_RTS_MAX_VALUE) /* min rts value is 0 */
-		return -EINVAL;
-
-	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, (u16) val);
-
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info,
-			struct iw_param *vwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	int ret = 0;
-	u16 val = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, &val);
-	if (ret)
-		goto out;
-
-	vwrq->value = val;
-	vwrq->disabled = val > MRVDRV_RTS_MAX_VALUE; /* min rts value is 0 */
-	vwrq->fixed = 1;
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
-			 struct iw_param *vwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	int ret = 0;
-	u32 val = vwrq->value;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (vwrq->disabled)
-		val = MRVDRV_FRAG_MAX_VALUE;
-
-	if (val < MRVDRV_FRAG_MIN_VALUE || val > MRVDRV_FRAG_MAX_VALUE)
-		return -EINVAL;
-
-	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, (u16) val);
-
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info,
-			 struct iw_param *vwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	int ret = 0;
-	u16 val = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, &val);
-	if (ret)
-		goto out;
-
-	vwrq->value = val;
-	vwrq->disabled = ((val < MRVDRV_FRAG_MIN_VALUE)
-			  || (val > MRVDRV_FRAG_MAX_VALUE));
-	vwrq->fixed = 1;
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static int lbs_get_mode(struct net_device *dev,
-			 struct iw_request_info *info, u32 * uwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	*uwrq = priv->mode;
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-#ifdef CONFIG_LIBERTAS_MESH
-static int mesh_wlan_get_mode(struct net_device *dev,
-		              struct iw_request_info *info, u32 * uwrq,
-			      char *extra)
-{
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	*uwrq = IW_MODE_REPEAT;
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-#endif
-
-static int lbs_get_txpow(struct net_device *dev,
-			  struct iw_request_info *info,
-			  struct iw_param *vwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	s16 curlevel = 0;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (!priv->radio_on) {
-		lbs_deb_wext("tx power off\n");
-		vwrq->value = 0;
-		vwrq->disabled = 1;
-		goto out;
-	}
-
-	ret = lbs_get_tx_power(priv, &curlevel, NULL, NULL);
-	if (ret)
-		goto out;
-
-	lbs_deb_wext("tx power level %d dbm\n", curlevel);
-	priv->txpower_cur = curlevel;
-
-	vwrq->value = curlevel;
-	vwrq->fixed = 1;
-	vwrq->disabled = 0;
-	vwrq->flags = IW_TXPOW_DBM;
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info,
-			  struct iw_param *vwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	int ret = 0;
-	u16 slimit = 0, llimit = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-        if ((vwrq->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
-                return -EOPNOTSUPP;
-
-	/* The MAC has a 4-bit Total_Tx_Count register
-	   Total_Tx_Count = 1 + Tx_Retry_Count */
-#define TX_RETRY_MIN 0
-#define TX_RETRY_MAX 14
-	if (vwrq->value < TX_RETRY_MIN || vwrq->value > TX_RETRY_MAX)
-		return -EINVAL;
-
-	/* Add 1 to convert retry count to try count */
-	if (vwrq->flags & IW_RETRY_SHORT)
-		slimit = (u16) (vwrq->value + 1);
-	else if (vwrq->flags & IW_RETRY_LONG)
-		llimit = (u16) (vwrq->value + 1);
-	else
-		slimit = llimit = (u16) (vwrq->value + 1); /* set both */
-
-	if (llimit) {
-		ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT,
-				       llimit);
-		if (ret)
-			goto out;
-	}
-
-	if (slimit) {
-		/* txretrycount follows the short retry limit */
-		priv->txretrycount = slimit;
-		ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT,
-				       slimit);
-		if (ret)
-			goto out;
-	}
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static int lbs_get_retry(struct net_device *dev, struct iw_request_info *info,
-			  struct iw_param *vwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	int ret = 0;
-	u16 val = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	vwrq->disabled = 0;
-
-	if (vwrq->flags & IW_RETRY_LONG) {
-		ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT, &val);
-		if (ret)
-			goto out;
-
-		/* Subtract 1 to convert try count to retry count */
-		vwrq->value = val - 1;
-		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
-	} else {
-		ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT, &val);
-		if (ret)
-			goto out;
-
-		/* txretry count follows the short retry limit */
-		priv->txretrycount = val;
-		/* Subtract 1 to convert try count to retry count */
-		vwrq->value = val - 1;
-		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
-	}
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static inline void sort_channels(struct iw_freq *freq, int num)
-{
-	int i, j;
-	struct iw_freq temp;
-
-	for (i = 0; i < num; i++)
-		for (j = i + 1; j < num; j++)
-			if (freq[i].i > freq[j].i) {
-				temp.i = freq[i].i;
-				temp.m = freq[i].m;
-
-				freq[i].i = freq[j].i;
-				freq[i].m = freq[j].m;
-
-				freq[j].i = temp.i;
-				freq[j].m = temp.m;
-			}
-}
-
-/* data rate listing
-	MULTI_BANDS:
-		abg		a	b	b/g
-   Infra 	G(12)		A(8)	B(4)	G(12)
-   Adhoc 	A+B(12)		A(8)	B(4)	B(4)
-
-	non-MULTI_BANDS:
-					b	b/g
-   Infra 	     		    	B(4)	G(12)
-   Adhoc 	      		    	B(4)	B(4)
- */
-/**
- *  @brief Get Range Info
- *
- *  @param dev                  A pointer to net_device structure
- *  @param info			A pointer to iw_request_info structure
- *  @param dwrq		A pointer to iw_point structure
- *  @param extra		A pointer to extra data buf
- *  @return			0 on success, otherwise failure
- */
-static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
-			  struct iw_point *dwrq, char *extra)
-{
-	int i, j;
-	struct lbs_private *priv = dev->ml_priv;
-	struct iw_range *range = (struct iw_range *)extra;
-	struct chan_freq_power *cfp;
-	u8 rates[MAX_RATES + 1];
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	dwrq->length = sizeof(struct iw_range);
-	memset(range, 0, sizeof(struct iw_range));
-
-	range->min_nwid = 0;
-	range->max_nwid = 0;
-
-	memset(rates, 0, sizeof(rates));
-	copy_active_data_rates(priv, rates);
-	range->num_bitrates = strnlen(rates, IW_MAX_BITRATES);
-	for (i = 0; i < range->num_bitrates; i++)
-		range->bitrate[i] = rates[i] * 500000;
-	range->num_bitrates = i;
-	lbs_deb_wext("IW_MAX_BITRATES %d, num_bitrates %d\n", IW_MAX_BITRATES,
-	       range->num_bitrates);
-
-	range->num_frequency = 0;
-
-	range->scan_capa = IW_SCAN_CAPA_ESSID;
-
-	for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
-	     && (j < ARRAY_SIZE(priv->region_channel)); j++) {
-		cfp = priv->region_channel[j].CFP;
-		for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
-		     && priv->region_channel[j].valid
-		     && cfp
-		     && (i < priv->region_channel[j].nrcfp); i++) {
-			range->freq[range->num_frequency].i =
-			    (long)cfp->channel;
-			range->freq[range->num_frequency].m =
-			    (long)cfp->freq * 100000;
-			range->freq[range->num_frequency].e = 1;
-			cfp++;
-			range->num_frequency++;
-		}
-	}
-
-	lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n",
-	       IW_MAX_FREQUENCIES, range->num_frequency);
-
-	range->num_channels = range->num_frequency;
-
-	sort_channels(&range->freq[0], range->num_frequency);
-
-	/*
-	 * Set an indication of the max TCP throughput in bit/s that we can
-	 * expect using this interface
-	 */
-	if (i > 2)
-		range->throughput = 5000 * 1000;
-	else
-		range->throughput = 1500 * 1000;
-
-	range->min_rts = MRVDRV_RTS_MIN_VALUE;
-	range->max_rts = MRVDRV_RTS_MAX_VALUE;
-	range->min_frag = MRVDRV_FRAG_MIN_VALUE;
-	range->max_frag = MRVDRV_FRAG_MAX_VALUE;
-
-	range->encoding_size[0] = 5;
-	range->encoding_size[1] = 13;
-	range->num_encoding_sizes = 2;
-	range->max_encoding_tokens = 4;
-
-	/*
-	 * Right now we support only "iwconfig ethX power on|off"
-	 */
-	range->pm_capa = IW_POWER_ON;
-
-	/*
-	 * Minimum version we recommend
-	 */
-	range->we_version_source = 15;
-
-	/*
-	 * Version we are compiled with
-	 */
-	range->we_version_compiled = WIRELESS_EXT;
-
-	range->retry_capa = IW_RETRY_LIMIT;
-	range->retry_flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
-
-	range->min_retry = TX_RETRY_MIN;
-	range->max_retry = TX_RETRY_MAX;
-
-	/*
-	 * Set the qual, level and noise range values
-	 */
-	range->max_qual.qual = 100;
-	range->max_qual.level = 0;
-	range->max_qual.noise = 0;
-	range->max_qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
-
-	range->avg_qual.qual = 70;
-	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
-	range->avg_qual.level = 0;
-	range->avg_qual.noise = 0;
-	range->avg_qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
-
-	range->sensitivity = 0;
-
-	/* Setup the supported power level ranges */
-	memset(range->txpower, 0, sizeof(range->txpower));
-	range->txpower_capa = IW_TXPOW_DBM | IW_TXPOW_RANGE;
-	range->txpower[0] = priv->txpower_min;
-	range->txpower[1] = priv->txpower_max;
-	range->num_txpower = 2;
-
-	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
-				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
-				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
-	range->event_capa[1] = IW_EVENT_CAPA_K_1;
-
-	if (priv->fwcapinfo & FW_CAPINFO_WPA) {
-		range->enc_capa =   IW_ENC_CAPA_WPA
-		                  | IW_ENC_CAPA_WPA2
-		                  | IW_ENC_CAPA_CIPHER_TKIP
-		                  | IW_ENC_CAPA_CIPHER_CCMP;
-	}
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
-			  struct iw_param *vwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (!(priv->fwcapinfo & FW_CAPINFO_PS)) {
-		if (vwrq->disabled)
-			return 0;
-		else
-			return -EINVAL;
-	}
-
-	/* PS is currently supported only in Infrastructure mode
-	 * Remove this check if it is to be supported in IBSS mode also
-	 */
-
-	if (vwrq->disabled) {
-		priv->psmode = LBS802_11POWERMODECAM;
-		if (priv->psstate != PS_STATE_FULL_POWER) {
-			lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
-		}
-
-		return 0;
-	}
-
-	if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
-		lbs_deb_wext(
-		       "setting power timeout is not supported\n");
-		return -EINVAL;
-	} else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
-		vwrq->value = vwrq->value / 1000;
-		if (!priv->enter_deep_sleep) {
-			lbs_pr_err("deep sleep feature is not implemented "
-					"for this interface driver\n");
-			return -EINVAL;
-		}
-
-		if (priv->connect_status == LBS_CONNECTED) {
-			if ((priv->is_auto_deep_sleep_enabled) &&
-						(vwrq->value == -1000)) {
-				lbs_exit_auto_deep_sleep(priv);
-				return 0;
-			} else {
-				lbs_pr_err("can't use deep sleep cmd in "
-						"connected state\n");
-				return -EINVAL;
-			}
-		}
-
-		if ((vwrq->value < 0) && (vwrq->value != -1000)) {
-			lbs_pr_err("unknown option\n");
-			return -EINVAL;
-		}
-
-		if (vwrq->value > 0) {
-			if (!priv->is_auto_deep_sleep_enabled) {
-				priv->is_activity_detected = 0;
-				priv->auto_deep_sleep_timeout = vwrq->value;
-				lbs_enter_auto_deep_sleep(priv);
-			} else {
-				priv->auto_deep_sleep_timeout = vwrq->value;
-				lbs_deb_debugfs("auto deep sleep: "
-						"already enabled\n");
-			}
-			return 0;
-		} else {
-			if (priv->is_auto_deep_sleep_enabled) {
-				lbs_exit_auto_deep_sleep(priv);
-				/* Try to exit deep sleep if auto
-				 * deep sleep disabled */
-				ret = lbs_set_deep_sleep(priv, 0);
-			}
-			if (vwrq->value == 0)
-				ret = lbs_set_deep_sleep(priv, 1);
-			else if (vwrq->value == -1000)
-				ret = lbs_set_deep_sleep(priv, 0);
-			return ret;
-		}
-	}
-
-	if (priv->psmode != LBS802_11POWERMODECAM) {
-		return 0;
-	}
-
-	priv->psmode = LBS802_11POWERMODEMAX_PSP;
-
-	if (priv->connect_status == LBS_CONNECTED) {
-		lbs_ps_sleep(priv, CMD_OPTION_WAITFORRSP);
-	}
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-
-	return 0;
-}
-
-static int lbs_get_power(struct net_device *dev, struct iw_request_info *info,
-			  struct iw_param *vwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	vwrq->value = 0;
-	vwrq->flags = 0;
-	vwrq->disabled = priv->psmode == LBS802_11POWERMODECAM
-		|| priv->connect_status == LBS_DISCONNECTED;
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
-{
-	enum {
-		POOR = 30,
-		FAIR = 60,
-		GOOD = 80,
-		VERY_GOOD = 90,
-		EXCELLENT = 95,
-		PERFECT = 100
-	};
-	struct lbs_private *priv = dev->ml_priv;
-	u32 rssi_qual;
-	u32 tx_qual;
-	u32 quality = 0;
-	int ret, stats_valid = 0;
-	u8 rssi;
-	u32 tx_retries;
-	struct cmd_ds_802_11_get_log log;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	priv->wstats.status = priv->mode;
-
-	/* If we're not associated, all quality values are meaningless */
-	if ((priv->connect_status != LBS_CONNECTED) &&
-	    !lbs_mesh_connected(priv))
-		goto out;
-
-	/* Quality by RSSI */
-	priv->wstats.qual.level =
-	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
-	     priv->NF[TYPE_BEACON][TYPE_NOAVG]);
-
-	if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) {
-		priv->wstats.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE;
-	} else {
-		priv->wstats.qual.noise =
-		    CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]);
-	}
-
-	lbs_deb_wext("signal level %#x\n", priv->wstats.qual.level);
-	lbs_deb_wext("noise %#x\n", priv->wstats.qual.noise);
-
-	rssi = priv->wstats.qual.level - priv->wstats.qual.noise;
-	if (rssi < 15)
-		rssi_qual = rssi * POOR / 10;
-	else if (rssi < 20)
-		rssi_qual = (rssi - 15) * (FAIR - POOR) / 5 + POOR;
-	else if (rssi < 30)
-		rssi_qual = (rssi - 20) * (GOOD - FAIR) / 5 + FAIR;
-	else if (rssi < 40)
-		rssi_qual = (rssi - 30) * (VERY_GOOD - GOOD) /
-		    10 + GOOD;
-	else
-		rssi_qual = (rssi - 40) * (PERFECT - VERY_GOOD) /
-		    10 + VERY_GOOD;
-	quality = rssi_qual;
-
-	/* Quality by TX errors */
-	priv->wstats.discard.retries = dev->stats.tx_errors;
-
-	memset(&log, 0, sizeof(log));
-	log.hdr.size = cpu_to_le16(sizeof(log));
-	ret = lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log);
-	if (ret)
-		goto out;
-
-	tx_retries = le32_to_cpu(log.retry);
-
-	if (tx_retries > 75)
-		tx_qual = (90 - tx_retries) * POOR / 15;
-	else if (tx_retries > 70)
-		tx_qual = (75 - tx_retries) * (FAIR - POOR) / 5 + POOR;
-	else if (tx_retries > 65)
-		tx_qual = (70 - tx_retries) * (GOOD - FAIR) / 5 + FAIR;
-	else if (tx_retries > 50)
-		tx_qual = (65 - tx_retries) * (VERY_GOOD - GOOD) /
-		    15 + GOOD;
-	else
-		tx_qual = (50 - tx_retries) *
-		    (PERFECT - VERY_GOOD) / 50 + VERY_GOOD;
-	quality = min(quality, tx_qual);
-
-	priv->wstats.discard.code = le32_to_cpu(log.wepundecryptable);
-	priv->wstats.discard.retries = tx_retries;
-	priv->wstats.discard.misc = le32_to_cpu(log.ackfailure);
-
-	/* Calculate quality */
-	priv->wstats.qual.qual = min_t(u8, quality, 100);
-	priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
-	stats_valid = 1;
-
-	/* update stats asynchronously for future calls */
-	ret = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
-					0, 0, NULL);
-	if (ret)
-		lbs_pr_err("RSSI command failed\n");
-out:
-	if (!stats_valid) {
-		priv->wstats.miss.beacon = 0;
-		priv->wstats.discard.retries = 0;
-		priv->wstats.qual.qual = 0;
-		priv->wstats.qual.level = 0;
-		priv->wstats.qual.noise = 0;
-		priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED;
-		priv->wstats.qual.updated |= IW_QUAL_NOISE_INVALID |
-		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
-	}
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return &priv->wstats;
-
-
-}
-
-static int lbs_set_freq(struct net_device *dev, struct iw_request_info *info,
-		  struct iw_freq *fwrq, char *extra)
-{
-	int ret = -EINVAL;
-	struct lbs_private *priv = dev->ml_priv;
-	struct chan_freq_power *cfp;
-	struct assoc_request * assoc_req;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	mutex_lock(&priv->lock);
-	assoc_req = lbs_get_association_request(priv);
-	if (!assoc_req) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	/* If setting by frequency, convert to a channel */
-	if (fwrq->e == 1) {
-		long f = fwrq->m / 100000;
-
-		cfp = find_cfp_by_band_and_freq(priv, 0, f);
-		if (!cfp) {
-			lbs_deb_wext("invalid freq %ld\n", f);
-			goto out;
-		}
-
-		fwrq->e = 0;
-		fwrq->m = (int) cfp->channel;
-	}
-
-	/* Setting by channel number */
-	if (fwrq->m > 1000 || fwrq->e > 0) {
-		goto out;
-	}
-
-	cfp = lbs_find_cfp_by_band_and_channel(priv, 0, fwrq->m);
-	if (!cfp) {
-		goto out;
-	}
-
-	assoc_req->channel = fwrq->m;
-	ret = 0;
-
-out:
-	if (ret == 0) {
-		set_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags);
-		lbs_postpone_association_work(priv);
-	} else {
-		lbs_cancel_association_work(priv);
-	}
-	mutex_unlock(&priv->lock);
-
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-#ifdef CONFIG_LIBERTAS_MESH
-static int lbs_mesh_set_freq(struct net_device *dev,
-			     struct iw_request_info *info,
-			     struct iw_freq *fwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	struct chan_freq_power *cfp;
-	int ret = -EINVAL;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	/* If setting by frequency, convert to a channel */
-	if (fwrq->e == 1) {
-		long f = fwrq->m / 100000;
-
-		cfp = find_cfp_by_band_and_freq(priv, 0, f);
-		if (!cfp) {
-			lbs_deb_wext("invalid freq %ld\n", f);
-			goto out;
-		}
-
-		fwrq->e = 0;
-		fwrq->m = (int) cfp->channel;
-	}
-
-	/* Setting by channel number */
-	if (fwrq->m > 1000 || fwrq->e > 0) {
-		goto out;
-	}
-
-	cfp = lbs_find_cfp_by_band_and_channel(priv, 0, fwrq->m);
-	if (!cfp) {
-		goto out;
-	}
-
-	if (fwrq->m != priv->channel) {
-		lbs_deb_wext("mesh channel change forces eth disconnect\n");
-		if (priv->mode == IW_MODE_INFRA)
-			lbs_cmd_80211_deauthenticate(priv,
-						     priv->curbssparams.bssid,
-						     WLAN_REASON_DEAUTH_LEAVING);
-		else if (priv->mode == IW_MODE_ADHOC)
-			lbs_adhoc_stop(priv);
-	}
-	lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m);
-	lbs_update_channel(priv);
-	ret = 0;
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-#endif
-
-static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
-		  struct iw_param *vwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	u8 new_rate = 0;
-	int ret = -EINVAL;
-	u8 rates[MAX_RATES + 1];
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	lbs_deb_wext("vwrq->value %d\n", vwrq->value);
-	lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed);
-
-	if (vwrq->fixed && vwrq->value == -1)
-		goto out;
-
-	/* Auto rate? */
-	priv->enablehwauto = !vwrq->fixed;
-
-	if (vwrq->value == -1)
-		priv->cur_rate = 0;
-	else {
-		if (vwrq->value % 100000)
-			goto out;
-
-		new_rate = vwrq->value / 500000;
-		priv->cur_rate = new_rate;
-		/* the rest is only needed for lbs_set_data_rate() */
-		memset(rates, 0, sizeof(rates));
-		copy_active_data_rates(priv, rates);
-		if (!memchr(rates, new_rate, sizeof(rates))) {
-			lbs_pr_alert("fixed data rate 0x%X out of range\n",
-				new_rate);
-			goto out;
-		}
-		if (priv->fwrelease < 0x09000000) {
-			ret = lbs_set_power_adapt_cfg(priv, 0,
-					POW_ADAPT_DEFAULT_P0,
-					POW_ADAPT_DEFAULT_P1,
-					POW_ADAPT_DEFAULT_P2);
-			if (ret)
-				goto out;
-		}
-		ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
-				TPC_DEFAULT_P2, 1);
-		if (ret)
-			goto out;
-	}
-
-	/* Try the newer command first (Firmware Spec 5.1 and above) */
-	ret = lbs_cmd_802_11_rate_adapt_rateset(priv, CMD_ACT_SET);
-
-	/* Fallback to older version */
-	if (ret)
-		ret = lbs_set_data_rate(priv, new_rate);
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static int lbs_get_rate(struct net_device *dev, struct iw_request_info *info,
-		  struct iw_param *vwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (priv->connect_status == LBS_CONNECTED) {
-		vwrq->value = priv->cur_rate * 500000;
-
-		if (priv->enablehwauto)
-			vwrq->fixed = 0;
-		else
-			vwrq->fixed = 1;
-
-	} else {
-		vwrq->fixed = 0;
-		vwrq->value = 0;
-	}
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-static int lbs_set_mode(struct net_device *dev,
-		  struct iw_request_info *info, u32 * uwrq, char *extra)
-{
-	int ret = 0;
-	struct lbs_private *priv = dev->ml_priv;
-	struct assoc_request * assoc_req;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (   (*uwrq != IW_MODE_ADHOC)
-	    && (*uwrq != IW_MODE_INFRA)
-	    && (*uwrq != IW_MODE_AUTO)) {
-		lbs_deb_wext("Invalid mode: 0x%x\n", *uwrq);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	mutex_lock(&priv->lock);
-	assoc_req = lbs_get_association_request(priv);
-	if (!assoc_req) {
-		ret = -ENOMEM;
-		lbs_cancel_association_work(priv);
-	} else {
-		assoc_req->mode = *uwrq;
-		set_bit(ASSOC_FLAG_MODE, &assoc_req->flags);
-		lbs_postpone_association_work(priv);
-		lbs_deb_wext("Switching to mode: 0x%x\n", *uwrq);
-	}
-	mutex_unlock(&priv->lock);
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-
-/**
- *  @brief Get Encryption key
- *
- *  @param dev                  A pointer to net_device structure
- *  @param info			A pointer to iw_request_info structure
- *  @param dwrq		A pointer to iw_point structure
- *  @param extra		A pointer to extra data buf
- *  @return			0 on success, otherwise failure
- */
-static int lbs_get_encode(struct net_device *dev,
-			   struct iw_request_info *info,
-			   struct iw_point *dwrq, u8 * extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	lbs_deb_wext("flags 0x%x, index %d, length %d, wep_tx_keyidx %d\n",
-	       dwrq->flags, index, dwrq->length, priv->wep_tx_keyidx);
-
-	dwrq->flags = 0;
-
-	/* Authentication method */
-	switch (priv->secinfo.auth_mode) {
-	case IW_AUTH_ALG_OPEN_SYSTEM:
-		dwrq->flags = IW_ENCODE_OPEN;
-		break;
-
-	case IW_AUTH_ALG_SHARED_KEY:
-	case IW_AUTH_ALG_LEAP:
-		dwrq->flags = IW_ENCODE_RESTRICTED;
-		break;
-	default:
-		dwrq->flags = IW_ENCODE_DISABLED | IW_ENCODE_OPEN;
-		break;
-	}
-
-	memset(extra, 0, 16);
-
-	mutex_lock(&priv->lock);
-
-	/* Default to returning current transmit key */
-	if (index < 0)
-		index = priv->wep_tx_keyidx;
-
-	if ((priv->wep_keys[index].len) && priv->secinfo.wep_enabled) {
-		memcpy(extra, priv->wep_keys[index].key,
-		       priv->wep_keys[index].len);
-		dwrq->length = priv->wep_keys[index].len;
-
-		dwrq->flags |= (index + 1);
-		/* Return WEP enabled */
-		dwrq->flags &= ~IW_ENCODE_DISABLED;
-	} else if ((priv->secinfo.WPAenabled)
-		   || (priv->secinfo.WPA2enabled)) {
-		/* return WPA enabled */
-		dwrq->flags &= ~IW_ENCODE_DISABLED;
-		dwrq->flags |= IW_ENCODE_NOKEY;
-	} else {
-		dwrq->flags |= IW_ENCODE_DISABLED;
-	}
-
-	mutex_unlock(&priv->lock);
-
-	lbs_deb_wext("key: %02x:%02x:%02x:%02x:%02x:%02x, keylen %d\n",
-	       extra[0], extra[1], extra[2],
-	       extra[3], extra[4], extra[5], dwrq->length);
-
-	lbs_deb_wext("return flags 0x%x\n", dwrq->flags);
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-/**
- *  @brief Set Encryption key (internal)
- *
- *  @param assoc_req		A pointer to the association request structure
- *  @param key_material	A pointer to key material
- *  @param key_length		length of key material
- *  @param index		key index to set
- *  @param set_tx_key		Force set TX key (1 = yes, 0 = no)
- *  @return			0 on success, otherwise failure
- */
-static int lbs_set_wep_key(struct assoc_request *assoc_req,
-			    const char *key_material,
-			    u16 key_length,
-			    u16 index,
-			    int set_tx_key)
-{
-	int ret = 0;
-	struct enc_key *pkey;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	/* Paranoid validation of key index */
-	if (index > 3) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* validate max key length */
-	if (key_length > KEY_LEN_WEP_104) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	pkey = &assoc_req->wep_keys[index];
-
-	if (key_length > 0) {
-		memset(pkey, 0, sizeof(struct enc_key));
-		pkey->type = KEY_TYPE_ID_WEP;
-
-		/* Standardize the key length */
-		pkey->len = (key_length > KEY_LEN_WEP_40) ?
-		                KEY_LEN_WEP_104 : KEY_LEN_WEP_40;
-		memcpy(pkey->key, key_material, key_length);
-	}
-
-	if (set_tx_key) {
-		/* Ensure the chosen key is valid */
-		if (!pkey->len) {
-			lbs_deb_wext("key not set, so cannot enable it\n");
-			ret = -EINVAL;
-			goto out;
-		}
-		assoc_req->wep_tx_keyidx = index;
-	}
-
-	assoc_req->secinfo.wep_enabled = 1;
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static int validate_key_index(u16 def_index, u16 raw_index,
-			      u16 *out_index, u16 *is_default)
-{
-	if (!out_index || !is_default)
-		return -EINVAL;
-
-	/* Verify index if present, otherwise use default TX key index */
-	if (raw_index > 0) {
-		if (raw_index > 4)
-			return -EINVAL;
-		*out_index = raw_index - 1;
-	} else {
-		*out_index = def_index;
-		*is_default = 1;
-	}
-	return 0;
-}
-
-static void disable_wep(struct assoc_request *assoc_req)
-{
-	int i;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	/* Set Open System auth mode */
-	assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
-
-	/* Clear WEP keys and mark WEP as disabled */
-	assoc_req->secinfo.wep_enabled = 0;
-	for (i = 0; i < 4; i++)
-		assoc_req->wep_keys[i].len = 0;
-
-	set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
-	set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-}
-
-static void disable_wpa(struct assoc_request *assoc_req)
-{
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	memset(&assoc_req->wpa_mcast_key, 0, sizeof (struct enc_key));
-	assoc_req->wpa_mcast_key.flags = KEY_INFO_WPA_MCAST;
-	set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags);
-
-	memset(&assoc_req->wpa_unicast_key, 0, sizeof (struct enc_key));
-	assoc_req->wpa_unicast_key.flags = KEY_INFO_WPA_UNICAST;
-	set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags);
-
-	assoc_req->secinfo.WPAenabled = 0;
-	assoc_req->secinfo.WPA2enabled = 0;
-	set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-}
-
-/**
- *  @brief Set Encryption key
- *
- *  @param dev                  A pointer to net_device structure
- *  @param info			A pointer to iw_request_info structure
- *  @param dwrq		A pointer to iw_point structure
- *  @param extra		A pointer to extra data buf
- *  @return			0 on success, otherwise failure
- */
-static int lbs_set_encode(struct net_device *dev,
-		    struct iw_request_info *info,
-		    struct iw_point *dwrq, char *extra)
-{
-	int ret = 0;
-	struct lbs_private *priv = dev->ml_priv;
-	struct assoc_request * assoc_req;
-	u16 is_default = 0, index = 0, set_tx_key = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	mutex_lock(&priv->lock);
-	assoc_req = lbs_get_association_request(priv);
-	if (!assoc_req) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	if (dwrq->flags & IW_ENCODE_DISABLED) {
-		disable_wep (assoc_req);
-		disable_wpa (assoc_req);
-		goto out;
-	}
-
-	ret = validate_key_index(assoc_req->wep_tx_keyidx,
-	                         (dwrq->flags & IW_ENCODE_INDEX),
-	                         &index, &is_default);
-	if (ret) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* If WEP isn't enabled, or if there is no key data but a valid
-	 * index, set the TX key.
-	 */
-	if (!assoc_req->secinfo.wep_enabled || (dwrq->length == 0 && !is_default))
-		set_tx_key = 1;
-
-	ret = lbs_set_wep_key(assoc_req, extra, dwrq->length, index, set_tx_key);
-	if (ret)
-		goto out;
-
-	if (dwrq->length)
-		set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
-	if (set_tx_key)
-		set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags);
-
-	if (dwrq->flags & IW_ENCODE_RESTRICTED) {
-		priv->authtype_auto = 0;
-		assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
-	} else if (dwrq->flags & IW_ENCODE_OPEN) {
-		priv->authtype_auto = 0;
-		assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
-	}
-
-out:
-	if (ret == 0) {
-		set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
-		lbs_postpone_association_work(priv);
-	} else {
-		lbs_cancel_association_work(priv);
-	}
-	mutex_unlock(&priv->lock);
-
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief Get Extended Encryption key (WPA/802.1x and WEP)
- *
- *  @param dev                  A pointer to net_device structure
- *  @param info			A pointer to iw_request_info structure
- *  @param dwrq		A pointer to iw_point structure
- *  @param extra		A pointer to extra data buf
- *  @return 	   		0 on success, otherwise failure
- */
-static int lbs_get_encodeext(struct net_device *dev,
-			      struct iw_request_info *info,
-			      struct iw_point *dwrq,
-			      char *extra)
-{
-	int ret = -EINVAL;
-	struct lbs_private *priv = dev->ml_priv;
-	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
-	int index, max_key_len;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	max_key_len = dwrq->length - sizeof(*ext);
-	if (max_key_len < 0)
-		goto out;
-
-	index = dwrq->flags & IW_ENCODE_INDEX;
-	if (index) {
-		if (index < 1 || index > 4)
-			goto out;
-		index--;
-	} else {
-		index = priv->wep_tx_keyidx;
-	}
-
-	if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
-	    ext->alg != IW_ENCODE_ALG_WEP) {
-		if (index != 0 || priv->mode != IW_MODE_INFRA)
-			goto out;
-	}
-
-	dwrq->flags = index + 1;
-	memset(ext, 0, sizeof(*ext));
-
-	if (   !priv->secinfo.wep_enabled
-	    && !priv->secinfo.WPAenabled
-	    && !priv->secinfo.WPA2enabled) {
-		ext->alg = IW_ENCODE_ALG_NONE;
-		ext->key_len = 0;
-		dwrq->flags |= IW_ENCODE_DISABLED;
-	} else {
-		u8 *key = NULL;
-
-		if (   priv->secinfo.wep_enabled
-		    && !priv->secinfo.WPAenabled
-		    && !priv->secinfo.WPA2enabled) {
-			/* WEP */
-			ext->alg = IW_ENCODE_ALG_WEP;
-			ext->key_len = priv->wep_keys[index].len;
-			key = &priv->wep_keys[index].key[0];
-		} else if (   !priv->secinfo.wep_enabled
-		           && (priv->secinfo.WPAenabled ||
-		               priv->secinfo.WPA2enabled)) {
-			/* WPA */
-			struct enc_key * pkey = NULL;
-
-			if (   priv->wpa_mcast_key.len
-			    && (priv->wpa_mcast_key.flags & KEY_INFO_WPA_ENABLED))
-				pkey = &priv->wpa_mcast_key;
-			else if (   priv->wpa_unicast_key.len
-			         && (priv->wpa_unicast_key.flags & KEY_INFO_WPA_ENABLED))
-				pkey = &priv->wpa_unicast_key;
-
-			if (pkey) {
-				if (pkey->type == KEY_TYPE_ID_AES) {
-					ext->alg = IW_ENCODE_ALG_CCMP;
-				} else {
-					ext->alg = IW_ENCODE_ALG_TKIP;
-				}
-				ext->key_len = pkey->len;
-				key = &pkey->key[0];
-			} else {
-				ext->alg = IW_ENCODE_ALG_TKIP;
-				ext->key_len = 0;
-			}
-		} else {
-			goto out;
-		}
-
-		if (ext->key_len > max_key_len) {
-			ret = -E2BIG;
-			goto out;
-		}
-
-		if (ext->key_len)
-			memcpy(ext->key, key, ext->key_len);
-		else
-			dwrq->flags |= IW_ENCODE_NOKEY;
-		dwrq->flags |= IW_ENCODE_ENABLED;
-	}
-	ret = 0;
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief Set Encryption key Extended (WPA/802.1x and WEP)
- *
- *  @param dev                  A pointer to net_device structure
- *  @param info			A pointer to iw_request_info structure
- *  @param dwrq		A pointer to iw_point structure
- *  @param extra		A pointer to extra data buf
- *  @return			0 on success, otherwise failure
- */
-static int lbs_set_encodeext(struct net_device *dev,
-			      struct iw_request_info *info,
-			      struct iw_point *dwrq,
-			      char *extra)
-{
-	int ret = 0;
-	struct lbs_private *priv = dev->ml_priv;
-	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
-	int alg = ext->alg;
-	struct assoc_request * assoc_req;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	mutex_lock(&priv->lock);
-	assoc_req = lbs_get_association_request(priv);
-	if (!assoc_req) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	if ((alg == IW_ENCODE_ALG_NONE) || (dwrq->flags & IW_ENCODE_DISABLED)) {
-		disable_wep (assoc_req);
-		disable_wpa (assoc_req);
-	} else if (alg == IW_ENCODE_ALG_WEP) {
-		u16 is_default = 0, index, set_tx_key = 0;
-
-		ret = validate_key_index(assoc_req->wep_tx_keyidx,
-		                         (dwrq->flags & IW_ENCODE_INDEX),
-		                         &index, &is_default);
-		if (ret)
-			goto out;
-
-		/* If WEP isn't enabled, or if there is no key data but a valid
-		 * index, or if the set-TX-key flag was passed, set the TX key.
-		 */
-		if (   !assoc_req->secinfo.wep_enabled
-		    || (dwrq->length == 0 && !is_default)
-		    || (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY))
-			set_tx_key = 1;
-
-		/* Copy key to driver */
-		ret = lbs_set_wep_key(assoc_req, ext->key, ext->key_len, index,
-					set_tx_key);
-		if (ret)
-			goto out;
-
-		if (dwrq->flags & IW_ENCODE_RESTRICTED) {
-			priv->authtype_auto = 0;
-			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
-		} else if (dwrq->flags & IW_ENCODE_OPEN) {
-			priv->authtype_auto = 0;
-			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
-		}
-
-		/* Mark the various WEP bits as modified */
-		set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
-		if (dwrq->length)
-			set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
-		if (set_tx_key)
-			set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags);
-	} else if ((alg == IW_ENCODE_ALG_TKIP) || (alg == IW_ENCODE_ALG_CCMP)) {
-		struct enc_key * pkey;
-
-		/* validate key length */
-		if (((alg == IW_ENCODE_ALG_TKIP)
-			&& (ext->key_len != KEY_LEN_WPA_TKIP))
-		    || ((alg == IW_ENCODE_ALG_CCMP)
-		        && (ext->key_len != KEY_LEN_WPA_AES))) {
-				lbs_deb_wext("invalid size %d for key of alg "
-				       "type %d\n",
-				       ext->key_len,
-				       alg);
-				ret = -EINVAL;
-				goto out;
-		}
-
-		if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
-			pkey = &assoc_req->wpa_mcast_key;
-			set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags);
-		} else {
-			pkey = &assoc_req->wpa_unicast_key;
-			set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags);
-		}
-
-		memset(pkey, 0, sizeof (struct enc_key));
-		memcpy(pkey->key, ext->key, ext->key_len);
-		pkey->len = ext->key_len;
-		if (pkey->len)
-			pkey->flags |= KEY_INFO_WPA_ENABLED;
-
-		/* Do this after zeroing key structure */
-		if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
-			pkey->flags |= KEY_INFO_WPA_MCAST;
-		} else {
-			pkey->flags |= KEY_INFO_WPA_UNICAST;
-		}
-
-		if (alg == IW_ENCODE_ALG_TKIP) {
-			pkey->type = KEY_TYPE_ID_TKIP;
-		} else if (alg == IW_ENCODE_ALG_CCMP) {
-			pkey->type = KEY_TYPE_ID_AES;
-		}
-
-		/* If WPA isn't enabled yet, do that now */
-		if (   assoc_req->secinfo.WPAenabled == 0
-		    && assoc_req->secinfo.WPA2enabled == 0) {
-			assoc_req->secinfo.WPAenabled = 1;
-			assoc_req->secinfo.WPA2enabled = 1;
-			set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
-		}
-
-		/* Only disable wep if necessary: can't waste time here. */
-		if (priv->mac_control & CMD_ACT_MAC_WEP_ENABLE)
-			disable_wep(assoc_req);
-	}
-
-out:
-	if (ret == 0) {
-		/* 802.1x and WPA rekeying must happen as quickly as possible,
-		 * especially during the 4-way handshake; thus if in
-		 * infrastructure mode, and either (a) 802.1x is enabled or
-		 * (b) WPA is being used, set the key right away.
-		 */
-		if (assoc_req->mode == IW_MODE_INFRA &&
-		    ((assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_802_1X) ||
-		     (assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_PSK) ||
-		      assoc_req->secinfo.WPAenabled ||
-		      assoc_req->secinfo.WPA2enabled)) {
-			lbs_do_association_work(priv);
-		} else
-			lbs_postpone_association_work(priv);
-	} else {
-		lbs_cancel_association_work(priv);
-	}
-	mutex_unlock(&priv->lock);
-
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-
-static int lbs_set_genie(struct net_device *dev,
-			  struct iw_request_info *info,
-			  struct iw_point *dwrq,
-			  char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	int ret = 0;
-	struct assoc_request * assoc_req;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	mutex_lock(&priv->lock);
-	assoc_req = lbs_get_association_request(priv);
-	if (!assoc_req) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	if (dwrq->length > MAX_WPA_IE_LEN ||
-	    (dwrq->length && extra == NULL)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (dwrq->length) {
-		memcpy(&assoc_req->wpa_ie[0], extra, dwrq->length);
-		assoc_req->wpa_ie_len = dwrq->length;
-	} else {
-		memset(&assoc_req->wpa_ie[0], 0, sizeof(priv->wpa_ie));
-		assoc_req->wpa_ie_len = 0;
-	}
-
-out:
-	if (ret == 0) {
-		set_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags);
-		lbs_postpone_association_work(priv);
-	} else {
-		lbs_cancel_association_work(priv);
-	}
-	mutex_unlock(&priv->lock);
-
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static int lbs_get_genie(struct net_device *dev,
-			  struct iw_request_info *info,
-			  struct iw_point *dwrq,
-			  char *extra)
-{
-	int ret = 0;
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (priv->wpa_ie_len == 0) {
-		dwrq->length = 0;
-		goto out;
-	}
-
-	if (dwrq->length < priv->wpa_ie_len) {
-		ret = -E2BIG;
-		goto out;
-	}
-
-	dwrq->length = priv->wpa_ie_len;
-	memcpy(extra, &priv->wpa_ie[0], priv->wpa_ie_len);
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-
-static int lbs_set_auth(struct net_device *dev,
-			 struct iw_request_info *info,
-			 struct iw_param *dwrq,
-			 char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	struct assoc_request * assoc_req;
-	int ret = 0;
-	int updated = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	mutex_lock(&priv->lock);
-	assoc_req = lbs_get_association_request(priv);
-	if (!assoc_req) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	switch (dwrq->flags & IW_AUTH_INDEX) {
-	case IW_AUTH_PRIVACY_INVOKED:
-	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
-	case IW_AUTH_TKIP_COUNTERMEASURES:
-	case IW_AUTH_CIPHER_PAIRWISE:
-	case IW_AUTH_CIPHER_GROUP:
-	case IW_AUTH_DROP_UNENCRYPTED:
-		/*
-		 * libertas does not use these parameters
-		 */
-		break;
-
-	case IW_AUTH_KEY_MGMT:
-		assoc_req->secinfo.key_mgmt = dwrq->value;
-		updated = 1;
-		break;
-
-	case IW_AUTH_WPA_VERSION:
-		if (dwrq->value & IW_AUTH_WPA_VERSION_DISABLED) {
-			assoc_req->secinfo.WPAenabled = 0;
-			assoc_req->secinfo.WPA2enabled = 0;
-			disable_wpa (assoc_req);
-		}
-		if (dwrq->value & IW_AUTH_WPA_VERSION_WPA) {
-			assoc_req->secinfo.WPAenabled = 1;
-			assoc_req->secinfo.wep_enabled = 0;
-			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
-		}
-		if (dwrq->value & IW_AUTH_WPA_VERSION_WPA2) {
-			assoc_req->secinfo.WPA2enabled = 1;
-			assoc_req->secinfo.wep_enabled = 0;
-			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
-		}
-		updated = 1;
-		break;
-
-	case IW_AUTH_80211_AUTH_ALG:
-		if (dwrq->value & IW_AUTH_ALG_SHARED_KEY) {
-			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
-		} else if (dwrq->value & IW_AUTH_ALG_OPEN_SYSTEM) {
-			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
-		} else if (dwrq->value & IW_AUTH_ALG_LEAP) {
-			assoc_req->secinfo.auth_mode = IW_AUTH_ALG_LEAP;
-		} else {
-			ret = -EINVAL;
-		}
-		updated = 1;
-		break;
-
-	case IW_AUTH_WPA_ENABLED:
-		if (dwrq->value) {
-			if (!assoc_req->secinfo.WPAenabled &&
-			    !assoc_req->secinfo.WPA2enabled) {
-				assoc_req->secinfo.WPAenabled = 1;
-				assoc_req->secinfo.WPA2enabled = 1;
-				assoc_req->secinfo.wep_enabled = 0;
-				assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
-			}
-		} else {
-			assoc_req->secinfo.WPAenabled = 0;
-			assoc_req->secinfo.WPA2enabled = 0;
-			disable_wpa (assoc_req);
-		}
-		updated = 1;
-		break;
-
-	default:
-		ret = -EOPNOTSUPP;
-		break;
-	}
-
-out:
-	if (ret == 0) {
-		if (updated)
-			set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
-		lbs_postpone_association_work(priv);
-	} else if (ret != -EOPNOTSUPP) {
-		lbs_cancel_association_work(priv);
-	}
-	mutex_unlock(&priv->lock);
-
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static int lbs_get_auth(struct net_device *dev,
-			 struct iw_request_info *info,
-			 struct iw_param *dwrq,
-			 char *extra)
-{
-	int ret = 0;
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	switch (dwrq->flags & IW_AUTH_INDEX) {
-	case IW_AUTH_KEY_MGMT:
-		dwrq->value = priv->secinfo.key_mgmt;
-		break;
-
-	case IW_AUTH_WPA_VERSION:
-		dwrq->value = 0;
-		if (priv->secinfo.WPAenabled)
-			dwrq->value |= IW_AUTH_WPA_VERSION_WPA;
-		if (priv->secinfo.WPA2enabled)
-			dwrq->value |= IW_AUTH_WPA_VERSION_WPA2;
-		if (!dwrq->value)
-			dwrq->value |= IW_AUTH_WPA_VERSION_DISABLED;
-		break;
-
-	case IW_AUTH_80211_AUTH_ALG:
-		dwrq->value = priv->secinfo.auth_mode;
-		break;
-
-	case IW_AUTH_WPA_ENABLED:
-		if (priv->secinfo.WPAenabled && priv->secinfo.WPA2enabled)
-			dwrq->value = 1;
-		break;
-
-	default:
-		ret = -EOPNOTSUPP;
-	}
-
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-
-static int lbs_set_txpow(struct net_device *dev, struct iw_request_info *info,
-		   struct iw_param *vwrq, char *extra)
-{
-	int ret = 0;
-	struct lbs_private *priv = dev->ml_priv;
-	s16 dbm = (s16) vwrq->value;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (vwrq->disabled) {
-		lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 0);
-		goto out;
-	}
-
-	if (vwrq->fixed == 0) {
-		/* User requests automatic tx power control, however there are
-		 * many auto tx settings.  For now use firmware defaults until
-		 * we come up with a good way to expose these to the user. */
-		if (priv->fwrelease < 0x09000000) {
-			ret = lbs_set_power_adapt_cfg(priv, 1,
-					POW_ADAPT_DEFAULT_P0,
-					POW_ADAPT_DEFAULT_P1,
-					POW_ADAPT_DEFAULT_P2);
-			if (ret)
-				goto out;
-		}
-		ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
-				TPC_DEFAULT_P2, 1);
-		if (ret)
-			goto out;
-		dbm = priv->txpower_max;
-	} else {
-		/* Userspace checks in iwrange whether it should use dBm or mW,
-		 * therefore this should never happen... Jean II */
-		if ((vwrq->flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) {
-			ret = -EOPNOTSUPP;
-			goto out;
-		}
-
-		/* Validate requested power level against firmware allowed
-		 * levels */
-		if (priv->txpower_min && (dbm < priv->txpower_min)) {
-			ret = -EINVAL;
-			goto out;
-		}
-
-		if (priv->txpower_max && (dbm > priv->txpower_max)) {
-			ret = -EINVAL;
-			goto out;
-		}
-		if (priv->fwrelease < 0x09000000) {
-			ret = lbs_set_power_adapt_cfg(priv, 0,
-					POW_ADAPT_DEFAULT_P0,
-					POW_ADAPT_DEFAULT_P1,
-					POW_ADAPT_DEFAULT_P2);
-			if (ret)
-				goto out;
-		}
-		ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
-				TPC_DEFAULT_P2, 1);
-		if (ret)
-			goto out;
-	}
-
-	/* If the radio was off, turn it on */
-	if (!priv->radio_on) {
-		ret = lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 1);
-		if (ret)
-			goto out;
-	}
-
-	lbs_deb_wext("txpower set %d dBm\n", dbm);
-
-	ret = lbs_set_tx_power(priv, dbm);
-
-out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
-		   struct iw_point *dwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	/*
-	 * Note : if dwrq->flags != 0, we should get the relevant SSID from
-	 * the SSID list...
-	 */
-
-	/*
-	 * Get the current SSID
-	 */
-	if (priv->connect_status == LBS_CONNECTED) {
-		memcpy(extra, priv->curbssparams.ssid,
-		       priv->curbssparams.ssid_len);
-	} else {
-		memset(extra, 0, 32);
-	}
-	/*
-	 * If none, we may want to get the one that was set
-	 */
-
-	dwrq->length = priv->curbssparams.ssid_len;
-
-	dwrq->flags = 1;	/* active */
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
-		   struct iw_point *dwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	int ret = 0;
-	u8 ssid[IEEE80211_MAX_SSID_LEN];
-	u8 ssid_len = 0;
-	struct assoc_request * assoc_req;
-	int in_ssid_len = dwrq->length;
-	DECLARE_SSID_BUF(ssid_buf);
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (!priv->radio_on) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* Check the size of the string */
-	if (in_ssid_len > IEEE80211_MAX_SSID_LEN) {
-		ret = -E2BIG;
-		goto out;
-	}
-
-	memset(&ssid, 0, sizeof(ssid));
-
-	if (!dwrq->flags || !in_ssid_len) {
-		/* "any" SSID requested; leave SSID blank */
-	} else {
-		/* Specific SSID requested */
-		memcpy(&ssid, extra, in_ssid_len);
-		ssid_len = in_ssid_len;
-	}
-
-	if (!ssid_len) {
-		lbs_deb_wext("requested any SSID\n");
-	} else {
-		lbs_deb_wext("requested SSID '%s'\n",
-		             print_ssid(ssid_buf, ssid, ssid_len));
-	}
-
-out:
-	mutex_lock(&priv->lock);
-	if (ret == 0) {
-		/* Get or create the current association request */
-		assoc_req = lbs_get_association_request(priv);
-		if (!assoc_req) {
-			ret = -ENOMEM;
-		} else {
-			/* Copy the SSID to the association request */
-			memcpy(&assoc_req->ssid, &ssid, IEEE80211_MAX_SSID_LEN);
-			assoc_req->ssid_len = ssid_len;
-			set_bit(ASSOC_FLAG_SSID, &assoc_req->flags);
-			lbs_postpone_association_work(priv);
-		}
-	}
-
-	/* Cancel the association request if there was an error */
-	if (ret != 0) {
-		lbs_cancel_association_work(priv);
-	}
-
-	mutex_unlock(&priv->lock);
-
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-
-#ifdef CONFIG_LIBERTAS_MESH
-static int lbs_mesh_get_essid(struct net_device *dev,
-			      struct iw_request_info *info,
-			      struct iw_point *dwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	memcpy(extra, priv->mesh_ssid, priv->mesh_ssid_len);
-
-	dwrq->length = priv->mesh_ssid_len;
-
-	dwrq->flags = 1;	/* active */
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-	return 0;
-}
-
-static int lbs_mesh_set_essid(struct net_device *dev,
-			      struct iw_request_info *info,
-			      struct iw_point *dwrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (!priv->radio_on) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* Check the size of the string */
-	if (dwrq->length > IEEE80211_MAX_SSID_LEN) {
-		ret = -E2BIG;
-		goto out;
-	}
-
-	if (!dwrq->flags || !dwrq->length) {
-		ret = -EINVAL;
-		goto out;
-	} else {
-		/* Specific SSID requested */
-		memcpy(priv->mesh_ssid, extra, dwrq->length);
-		priv->mesh_ssid_len = dwrq->length;
-	}
-
-	lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-			priv->channel);
- out:
-	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
-	return ret;
-}
-#endif
-
-/**
- *  @brief Connect to the AP or Ad-hoc Network with specific bssid
- *
- *  @param dev          A pointer to net_device structure
- *  @param info         A pointer to iw_request_info structure
- *  @param awrq         A pointer to sockaddr structure holding the BSSID
- *  @param extra        A pointer to extra data buf
- *  @return             0 on success, otherwise failure
- */
-static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info,
-		 struct sockaddr *awrq, char *extra)
-{
-	struct lbs_private *priv = dev->ml_priv;
-	struct assoc_request * assoc_req;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	if (!priv->radio_on)
-		return -EINVAL;
-
-	if (awrq->sa_family != ARPHRD_ETHER)
-		return -EINVAL;
-
-	lbs_deb_wext("ASSOC: WAP: sa_data %pM\n", awrq->sa_data);
-
-	mutex_lock(&priv->lock);
-
-	/* Get or create the current association request */
-	assoc_req = lbs_get_association_request(priv);
-	if (!assoc_req) {
-		lbs_cancel_association_work(priv);
-		ret = -ENOMEM;
-	} else {
-		/* Copy the BSSID to the association request */
-		memcpy(&assoc_req->bssid, awrq->sa_data, ETH_ALEN);
-		set_bit(ASSOC_FLAG_BSSID, &assoc_req->flags);
-		lbs_postpone_association_work(priv);
-	}
-
-	mutex_unlock(&priv->lock);
-
-	return ret;
-}
-
-/*
- * iwconfig settable callbacks
- */
-static const iw_handler lbs_handler[] = {
-	(iw_handler) NULL,	/* SIOCSIWCOMMIT */
-	(iw_handler) lbs_get_name,	/* SIOCGIWNAME */
-	(iw_handler) NULL,	/* SIOCSIWNWID */
-	(iw_handler) NULL,	/* SIOCGIWNWID */
-	(iw_handler) lbs_set_freq,	/* SIOCSIWFREQ */
-	(iw_handler) lbs_get_freq,	/* SIOCGIWFREQ */
-	(iw_handler) lbs_set_mode,	/* SIOCSIWMODE */
-	(iw_handler) lbs_get_mode,	/* SIOCGIWMODE */
-	(iw_handler) NULL,	/* SIOCSIWSENS */
-	(iw_handler) NULL,	/* SIOCGIWSENS */
-	(iw_handler) NULL,	/* SIOCSIWRANGE */
-	(iw_handler) lbs_get_range,	/* SIOCGIWRANGE */
-	(iw_handler) NULL,	/* SIOCSIWPRIV */
-	(iw_handler) NULL,	/* SIOCGIWPRIV */
-	(iw_handler) NULL,	/* SIOCSIWSTATS */
-	(iw_handler) NULL,	/* SIOCGIWSTATS */
-	iw_handler_set_spy,	/* SIOCSIWSPY */
-	iw_handler_get_spy,	/* SIOCGIWSPY */
-	iw_handler_set_thrspy,	/* SIOCSIWTHRSPY */
-	iw_handler_get_thrspy,	/* SIOCGIWTHRSPY */
-	(iw_handler) lbs_set_wap,	/* SIOCSIWAP */
-	(iw_handler) lbs_get_wap,	/* SIOCGIWAP */
-	(iw_handler) NULL,	/* SIOCSIWMLME */
-	(iw_handler) NULL,	/* SIOCGIWAPLIST - deprecated */
-	(iw_handler) lbs_set_scan,	/* SIOCSIWSCAN */
-	(iw_handler) lbs_get_scan,	/* SIOCGIWSCAN */
-	(iw_handler) lbs_set_essid,	/* SIOCSIWESSID */
-	(iw_handler) lbs_get_essid,	/* SIOCGIWESSID */
-	(iw_handler) lbs_set_nick,	/* SIOCSIWNICKN */
-	(iw_handler) lbs_get_nick,	/* SIOCGIWNICKN */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) lbs_set_rate,	/* SIOCSIWRATE */
-	(iw_handler) lbs_get_rate,	/* SIOCGIWRATE */
-	(iw_handler) lbs_set_rts,	/* SIOCSIWRTS */
-	(iw_handler) lbs_get_rts,	/* SIOCGIWRTS */
-	(iw_handler) lbs_set_frag,	/* SIOCSIWFRAG */
-	(iw_handler) lbs_get_frag,	/* SIOCGIWFRAG */
-	(iw_handler) lbs_set_txpow,	/* SIOCSIWTXPOW */
-	(iw_handler) lbs_get_txpow,	/* SIOCGIWTXPOW */
-	(iw_handler) lbs_set_retry,	/* SIOCSIWRETRY */
-	(iw_handler) lbs_get_retry,	/* SIOCGIWRETRY */
-	(iw_handler) lbs_set_encode,	/* SIOCSIWENCODE */
-	(iw_handler) lbs_get_encode,	/* SIOCGIWENCODE */
-	(iw_handler) lbs_set_power,	/* SIOCSIWPOWER */
-	(iw_handler) lbs_get_power,	/* SIOCGIWPOWER */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) lbs_set_genie,	/* SIOCSIWGENIE */
-	(iw_handler) lbs_get_genie,	/* SIOCGIWGENIE */
-	(iw_handler) lbs_set_auth,	/* SIOCSIWAUTH */
-	(iw_handler) lbs_get_auth,	/* SIOCGIWAUTH */
-	(iw_handler) lbs_set_encodeext,/* SIOCSIWENCODEEXT */
-	(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
-	(iw_handler) NULL,		/* SIOCSIWPMKSA */
-};
-struct iw_handler_def lbs_handler_def = {
-	.num_standard	= ARRAY_SIZE(lbs_handler),
-	.standard	= (iw_handler *) lbs_handler,
-	.get_wireless_stats = lbs_get_wireless_stats,
-};
-
-#ifdef CONFIG_LIBERTAS_MESH
-static const iw_handler mesh_wlan_handler[] = {
-	(iw_handler) NULL,	/* SIOCSIWCOMMIT */
-	(iw_handler) lbs_get_name,	/* SIOCGIWNAME */
-	(iw_handler) NULL,	/* SIOCSIWNWID */
-	(iw_handler) NULL,	/* SIOCGIWNWID */
-	(iw_handler) lbs_mesh_set_freq,	/* SIOCSIWFREQ */
-	(iw_handler) lbs_get_freq,	/* SIOCGIWFREQ */
-	(iw_handler) NULL,		/* SIOCSIWMODE */
-	(iw_handler) mesh_wlan_get_mode,	/* SIOCGIWMODE */
-	(iw_handler) NULL,	/* SIOCSIWSENS */
-	(iw_handler) NULL,	/* SIOCGIWSENS */
-	(iw_handler) NULL,	/* SIOCSIWRANGE */
-	(iw_handler) lbs_get_range,	/* SIOCGIWRANGE */
-	(iw_handler) NULL,	/* SIOCSIWPRIV */
-	(iw_handler) NULL,	/* SIOCGIWPRIV */
-	(iw_handler) NULL,	/* SIOCSIWSTATS */
-	(iw_handler) NULL,	/* SIOCGIWSTATS */
-	iw_handler_set_spy,	/* SIOCSIWSPY */
-	iw_handler_get_spy,	/* SIOCGIWSPY */
-	iw_handler_set_thrspy,	/* SIOCSIWTHRSPY */
-	iw_handler_get_thrspy,	/* SIOCGIWTHRSPY */
-	(iw_handler) NULL,	/* SIOCSIWAP */
-	(iw_handler) NULL,	/* SIOCGIWAP */
-	(iw_handler) NULL,	/* SIOCSIWMLME */
-	(iw_handler) NULL,	/* SIOCGIWAPLIST - deprecated */
-	(iw_handler) lbs_set_scan,	/* SIOCSIWSCAN */
-	(iw_handler) lbs_get_scan,	/* SIOCGIWSCAN */
-	(iw_handler) lbs_mesh_set_essid,/* SIOCSIWESSID */
-	(iw_handler) lbs_mesh_get_essid,/* SIOCGIWESSID */
-	(iw_handler) NULL,		/* SIOCSIWNICKN */
-	(iw_handler) mesh_get_nick,	/* SIOCGIWNICKN */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) lbs_set_rate,	/* SIOCSIWRATE */
-	(iw_handler) lbs_get_rate,	/* SIOCGIWRATE */
-	(iw_handler) lbs_set_rts,	/* SIOCSIWRTS */
-	(iw_handler) lbs_get_rts,	/* SIOCGIWRTS */
-	(iw_handler) lbs_set_frag,	/* SIOCSIWFRAG */
-	(iw_handler) lbs_get_frag,	/* SIOCGIWFRAG */
-	(iw_handler) lbs_set_txpow,	/* SIOCSIWTXPOW */
-	(iw_handler) lbs_get_txpow,	/* SIOCGIWTXPOW */
-	(iw_handler) lbs_set_retry,	/* SIOCSIWRETRY */
-	(iw_handler) lbs_get_retry,	/* SIOCGIWRETRY */
-	(iw_handler) lbs_set_encode,	/* SIOCSIWENCODE */
-	(iw_handler) lbs_get_encode,	/* SIOCGIWENCODE */
-	(iw_handler) lbs_set_power,	/* SIOCSIWPOWER */
-	(iw_handler) lbs_get_power,	/* SIOCGIWPOWER */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) lbs_set_genie,	/* SIOCSIWGENIE */
-	(iw_handler) lbs_get_genie,	/* SIOCGIWGENIE */
-	(iw_handler) lbs_set_auth,	/* SIOCSIWAUTH */
-	(iw_handler) lbs_get_auth,	/* SIOCGIWAUTH */
-	(iw_handler) lbs_set_encodeext,/* SIOCSIWENCODEEXT */
-	(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
-	(iw_handler) NULL,		/* SIOCSIWPMKSA */
-};
-
-struct iw_handler_def mesh_handler_def = {
-	.num_standard	= ARRAY_SIZE(mesh_wlan_handler),
-	.standard	= (iw_handler *) mesh_wlan_handler,
-	.get_wireless_stats = lbs_get_wireless_stats,
-};
-#endif
diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h
deleted file mode 100644
index f3f19fe..0000000
--- a/drivers/net/wireless/libertas/wext.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/**
-  * This file contains definitions for IOCTL calls.
-  */
-#ifndef	_LBS_WEXT_H_
-#define	_LBS_WEXT_H_
-
-void lbs_send_disconnect_notification(struct lbs_private *priv);
-void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
-
-struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
-	struct lbs_private *priv,
-	u8 band,
-	u16 channel);
-
-extern struct iw_handler_def lbs_handler_def;
-
-#endif
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index c445500..b172f5d 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -538,7 +538,8 @@
 		return;
 	}
 
-	syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
+	syncfwheader = kmemdup(skb->data, sizeof(struct fwsyncheader),
+			       GFP_ATOMIC);
 	if (!syncfwheader) {
 		lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
 		kfree_skb(skb);
@@ -546,8 +547,6 @@
 		return;
 	}
 
-	memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));
-
 	if (!syncfwheader->cmd) {
 		lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
 		lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
index fbbaaae..737eac9 100644
--- a/drivers/net/wireless/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -316,7 +316,7 @@
 	__le16 size;
 	__le16 seqnum;
 	__le16 result;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ctrl_node {
 	struct list_head list;
@@ -369,7 +369,7 @@
 
 	/*FW/HW capability */
 	__le32 fwcapinfo;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_mac_control {
 	struct cmd_header hdr;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6f8cb3e..e7f299d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -317,7 +317,7 @@
 	u8 rt_rate;
 	__le16 rt_channel;
 	__le16 rt_chbitmask;
-} __attribute__ ((packed));
+} __packed;
 
 
 static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
@@ -1291,6 +1291,11 @@
 		hw->wiphy->n_addresses = 2;
 		hw->wiphy->addresses = data->addresses;
 
+		if (fake_hw_scan) {
+			hw->wiphy->max_scan_ssids = 255;
+			hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+		}
+
 		hw->channel_change_time = 1;
 		hw->queues = 4;
 		hw->wiphy->interface_modes =
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 808adb9..c019fdc 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -109,7 +109,7 @@
 	dma_addr_t rxd_dma;
 	struct {
 		struct sk_buff *skb;
-		DECLARE_PCI_UNMAP_ADDR(dma)
+		DEFINE_DMA_UNMAP_ADDR(dma);
 	} *buf;
 };
 
@@ -426,7 +426,7 @@
 	__u8	macid;
 	__le16	result;
 	char	payload[0];
-} __attribute__((packed));
+} __packed;
 
 /*
  * Firmware loading.
@@ -632,7 +632,7 @@
 	__le16 fwlen;
 	struct ieee80211_hdr wh;
 	char data[0];
-} __attribute__((packed));
+} __packed;
 
 /* Routines to add/remove DMA header from skb.  */
 static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
@@ -711,7 +711,7 @@
 	__u8 rx_status;
 	__u8 channel;
 	__u8 rx_ctrl;
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT	0x80
 #define MWL8K_8366_AP_RATE_INFO_40MHZ		0x40
@@ -806,7 +806,7 @@
 	__u8 rx_ctrl;
 	__u8 rx_status;
 	__u8 pad2[2];
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_STA_RATE_INFO_SHORTPRE		0x8000
 #define MWL8K_STA_RATE_INFO_ANTSELECT(x)	(((x) >> 11) & 0x3)
@@ -963,7 +963,7 @@
 		if (rxq->tail == MWL8K_RX_DESCS)
 			rxq->tail = 0;
 		rxq->buf[rx].skb = skb;
-		pci_unmap_addr_set(&rxq->buf[rx], dma, addr);
+		dma_unmap_addr_set(&rxq->buf[rx], dma, addr);
 
 		rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size);
 		priv->rxd_ops->rxd_refill(rxd, addr, MWL8K_RX_MAXSZ);
@@ -984,9 +984,9 @@
 	for (i = 0; i < MWL8K_RX_DESCS; i++) {
 		if (rxq->buf[i].skb != NULL) {
 			pci_unmap_single(priv->pdev,
-					 pci_unmap_addr(&rxq->buf[i], dma),
+					 dma_unmap_addr(&rxq->buf[i], dma),
 					 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
-			pci_unmap_addr_set(&rxq->buf[i], dma, 0);
+			dma_unmap_addr_set(&rxq->buf[i], dma, 0);
 
 			kfree_skb(rxq->buf[i].skb);
 			rxq->buf[i].skb = NULL;
@@ -1060,9 +1060,9 @@
 		rxq->buf[rxq->head].skb = NULL;
 
 		pci_unmap_single(priv->pdev,
-				 pci_unmap_addr(&rxq->buf[rxq->head], dma),
+				 dma_unmap_addr(&rxq->buf[rxq->head], dma),
 				 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
-		pci_unmap_addr_set(&rxq->buf[rxq->head], dma, 0);
+		dma_unmap_addr_set(&rxq->buf[rxq->head], dma, 0);
 
 		rxq->head++;
 		if (rxq->head == MWL8K_RX_DESCS)
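Editor's note: the mwl8k receive-queue hunks above swap the PCI-specific unmap-address helpers for their generic DMA API equivalents (DECLARE_PCI_UNMAP_ADDR/pci_unmap_addr/pci_unmap_addr_set become DEFINE_DMA_UNMAP_ADDR/dma_unmap_addr/dma_unmap_addr_set). The helpers only store the bus address when the platform actually needs it again at unmap time. A hedged sketch with a hypothetical receive-buffer bookkeeping struct (error handling omitted):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct rx_buf {				/* hypothetical bookkeeping entry */
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma);	/* compiles away if not needed */
};

static void rx_buf_fill(struct device *dev, struct rx_buf *buf,
			struct sk_buff *skb, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);

	buf->skb = skb;
	dma_unmap_addr_set(buf, dma, addr);	/* remember for later unmap */
}

static void rx_buf_drain(struct device *dev, struct rx_buf *buf, size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma), len, DMA_FROM_DEVICE);
	dma_unmap_addr_set(buf, dma, 0);
	dev_kfree_skb(buf->skb);
	buf->skb = NULL;
}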
@@ -1120,7 +1120,7 @@
 	__le16 rate_info;
 	__u8 peer_id;
 	__u8 tx_frag_cnt;
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_TX_DESCS		128
 
@@ -1666,7 +1666,7 @@
 	__le32 caps2;
 	__le32 num_tx_desc_per_queue;
 	__le32 total_rxd;
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_CAP_MAX_AMSDU		0x20000000
 #define MWL8K_CAP_GREENFIELD		0x08000000
@@ -1810,7 +1810,7 @@
 	__le32 wcbbase1;
 	__le32 wcbbase2;
 	__le32 wcbbase3;
-} __attribute__((packed));
+} __packed;
 
 static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
 {
@@ -1883,7 +1883,7 @@
 	__le32 flags;
 	__le32 num_tx_desc_per_queue;
 	__le32 total_rxd;
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT		0x00000080
 #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP	0x00000020
@@ -1985,7 +1985,7 @@
 struct mwl8k_cmd_get_stat {
 	struct mwl8k_cmd_pkt header;
 	__le32 stats[64];
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_STAT_ACK_FAILURE	9
 #define MWL8K_STAT_RTS_FAILURE	12
@@ -2029,7 +2029,7 @@
 	__le16 action;
 	__le16 control;
 	__le16 radio_on;
-} __attribute__((packed));
+} __packed;
 
 static int
 mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
@@ -2092,7 +2092,7 @@
 	__le16 current_level;
 	__le16 reserved;
 	__le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
-} __attribute__((packed));
+} __packed;
 
 static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
 {
@@ -2121,7 +2121,7 @@
 	struct mwl8k_cmd_pkt header;
 	__le16 antenna;
 	__le16 mode;
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_RF_ANTENNA_RX		1
 #define MWL8K_RF_ANTENNA_TX		2
@@ -2182,7 +2182,7 @@
  */
 struct mwl8k_cmd_set_pre_scan {
 	struct mwl8k_cmd_pkt header;
-} __attribute__((packed));
+} __packed;
 
 static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
 {
@@ -2209,7 +2209,7 @@
 	struct mwl8k_cmd_pkt header;
 	__le32 isibss;
 	__u8 bssid[ETH_ALEN];
-} __attribute__((packed));
+} __packed;
 
 static int
 mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
@@ -2240,7 +2240,7 @@
 	__le16 action;
 	__u8 current_channel;
 	__le32 channel_flags;
-} __attribute__((packed));
+} __packed;
 
 static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
 				    struct ieee80211_conf *conf)
@@ -2293,7 +2293,7 @@
 	__u8	bssid[ETH_ALEN];
 	__le16	protection_mode;
 	__u8	supp_rates[14];
-} __attribute__((packed));
+} __packed;
 
 static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
 {
@@ -2364,7 +2364,7 @@
 	/* Bitmap for supported MCS codes.  */
 	__u8	mcs_set[16];
 	__u8	reserved[16];
-} __attribute__((packed));
+} __packed;
 
 static int
 mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -2397,7 +2397,7 @@
 	struct mwl8k_cmd_pkt header;
 	__le32 sleep_interval;	/* Number of beacon periods to sleep */
 	__u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
-} __attribute__((packed));
+} __packed;
 
 static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
 				   int framelen, int dtim)
@@ -2436,7 +2436,7 @@
 	struct mwl8k_cmd_pkt header;
 	__le16 action;
 	__le16 threshold;
-} __attribute__((packed));
+} __packed;
 
 static int
 mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
@@ -2466,7 +2466,7 @@
 	struct mwl8k_cmd_pkt header;
 	__le16 action;
 	__u8 short_slot;
-} __attribute__((packed));
+} __packed;
 
 static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
 {
@@ -2528,7 +2528,7 @@
 			__u8 txq;
 		} sta;
 	};
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_SET_EDCA_CW	0x01
 #define MWL8K_SET_EDCA_TXOP	0x02
@@ -2579,7 +2579,7 @@
 struct mwl8k_cmd_set_wmm_mode {
 	struct mwl8k_cmd_pkt header;
 	__le16 action;
-} __attribute__((packed));
+} __packed;
 
 static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
 {
@@ -2612,7 +2612,7 @@
 	__le32 action;
 	__u8 rx_antenna_map;
 	__u8 tx_antenna_map;
-} __attribute__((packed));
+} __packed;
 
 static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
 {
@@ -2652,7 +2652,7 @@
 	__le32 rate_type;
 	__le32 reserved1;
 	__le32 reserved2;
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_USE_AUTO_RATE	0x0002
 #define MWL8K_UCAST_RATE	0
@@ -2694,7 +2694,7 @@
 	u8 multicast_rate;
 	u8 multicast_rate_type;
 	u8 management_rate;
-} __attribute__((packed));
+} __packed;
 
 static int
 mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
@@ -2724,7 +2724,7 @@
 struct mwl8k_cmd_enable_sniffer {
 	struct mwl8k_cmd_pkt header;
 	__le32 action;
-} __attribute__((packed));
+} __packed;
 
 static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
 {
@@ -2757,7 +2757,7 @@
 		} mbss;
 		__u8 mac_addr[ETH_ALEN];
 	};
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_MAC_TYPE_PRIMARY_CLIENT		0
 #define MWL8K_MAC_TYPE_SECONDARY_CLIENT		1
@@ -2812,7 +2812,7 @@
 	struct mwl8k_cmd_pkt header;
 	__le16 action;
 	__le16 mode;
-} __attribute__((packed));
+} __packed;
 
 static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
 {
@@ -2840,7 +2840,7 @@
 struct mwl8k_cmd_bss_start {
 	struct mwl8k_cmd_pkt header;
 	__le32 enable;
-} __attribute__((packed));
+} __packed;
 
 static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif, int enable)
@@ -2885,7 +2885,7 @@
 	__u8 add_qos_info;
 	__u8 is_qos_sta;
 	__le32 fw_sta_ptr;
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_STA_ACTION_ADD		0
 #define MWL8K_STA_ACTION_REMOVE		2
@@ -2978,7 +2978,7 @@
 	__le16	control1;
 	__le16	control2;
 	__le16	control3;
-} __attribute__((packed));
+} __packed;
 
 struct peer_capability_info {
 	/* Peer type - AP vs. STA.  */
@@ -3007,7 +3007,7 @@
 	__u8	pad2;
 	__u8	station_id;
 	__le16	amsdu_enabled;
-} __attribute__((packed));
+} __packed;
 
 struct mwl8k_cmd_update_stadb {
 	struct mwl8k_cmd_pkt header;
@@ -3022,7 +3022,7 @@
 
 	/* Peer info - valid during add/update.  */
 	struct peer_capability_info	peer_info;
-} __attribute__((packed));
+} __packed;
 
 #define MWL8K_STA_DB_MODIFY_ENTRY	1
 #define MWL8K_STA_DB_DEL_ENTRY		2
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 3e1947d..259d758 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -49,7 +49,7 @@
 	__le32 pri_offset;      /* Offset to primary plug data */
 	__le32 compat_offset;   /* Offset to compatibility data*/
 	char signature[0];      /* FW signature length headersize-20 */
-} __attribute__ ((packed));
+} __packed;
 
 /* Check the range of various header entries. Return a pointer to a
  * description of the problem, or NULL if everything checks out. */
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index 9ca34e7..d9f18c1 100644
--- a/drivers/net/wireless/orinoco/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
@@ -205,7 +205,7 @@
 	u8 retry_count;
 	u8 tx_rate;
 	__le16 tx_control;
-} __attribute__ ((packed));
+} __packed;
 
 #define HERMES_TXSTAT_RETRYERR		(0x0001)
 #define HERMES_TXSTAT_AGEDERR		(0x0002)
@@ -254,7 +254,7 @@
 	/* Those last are probably not available in very old firmwares */
 	__le16 RxDiscards_WEPICVError;
 	__le16 RxDiscards_WEPExcluded;
-} __attribute__ ((packed));
+} __packed;
 
 /* Grabbed from wlan-ng - Thanks Mark... - Jean II
  * This is the result of a scan inquiry command */
@@ -271,7 +271,7 @@
 	u8 rates[10];		/* Bit rate supported */
 	__le16 proberesp_rate;	/* Data rate of the response frame */
 	__le16 atim;		/* ATIM window time, Kus (hostscan only) */
-} __attribute__ ((packed));
+} __packed;
 
 /* Same stuff for the Lucent/Agere card.
  * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */
@@ -285,7 +285,7 @@
 	/* bits: 0-ess, 1-ibss, 4-privacy [wep] */
 	__le16 essid_len;	/* ESSID length */
 	u8 essid[32];		/* ESSID of the network */
-} __attribute__ ((packed));
+} __packed;
 
 /* Moustafa: Scan structure for Symbol cards */
 struct symbol_scan_apinfo {
@@ -303,7 +303,7 @@
 	__le16 basic_rates;	/* Basic rates bitmask */
 	u8 unknown2[6];		/* Always FF:FF:FF:FF:00:00 */
 	u8 unknown3[8];		/* Always 0, appeared in f/w 3.91-68 */
-} __attribute__ ((packed));
+} __packed;
 
 union hermes_scan_info {
 	struct agere_scan_apinfo	a;
@@ -343,7 +343,7 @@
 	__le16	beacon_interval;
 	__le16	capabilities;
 	u8	data[0];
-} __attribute__ ((packed));
+} __packed;
 
 #define HERMES_LINKSTATUS_NOT_CONNECTED   (0x0000)
 #define HERMES_LINKSTATUS_CONNECTED       (0x0001)
@@ -355,7 +355,7 @@
 
 struct hermes_linkstatus {
 	__le16 linkstatus;         /* Link status */
-} __attribute__ ((packed));
+} __packed;
 
 struct hermes_response {
 	u16 status, resp0, resp1, resp2;
@@ -365,11 +365,11 @@
 struct hermes_idstring {
 	__le16 len;
 	__le16 val[16];
-} __attribute__ ((packed));
+} __packed;
 
 struct hermes_multicast {
 	u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
 
 /* Timeouts */
 #define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index 6da85e7..2b2b9a1 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -65,10 +65,10 @@
 	__le32 addr;		/* adapter address where to write the block */
 	__le16 len;		/* length of the data only, in bytes */
 	char data[0];		/* data to be written */
-} __attribute__ ((packed));
+} __packed;
 
 /*
- * Plug Data References are located in in the image after the last data
+ * Plug Data References are located in the image after the last data
  * block.  They refer to areas in the adapter memory where the plug data
  * items with matching ID should be written.
  */
@@ -77,7 +77,7 @@
 	__le32 addr;		/* adapter address where to write the data */
 	__le32 len;		/* expected length of the data, in bytes */
 	char next[0];		/* next PDR starts here */
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * Plug Data Items are located in the EEPROM read from the adapter by
@@ -88,7 +88,7 @@
 	__le16 len;		/* length of ID and data, in words */
 	__le16 id;		/* record ID */
 	char data[0];		/* plug data */
-} __attribute__ ((packed));
+} __packed;
 
 /*** FW data block access functions ***/
 
@@ -317,7 +317,7 @@
 	__le16 len;							\
 	__le16 id;							\
 	u8 val[length];							\
-} __attribute__ ((packed)) default_pdr_data_##pid = {			\
+} __packed default_pdr_data_##pid = {			\
 	cpu_to_le16((sizeof(default_pdr_data_##pid)/			\
 				sizeof(__le16)) - 1),			\
 	cpu_to_le16(pid),						\
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 6fbd788..077baa8 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -45,7 +45,7 @@
 /* Firmware version encoding */
 struct comp_id {
 	u16 id, variant, major, minor;
-} __attribute__ ((packed));
+} __packed;
 
 static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
 {
@@ -995,7 +995,7 @@
 		u8 tx_mic[MIC_KEYLEN];
 		u8 rx_mic[MIC_KEYLEN];
 		u8 tsc[ORINOCO_SEQ_LEN];
-	} __attribute__ ((packed)) buf;
+	} __packed buf;
 	hermes_t *hw = &priv->hw;
 	int ret;
 	int err;
@@ -1326,7 +1326,7 @@
 	struct {
 		u8 addr[ETH_ALEN];
 		__le16 reason_code;
-	} __attribute__ ((packed)) buf;
+	} __packed buf;
 
 	/* Currently only supported by WPA enabled Agere fw */
 	if (!priv->has_wpa)
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index ca71f08..e8e2d0f 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -172,7 +172,7 @@
 	__le16 frame_ctl;
 	__le16 duration_id;
 	u8 addr1[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
 
 /* Rx frame header except compatibility 802.3 header */
 struct hermes_rx_descriptor {
@@ -196,7 +196,7 @@
 
 	/* Data length */
 	__le16 data_len;
-} __attribute__ ((packed));
+} __packed;
 
 struct orinoco_rx_data {
 	struct hermes_rx_descriptor *desc;
@@ -390,7 +390,7 @@
 		struct header_struct {
 			struct ethhdr eth;	/* 802.3 header */
 			u8 encap[6];		/* 802.2 header */
-		} __attribute__ ((packed)) hdr;
+		} __packed hdr;
 		int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN);
 
 		if (skb_headroom(skb) < ENCAPS_OVERHEAD) {
@@ -1170,7 +1170,7 @@
 	struct join_req {
 		u8 bssid[ETH_ALEN];
 		__le16 channel;
-	} __attribute__ ((packed)) req;
+	} __packed req;
 	const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
 	struct prism2_scan_apinfo *atom = NULL;
 	int offset = 4;
@@ -1410,7 +1410,7 @@
 	struct {
 		__le16 len;
 		__le16 type;
-	} __attribute__ ((packed)) info;
+	} __packed info;
 	int len, type;
 	int err;
 
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index a6da86e..255710e 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -32,7 +32,7 @@
 struct orinoco_key {
 	__le16 len;	/* always stored as little-endian */
 	char data[ORINOCO_MAX_KEY_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 #define TKIP_KEYLEN	16
 #define MIC_KEYLEN	8
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 78f089b..1558381 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -90,7 +90,7 @@
 	/* SNAP */
 	u8 oui[3];
 	__be16 ethertype;
-} __attribute__ ((packed));
+} __packed;
 
 struct ez_usb_fw {
 	u16 size;
@@ -222,7 +222,7 @@
 	__le16 hermes_len;
 	__le16 hermes_rid;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 /* Table of devices that work or may work with this driver */
 static struct usb_device_id ezusb_table[] = {
@@ -356,12 +356,10 @@
 {
 	struct request_context *ctx;
 
-	ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
 	if (!ctx)
 		return NULL;
 
-	memset(ctx, 0, sizeof(*ctx));
-
 	ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC);
 	if (!ctx->buf) {
 		kfree(ctx);
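Editor's note: here the kmalloc()+memset(.., 0, ..) pair becomes a single kzalloc(), which returns already-zeroed memory. A minimal sketch of the resulting allocation pattern, with a hypothetical context struct:

#include <linux/slab.h>

struct req_ctx {			/* hypothetical request context */
	void *buf;
	int in_use;
};

static struct req_ctx *req_ctx_alloc(size_t buf_size)
{
	struct req_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);	/* zeroed, no memset */
	if (!ctx)
		return NULL;

	ctx->buf = kmalloc(buf_size, GFP_ATOMIC);
	if (!ctx->buf) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}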
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 5775124..cf7be1e 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -128,7 +128,7 @@
 	} else {
 		struct {
 			__le16 qual, signal, noise, unused;
-		} __attribute__ ((packed)) cq;
+		} __packed cq;
 
 		err = HERMES_READ_RECORD(hw, USER_BAP,
 					 HERMES_RID_COMMSQUALITY, &cq);
@@ -993,11 +993,9 @@
 		return -EINVAL;
 
 	if (wrqu->data.length) {
-		buf = kmalloc(wrqu->data.length, GFP_KERNEL);
+		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
 		if (buf == NULL)
 			return -ENOMEM;
-
-		memcpy(buf, extra, wrqu->data.length);
 	} else
 		buf = NULL;
 
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 187e263..e51650e 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -599,13 +599,13 @@
 			}
 			break;
 		case PDR_PRISM_ZIF_TX_IQ_CALIBRATION:
-			priv->iq_autocal = kmalloc(data_len, GFP_KERNEL);
+			priv->iq_autocal = kmemdup(entry->data, data_len,
+						   GFP_KERNEL);
 			if (!priv->iq_autocal) {
 				err = -ENOMEM;
 				goto err;
 			}
 
-			memcpy(priv->iq_autocal, entry->data, data_len);
 			priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry);
 			break;
 		case PDR_DEFAULT_COUNTRY:
diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h
index 4915d9d..e3ed893 100644
--- a/drivers/net/wireless/p54/net2280.h
+++ b/drivers/net/wireless/p54/net2280.h
@@ -232,7 +232,7 @@
 #define GPIO2_INTERRUPT						2
 #define GPIO1_INTERRUPT						1
 #define GPIO0_INTERRUPT						0
-} __attribute__ ((packed));
+} __packed;
 
 /* usb control, BAR0 + 0x0080 */
 struct net2280_usb_regs {
@@ -296,7 +296,7 @@
 #define FORCE_IMMEDIATE						7
 #define OUR_USB_ADDRESS						0
 	__le32			ourconfig;
-} __attribute__ ((packed));
+} __packed;
 
 /* pci control, BAR0 + 0x0100 */
 struct net2280_pci_regs {
@@ -323,7 +323,7 @@
 #define PCI_ARBITER_CLEAR					2
 #define PCI_EXTERNAL_ARBITER					1
 #define PCI_HOST_MODE						0
-} __attribute__ ((packed));
+} __packed;
 
 /* dma control, BAR0 + 0x0180 ... array of four structs like this,
  * for channels 0..3.  see also struct net2280_dma:  descriptor
@@ -364,7 +364,7 @@
 	__le32			dmaaddr;
 	__le32			dmadesc;
 	u32			_unused1;
-} __attribute__ ((packed));
+} __packed;
 
 /* dedicated endpoint registers, BAR0 + 0x0200 */
 
@@ -374,7 +374,7 @@
 	/* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */
 	__le32			dep_rsp;
 	u32			_unused[2];
-} __attribute__ ((packed));
+} __packed;
 
 /* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
  * like this, for ep0 then the configurable endpoints A..F
@@ -437,16 +437,16 @@
 	__le32			ep_avail;
 	__le32			ep_data;
 	u32			_unused0[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct net2280_reg_write {
 	__le16 port;
 	__le32 addr;
 	__le32 val;
-} __attribute__ ((packed));
+} __packed;
 
 struct net2280_reg_read {
 	__le16 port;
 	__le32 addr;
-} __attribute__ ((packed));
+} __packed;
 #endif /* NET2280_H */
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 2feead6..ee9bc62 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -65,7 +65,7 @@
 	u8 unused_6[1924];
 	u8 cardbus_cis[0x800];
 	u8 direct_mem_win[0x1000];
-} __attribute__ ((packed));
+} __packed;
 
 /* usb backend only needs the register defines above */
 #ifndef P54USB_H
@@ -74,7 +74,7 @@
 	__le32 device_addr;
 	__le16 len;
 	__le16 flags;
-} __attribute__ ((packed));
+} __packed;
 
 struct p54p_ring_control {
 	__le32 host_idx[4];
@@ -83,7 +83,7 @@
 	struct p54p_desc tx_data[32];
 	struct p54p_desc rx_mgmt[4];
 	struct p54p_desc tx_mgmt[4];
-} __attribute__ ((packed));
+} __packed;
 
 #define P54P_READ(r) (__force __le32)__raw_readl(&priv->map->r)
 #define P54P_WRITE(r, val) __raw_writel((__force u32)(__le32)(val), &priv->map->r)
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index c8f09da..087bf06 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -697,9 +697,7 @@
 
 static struct spi_driver p54spi_driver = {
 	.driver = {
-		/* use cx3110x name because board-n800.c uses that for the
-		 * SPI port */
-		.name		= "cx3110x",
+		.name		= "p54spi",
 		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
 	},
@@ -733,3 +731,4 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
 MODULE_ALIAS("spi:cx3110x");
+MODULE_ALIAS("spi:p54spi");
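Editor's note: SPI module autoloading is keyed on the device's modalias string, so keeping MODULE_ALIAS("spi:cx3110x") while adding "spi:p54spi" lets boards that still declare the old chip name pull in this module, while new board code should register the device under the renamed driver. A hedged board-file sketch; the bus number, chip select and clock rate are made up:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

/* Hypothetical board registration for the renamed driver. */
static struct spi_board_info demo_wlan_spi_info[] __initdata = {
	{
		.modalias	= "p54spi",	/* matches the new driver name;
						 * "cx3110x" still autoloads the
						 * module via MODULE_ALIAS */
		.bus_num	= 2,
		.chip_select	= 0,
		.max_speed_hz	= 48 * 1000 * 1000,
	},
};

static int __init demo_board_init(void)
{
	return spi_register_board_info(demo_wlan_spi_info,
				       ARRAY_SIZE(demo_wlan_spi_info));
}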
diff --git a/drivers/net/wireless/p54/p54spi.h b/drivers/net/wireless/p54/p54spi.h
index 7fbe8d8..dfaa62a 100644
--- a/drivers/net/wireless/p54/p54spi.h
+++ b/drivers/net/wireless/p54/p54spi.h
@@ -96,7 +96,7 @@
 	__le16 cmd;
 	__le16 len;
 	__le32 addr;
-} __attribute__ ((packed));
+} __packed;
 
 struct p54s_tx_info {
 	struct list_head tx_list;
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 7307325..ad59595 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -69,7 +69,8 @@
 	{USB_DEVICE(0x0915, 0x2002)},	/* Cohiba Proto board */
 	{USB_DEVICE(0x0baf, 0x0118)},   /* U.S. Robotics U5 802.11g Adapter*/
 	{USB_DEVICE(0x0bf8, 0x1009)},   /* FUJITSU E-5400 USB D1700*/
-	{USB_DEVICE(0x0cde, 0x0006)},   /* Medion MD40900 */
+	/* {USB_DEVICE(0x0cde, 0x0006)}, * Medion MD40900 already listed above,
+					 * just noting it here for clarity */
 	{USB_DEVICE(0x0cde, 0x0008)},	/* Sagem XG703A */
 	{USB_DEVICE(0x0cde, 0x0015)},	/* Zcomax XG-705A */
 	{USB_DEVICE(0x0d8e, 0x3762)},	/* DLink DWL-G120 Cohiba */
@@ -434,10 +435,9 @@
 	u8 *buf;
 	int ret;
 
-	buf = kmalloc(4, GFP_KERNEL);
+	buf = kmemdup(p54u_romboot_3887, 4, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
-	memcpy(buf, p54u_romboot_3887, 4);
 	ret = p54u_bulk_msg(priv, P54U_PIPE_DATA,
 			    buf, 4);
 	kfree(buf);
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index e935b79..ed4034a 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -70,12 +70,12 @@
 	__le16 len;
 	__le16 follower;	/* ? */
 	u8 padding[8];
-} __attribute__((packed));
+} __packed;
 
 struct lm87_tx_hdr {
 	__le32 device_addr;
 	__le32 chksum;
-} __attribute__((packed));
+} __packed;
 
 /* Some flags for the isl hardware registers controlling DMA inside the
  * chip */
@@ -103,7 +103,7 @@
 	__le32 fw_load_addr;
 	__le32 fw_length;
 	__le32 crc;
-} __attribute__((packed));
+} __packed;
 
 /* pipes 3 and 4 are not used by the driver */
 #define P54U_PIPE_NUMBER 9
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 8d1190c..912fdc0 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2101,7 +2101,7 @@
 	u8 timestamp[8];
 	u16 beacon_int;
 	u16 capab_info;
-} __attribute__ ((packed));
+} __packed;
 
 #define WLAN_EID_GENERIC 0xdd
 static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 };
@@ -2751,14 +2751,9 @@
            p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
                return -EINVAL;
 
-       param = kmalloc(p->length, GFP_KERNEL);
-       if (param == NULL)
-               return -ENOMEM;
-
-       if (copy_from_user(param, p->pointer, p->length)) {
-               kfree(param);
-               return -EFAULT;
-       }
+	param = memdup_user(p->pointer, p->length);
+	if (IS_ERR(param))
+		return PTR_ERR(param);
 
        switch (param->cmd) {
        case PRISM2_SET_ENCRYPTION:
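Editor's note: the kmalloc()+copy_from_user() sequence above collapses into memdup_user(), which allocates, copies from userspace, and returns either the kernel buffer or an ERR_PTR() on failure. A short sketch of the calling convention; the ioctl wrapper and payload struct are hypothetical:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct demo_param {			/* hypothetical ioctl payload */
	u32 cmd;
	u8 data[32];
};

static int demo_handle_ioctl(void __user *uptr, size_t len)
{
	struct demo_param *param;
	int ret = 0;

	param = memdup_user(uptr, len);	/* kmalloc + copy_from_user */
	if (IS_ERR(param))
		return PTR_ERR(param);	/* -ENOMEM or -EFAULT */

	/* ... act on param->cmd ... */

	kfree(param);
	return ret;
}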
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h
index b7534c2..59e3125 100644
--- a/drivers/net/wireless/prism54/isl_oid.h
+++ b/drivers/net/wireless/prism54/isl_oid.h
@@ -29,20 +29,20 @@
 struct obj_ssid {
 	u8 length;
 	char octets[33];
-} __attribute__ ((packed));
+} __packed;
 
 struct obj_key {
 	u8 type;		/* dot11_priv_t */
 	u8 length;
 	char key[32];
-} __attribute__ ((packed));
+} __packed;
 
 struct obj_mlme {
 	u8 address[6];
 	u16 id;
 	u16 state;
 	u16 code;
-} __attribute__ ((packed));
+} __packed;
 
 struct obj_mlmeex {
 	u8 address[6];
@@ -51,12 +51,12 @@
 	u16 code;
 	u16 size;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct obj_buffer {
 	u32 size;
 	u32 addr;		/* 32bit bus address */
-} __attribute__ ((packed));
+} __packed;
 
 struct obj_bss {
 	u8 address[6];
@@ -77,17 +77,17 @@
 	short rates;
 	short basic_rates;
 	int:16;			/* padding */
-} __attribute__ ((packed));
+} __packed;
 
 struct obj_bsslist {
 	u32 nr;
 	struct obj_bss bsslist[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct obj_frequencies {
 	u16 nr;
 	u16 mhz[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct obj_attachment {
 	char type;
@@ -95,7 +95,7 @@
 	short id;
 	short size;
 	char data[0];
-} __attribute__((packed));
+} __packed;
 
 /*
  * in case everything's ok, the inlined function below will be
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index 54f9a4b..6ca30a5 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -34,13 +34,13 @@
 	__le16 unk3;
 	u8 rssi;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct rx_annex_header {
 	u8 addr1[ETH_ALEN];
 	u8 addr2[ETH_ALEN];
 	struct rfmon_header rfmon;
-} __attribute__ ((packed));
+} __packed;
 
 /* wlan-ng (and hopefully others) AVS header, version one.  Fields in
  * network byte order. */
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
index 0b27e50..0db93db 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -101,7 +101,7 @@
 	u8 device_id;
 	u8 flags;
 	u32 length;
-} __attribute__ ((packed))
+} __packed
 pimfor_header_t;
 
 /* A received and interrupt-processed management frame, either for
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 4bd61ee..719573b 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -238,19 +238,19 @@
 	u8 bssid[6];
 	u8 padding[2];
 	__le32 flags;
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_pmkid_candidate {
 	u8 bssid[6];
 	u8 padding[2];
 	__le32 flags;
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_pmkid_cand_list {
 	__le32 version;
 	__le32 num_candidates;
 	struct ndis_80211_pmkid_candidate candidate_list[0];
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_status_indication {
 	__le32 status_type;
@@ -260,19 +260,19 @@
 		struct ndis_80211_auth_request		auth_request[0];
 		struct ndis_80211_pmkid_cand_list	cand_list;
 	} u;
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_ssid {
 	__le32 length;
 	u8 essid[NDIS_802_11_LENGTH_SSID];
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_conf_freq_hop {
 	__le32 length;
 	__le32 hop_pattern;
 	__le32 hop_set;
 	__le32 dwell_time;
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_conf {
 	__le32 length;
@@ -280,7 +280,7 @@
 	__le32 atim_window;
 	__le32 ds_config;
 	struct ndis_80211_conf_freq_hop fh_config;
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_bssid_ex {
 	__le32 length;
@@ -295,25 +295,25 @@
 	u8 rates[NDIS_802_11_LENGTH_RATES_EX];
 	__le32 ie_length;
 	u8 ies[0];
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_bssid_list_ex {
 	__le32 num_items;
 	struct ndis_80211_bssid_ex bssid[0];
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_fixed_ies {
 	u8 timestamp[8];
 	__le16 beacon_interval;
 	__le16 capabilities;
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_wep_key {
 	__le32 size;
 	__le32 index;
 	__le32 length;
 	u8 material[32];
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_key {
 	__le32 size;
@@ -323,14 +323,14 @@
 	u8 padding[6];
 	u8 rsc[8];
 	u8 material[32];
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_remove_key {
 	__le32 size;
 	__le32 index;
 	u8 bssid[6];
 	u8 padding[2];
-} __attribute__((packed));
+} __packed;
 
 struct ndis_config_param {
 	__le32 name_offs;
@@ -338,7 +338,7 @@
 	__le32 type;
 	__le32 value_offs;
 	__le32 value_length;
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_assoc_info {
 	__le32 length;
@@ -358,12 +358,12 @@
 	} resp_ie;
 	__le32 resp_ie_length;
 	__le32 offset_resp_ies;
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_auth_encr_pair {
 	__le32 auth_mode;
 	__le32 encr_mode;
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_capability {
 	__le32 length;
@@ -371,7 +371,7 @@
 	__le32 num_pmkids;
 	__le32 num_auth_encr_pair;
 	struct ndis_80211_auth_encr_pair auth_encr_pair[0];
-} __attribute__((packed));
+} __packed;
 
 struct ndis_80211_bssid_info {
 	u8 bssid[6];
@@ -520,8 +520,9 @@
 
 static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed);
 
-static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type,
-				int dbm);
+static int rndis_set_tx_power(struct wiphy *wiphy,
+			      enum nl80211_tx_power_setting type,
+			      int mbm);
 static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm);
 
 static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
@@ -1856,20 +1857,25 @@
 	return 0;
 }
 
-static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type,
-				int dbm)
+static int rndis_set_tx_power(struct wiphy *wiphy,
+			      enum nl80211_tx_power_setting type,
+			      int mbm)
 {
 	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
 	struct usbnet *usbdev = priv->usbdev;
 
-	netdev_dbg(usbdev->net, "%s(): type:0x%x dbm:%i\n",
-		   __func__, type, dbm);
+	netdev_dbg(usbdev->net, "%s(): type:0x%x mbm:%i\n",
+		   __func__, type, mbm);
+
+	if (mbm < 0 || (mbm % 100))
+		return -ENOTSUPP;
 
 	/* Device doesn't support changing txpower after initialization, only
 	 * turn off/on radio. Support 'auto' mode and setting same dBm that is
 	 * currently used.
 	 */
-	if (type == TX_POWER_AUTOMATIC || dbm == get_bcm4320_power_dbm(priv)) {
+	if (type == NL80211_TX_POWER_AUTOMATIC ||
+	    MBM_TO_DBM(mbm) == get_bcm4320_power_dbm(priv)) {
 		if (!priv->radio_on)
 			disassociate(usbdev, true); /* turn on radio */
 
@@ -2495,8 +2501,7 @@
 static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
 {
 	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
-	struct ndis_80211_assoc_info *info;
-	u8 assoc_buf[sizeof(*info) + IW_CUSTOM_MAX + 32];
+	struct ndis_80211_assoc_info *info = NULL;
 	u8 bssid[ETH_ALEN];
 	int resp_ie_len, req_ie_len;
 	u8 *req_ie, *resp_ie;
@@ -2515,23 +2520,43 @@
 	resp_ie = NULL;
 
 	if (priv->infra_mode == NDIS_80211_INFRA_INFRA) {
-		memset(assoc_buf, 0, sizeof(assoc_buf));
-		info = (void *)assoc_buf;
+		info = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
+		if (!info) {
+			/* No memory? Try resume work later */
+			set_bit(WORK_LINK_UP, &priv->work_pending);
+			queue_work(priv->workqueue, &priv->work);
+			return;
+		}
 
-		/* Get association info IEs from device and send them back to
-		 * userspace. */
-		ret = get_association_info(usbdev, info, sizeof(assoc_buf));
+		/* Get association info IEs from device. */
+		ret = get_association_info(usbdev, info, CONTROL_BUFFER_SIZE);
 		if (!ret) {
 			req_ie_len = le32_to_cpu(info->req_ie_length);
 			if (req_ie_len > 0) {
 				offset = le32_to_cpu(info->offset_req_ies);
+
+				if (offset > CONTROL_BUFFER_SIZE)
+					offset = CONTROL_BUFFER_SIZE;
+
 				req_ie = (u8 *)info + offset;
+
+				if (offset + req_ie_len > CONTROL_BUFFER_SIZE)
+					req_ie_len =
+						CONTROL_BUFFER_SIZE - offset;
 			}
 
 			resp_ie_len = le32_to_cpu(info->resp_ie_length);
 			if (resp_ie_len > 0) {
 				offset = le32_to_cpu(info->offset_resp_ies);
+
+				if (offset > CONTROL_BUFFER_SIZE)
+					offset = CONTROL_BUFFER_SIZE;
+
 				resp_ie = (u8 *)info + offset;
+
+				if (offset + resp_ie_len > CONTROL_BUFFER_SIZE)
+					resp_ie_len =
+						CONTROL_BUFFER_SIZE - offset;
 			}
 		}
 	} else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC))
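Editor's note: the added checks clamp the device-reported IE offsets and lengths so they can never point or run past the CONTROL_BUFFER_SIZE buffer that now backs the association info. The same bounds logic, factored into a small helper for clarity (the helper name and signature are illustrative only, not part of the driver):

#include <linux/kernel.h>
#include <linux/types.h>

/*
 * Clamp an (offset, len) pair reported by the device so that
 * buf + *offset .. buf + *offset + len stays inside a buf_size buffer.
 * Returns the possibly reduced length.
 */
static u32 demo_clamp_ie(u32 buf_size, u32 *offset, u32 len)
{
	if (*offset > buf_size)
		*offset = buf_size;

	if (len > buf_size - *offset)	/* avoids u32 overflow of offset+len */
		len = buf_size - *offset;

	return len;
}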
@@ -2563,6 +2588,9 @@
 	} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
 		cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
 
+	if (info != NULL)
+		kfree(info);
+
 	priv->connected = true;
 	memcpy(priv->bssid, bssid, ETH_ALEN);
 
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index ad2c98a..3bedf56 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1076,9 +1076,6 @@
 				   struct txentry_desc *txdesc)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
-	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
-	u32 word;
 	u32 reg;
 
 	/*
@@ -1091,9 +1088,15 @@
 
 	rt2x00queue_map_txskb(rt2x00dev, entry->skb);
 
-	rt2x00_desc_read(entry_priv->desc, 1, &word);
-	rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
-	rt2x00_desc_write(entry_priv->desc, 1, word);
+	/*
+	 * Write the TX descriptor for the beacon.
+	 */
+	rt2400pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
+
+	/*
+	 * Dump beacon to userspace through debugfs.
+	 */
+	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
 
 	/*
 	 * Enable beaconing again.
@@ -1585,7 +1588,6 @@
 	.reset_tuner		= rt2400pci_reset_tuner,
 	.link_tuner		= rt2400pci_link_tuner,
 	.write_tx_desc		= rt2400pci_write_tx_desc,
-	.write_tx_data		= rt2x00pci_write_tx_data,
 	.write_beacon		= rt2400pci_write_beacon,
 	.kick_tx_queue		= rt2400pci_kick_tx_queue,
 	.kill_tx_queue		= rt2400pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 41da3d2..69d231d 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1233,9 +1233,6 @@
 				   struct txentry_desc *txdesc)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
-	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
-	u32 word;
 	u32 reg;
 
 	/*
@@ -1248,9 +1245,15 @@
 
 	rt2x00queue_map_txskb(rt2x00dev, entry->skb);
 
-	rt2x00_desc_read(entry_priv->desc, 1, &word);
-	rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
-	rt2x00_desc_write(entry_priv->desc, 1, word);
+	/*
+	 * Write the TX descriptor for the beacon.
+	 */
+	rt2500pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
+
+	/*
+	 * Dump beacon to userspace through debugfs.
+	 */
+	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
 
 	/*
 	 * Enable beaconing again.
@@ -1883,7 +1886,6 @@
 	.reset_tuner		= rt2500pci_reset_tuner,
 	.link_tuner		= rt2500pci_link_tuner,
 	.write_tx_desc		= rt2500pci_write_tx_desc,
-	.write_tx_data		= rt2x00pci_write_tx_data,
 	.write_beacon		= rt2500pci_write_beacon,
 	.kick_tx_queue		= rt2500pci_kick_tx_queue,
 	.kill_tx_queue		= rt2500pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9ae96a6..4420552 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -345,9 +345,9 @@
 				struct rt2x00lib_crypto *crypto,
 				struct ieee80211_key_conf *key)
 {
-	int timeout;
 	u32 mask;
 	u16 reg;
+	enum cipher curr_cipher;
 
 	if (crypto->cmd == SET_KEY) {
 		/*
@@ -358,6 +358,7 @@
 		mask = TXRX_CSR0_KEY_ID.bit_mask;
 
 		rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+		curr_cipher = rt2x00_get_field16(reg, TXRX_CSR0_ALGORITHM);
 		reg &= mask;
 
 		if (reg && reg == mask)
@@ -366,19 +367,17 @@
 		reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID);
 
 		key->hw_key_idx += reg ? ffz(reg) : 0;
-
 		/*
-		 * The encryption key doesn't fit within the CSR cache,
-		 * this means we should allocate it separately and use
-		 * rt2x00usb_vendor_request() to send the key to the hardware.
+		 * Hardware requires that all keys use the same cipher
+		 * (e.g. TKIP-only, AES-only, but not TKIP+AES).
+		 * If this is not the first key, compare the cipher with the
+		 * first one and fall back to SW crypto if not the same.
 		 */
-		reg = KEY_ENTRY(key->hw_key_idx);
-		timeout = REGISTER_TIMEOUT32(sizeof(crypto->key));
-		rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
-						    USB_VENDOR_REQUEST_OUT, reg,
-						    crypto->key,
-						    sizeof(crypto->key),
-						    timeout);
+		if (key->hw_key_idx > 0 && crypto->cipher != curr_cipher)
+			return -EOPNOTSUPP;
+
+		rt2500usb_register_multiwrite(rt2x00dev, reg,
+					      crypto->key, sizeof(crypto->key));
 
 		/*
 		 * The driver does not support the IV/EIV generation
@@ -1034,7 +1033,7 @@
 				    struct txentry_desc *txdesc)
 {
 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
-	__le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE);
+	__le32 *txd = (__le32 *) skb->data;
 	u32 word;
 
 	/*
@@ -1080,6 +1079,7 @@
 	/*
 	 * Register descriptor details in skb frame descriptor.
 	 */
+	skbdesc->flags |= SKBDESC_DESC_IN_SKB;
 	skbdesc->desc = txd;
 	skbdesc->desc_len = TXD_DESC_SIZE;
 }
@@ -1108,9 +1108,20 @@
 	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
 
 	/*
-	 * Take the descriptor in front of the skb into account.
+	 * Add space for the descriptor in front of the skb.
 	 */
 	skb_push(entry->skb, TXD_DESC_SIZE);
+	memset(entry->skb->data, 0, TXD_DESC_SIZE);
+
+	/*
+	 * Write the TX descriptor for the beacon.
+	 */
+	rt2500usb_write_tx_desc(rt2x00dev, entry->skb, txdesc);
+
+	/*
+	 * Dump beacon to userspace through debugfs.
+	 */
+	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
 
 	/*
 	 * USB devices cannot blindly pass the skb->len as the
@@ -1768,7 +1779,6 @@
 	.link_stats		= rt2500usb_link_stats,
 	.reset_tuner		= rt2500usb_reset_tuner,
 	.write_tx_desc		= rt2500usb_write_tx_desc,
-	.write_tx_data		= rt2x00usb_write_tx_data,
 	.write_beacon		= rt2500usb_write_beacon,
 	.get_tx_data_len	= rt2500usb_get_tx_data_len,
 	.kick_tx_queue		= rt2x00usb_kick_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 2aa0375..552f9f4 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -63,7 +63,6 @@
  */
 #define REV_RT2860C			0x0100
 #define REV_RT2860D			0x0101
-#define REV_RT2870D			0x0101
 #define REV_RT2872E			0x0200
 #define REV_RT3070E			0x0200
 #define REV_RT3070F			0x0201
@@ -99,6 +98,21 @@
  */
 
 /*
+ * E2PROM_CSR: PCI EEPROM control register.
+ * RELOAD: Write 1 to reload eeprom content.
+ * TYPE: 0: 93c46, 1:93c66.
+ * LOAD_STATUS: 1:loading, 0:done.
+ */
+#define E2PROM_CSR			0x0004
+#define E2PROM_CSR_DATA_CLOCK		FIELD32(0x00000001)
+#define E2PROM_CSR_CHIP_SELECT		FIELD32(0x00000002)
+#define E2PROM_CSR_DATA_IN		FIELD32(0x00000004)
+#define E2PROM_CSR_DATA_OUT		FIELD32(0x00000008)
+#define E2PROM_CSR_TYPE			FIELD32(0x00000030)
+#define E2PROM_CSR_LOAD_STATUS		FIELD32(0x00000040)
+#define E2PROM_CSR_RELOAD		FIELD32(0x00000080)
+
+/*
  * OPT_14: Unknown register used by rt3xxx devices.
  */
 #define OPT_14_CSR			0x0114
@@ -322,6 +336,39 @@
 #define RX_DRX_IDX			0x029c
 
 /*
+ * USB_DMA_CFG
+ * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns.
+ * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes.
+ * PHY_CLEAR: phy watch dog enable.
+ * TX_CLEAR: Clear USB DMA TX path.
+ * TXOP_HALT: Halt TXOP count down when TX buffer is full.
+ * RX_BULK_AGG_EN: Enable Rx Bulk Aggregation.
+ * RX_BULK_EN: Enable USB DMA Rx.
+ * TX_BULK_EN: Enable USB DMA Tx.
+ * EP_OUT_VALID: OUT endpoint data valid.
+ * RX_BUSY: USB DMA RX FSM busy.
+ * TX_BUSY: USB DMA TX FSM busy.
+ */
+#define USB_DMA_CFG			0x02a0
+#define USB_DMA_CFG_RX_BULK_AGG_TIMEOUT	FIELD32(0x000000ff)
+#define USB_DMA_CFG_RX_BULK_AGG_LIMIT	FIELD32(0x0000ff00)
+#define USB_DMA_CFG_PHY_CLEAR		FIELD32(0x00010000)
+#define USB_DMA_CFG_TX_CLEAR		FIELD32(0x00080000)
+#define USB_DMA_CFG_TXOP_HALT		FIELD32(0x00100000)
+#define USB_DMA_CFG_RX_BULK_AGG_EN	FIELD32(0x00200000)
+#define USB_DMA_CFG_RX_BULK_EN		FIELD32(0x00400000)
+#define USB_DMA_CFG_TX_BULK_EN		FIELD32(0x00800000)
+#define USB_DMA_CFG_EP_OUT_VALID	FIELD32(0x3f000000)
+#define USB_DMA_CFG_RX_BUSY		FIELD32(0x40000000)
+#define USB_DMA_CFG_TX_BUSY		FIELD32(0x80000000)
+
+/*
+ * US_CYC_CNT
+ */
+#define US_CYC_CNT			0x02a4
+#define US_CYC_CNT_CLOCK_CYCLE		FIELD32(0x000000ff)
+
+/*
  * PBF_SYS_CTRL
  * HOST_RAM_WRITE: enable Host program ram write selection
  */
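Editor's note: the new E2PROM_CSR and USB_DMA_CFG definitions follow the rt2x00 convention of describing each register field purely by its bit mask (FIELD32(mask)); the driver's field accessors then derive the shift from the mask. A generic, driver-independent sketch of what such an accessor does; the names below are illustrative, not the rt2x00 implementation:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_USB_DMA_CFG_RX_BULK_AGG_LIMIT	0x0000ff00	/* mask only */

/* Extract a field given its mask: shift = position of the lowest set bit. */
static inline u32 demo_get_field32(u32 reg, u32 mask)
{
	return (reg & mask) >> __ffs(mask);
}

/* Insert a field value under its mask, preserving the other register bits. */
static inline void demo_set_field32(u32 *reg, u32 mask, u32 value)
{
	*reg = (*reg & ~mask) | ((value << __ffs(mask)) & mask);
}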
@@ -1370,17 +1417,17 @@
 struct mac_wcid_entry {
 	u8 mac[6];
 	u8 reserved[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct hw_key_entry {
 	u8 key[16];
 	u8 tx_mic[8];
 	u8 rx_mic[8];
-} __attribute__ ((packed));
+} __packed;
 
 struct mac_iveiv_entry {
 	u8 iv[8];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * MAC_WCID_ATTRIBUTE:
@@ -1389,6 +1436,10 @@
 #define MAC_WCID_ATTRIBUTE_CIPHER	FIELD32(0x0000000e)
 #define MAC_WCID_ATTRIBUTE_BSS_IDX	FIELD32(0x00000070)
 #define MAC_WCID_ATTRIBUTE_RX_WIUDF	FIELD32(0x00000380)
+#define MAC_WCID_ATTRIBUTE_CIPHER_EXT	FIELD32(0x00000400)
+#define MAC_WCID_ATTRIBUTE_BSS_IDX_EXT	FIELD32(0x00000800)
+#define MAC_WCID_ATTRIBUTE_WAPI_MCBC	FIELD32(0x00008000)
+#define MAC_WCID_ATTRIBUTE_WAPI_KEY_IDX	FIELD32(0xff000000)
 
 /*
  * SHARED_KEY_MODE:
@@ -1510,7 +1561,9 @@
  */
 
 /*
- * BBP 1: TX Antenna
+ * BBP 1: TX Antenna & Power
+ * POWER: 0 - normal, 1 - drop tx power by 6dBm, 2 - drop tx power by 12dBm,
+ *	3 - increase tx power by 6dBm
  */
 #define BBP1_TX_POWER			FIELD8(0x07)
 #define BBP1_TX_ANTENNA			FIELD8(0x18)
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index db4250d..d3cf0cc 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1,9 +1,9 @@
 /*
+	Copyright (C) 2010 Ivo van Doorn <IvDoorn@gmail.com>
 	Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
 	Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com>
 
 	Based on the original rt2800pci.c and rt2800usb.c.
-	  Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
 	  Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
 	  Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
 	  Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
@@ -38,16 +38,8 @@
 #include <linux/slab.h>
 
 #include "rt2x00.h"
-#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
-#include "rt2x00usb.h"
-#endif
 #include "rt2800lib.h"
 #include "rt2800.h"
-#include "rt2800usb.h"
-
-MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("rt2800 library");
-MODULE_LICENSE("GPL");
 
 /*
  * Register access.
@@ -107,8 +99,7 @@
 		rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
 		rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
 		rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
-		if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
-			rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
 
 		rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
 	}
@@ -136,8 +127,7 @@
 		rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
 		rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
 		rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
-		if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
-			rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
 
 		rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
 
@@ -282,9 +272,8 @@
 }
 EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
 
-void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc)
+void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc)
 {
-	__le32 *txwi = (__le32 *)(skb->data - TXWI_DESC_SIZE);
 	u32 word;
 
 	/*
@@ -380,6 +369,81 @@
 }
 EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
 
+void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
+{
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	unsigned int beacon_base;
+	u32 reg;
+
+	/*
+	 * Disable beaconing while we are reloading the beacon data,
+	 * otherwise we might be sending out invalid data.
+	 */
+	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+	/*
+	 * Add space for the TXWI in front of the skb.
+	 */
+	skb_push(entry->skb, TXWI_DESC_SIZE);
+	memset(entry->skb->data, 0, TXWI_DESC_SIZE);
+
+	/*
+	 * Register descriptor details in skb frame descriptor.
+	 */
+	skbdesc->flags |= SKBDESC_DESC_IN_SKB;
+	skbdesc->desc = entry->skb->data;
+	skbdesc->desc_len = TXWI_DESC_SIZE;
+
+	/*
+	 * Add the TXWI for the beacon to the skb.
+	 */
+	rt2800_write_txwi((__le32 *)entry->skb->data, txdesc);
+
+	/*
+	 * Dump beacon to userspace through debugfs.
+	 */
+	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+
+	/*
+	 * Write entire beacon with TXWI to register.
+	 */
+	beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+	rt2800_register_multiwrite(rt2x00dev, beacon_base,
+				   entry->skb->data, entry->skb->len);
+
+	/*
+	 * Enable beaconing again.
+	 */
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+	/*
+	 * Clean up beacon skb.
+	 */
+	dev_kfree_skb_any(entry->skb);
+	entry->skb = NULL;
+}
+EXPORT_SYMBOL(rt2800_write_beacon);
+
+static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
+				       unsigned int beacon_base)
+{
+	int i;
+
+	/*
+	 * For the Beacon base registers we only need to clear
+	 * the whole TXWI which (when set to 0) will invalidate
+	 * the entire beacon.
+	 */
+	for (i = 0; i < TXWI_DESC_SIZE; i += sizeof(__le32))
+		rt2800_register_write(rt2x00dev, beacon_base + i, 0);
+}
+
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
 const struct rt2x00debug rt2800_rt2x00debug = {
 	.owner	= THIS_MODULE,
@@ -502,15 +566,28 @@
 
 	offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);
 
-	rt2800_register_read(rt2x00dev, offset, &reg);
-	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
-			   !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
-	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
-			   (crypto->cmd == SET_KEY) * crypto->cipher);
-	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
-			   (crypto->cmd == SET_KEY) * crypto->bssidx);
-	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
-	rt2800_register_write(rt2x00dev, offset, reg);
+	if (crypto->cmd == SET_KEY) {
+		rt2800_register_read(rt2x00dev, offset, &reg);
+		rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
+				   !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+		/*
+		 * Both the cipher and the BSS Idx numbers are split in a main
+		 * value of 3 bits, and an extended field for adding one additional
+		 * bit to the value.
+		 */
+		rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
+				   (crypto->cipher & 0x7));
+		rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER_EXT,
+				   (crypto->cipher & 0x8) >> 3);
+		rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
+				   (crypto->bssidx & 0x7));
+		rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX_EXT,
+				   (crypto->bssidx & 0x8) >> 3);
+		rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
+		rt2800_register_write(rt2x00dev, offset, reg);
+	} else {
+		rt2800_register_write(rt2x00dev, offset, 0);
+	}
 
 	offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
 
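Editor's note: with more than eight ciphers and BSS indexes, the 3-bit CIPHER and BSS_IDX fields are no longer wide enough, so the fourth bit of each value now goes into the new *_EXT field. The split is a plain mask-and-shift; a small standalone illustration (the function name is made up):

#include <linux/types.h>

/*
 * Split a 4-bit value into a 3-bit main field and a 1-bit extension,
 * as done for MAC_WCID_ATTRIBUTE_CIPHER / _CIPHER_EXT above.
 */
static void demo_split_4bit(u8 value, u8 *main3, u8 *ext1)
{
	*main3 = value & 0x7;		/* low three bits */
	*ext1  = (value & 0x8) >> 3;	/* bit 3 becomes the extension bit */
}

/* Example: value 10 (0b1010) -> main3 = 2, ext1 = 1. */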
@@ -668,19 +745,14 @@
 void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
 			struct rt2x00intf_conf *conf, const unsigned int flags)
 {
-	unsigned int beacon_base;
 	u32 reg;
 
 	if (flags & CONFIG_UPDATE_TYPE) {
 		/*
 		 * Clear current synchronisation setup.
-		 * For the Beacon base registers we only need to clear
-		 * the first byte since that byte contains the VALID and OWNER
-		 * bits which (when set to 0) will invalidate the entire beacon.
 		 */
-		beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
-		rt2800_register_write(rt2x00dev, beacon_base, 0);
-
+		rt2800_clear_beacon(rt2x00dev,
+				    HW_BEACON_OFFSET(intf->beacon->entry_idx));
 		/*
 		 * Enable synchronisation.
 		 */
@@ -703,8 +775,8 @@
 
 	if (flags & CONFIG_UPDATE_BSSID) {
 		reg = le32_to_cpu(conf->bssid[1]);
-		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0);
-		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
+		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
+		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 7);
 		conf->bssid[1] = cpu_to_le32(reg);
 
 		rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
@@ -762,14 +834,12 @@
 	switch ((int)ant->tx) {
 	case 1:
 		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
-		if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
-			rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
 		break;
 	case 2:
 		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
 		break;
 	case 3:
-		/* Do nothing */
+		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
 		break;
 	}
 
@@ -1023,7 +1093,7 @@
 	u8 r1;
 
 	rt2800_bbp_read(rt2x00dev, 1, &r1);
-	rt2x00_set_field8(&reg, BBP1_TX_POWER, 0);
+	rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
 	rt2800_bbp_write(rt2x00dev, 1, r1);
 
 	rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
@@ -1212,6 +1282,7 @@
 	u32 reg;
 	u16 eeprom;
 	unsigned int i;
+	int ret;
 
 	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
 	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
@@ -1221,59 +1292,9 @@
 	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
 	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
 
-	if (rt2x00_is_usb(rt2x00dev)) {
-		/*
-		 * Wait until BBP and RF are ready.
-		 */
-		for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
-			rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
-			if (reg && reg != ~0)
-				break;
-			msleep(1);
-		}
-
-		if (i == REGISTER_BUSY_COUNT) {
-			ERROR(rt2x00dev, "Unstable hardware.\n");
-			return -EBUSY;
-		}
-
-		rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
-		rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
-				      reg & ~0x00002000);
-	} else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
-		/*
-		 * Reset DMA indexes
-		 */
-		rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
-		rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
-		rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
-		rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
-		rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
-		rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
-		rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
-		rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
-		rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
-		rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
-		rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
-
-		rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
-	}
-
-	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
-	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
-	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
-	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
-	if (rt2x00_is_usb(rt2x00dev)) {
-		rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
-#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
-		rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
-					    USB_MODE_RESET, REGISTER_TIMEOUT);
-#endif
-	}
-
-	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+	ret = rt2800_drv_init_registers(rt2x00dev);
+	if (ret)
+		return ret;
 
 	rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg);
 	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
@@ -1328,7 +1349,6 @@
 		} else {
 			rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
 		}
-		rt2800_register_write(rt2x00dev, TX_SW_CFG2, reg);
 	} else if (rt2x00_rt(rt2x00dev, RT3070)) {
 		rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
 
@@ -1339,6 +1359,10 @@
 			rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
 			rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
 		}
+	} else if (rt2800_is_305x_soc(rt2x00dev)) {
+		rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
+		rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+		rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f);
 	} else {
 		rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
 		rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -1546,23 +1570,20 @@
 
 	/*
 	 * Clear all beacons
-	 * For the Beacon base registers we only need to clear
-	 * the first byte since that byte contains the VALID and OWNER
-	 * bits which (when set to 0) will invalidate the entire beacon.
 	 */
-	rt2800_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
-	rt2800_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
-	rt2800_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
-	rt2800_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
-	rt2800_register_write(rt2x00dev, HW_BEACON_BASE4, 0);
-	rt2800_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
-	rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
-	rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
+	rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0);
+	rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1);
+	rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2);
+	rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3);
+	rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4);
+	rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5);
+	rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6);
+	rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7);
 
 	if (rt2x00_is_usb(rt2x00dev)) {
-		rt2800_register_read(rt2x00dev, USB_CYC_CFG, &reg);
-		rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
-		rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg);
+		rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
+		rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30);
+		rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
 	}
 
 	rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
@@ -1706,8 +1727,7 @@
 	rt2800_bbp_write(rt2x00dev, 82, 0x62);
 	rt2800_bbp_write(rt2x00dev, 83, 0x6a);
 
-	if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D) ||
-	    rt2x00_rt_rev(rt2x00dev, RT2870, REV_RT2870D))
+	if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
 		rt2800_bbp_write(rt2x00dev, 84, 0x19);
 	else
 		rt2800_bbp_write(rt2x00dev, 84, 0x99);
@@ -2013,8 +2033,7 @@
 	if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
 	    rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
 	    rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
-		rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
-		if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
+		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
 			rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
 	}
 	rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
@@ -2147,7 +2166,6 @@
 		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
 		EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
 	} else if (rt2x00_rt(rt2x00dev, RT2860) ||
-		   rt2x00_rt(rt2x00dev, RT2870) ||
 		   rt2x00_rt(rt2x00dev, RT2872)) {
 		/*
 		 * There is a max of 2 RX streams for RT28x0 series
@@ -2169,6 +2187,8 @@
 		rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
 		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
 		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_ANT_DIVERSITY, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_DAC_TEST, 0);
 		rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
 		EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
 	}
@@ -2176,6 +2196,10 @@
 	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
 	if ((word & 0x00ff) == 0x00ff) {
 		rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
+		EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
+	}
+	if ((word & 0xff00) == 0xff00) {
 		rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
 				   LED_MODE_TXRX_ACTIVITY);
 		rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
@@ -2183,7 +2207,7 @@
 		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
 		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
 		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
-		EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
+		EEPROM(rt2x00dev, "Led Mode: 0x%04x\n", word);
 	}
 
 	/*
@@ -2251,7 +2275,6 @@
 			value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
 
 	if (!rt2x00_rt(rt2x00dev, RT2860) &&
-	    !rt2x00_rt(rt2x00dev, RT2870) &&
 	    !rt2x00_rt(rt2x00dev, RT2872) &&
 	    !rt2x00_rt(rt2x00dev, RT2883) &&
 	    !rt2x00_rt(rt2x00dev, RT3070) &&
@@ -2484,13 +2507,26 @@
 	    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
 	    IEEE80211_HW_SIGNAL_DBM |
 	    IEEE80211_HW_SUPPORTS_PS |
-	    IEEE80211_HW_PS_NULLFUNC_STACK;
+	    IEEE80211_HW_PS_NULLFUNC_STACK |
+	    IEEE80211_HW_AMPDU_AGGREGATION;
 
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
 				rt2x00_eeprom_addr(rt2x00dev,
 						   EEPROM_MAC_ADDR_0));
 
+	/*
+	 * As rt2800 has a global fallback table we cannot specify
+	 * more than one tx rate per frame, but since the hw will
+	 * try several rates (based on the fallback table) we should
+	 * still initialize max_rates to the maximum number of rates
+	 * we are going to try. Otherwise mac80211 will truncate our
+	 * reported tx rates and the rc algorithm will end up with
+	 * incorrect data.
+	 */
+	rt2x00dev->hw->max_rates = 7;
+	rt2x00dev->hw->max_rate_tries = 1;
+
 	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
 
 	/*
@@ -2528,16 +2564,19 @@
 	else
 		spec->ht.ht_supported = false;
 
-	/*
-	 * Don't set IEEE80211_HT_CAP_SUP_WIDTH_20_40 for now as it causes
-	 * reception problems with HT40 capable 11n APs
-	 */
 	spec->ht.cap =
+	    IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
 	    IEEE80211_HT_CAP_GRN_FLD |
 	    IEEE80211_HT_CAP_SGI_20 |
-	    IEEE80211_HT_CAP_SGI_40 |
-	    IEEE80211_HT_CAP_TX_STBC |
-	    IEEE80211_HT_CAP_RX_STBC;
+	    IEEE80211_HT_CAP_SGI_40;
+
+	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) >= 2)
+		spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+	spec->ht.cap |=
+	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) <<
+		IEEE80211_HT_CAP_RX_STBC_SHIFT;
+
 	spec->ht.ampdu_factor = 3;
 	spec->ht.ampdu_density = 4;
 	spec->ht.mcs.tx_params =
@@ -2724,6 +2763,35 @@
 	return tsf;
 }
 
+static int rt2800_ampdu_action(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       enum ieee80211_ampdu_mlme_action action,
+			       struct ieee80211_sta *sta,
+			       u16 tid, u16 *ssn)
+{
+	int ret = 0;
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+	case IEEE80211_AMPDU_RX_STOP:
+		/* we don't support RX aggregation yet */
+		ret = -ENOTSUPP;
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP:
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		break;
+	default:
+		WARNING((struct rt2x00_dev *)hw->priv, "Unknown AMPDU action\n");
+	}
+
+	return ret;
+}
+
 const struct ieee80211_ops rt2800_mac80211_ops = {
 	.tx			= rt2x00mac_tx,
 	.start			= rt2x00mac_start,
@@ -2741,5 +2809,11 @@
 	.conf_tx		= rt2800_conf_tx,
 	.get_tsf		= rt2800_get_tsf,
 	.rfkill_poll		= rt2x00mac_rfkill_poll,
+	.ampdu_action		= rt2800_ampdu_action,
 };
 EXPORT_SYMBOL_GPL(rt2800_mac80211_ops);
+
+MODULE_AUTHOR(DRV_PROJECT ", Bartlomiej Zolnierkiewicz");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Ralink RT2800 library");
+MODULE_LICENSE("GPL");
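
A note on the HT capability hunk above: STBC is no longer advertised
unconditionally but derived from the EEPROM antenna configuration, with TX
STBC only when at least two TX paths exist and the RX STBC field set to the
number of RX paths. Below is a minimal standalone sketch of that mapping;
the constants are assumed to mirror mac80211's ieee80211.h values and are
defined locally for illustration only, and the 0x3 mask is an addition of
the sketch, not of the patch.

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed to mirror mac80211's ieee80211.h; verify against your tree. */
	#define HT_CAP_TX_STBC		0x0080
	#define HT_CAP_RX_STBC_SHIFT	8	/* two-bit RX STBC stream count */

	static uint16_t ht_stbc_caps(unsigned int tx_paths, unsigned int rx_paths)
	{
		uint16_t cap = 0;

		/* TX STBC requires at least two transmit chains. */
		if (tx_paths >= 2)
			cap |= HT_CAP_TX_STBC;

		/* RX STBC field carries the supported stream count (0-3). */
		cap |= (rx_paths & 0x3) << HT_CAP_RX_STBC_SHIFT;

		return cap;
	}

	int main(void)
	{
		printf("1T2R -> 0x%04x\n", ht_stbc_caps(1, 2));
		printf("2T2R -> 0x%04x\n", ht_stbc_caps(2, 2));
		return 0;
	}

The same hunk also re-enables IEEE80211_HT_CAP_SUP_WIDTH_20_40, which works
now that HT40 tuning selects a proper center channel (see rt2x00ht.c below).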
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 94de999..8313dbf 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -40,6 +40,8 @@
 	int (*regbusy_read)(struct rt2x00_dev *rt2x00dev,
 			    const unsigned int offset,
 			    const struct rt2x00_field32 field, u32 *reg);
+
+	int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev);
 };
 
 static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev,
@@ -107,13 +109,22 @@
 	return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
 }
 
+static inline int rt2800_drv_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+	const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
+
+	return rt2800ops->drv_init_registers(rt2x00dev);
+}
+
 void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
 			const u8 command, const u8 token,
 			const u8 arg0, const u8 arg1);
 
-void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc);
+void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc);
 void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *txdesc);
 
+void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
+
 extern const struct rt2x00debug rt2800_rt2x00debug;
 
 int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index b2f2327..6f11760 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -51,7 +51,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 1;
+static int modparam_nohwcrypt = 0;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
@@ -139,8 +139,18 @@
 	eeprom.data = rt2x00dev;
 	eeprom.register_read = rt2800pci_eepromregister_read;
 	eeprom.register_write = rt2800pci_eepromregister_write;
-	eeprom.width = !rt2x00_get_field32(reg, E2PROM_CSR_TYPE) ?
-	    PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66;
+	switch (rt2x00_get_field32(reg, E2PROM_CSR_TYPE))
+	{
+	case 0:
+		eeprom.width = PCI_EEPROM_WIDTH_93C46;
+		break;
+	case 1:
+		eeprom.width = PCI_EEPROM_WIDTH_93C66;
+		break;
+	default:
+		eeprom.width = PCI_EEPROM_WIDTH_93C86;
+		break;
+	}
 	eeprom.reg_data_in = 0;
 	eeprom.reg_data_out = 0;
 	eeprom.reg_data_clock = 0;
@@ -446,6 +456,38 @@
 	rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
 }
 
+static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+
+	/*
+	 * Reset DMA indexes
+	 */
+	rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+	rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
+	rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
+	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+	return 0;
+}
+
 static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
 {
 	u32 reg;
@@ -465,7 +507,7 @@
 	/*
 	 * Send signal to firmware during boot time.
 	 */
-	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
+	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
 
 	/*
 	 * Enable RX.
@@ -613,18 +655,12 @@
 /*
  * TX descriptor initialization
  */
-static int rt2800pci_write_tx_data(struct queue_entry* entry,
-				   struct txentry_desc *txdesc)
+static void rt2800pci_write_tx_data(struct queue_entry* entry,
+				    struct txentry_desc *txdesc)
 {
-	int ret;
+	__le32 *txwi = (__le32 *) entry->skb->data;
 
-	ret = rt2x00pci_write_tx_data(entry, txdesc);
-	if (ret)
-		return ret;
-
-	rt2800_write_txwi(entry->skb, txdesc);
-
-	return 0;
+	rt2800_write_txwi(txwi, txdesc);
 }
 
 
@@ -684,49 +720,6 @@
 /*
  * TX data initialization
  */
-static void rt2800pci_write_beacon(struct queue_entry *entry,
-				   struct txentry_desc *txdesc)
-{
-	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-	unsigned int beacon_base;
-	u32 reg;
-
-	/*
-	 * Disable beaconing while we are reloading the beacon data,
-	 * otherwise we might be sending out invalid data.
-	 */
-	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
-	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
-	/*
-	 * Add the TXWI for the beacon to the skb.
-	 */
-	rt2800_write_txwi(entry->skb, txdesc);
-	skb_push(entry->skb, TXWI_DESC_SIZE);
-
-	/*
-	 * Write entire beacon with TXWI to register.
-	 */
-	beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
-	rt2800_register_multiwrite(rt2x00dev, beacon_base,
-				   entry->skb->data, entry->skb->len);
-
-	/*
-	 * Enable beaconing again.
-	 */
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
-	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
-	/*
-	 * Clean up beacon skb.
-	 */
-	dev_kfree_skb_any(entry->skb);
-	entry->skb = NULL;
-}
-
 static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
 				    const enum data_queue_qid queue_idx)
 {
@@ -832,29 +825,24 @@
 	struct txdone_entry_desc txdesc;
 	u32 word;
 	u32 reg;
-	u32 old_reg;
 	int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
 	u16 mcs, real_mcs;
+	int i;
 
 	/*
-	 * During each loop we will compare the freshly read
-	 * TX_STA_FIFO register value with the value read from
-	 * the previous loop. If the 2 values are equal then
-	 * we should stop processing because the chance it
-	 * quite big that the device has been unplugged and
-	 * we risk going into an endless loop.
+	 * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
+	 * at most X times and also stop processing once the TX_STA_FIFO_VALID
+	 * flag is not set anymore.
+	 *
+	 * The legacy drivers use X=TX_RING_SIZE but state in a comment
+	 * that the TX_STA_FIFO stack has a size of 16. We stick to our
+	 * tx ring size for now.
 	 */
-	old_reg = 0;
-
-	while (1) {
+	for (i = 0; i < TX_ENTRIES; i++) {
 		rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
 		if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
 			break;
 
-		if (old_reg == reg)
-			break;
-		old_reg = reg;
-
 		wcid    = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
 		ack     = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
 		pid     = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
@@ -880,8 +868,7 @@
 
 		/* Check if we got a match by looking at WCID/ACK/PID
 		 * fields */
-		txwi = (__le32 *)(entry->skb->data -
-				  rt2x00dev->ops->extra_tx_headroom);
+		txwi = (__le32 *) entry->skb->data;
 
 		rt2x00_desc_read(txwi, 1, &word);
 		tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
@@ -923,8 +910,12 @@
 			txdesc.retry = 7;
 		}
 
-		__set_bit(TXDONE_FALLBACK, &txdesc.flags);
-
+		/*
+		 * the frame was retried at least once
+		 * -> hw used fallback rates
+		 */
+		if (txdesc.retry)
+			__set_bit(TXDONE_FALLBACK, &txdesc.flags);
 
 		rt2x00lib_txdone(entry, &txdesc);
 	}
@@ -962,6 +953,12 @@
 	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
 		rt2800pci_txdone(rt2x00dev);
 
+	/*
+	 * Current beacon was sent out, fetch the next one
+	 */
+	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+		rt2x00lib_beacondone(rt2x00dev);
+
 	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
 		rt2800pci_wakeup(rt2x00dev);
 
@@ -996,6 +993,8 @@
 	.register_multiwrite	= rt2x00pci_register_multiwrite,
 
 	.regbusy_read		= rt2x00pci_regbusy_read,
+
+	.drv_init_registers	= rt2800pci_init_registers,
 };
 
 static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1064,7 +1063,7 @@
 	.link_tuner		= rt2800_link_tuner,
 	.write_tx_desc		= rt2800pci_write_tx_desc,
 	.write_tx_data		= rt2800pci_write_tx_data,
-	.write_beacon		= rt2800pci_write_beacon,
+	.write_beacon		= rt2800_write_beacon,
 	.kick_tx_queue		= rt2800pci_kick_tx_queue,
 	.kill_tx_queue		= rt2800pci_kill_tx_queue,
 	.fill_rxdone		= rt2800pci_fill_rxdone,
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index afc8e7d..5a8dda9 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -35,25 +35,6 @@
 #define RT2800PCI_H
 
 /*
- * PCI registers.
- */
-
-/*
- * E2PROM_CSR: EEPROM control register.
- * RELOAD: Write 1 to reload eeprom content.
- * TYPE: 0: 93c46, 1:93c66.
- * LOAD_STATUS: 1:loading, 0:done.
- */
-#define E2PROM_CSR			0x0004
-#define E2PROM_CSR_DATA_CLOCK		FIELD32(0x00000001)
-#define E2PROM_CSR_CHIP_SELECT		FIELD32(0x00000002)
-#define E2PROM_CSR_DATA_IN		FIELD32(0x00000004)
-#define E2PROM_CSR_DATA_OUT		FIELD32(0x00000008)
-#define E2PROM_CSR_TYPE			FIELD32(0x00000030)
-#define E2PROM_CSR_LOAD_STATUS		FIELD32(0x00000040)
-#define E2PROM_CSR_RELOAD		FIELD32(0x00000080)
-
-/*
  * Queue register offset macros
  */
 #define TX_QUEUE_REG_OFFSET		0x10
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 0f8b84b..4f85f7b 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -45,7 +45,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 1;
+static int modparam_nohwcrypt = 0;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
@@ -169,11 +169,8 @@
 	/*
 	 * Write firmware to device.
 	 */
-	rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
-					    USB_VENDOR_REQUEST_OUT,
-					    FIRMWARE_IMAGE_BASE,
-					    data + offset, length,
-					    REGISTER_TIMEOUT32(length));
+	rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
+				   data + offset, length);
 
 	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
 	rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
@@ -196,7 +193,7 @@
 	/*
 	 * Send signal to firmware during boot time.
 	 */
-	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
+	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
 
 	if (rt2x00_rt(rt2x00dev, RT3070) ||
 	    rt2x00_rt(rt2x00dev, RT3071) ||
@@ -246,6 +243,44 @@
 	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
 }
 
+static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	int i;
+
+	/*
+	 * Wait until BBP and RF are ready.
+	 */
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+		if (reg && reg != ~0)
+			break;
+		msleep(1);
+	}
+
+	if (i == REGISTER_BUSY_COUNT) {
+		ERROR(rt2x00dev, "Unstable hardware.\n");
+		return -EBUSY;
+	}
+
+	rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
+	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
+
+	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+	rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
+
+	rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
+				    USB_MODE_RESET, REGISTER_TIMEOUT);
+
+	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+	return 0;
+}
+
 static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
 {
 	u32 reg;
@@ -395,25 +430,29 @@
 /*
  * TX descriptor initialization
  */
+static void rt2800usb_write_tx_data(struct queue_entry* entry,
+				    struct txentry_desc *txdesc)
+{
+	__le32 *txwi = (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE);
+
+	rt2800_write_txwi(txwi, txdesc);
+}
+
+
 static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
 				    struct sk_buff *skb,
 				    struct txentry_desc *txdesc)
 {
 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
-	__le32 *txi = (__le32 *)(skb->data - TXWI_DESC_SIZE - TXINFO_DESC_SIZE);
+	__le32 *txi = (__le32 *) skb->data;
 	u32 word;
 
 	/*
-	 * Initialize TXWI descriptor
-	 */
-	rt2800_write_txwi(skb, txdesc);
-
-	/*
 	 * Initialize TXINFO descriptor
 	 */
 	rt2x00_desc_read(txi, 0, &word);
 	rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
-			   skb->len + TXWI_DESC_SIZE);
+			   skb->len - TXINFO_DESC_SIZE);
 	rt2x00_set_field32(&word, TXINFO_W0_WIV,
 			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
 	rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -426,6 +465,7 @@
 	/*
 	 * Register descriptor details in skb frame descriptor.
 	 */
+	skbdesc->flags |= SKBDESC_DESC_IN_SKB;
 	skbdesc->desc = txi;
 	skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
 }
@@ -433,51 +473,6 @@
 /*
  * TX data initialization
  */
-static void rt2800usb_write_beacon(struct queue_entry *entry,
-				   struct txentry_desc *txdesc)
-{
-	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-	unsigned int beacon_base;
-	u32 reg;
-
-	/*
-	 * Disable beaconing while we are reloading the beacon data,
-	 * otherwise we might be sending out invalid data.
-	 */
-	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
-	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
-	/*
-	 * Add the TXWI for the beacon to the skb.
-	 */
-	rt2800_write_txwi(entry->skb, txdesc);
-	skb_push(entry->skb, TXWI_DESC_SIZE);
-
-	/*
-	 * Write entire beacon with descriptor to register.
-	 */
-	beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
-	rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
-					    USB_VENDOR_REQUEST_OUT, beacon_base,
-					    entry->skb->data, entry->skb->len,
-					    REGISTER_TIMEOUT32(entry->skb->len));
-
-	/*
-	 * Enable beaconing again.
-	 */
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
-	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
-	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
-	/*
-	 * Clean up the beacon skb.
-	 */
-	dev_kfree_skb(entry->skb);
-	entry->skb = NULL;
-}
-
 static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
 {
 	int length;
@@ -595,6 +590,8 @@
 	.register_multiwrite	= rt2x00usb_register_multiwrite,
 
 	.regbusy_read		= rt2x00usb_regbusy_read,
+
+	.drv_init_registers	= rt2800usb_init_registers,
 };
 
 static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -658,8 +655,8 @@
 	.reset_tuner		= rt2800_reset_tuner,
 	.link_tuner		= rt2800_link_tuner,
 	.write_tx_desc		= rt2800usb_write_tx_desc,
-	.write_tx_data		= rt2x00usb_write_tx_data,
-	.write_beacon		= rt2800usb_write_beacon,
+	.write_tx_data		= rt2800usb_write_tx_data,
+	.write_beacon		= rt2800_write_beacon,
 	.get_tx_data_len	= rt2800usb_get_tx_data_len,
 	.kick_tx_queue		= rt2x00usb_kick_tx_queue,
 	.kill_tx_queue		= rt2x00usb_kill_tx_queue,
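
With the descriptors now built inside the skb, a USB TX frame is laid out as
TXINFO, then TXWI, then the 802.11 frame, and TXINFO_W0_USB_DMA_TX_PKT_LEN
counts everything behind the TXINFO word (skb->len - TXINFO_DESC_SIZE). A
rough standalone sketch of that bookkeeping; the 4- and 16-byte descriptor
sizes are assumptions for illustration, not taken from this patch.

	#include <stdio.h>

	/* Assumed descriptor sizes; see the rt2800 headers in your tree. */
	#define TXINFO_DESC_SIZE	4	/* one 32-bit TXINFO word */
	#define TXWI_DESC_SIZE		16	/* four 32-bit TXWI words */

	int main(void)
	{
		unsigned int frame_len = 100;	/* 802.11 frame only */
		unsigned int skb_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE + frame_len;

		/* The TXWI starts right behind the TXINFO word ... */
		unsigned int txwi_offset = TXINFO_DESC_SIZE;

		/* ... and the DMA length covers TXWI + frame, not the TXINFO. */
		unsigned int usb_dma_tx_pkt_len = skb_len - TXINFO_DESC_SIZE;

		printf("txwi at +%u, dma len %u of %u bytes\n",
		       txwi_offset, usb_dma_tx_pkt_len, skb_len);
		return 0;
	}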
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 2bca6a7..0722bad 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -32,43 +32,6 @@
 #define RT2800USB_H
 
 /*
- * USB registers.
- */
-
-/*
- * USB_DMA_CFG
- * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns.
- * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes.
- * PHY_CLEAR: phy watch dog enable.
- * TX_CLEAR: Clear USB DMA TX path.
- * TXOP_HALT: Halt TXOP count down when TX buffer is full.
- * RX_BULK_AGG_EN: Enable Rx Bulk Aggregation.
- * RX_BULK_EN: Enable USB DMA Rx.
- * TX_BULK_EN: Enable USB DMA Tx.
- * EP_OUT_VALID: OUT endpoint data valid.
- * RX_BUSY: USB DMA RX FSM busy.
- * TX_BUSY: USB DMA TX FSM busy.
- */
-#define USB_DMA_CFG			0x02a0
-#define USB_DMA_CFG_RX_BULK_AGG_TIMEOUT	FIELD32(0x000000ff)
-#define USB_DMA_CFG_RX_BULK_AGG_LIMIT	FIELD32(0x0000ff00)
-#define USB_DMA_CFG_PHY_CLEAR		FIELD32(0x00010000)
-#define USB_DMA_CFG_TX_CLEAR		FIELD32(0x00080000)
-#define USB_DMA_CFG_TXOP_HALT		FIELD32(0x00100000)
-#define USB_DMA_CFG_RX_BULK_AGG_EN	FIELD32(0x00200000)
-#define USB_DMA_CFG_RX_BULK_EN		FIELD32(0x00400000)
-#define USB_DMA_CFG_TX_BULK_EN		FIELD32(0x00800000)
-#define USB_DMA_CFG_EP_OUT_VALID	FIELD32(0x3f000000)
-#define USB_DMA_CFG_RX_BUSY		FIELD32(0x40000000)
-#define USB_DMA_CFG_TX_BUSY		FIELD32(0x80000000)
-
-/*
- * USB_CYC_CFG
- */
-#define USB_CYC_CFG			0x02a4
-#define USB_CYC_CFG_CLOCK_CYCLE		FIELD32(0x000000ff)
-
-/*
  * 8051 firmware image.
  */
 #define FIRMWARE_RT2870			"rt2870.bin"
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 6c1ff4c..788b0e4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -39,6 +39,7 @@
 #include <net/mac80211.h>
 
 #include "rt2x00debug.h"
+#include "rt2x00dump.h"
 #include "rt2x00leds.h"
 #include "rt2x00reg.h"
 #include "rt2x00queue.h"
@@ -159,6 +160,7 @@
 
 enum rt2x00_chip_intf {
 	RT2X00_CHIP_INTF_PCI,
+	RT2X00_CHIP_INTF_PCIE,
 	RT2X00_CHIP_INTF_USB,
 	RT2X00_CHIP_INTF_SOC,
 };
@@ -175,8 +177,7 @@
 #define RT2570		0x2570
 #define RT2661		0x2661
 #define RT2573		0x2573
-#define RT2860		0x2860	/* 2.4GHz PCI/CB */
-#define RT2870		0x2870
+#define RT2860		0x2860	/* 2.4GHz */
 #define RT2872		0x2872	/* WSOC */
 #define RT2883		0x2883	/* WSOC */
 #define RT3070		0x3070
@@ -549,8 +550,8 @@
 	void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
 			       struct sk_buff *skb,
 			       struct txentry_desc *txdesc);
-	int (*write_tx_data) (struct queue_entry *entry,
-			      struct txentry_desc *txdesc);
+	void (*write_tx_data) (struct queue_entry *entry,
+			       struct txentry_desc *txdesc);
 	void (*write_beacon) (struct queue_entry *entry,
 			      struct txentry_desc *txdesc);
 	int (*get_tx_data_len) (struct queue_entry *entry);
@@ -978,7 +979,13 @@
 
 static inline bool rt2x00_is_pci(struct rt2x00_dev *rt2x00dev)
 {
-	return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
+	return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI) ||
+	       rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
+}
+
+static inline bool rt2x00_is_pcie(struct rt2x00_dev *rt2x00dev)
+{
+	return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
 }
 
 static inline bool rt2x00_is_usb(struct rt2x00_dev *rt2x00dev)
@@ -999,6 +1006,13 @@
 void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
 
 /**
+ * rt2x00queue_unmap_skb - Unmap a skb from DMA.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @skb: The skb to unmap.
+ */
+void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
+
+/**
  * rt2x00queue_get_queue - Convert queue index to queue pointer
  * @rt2x00dev: Pointer to &struct rt2x00_dev.
  * @queue: rt2x00 queue index (see &enum data_queue_qid).
@@ -1015,6 +1029,26 @@
 					  enum queue_index index);
 
 /*
+ * Debugfs handlers.
+ */
+/**
+ * rt2x00debug_dump_frame - Dump a frame to userspace through debugfs.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @type: The type of frame that is being dumped.
+ * @skb: The skb containing the frame to be dumped.
+ */
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
+			    enum rt2x00_dump_type type, struct sk_buff *skb);
+#else
+static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
+					  enum rt2x00_dump_type type,
+					  struct sk_buff *skb)
+{
+}
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+
+/*
  * Interrupt context handlers.
  */
 void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 098315a..8dbd634 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -170,23 +170,27 @@
 		      unsigned int ieee80211_flags)
 {
 	struct rt2x00lib_conf libconf;
+	u16 hw_value;
 
 	memset(&libconf, 0, sizeof(libconf));
 
 	libconf.conf = conf;
 
 	if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) {
-		if (conf_is_ht40(conf))
+		if (conf_is_ht40(conf)) {
 			__set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
-		else
+			hw_value = rt2x00ht_center_channel(rt2x00dev, conf);
+		} else {
 			__clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
+			hw_value = conf->channel->hw_value;
+		}
 
 		memcpy(&libconf.rf,
-		       &rt2x00dev->spec.channels[conf->channel->hw_value],
+		       &rt2x00dev->spec.channels[hw_value],
 		       sizeof(libconf.rf));
 
 		memcpy(&libconf.channel,
-		       &rt2x00dev->spec.channels_info[conf->channel->hw_value],
+		       &rt2x00dev->spec.channels_info[hw_value],
 		       sizeof(libconf.channel));
 	}
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index e9fe93f..b0498e7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -211,6 +211,7 @@
 	if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags))
 		skb_queue_purge(&intf->frame_dump_skbqueue);
 }
+EXPORT_SYMBOL_GPL(rt2x00debug_dump_frame);
 
 static int rt2x00debug_file_open(struct inode *inode, struct file *file)
 {
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 3ae468c..12ee7bd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -216,6 +216,16 @@
 	rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
 
 	/*
+	 * Remove the extra tx headroom from the skb.
+	 */
+	skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);
+
+	/*
+	 * Signal that the TX descriptor is no longer in the skb.
+	 */
+	skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
+
+	/*
 	 * Remove L2 padding which was added during
 	 */
 	if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
@@ -224,7 +234,7 @@
 	/*
 	 * If the IV/EIV data was stripped from the frame before it was
 	 * passed to the hardware, we should now reinsert it again because
-	 * mac80211 will expect the the same data to be present it the
+	 * mac80211 will expect the same data to be present in the
 	 * frame as it was passed to us.
 	 */
 	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
@@ -241,8 +251,7 @@
 	 */
 	success =
 	    test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
-	    test_bit(TXDONE_UNKNOWN, &txdesc->flags) ||
-	    test_bit(TXDONE_FALLBACK, &txdesc->flags);
+	    test_bit(TXDONE_UNKNOWN, &txdesc->flags);
 
 	/*
 	 * Update TX statistics.
@@ -264,11 +273,22 @@
 	/*
 	 * Frame was send with retries, hardware tried
 	 * different rates to send out the frame, at each
-	 * retry it lowered the rate 1 step.
+	 * retry it lowered the rate 1 step except when the
+	 * lowest rate was used.
 	 */
 	for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) {
 		tx_info->status.rates[i].idx = rate_idx - i;
 		tx_info->status.rates[i].flags = rate_flags;
+
+		if (rate_idx - i == 0) {
+			/*
+			 * The lowest rate (index 0) was used until the
+			 * number of max retries was reached.
+			 */
+			tx_info->status.rates[i].count = retry_rates - i;
+			i++;
+			break;
+		}
 		tx_info->status.rates[i].count = 1;
 	}
 	if (i < (IEEE80211_TX_MAX_RATES - 1))
@@ -281,6 +301,21 @@
 			rt2x00dev->low_level_stats.dot11ACKFailureCount++;
 	}
 
+	/*
+	 * Every single frame has its own tx status, hence report
+	 * every frame as ampdu of size 1.
+	 *
+	 * TODO: if we can find out how many frames were aggregated
+	 * by the hw we could provide the real ampdu_len to mac80211
+	 * which would allow the rc algorithm to better decide on
+	 * which rates are suitable.
+	 */
+	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+		tx_info->status.ampdu_len = 1;
+		tx_info->status.ampdu_ack_len = success ? 1 : 0;
+	}
+
 	if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
 		if (success)
 			rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
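
The rate-status loop above is easier to follow outside the diff. Here is a
standalone sketch of the same accounting, assuming the hardware lowered the
rate index by one per retry and then kept retrying at index 0; names and
sizes are simplified stand-ins, not the driver's real types.

	#include <stdio.h>

	#define MAX_RATES 5	/* stand-in for IEEE80211_TX_MAX_RATES */

	struct rate_report {
		int idx;
		int count;
	};

	/*
	 * Expand a single "final rate index + number of attempts" report into
	 * per-rate counts: one attempt per intermediate rate, and all the
	 * remaining attempts at index 0.
	 */
	static int fill_rates(struct rate_report *rates, int rate_idx, int attempts)
	{
		int i;

		for (i = 0; i < attempts && i < MAX_RATES; i++) {
			rates[i].idx = rate_idx - i;
			if (rate_idx - i == 0) {
				rates[i].count = attempts - i;
				i++;
				break;
			}
			rates[i].count = 1;
		}
		if (i < MAX_RATES - 1)
			rates[i].idx = -1;	/* terminate the list */
		return i;
	}

	int main(void)
	{
		struct rate_report rates[MAX_RATES];
		int used = fill_rates(rates, 2, 6);
		int i;

		for (i = 0; i < used; i++)
			printf("rate[%d]: idx=%d count=%d\n",
			       i, rates[i].idx, rates[i].count);
		return 0;
	}

Six attempts starting at index 2 come out as counts 1, 1 and 4, which is the
shape mac80211 expects in tx_info->status.rates[].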
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index ed303b4..6df2e0b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -20,7 +20,12 @@
 
 /*
 	Module: rt2x00dump
-	Abstract: Data structures for the rt2x00debug & userspace.
+	Abstract:
+		Data structures for the rt2x00debug & userspace.
+
+		The declarations in this file are used by both rt2x00
+		and userspace, and must therefore be kept together in
+		this single header.
  */
 
 #ifndef RT2X00DUMP_H
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index 5a40760..c004cd3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -44,11 +44,22 @@
 		txdesc->mpdu_density = 0;
 
 	txdesc->ba_size = 7;	/* FIXME: What value is needed? */
-	txdesc->stbc = 0;	/* FIXME: What value is needed? */
 
-	txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
-	if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
-		txdesc->mcs |= 0x08;
+	txdesc->stbc =
+	    (tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT;
+
+	/*
+	 * If IEEE80211_TX_RC_MCS is set, txrate->idx already contains the
+	 * mcs rate to be used
+	 */
+	if (txrate->flags & IEEE80211_TX_RC_MCS) {
+		txdesc->mcs = txrate->idx;
+	} else {
+		txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
+		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			txdesc->mcs |= 0x08;
+	}
+
 
 	/*
 	 * Convert flags
@@ -84,3 +95,31 @@
 	else
 		txdesc->txop = TXOP_HTTXOP;
 }
+
+u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
+			    struct ieee80211_conf *conf)
+{
+	struct hw_mode_spec *spec = &rt2x00dev->spec;
+	int center_channel;
+	u16 i;
+
+	/*
+	 * Initialize center channel to current channel.
+	 */
+	center_channel = spec->channels[conf->channel->hw_value].channel;
+
+	/*
+	 * Adjust center channel to HT40+ and HT40- operation.
+	 */
+	if (conf_is_ht40_plus(conf))
+		center_channel += 2;
+	else if (conf_is_ht40_minus(conf))
+		center_channel -= (center_channel == 14) ? 1 : 2;
+
+	for (i = 0; i < spec->num_channels; i++)
+		if (spec->channels[i].channel == center_channel)
+			return i;
+
+	WARN_ON(1);
+	return conf->channel->hw_value;
+}
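
A quick worked example of the adjustment above: primary channel 6 with HT40+
gives center channel 8, channel 11 with HT40- gives 9, and channel 14 only
steps down by one. The real helper then returns the hw_value index of that
center channel from the channel table, not the channel number itself. A toy
sketch of just the arithmetic, for the 2.4GHz numbering:

	#include <stdio.h>

	/* Toy version of the HT40 center-channel adjustment (2.4GHz only). */
	static int center_channel(int primary, int ht40_plus)
	{
		if (ht40_plus)
			return primary + 2;
		return primary - ((primary == 14) ? 1 : 2);
	}

	int main(void)
	{
		printf("ch 6  HT40+ -> center %d\n", center_channel(6, 1));
		printf("ch 11 HT40- -> center %d\n", center_channel(11, 0));
		return 0;
	}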
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index be2e37f..ed27de1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -27,8 +27,6 @@
 #ifndef RT2X00LIB_H
 #define RT2X00LIB_H
 
-#include "rt2x00dump.h"
-
 /*
  * Interval defines
  */
@@ -107,13 +105,6 @@
 					struct queue_entry *entry);
 
 /**
- * rt2x00queue_unmap_skb - Unmap a skb from DMA.
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- * @skb: The skb to unmap.
- */
-void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
-
-/**
  * rt2x00queue_free_skb - free a skb
  * @rt2x00dev: Pointer to &struct rt2x00_dev.
  * @skb: The skb to free.
@@ -296,8 +287,6 @@
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
 void rt2x00debug_register(struct rt2x00_dev *rt2x00dev);
 void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev);
-void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
-			    enum rt2x00_dump_type type, struct sk_buff *skb);
 void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
 			       struct rxdone_entry_desc *rxdesc);
 #else
@@ -309,12 +298,6 @@
 {
 }
 
-static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
-					  enum rt2x00_dump_type type,
-					  struct sk_buff *skb)
-{
-}
-
 static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
 					     struct rxdone_entry_desc *rxdesc)
 {
@@ -384,12 +367,21 @@
 void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
 				   struct txentry_desc *txdesc,
 				   const struct rt2x00_rate *hwrate);
+
+u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
+			    struct ieee80211_conf *conf);
 #else
 static inline void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
 						 struct txentry_desc *txdesc,
 						 const struct rt2x00_rate *hwrate)
 {
 }
+
+static inline u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
+					  struct ieee80211_conf *conf)
+{
+	return conf->channel->hw_value;
+}
 #endif /* CONFIG_RT2X00_LIB_HT */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 0efbf5a..2f8136c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -271,11 +271,11 @@
 
 	/*
 	 * Link tuning should only be performed when
-	 * an active sta or master interface exists.
-	 * Single monitor mode interfaces should never have
-	 * work with link tuners.
+	 * an active sta interface exists. AP interfaces
+	 * don't need link tuning and monitor mode interfaces
+	 * should never have to work with link tuners.
 	 */
-	if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)
+	if (!rt2x00dev->intf_sta_count)
 		return;
 
 	rt2x00link_reset_tuner(rt2x00dev, false);
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index abbd857..3b838c0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -282,7 +282,8 @@
 	 * has been initialized. Otherwise the device can reset
 	 * the MAC registers.
 	 */
-	rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL);
+	rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
+			      intf->mac, intf->bssid);
 
 	/*
 	 * Some filters depend on the current working mode. We can force
@@ -562,7 +563,6 @@
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 	struct rt2x00_intf *intf = vif_to_intf(vif);
-	int update_bssid = 0;
 
 	/*
 	 * mac80211 might be calling this function while we are trying
@@ -577,10 +577,8 @@
 	 * conf->bssid can be NULL if coming from the internal
 	 * beacon update routine.
 	 */
-	if (changes & BSS_CHANGED_BSSID) {
-		update_bssid = 1;
+	if (changes & BSS_CHANGED_BSSID)
 		memcpy(&intf->bssid, bss_conf->bssid, ETH_ALEN);
-	}
 
 	spin_unlock(&intf->lock);
 
@@ -592,7 +590,7 @@
 	 */
 	if (changes & BSS_CHANGED_BSSID)
 		rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
-				      update_bssid ? bss_conf->bssid : NULL);
+				      bss_conf->bssid);
 
 	/*
 	 * Update the beacon.
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index f71eee6..fc9da83 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -60,34 +60,6 @@
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
 
-/*
- * TX data handlers.
- */
-int rt2x00pci_write_tx_data(struct queue_entry *entry,
-			    struct txentry_desc *txdesc)
-{
-	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-
-	/*
-	 * This should not happen, we already checked the entry
-	 * was ours. When the hardware disagrees there has been
-	 * a queue corruption!
-	 */
-	if (unlikely(rt2x00dev->ops->lib->get_entry_state(entry))) {
-		ERROR(rt2x00dev,
-		      "Corrupt queue %d, accessing entry which is not ours.\n"
-		      "Please file bug report to %s.\n",
-		      entry->queue->qid, DRV_PROJECT);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
-
-/*
- * TX/RX data handlers.
- */
 void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
 {
 	struct data_queue *queue = rt2x00dev->rx;
@@ -305,7 +277,10 @@
 	rt2x00dev->irq = pci_dev->irq;
 	rt2x00dev->name = pci_name(pci_dev);
 
-	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
+	if (pci_dev->is_pcie)
+		rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
+	else
+		rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
 
 	retval = rt2x00pci_alloc_reg(rt2x00dev);
 	if (retval)
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 51bcef3..b854d62 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -86,16 +86,6 @@
 			   u32 *reg);
 
 /**
- * rt2x00pci_write_tx_data - Initialize data for TX operation
- * @entry: The entry where the frame is located
- *
- * This function will initialize the DMA and skb descriptor
- * to prepare the entry for the actual TX operation.
- */
-int rt2x00pci_write_tx_data(struct queue_entry *entry,
-			    struct txentry_desc *txdesc);
-
-/**
  * struct queue_entry_priv_pci: Per entry PCI specific information
  *
  * @desc: Pointer to device descriptor
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 20dbdd6..5097fe0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -100,21 +100,8 @@
 {
 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
 
-	/*
-	 * If device has requested headroom, we should make sure that
-	 * is also mapped to the DMA so it can be used for transfering
-	 * additional descriptor information to the hardware.
-	 */
-	skb_push(skb, rt2x00dev->ops->extra_tx_headroom);
-
 	skbdesc->skb_dma =
 	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
-
-	/*
-	 * Restore data pointer to original location again.
-	 */
-	skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);
-
 	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
@@ -130,16 +117,12 @@
 	}
 
 	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
-		/*
-		 * Add headroom to the skb length, it has been removed
-		 * by the driver, but it was actually mapped to DMA.
-		 */
-		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
-				 skb->len + rt2x00dev->ops->extra_tx_headroom,
+		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
 				 DMA_TO_DEVICE);
 		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
 	}
 }
+EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
 
 void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
 {
@@ -370,13 +353,18 @@
 	/*
 	 * Check if more fragments are pending
 	 */
-	if (ieee80211_has_morefrags(hdr->frame_control) ||
-	    (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) {
+	if (ieee80211_has_morefrags(hdr->frame_control)) {
 		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
 	}
 
 	/*
+	 * Check if more frames (!= fragments) are pending
+	 */
+	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
+		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
+
+	/*
 	 * Beacons and probe responses require the tsf timestamp
 	 * to be inserted into the frame, except for a frame that has been injected
 	 * through a monitor interface. This latter is needed for testing a
@@ -416,12 +404,51 @@
 	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
 }
 
+static int rt2x00queue_write_tx_data(struct queue_entry *entry,
+				     struct txentry_desc *txdesc)
+{
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+
+	/*
+	 * This should not happen, we already checked the entry
+	 * was ours. When the hardware disagrees there has been
+	 * a queue corruption!
+	 */
+	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
+		     rt2x00dev->ops->lib->get_entry_state(entry))) {
+		ERROR(rt2x00dev,
+		      "Corrupt queue %d, accessing entry which is not ours.\n"
+		      "Please file bug report to %s.\n",
+		      entry->queue->qid, DRV_PROJECT);
+		return -EINVAL;
+	}
+
+	/*
+	 * Add the requested extra tx headroom in front of the skb.
+	 */
+	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
+	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);
+
+	/*
+	 * Call the driver's write_tx_data function, if it exists.
+	 */
+	if (rt2x00dev->ops->lib->write_tx_data)
+		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);
+
+	/*
+	 * Map the skb to DMA.
+	 */
+	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+		rt2x00queue_map_txskb(rt2x00dev, entry->skb);
+
+	return 0;
+}
+
 static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
 					    struct txentry_desc *txdesc)
 {
 	struct data_queue *queue = entry->queue;
 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-	enum rt2x00_dump_type dump_type;
 
 	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);
 
@@ -429,9 +456,7 @@
 	 * All processing on the frame has been completed, this means
 	 * it is now ready to be dumped to userspace through debugfs.
 	 */
-	dump_type = (txdesc->queue == QID_BEACON) ?
-					DUMP_FRAME_BEACON : DUMP_FRAME_TX;
-	rt2x00debug_dump_frame(rt2x00dev, dump_type, entry->skb);
+	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
 }
 
 static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
@@ -530,16 +555,12 @@
 	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
 	 * this frame will simply be dropped.
 	 */
-	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry,
-							       &txdesc))) {
+	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
 		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 		entry->skb = NULL;
 		return -EIO;
 	}
 
-	if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
-		rt2x00queue_map_txskb(queue->rt2x00dev, skb);
-
 	set_bit(ENTRY_DATA_PENDING, &entry->flags);
 
 	rt2x00queue_index_inc(queue, Q_INDEX);
@@ -595,11 +616,6 @@
 	skbdesc->entry = intf->beacon;
 
 	/*
-	 * Write TX descriptor into reserved room in front of the beacon.
-	 */
-	rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
-
-	/*
 	 * Send beacon to hardware and enable beacon genaration..
 	 */
 	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
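
Since the extra TX headroom is now pushed in rt2x00queue_write_tx_data() and
left in place while the skb is mapped, skb->len already covers the descriptor
space; the unmap no longer has to add it back, and the TX-done path strips it
again with skb_pull() before the frame is returned to mac80211. A trivial
sketch of that length bookkeeping, with a made-up headroom value:

	#include <stdio.h>

	#define EXTRA_TX_HEADROOM 20	/* example value, driver specific */

	int main(void)
	{
		unsigned int frame_len = 80;
		unsigned int skb_len = frame_len;

		/* write_tx_data: push and zero the descriptor space. */
		skb_len += EXTRA_TX_HEADROOM;

		/* map_txskb: map exactly skb_len, headroom included. */
		unsigned int dma_len = skb_len;

		/* txdone: unmap dma_len, then pull the headroom again. */
		skb_len -= EXTRA_TX_HEADROOM;

		printf("mapped %u bytes, %u handed back to mac80211\n",
		       dma_len, skb_len);
		return 0;
	}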
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index f791708..bd54f55 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -213,9 +213,16 @@
 /**
  * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
  *
+ * Every txdone report has to contain the basic result of the
+ * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
+ * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
+ * conjunction with all of these flags but should only be set
+ * if retries > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
+ * in conjunction with &TXDONE_FAILURE.
+ *
  * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
  * @TXDONE_SUCCESS: Frame was successfully send
- * @TXDONE_FALLBACK: Frame was successfully send using a fallback rate.
+ * @TXDONE_FALLBACK: Hardware used fallback rates for retries
  * @TXDONE_FAILURE: Frame was not successfully send
  * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
  *	frame transmission failed due to excessive retries.
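
To illustrate the documented combinations, a TX-done report could be put
together roughly as below. This is a simplified sketch using plain bit masks
instead of struct txdone_entry_desc and __set_bit(); the enum values are
local to the sketch.

	#include <stdio.h>

	enum { TXDONE_UNKNOWN, TXDONE_SUCCESS, TXDONE_FAILURE,
	       TXDONE_EXCESSIVE_RETRY, TXDONE_FALLBACK };

	static unsigned long txdone_flags(int acked, int retries, int max_retries)
	{
		unsigned long flags = 0;

		if (acked) {
			flags |= 1UL << TXDONE_SUCCESS;
		} else {
			flags |= 1UL << TXDONE_FAILURE;
			if (retries >= max_retries)
				flags |= 1UL << TXDONE_EXCESSIVE_RETRY;
		}

		/* FALLBACK may accompany any result, but only with retries > 0. */
		if (retries > 0)
			flags |= 1UL << TXDONE_FALLBACK;

		return flags;
	}

	int main(void)
	{
		printf("acked after 2 retries:  0x%lx\n", txdone_flags(1, 2, 7));
		printf("failed, 7 of 7 retries: 0x%lx\n", txdone_flags(0, 7, 7));
		return 0;
	}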
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index bd1546b..a22837c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -113,26 +113,6 @@
 				  const u16 offset, void *buffer,
 				  const u16 buffer_length, const int timeout)
 {
-	int status;
-
-	mutex_lock(&rt2x00dev->csr_mutex);
-
-	status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
-						requesttype, offset, buffer,
-						buffer_length, timeout);
-
-	mutex_unlock(&rt2x00dev->csr_mutex);
-
-	return status;
-}
-EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
-
-int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
-					const u8 request, const u8 requesttype,
-					const u16 offset, const void *buffer,
-					const u16 buffer_length,
-					const int timeout)
-{
 	int status = 0;
 	unsigned char *tb;
 	u16 off, len, bsize;
@@ -157,7 +137,7 @@
 
 	return status;
 }
-EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff);
+EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
 
 int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
 			   const unsigned int offset,
@@ -216,48 +196,28 @@
 	rt2x00lib_txdone(entry, &txdesc);
 }
 
-int rt2x00usb_write_tx_data(struct queue_entry *entry,
-			    struct txentry_desc *txdesc)
+static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
 	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
 	u32 length;
 
-	/*
-	 * Add the descriptor in front of the skb.
-	 */
-	skb_push(entry->skb, entry->queue->desc_size);
-	memset(entry->skb->data, 0, entry->queue->desc_size);
+	if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) {
+		/*
+		 * USB devices cannot blindly pass the skb->len as the
+		 * length of the data to usb_fill_bulk_urb. Pass the skb
+		 * to the driver to determine what the length should be.
+		 */
+		length = rt2x00dev->ops->lib->get_tx_data_len(entry);
 
-	/*
-	 * USB devices cannot blindly pass the skb->len as the
-	 * length of the data to usb_fill_bulk_urb. Pass the skb
-	 * to the driver to determine what the length should be.
-	 */
-	length = rt2x00dev->ops->lib->get_tx_data_len(entry);
+		usb_fill_bulk_urb(entry_priv->urb, usb_dev,
+				  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
+				  entry->skb->data, length,
+				  rt2x00usb_interrupt_txdone, entry);
 
-	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
-			  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
-			  entry->skb->data, length,
-			  rt2x00usb_interrupt_txdone, entry);
-
-	/*
-	 * Make sure the skb->data pointer points to the frame, not the
-	 * descriptor.
-	 */
-	skb_pull(entry->skb, entry->queue->desc_size);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);
-
-static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
-{
-	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
-
-	if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
 		usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
+	}
 }
 
 void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 621d0f8..2b7a188 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -167,25 +167,6 @@
 				   const u16 buffer_length, const int timeout);
 
 /**
- * rt2x00usb_vendor_request_large_buff - Send register command to device (buffered)
- * @rt2x00dev: Pointer to &struct rt2x00_dev
- * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
- * @requesttype: Request type &USB_VENDOR_REQUEST_*
- * @offset: Register start offset to perform action on
- * @buffer: Buffer where information will be read/written to by device
- * @buffer_length: Size of &buffer
- * @timeout: Operation timeout
- *
- * This function is used to transfer register data in blocks larger
- * then CSR_CACHE_SIZE. Use for firmware upload, keys and beacons.
- */
-int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
-					const u8 request, const u8 requesttype,
-					const u16 offset, const void *buffer,
-					const u16 buffer_length,
-					const int timeout);
-
-/**
  * rt2x00usb_vendor_request_sw - Send single register command to device
  * @rt2x00dev: Pointer to &struct rt2x00_dev
  * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
@@ -370,16 +351,6 @@
 void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
 
 /**
- * rt2x00usb_write_tx_data - Initialize URB for TX operation
- * @entry: The entry where the frame is located
- *
- * This function will initialize the URB and skb descriptor
- * to prepare the entry for the actual TX operation.
- */
-int rt2x00usb_write_tx_data(struct queue_entry *entry,
-			    struct txentry_desc *txdesc);
-
-/**
  * struct queue_entry_priv_usb: Per entry USB specific information
  *
  * @urb: Urb structure used for device communication.
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 6a74baf..0123fbc 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -931,6 +931,9 @@
 	u32 reg;
 
 	rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
+	rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
+	rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
 	rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
 			   libconf->conf->long_frame_max_tx_count);
 	rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
@@ -1874,6 +1877,16 @@
 	rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
 
 	/*
+	 * Write the TX descriptor for the beacon.
+	 */
+	rt61pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
+
+	/*
+	 * Dump beacon to userspace through debugfs.
+	 */
+	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+
+	/*
 	 * Write entire beacon with descriptor to register.
 	 */
 	beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
@@ -2039,29 +2052,24 @@
 	struct txdone_entry_desc txdesc;
 	u32 word;
 	u32 reg;
-	u32 old_reg;
 	int type;
 	int index;
+	int i;
 
 	/*
-	 * During each loop we will compare the freshly read
-	 * STA_CSR4 register value with the value read from
-	 * the previous loop. If the 2 values are equal then
-	 * we should stop processing because the chance is
-	 * quite big that the device has been unplugged and
-	 * we risk going into an endless loop.
+	 * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
+	 * at most X times and also stop processing once the TX_STA_FIFO_VALID
+	 * flag is not set anymore.
+	 *
+	 * The legacy drivers use X=TX_RING_SIZE but state in a comment
+	 * that the TX_STA_FIFO stack has a size of 16. We stick to our
+	 * tx ring size for now.
 	 */
-	old_reg = 0;
-
-	while (1) {
+	for (i = 0; i < TX_ENTRIES; i++) {
 		rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg);
 		if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
 			break;
 
-		if (old_reg == reg)
-			break;
-		old_reg = reg;
-
 		/*
 		 * Skip this entry when it contains an invalid
 		 * queue identication number.
@@ -2120,6 +2128,13 @@
 		}
 		txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT);
 
+		/*
+		 * the frame was retried at least once
+		 * -> hw used fallback rates
+		 */
+		if (txdesc.retry)
+			__set_bit(TXDONE_FALLBACK, &txdesc.flags);
+
 		rt2x00lib_txdone(entry, &txdesc);
 	}
 }
@@ -2185,6 +2200,12 @@
 	if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
 		rt61pci_wakeup(rt2x00dev);
 
+	/*
+	 * 5 - Beacon done interrupt.
+	 */
+	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
+		rt2x00lib_beacondone(rt2x00dev);
+
 	return IRQ_HANDLED;
 }
 
@@ -2577,6 +2598,18 @@
 						   EEPROM_MAC_ADDR_0));
 
 	/*
+	 * As rt61 has a global fallback table we cannot specify
+	 * more than one tx rate per frame, but since the hw will
+	 * try several rates (based on the fallback table) we should
+	 * still initialize max_rates to the maximum number of rates
+	 * we are going to try. Otherwise mac80211 will truncate our
+	 * reported tx rates and the rc algorithm will end up with
+	 * incorrect data.
+	 */
+	rt2x00dev->hw->max_rates = 7;
+	rt2x00dev->hw->max_rate_tries = 1;
+
+	/*
 	 * Initialize hw_mode information.
 	 */
 	spec->supported_bands = SUPPORT_BAND_2GHZ;
@@ -2773,7 +2806,6 @@
 	.reset_tuner		= rt61pci_reset_tuner,
 	.link_tuner		= rt61pci_link_tuner,
 	.write_tx_desc		= rt61pci_write_tx_desc,
-	.write_tx_data		= rt2x00pci_write_tx_data,
 	.write_beacon		= rt61pci_write_beacon,
 	.kick_tx_queue		= rt61pci_kick_tx_queue,
 	.kill_tx_queue		= rt61pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index df80f1a..e2e728a 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -153,13 +153,13 @@
 	u8 key[16];
 	u8 tx_mic[8];
 	u8 rx_mic[8];
-} __attribute__ ((packed));
+} __packed;
 
 struct hw_pairwise_ta_entry {
 	u8 address[6];
 	u8 cipher;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * Other on-chip shared memory space.
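
The __packed marker introduced above is the kernel's shorthand for the GCC
packed attribute, roughly as follows (shown for illustration; the actual
definition lives in the compiler headers, not in this patch):

	/* Roughly what include/linux/compiler-gcc.h provides: */
	#define __packed __attribute__((packed))

	/* Making the two spellings in these hunks equivalent: */
	struct example_entry {
		unsigned char address[6];
		unsigned char cipher;
		unsigned char reserved;
	} __packed;	/* same layout as __attribute__ ((packed)) */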
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 6e0d82e..286dd97 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -270,7 +270,6 @@
 {
 	struct hw_key_entry key_entry;
 	struct rt2x00_field32 field;
-	int timeout;
 	u32 mask;
 	u32 reg;
 
@@ -306,12 +305,8 @@
 		       sizeof(key_entry.rx_mic));
 
 		reg = SHARED_KEY_ENTRY(key->hw_key_idx);
-		timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
-		rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
-						    USB_VENDOR_REQUEST_OUT, reg,
-						    &key_entry,
-						    sizeof(key_entry),
-						    timeout);
+		rt2x00usb_register_multiwrite(rt2x00dev, reg,
+					      &key_entry, sizeof(key_entry));
 
 		/*
 		 * The cipher types are stored over 2 registers.
@@ -372,7 +367,6 @@
 {
 	struct hw_pairwise_ta_entry addr_entry;
 	struct hw_key_entry key_entry;
-	int timeout;
 	u32 mask;
 	u32 reg;
 
@@ -407,17 +401,11 @@
 		       sizeof(key_entry.rx_mic));
 
 		reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
-		timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
-		rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
-						    USB_VENDOR_REQUEST_OUT, reg,
-						    &key_entry,
-						    sizeof(key_entry),
-						    timeout);
+		rt2x00usb_register_multiwrite(rt2x00dev, reg,
+					      &key_entry, sizeof(key_entry));
 
 		/*
 		 * Send the address and cipher type to the hardware register.
-		 * This data fits within the CSR cache size, so we can use
-		 * rt2x00usb_register_multiwrite() directly.
 		 */
 		memset(&addr_entry, 0, sizeof(addr_entry));
 		memcpy(&addr_entry, crypto->address, ETH_ALEN);
@@ -828,6 +816,9 @@
 	u32 reg;
 
 	rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
+	rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
+	rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
 	rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
 			   libconf->conf->long_frame_max_tx_count);
 	rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
@@ -1092,11 +1083,7 @@
 	/*
 	 * Write firmware to device.
 	 */
-	rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
-					    USB_VENDOR_REQUEST_OUT,
-					    FIRMWARE_IMAGE_BASE,
-					    data, len,
-					    REGISTER_TIMEOUT32(len));
+	rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, data, len);
 
 	/*
 	 * Send firmware request to device to load firmware,
@@ -1442,7 +1429,7 @@
 				  struct txentry_desc *txdesc)
 {
 	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
-	__le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE);
+	__le32 *txd = (__le32 *) skb->data;
 	u32 word;
 
 	/*
@@ -1505,6 +1492,7 @@
 	/*
 	 * Register descriptor details in skb frame descriptor.
 	 */
+	skbdesc->flags |= SKBDESC_DESC_IN_SKB;
 	skbdesc->desc = txd;
 	skbdesc->desc_len = TXD_DESC_SIZE;
 }
@@ -1528,18 +1516,27 @@
 	rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
 
 	/*
-	 * Take the descriptor in front of the skb into account.
+	 * Add space for the descriptor in front of the skb.
 	 */
 	skb_push(entry->skb, TXD_DESC_SIZE);
+	memset(entry->skb->data, 0, TXD_DESC_SIZE);
+
+	/*
+	 * Write the TX descriptor for the beacon.
+	 */
+	rt73usb_write_tx_desc(rt2x00dev, entry->skb, txdesc);
+
+	/*
+	 * Dump beacon to userspace through debugfs.
+	 */
+	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
 
 	/*
 	 * Write entire beacon with descriptor to register.
 	 */
 	beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
-	rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
-					    USB_VENDOR_REQUEST_OUT, beacon_base,
-					    entry->skb->data, entry->skb->len,
-					    REGISTER_TIMEOUT32(entry->skb->len));
+	rt2x00usb_register_multiwrite(rt2x00dev, beacon_base,
+				      entry->skb->data, entry->skb->len);
 
 	/*
 	 * Enable beaconing again.
@@ -2252,7 +2249,6 @@
 	.reset_tuner		= rt73usb_reset_tuner,
 	.link_tuner		= rt73usb_link_tuner,
 	.write_tx_desc		= rt73usb_write_tx_desc,
-	.write_tx_data		= rt2x00usb_write_tx_data,
 	.write_beacon		= rt73usb_write_beacon,
 	.get_tx_data_len	= rt73usb_get_tx_data_len,
 	.kick_tx_queue		= rt2x00usb_kick_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 7abe7eb..44d5b2b 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -108,13 +108,13 @@
 	u8 key[16];
 	u8 tx_mic[8];
 	u8 rx_mic[8];
-} __attribute__ ((packed));
+} __packed;
 
 struct hw_pairwise_ta_entry {
 	u8 address[6];
 	u8 cipher;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * Since NULL frame won't be that long (256 byte),
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 4baf0cf..3052331 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -36,7 +36,7 @@
 	u8 agc;
 	u8 flags2;
 	u32 reserved[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct rtl8180_rx_desc {
 	__le32 flags;
@@ -45,7 +45,7 @@
 		__le32 rx_buf;
 		__le64 tsft;
 	};
-} __attribute__ ((packed));
+} __packed;
 
 struct rtl8180_tx_ring {
 	struct rtl8180_tx_desc *desc;
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 515817d..4270502 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -671,7 +671,7 @@
 	       (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
 }
 
-void rtl8180_beacon_work(struct work_struct *work)
+static void rtl8180_beacon_work(struct work_struct *work)
 {
 	struct rtl8180_vif *vif_priv =
 		container_of(work, struct rtl8180_vif, beacon_work.work);
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 6bb3211..9887816 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -47,7 +47,7 @@
 	u8 agc;
 	u8 reserved;
 	__le64 mac_time;
-} __attribute__((packed));
+} __packed;
 
 struct rtl8187b_rx_hdr {
 	__le32 flags;
@@ -59,7 +59,7 @@
 	__le16 snr_long2end;
 	s8 pwdb_g12;
 	u8 fot;
-} __attribute__((packed));
+} __packed;
 
 /* {rtl8187,rtl8187b}_tx_info is in skb */
 
@@ -68,7 +68,7 @@
 	__le16 rts_duration;
 	__le16 len;
 	__le32 retry;
-} __attribute__((packed));
+} __packed;
 
 struct rtl8187b_tx_hdr {
 	__le32 flags;
@@ -80,7 +80,7 @@
 	__le32 unused_3;
 	__le32 retry;
 	__le32 unused_4[2];
-} __attribute__((packed));
+} __packed;
 
 enum {
 	DEVICE_RTL8187,
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 8522490..978519d 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -185,7 +185,7 @@
 	u8	reserved_22[4];
 	__le16	TALLY_CNT;
 	u8	TALLY_SEL;
-} __attribute__((packed));
+} __packed;
 
 struct rtl818x_rf_ops {
 	char *name;
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 337fc7b..2f98058 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -41,7 +41,7 @@
 
 config WL1271
 	tristate "TI wl1271 support"
-	depends on WL12XX && SPI_MASTER && GENERIC_HARDIRQS
+	depends on WL12XX && GENERIC_HARDIRQS
 	depends on INET
 	select FW_LOADER
 	select CRC7
@@ -65,7 +65,7 @@
 
 config WL1271_SDIO
 	tristate "TI wl1271 SDIO support"
-	depends on WL1271 && MMC && ARM
+	depends on WL1271 && MMC
 	---help---
 	  This module adds support for the SDIO interface of adapters using
 	  TI wl1271 chipset.  Select this if your platform is using
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 26160c4..842df31 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -60,7 +60,7 @@
 	/* the number of missed sequence numbers in the sequential */
 	/* values of frame seq numbers */
 	u32 seq_num_miss;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_revision {
 	struct acx_header header;
@@ -89,7 +89,7 @@
 	 * bits 24 - 31: Chip ID - The WiLink chip ID.
 	 */
 	u32 hw_version;
-} __attribute__ ((packed));
+} __packed;
 
 enum wl1251_psm_mode {
 	/* Active mode */
@@ -111,7 +111,7 @@
 	/* 2 - ELP mode: Deep / Max sleep*/
 	u8  sleep_auth;
 	u8  padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 enum {
 	HOSTIF_PCI_MASTER_HOST_INDIRECT,
@@ -159,7 +159,7 @@
 	 * complete ring until an interrupt is generated.
 	 */
 	u32 tx_complete_timeout;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct acx_data_path_params_resp {
@@ -180,7 +180,7 @@
 	u32 tx_control_addr;
 
 	u32 tx_complete_addr;
-} __attribute__ ((packed));
+} __packed;
 
 #define TX_MSDU_LIFETIME_MIN       0
 #define TX_MSDU_LIFETIME_MAX       3000
@@ -197,7 +197,7 @@
 	 * firmware discards the MSDU.
 	 */
 	u32 lifetime;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * RX Config Options Table
@@ -285,7 +285,7 @@
 
 	u32 config_options;
 	u32 filter_options;
-} __attribute__ ((packed));
+} __packed;
 
 enum {
 	QOS_AC_BE = 0,
@@ -325,13 +325,13 @@
 
 	/* Lowest memory blocks guaranteed for this queue */
 	u16 low_threshold;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_packet_detection {
 	struct acx_header header;
 
 	u32 threshold;
-} __attribute__ ((packed));
+} __packed;
 
 
 enum acx_slot_type {
@@ -349,7 +349,7 @@
 	u8 wone_index; /* Reserved */
 	u8 slot_time;
 	u8 reserved[6];
-} __attribute__ ((packed));
+} __packed;
 
 
 #define ADDRESS_GROUP_MAX	(8)
@@ -362,7 +362,7 @@
 	u8 num_groups;
 	u8 pad[2];
 	u8 mac_table[ADDRESS_GROUP_MAX_LEN];
-} __attribute__ ((packed));
+} __packed;
 
 
 #define  RX_TIMEOUT_PS_POLL_MIN    0
@@ -388,7 +388,7 @@
 	 * from an UPSD enabled queue.
 	 */
 	u16 upsd_timeout;
-} __attribute__ ((packed));
+} __packed;
 
 #define RTS_THRESHOLD_MIN              0
 #define RTS_THRESHOLD_MAX              4096
@@ -399,7 +399,7 @@
 
 	u16 threshold;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_beacon_filter_option {
 	struct acx_header header;
@@ -415,7 +415,7 @@
 	 */
 	u8 max_num_beacons;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * ACXBeaconFilterEntry (not 221)
@@ -461,7 +461,7 @@
 	u8 num_ie;
 	u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
 	u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
 
 #define SYNCH_FAIL_DEFAULT_THRESHOLD    10     /* number of beacons */
 #define NO_BEACON_DEFAULT_TIMEOUT       (500) /* in microseconds */
@@ -494,7 +494,7 @@
 	 */
 	u8 enable;
 	u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
 
 #define PTA_ANTENNA_TYPE_DEF		  (0)
 #define PTA_BT_HP_MAXTIME_DEF		  (2000)
@@ -648,7 +648,7 @@
 
 	/* range: 0 - 20  default: 1 */
 	u8 bt_hp_respected_num;
-} __attribute__ ((packed));
+} __packed;
 
 #define CCA_THRSH_ENABLE_ENERGY_D       0x140A
 #define CCA_THRSH_DISABLE_ENERGY_D      0xFFEF
@@ -660,7 +660,7 @@
 	u16 rx_cca_threshold;
 	u8 tx_energy_detection;
 	u8 pad;
-} __attribute__ ((packed));
+} __packed;
 
 #define BCN_RX_TIMEOUT_DEF_VALUE        10000
 #define BROADCAST_RX_TIMEOUT_DEF_VALUE  20000
@@ -679,14 +679,14 @@
 	/* Consecutive PS Poll failures before updating the host */
 	u8 ps_poll_threshold;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_event_mask {
 	struct acx_header header;
 
 	u32 event_mask;
 	u32 high_event_mask; /* Unused */
-} __attribute__ ((packed));
+} __packed;
 
 #define CFG_RX_FCS		BIT(2)
 #define CFG_RX_ALL_GOOD		BIT(3)
@@ -729,7 +729,7 @@
 	u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */
 	u8 tx_mgt_frame_rate;
 	u8 tx_mgt_frame_mod;
-} __attribute__ ((packed));
+} __packed;
 
 /* STA MAC */
 struct acx_dot11_station_id {
@@ -737,28 +737,28 @@
 
 	u8 mac[ETH_ALEN];
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_feature_config {
 	struct acx_header header;
 
 	u32 options;
 	u32 data_flow_options;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_current_tx_power {
 	struct acx_header header;
 
 	u8  current_tx_power;
 	u8  padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_dot11_default_key {
 	struct acx_header header;
 
 	u8 id;
 	u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_tsf_info {
 	struct acx_header header;
@@ -769,7 +769,7 @@
 	u32 last_TBTT_lsb;
 	u8 last_dtim_count;
 	u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
 
 enum acx_wake_up_event {
 	WAKE_UP_EVENT_BEACON_BITMAP	= 0x01, /* Wake on every Beacon*/
@@ -785,7 +785,7 @@
 	u8 wake_up_event; /* Only one bit can be set */
 	u8 listen_interval;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_aid {
 	struct acx_header header;
@@ -795,7 +795,7 @@
 	 */
 	u16 aid;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 enum acx_preamble_type {
 	ACX_PREAMBLE_LONG = 0,
@@ -811,7 +811,7 @@
 	 */
 	u8 preamble;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 enum acx_ctsprotect_type {
 	CTSPROTECT_DISABLE = 0,
@@ -822,11 +822,11 @@
 	struct acx_header header;
 	u8 ctsprotect;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_tx_statistics {
 	u32 internal_desc_overflow;
-}  __attribute__ ((packed));
+}  __packed;
 
 struct acx_rx_statistics {
 	u32 out_of_mem;
@@ -837,14 +837,14 @@
 	u32 xfr_hint_trig;
 	u32 path_reset;
 	u32 reset_counter;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_dma_statistics {
 	u32 rx_requested;
 	u32 rx_errors;
 	u32 tx_requested;
 	u32 tx_errors;
-}  __attribute__ ((packed));
+}  __packed;
 
 struct acx_isr_statistics {
 	/* host command complete */
@@ -903,7 +903,7 @@
 
 	/* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
 	u32 low_rssi;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_wep_statistics {
 	/* WEP address keys configured */
@@ -925,7 +925,7 @@
 
 	/* WEP decrypt interrupts */
 	u32 interrupt;
-} __attribute__ ((packed));
+} __packed;
 
 #define ACX_MISSED_BEACONS_SPREAD 10
 
@@ -985,12 +985,12 @@
 
 	/* the number of beacons in awake mode */
 	u32 rcvd_awake_beacons;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_mic_statistics {
 	u32 rx_pkts;
 	u32 calc_failure;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_aes_statistics {
 	u32 encrypt_fail;
@@ -999,7 +999,7 @@
 	u32 decrypt_packets;
 	u32 encrypt_interrupt;
 	u32 decrypt_interrupt;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_event_statistics {
 	u32 heart_beat;
@@ -1010,7 +1010,7 @@
 	u32 oom_late;
 	u32 phy_transmit_error;
 	u32 tx_stuck;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_ps_statistics {
 	u32 pspoll_timeouts;
@@ -1020,7 +1020,7 @@
 	u32 pspoll_max_apturn;
 	u32 pspoll_utilization;
 	u32 upsd_utilization;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_rxpipe_statistics {
 	u32 rx_prep_beacon_drop;
@@ -1028,7 +1028,7 @@
 	u32 beacon_buffer_thres_host_int_trig_rx_data;
 	u32 missed_beacon_host_int_trig_rx_data;
 	u32 tx_xfr_host_int_trig_rx_data;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_statistics {
 	struct acx_header header;
@@ -1044,7 +1044,7 @@
 	struct acx_event_statistics event;
 	struct acx_ps_statistics ps;
 	struct acx_rxpipe_statistics rxpipe;
-} __attribute__ ((packed));
+} __packed;
 
 #define ACX_MAX_RATE_CLASSES       8
 #define ACX_RATE_MASK_UNSPECIFIED  0
@@ -1063,7 +1063,7 @@
 
 	u32 rate_class_cnt;
 	struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1251_acx_memory {
 	__le16 num_stations; /* number of STAs to be supported. */
@@ -1082,7 +1082,7 @@
 	u8 tx_min_mem_block_num;
 	u8 num_ssid_profiles;
 	__le16 debug_buffer_size;
-} __attribute__ ((packed));
+} __packed;
 
 
 #define ACX_RX_DESC_MIN                1
@@ -1094,7 +1094,7 @@
 	u8 type;
 	u8 priority;
 	__le32 dma_address;
-} __attribute__ ((packed));
+} __packed;
 
 #define ACX_TX_DESC_MIN                1
 #define ACX_TX_DESC_MAX                127
@@ -1103,7 +1103,7 @@
     u8 num_descs;
     u8 pad[2];
     u8 attributes;
-} __attribute__ ((packed));
+} __packed;
 
 #define MAX_TX_QUEUE_CONFIGS 5
 #define MAX_TX_QUEUES 4
@@ -1113,7 +1113,7 @@
 	struct wl1251_acx_memory mem_config;
 	struct wl1251_acx_rx_queue_config rx_queue_config;
 	struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1251_acx_mem_map {
 	struct acx_header header;
@@ -1147,7 +1147,7 @@
 
 	/* Number of blocks FW allocated for RX packets */
 	u32 num_rx_mem_blocks;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct wl1251_acx_wr_tbtt_and_dtim {
@@ -1164,7 +1164,7 @@
 	*/
 	u8  dtim;
 	u8  padding;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1251_acx_ac_cfg {
 	struct acx_header header;
@@ -1194,7 +1194,7 @@
 
 	/* The TX Op Limit (in microseconds) for the access class. */
 	u16 txop_limit;
-} __attribute__ ((packed));
+} __packed;
 
 
 enum wl1251_acx_channel_type {
@@ -1245,7 +1245,7 @@
 
 	/* not supported */
 	u32 apsdconf[2];
-} __attribute__ ((packed));
+} __packed;
 
 /*************************************************************************
 
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index 4ad67ca..7e70dd5 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -106,7 +106,7 @@
 	u16 status;
 	/* payload */
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct  wl1251_command {
 	struct wl1251_cmd_header header;
@@ -201,7 +201,7 @@
 	u8 ssid_len;
 	u8 ssid[32];
 
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1251_scan_ch_parameters {
 	u32 min_duration; /* in TU */
@@ -218,7 +218,7 @@
 	u8 tx_power_att;
 	u8 channel;
 	u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
 
 /* SCAN parameters */
 #define SCAN_MAX_NUM_OF_CHANNELS 16
@@ -228,7 +228,7 @@
 
 	struct wl1251_scan_parameters params;
 	struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
-} __attribute__ ((packed));
+} __packed;
 
 enum {
 	BSS_TYPE_IBSS = 0,
@@ -276,14 +276,14 @@
 	u8 tx_mgt_frame_rate; /* OBSOLETE */
 	u8 tx_mgt_frame_mod;  /* OBSOLETE */
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_enabledisable_path {
 	struct wl1251_cmd_header header;
 
 	u8 channel;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 #define WL1251_MAX_TEMPLATE_SIZE 300
 
@@ -292,7 +292,7 @@
 
 	__le16 size;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 #define TIM_ELE_ID    5
 #define PARTIAL_VBM_MAX    251
@@ -304,7 +304,7 @@
 	u8 dtim_period;
 	u8 bitmap_ctrl;
 	u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */
-} __attribute__ ((packed));
+} __packed;
 
 /* Virtual Bit Map update */
 struct wl1251_cmd_vbm_update {
@@ -312,7 +312,7 @@
 	__le16 len;
 	u8  padding[2];
 	struct wl1251_tim tim;
-} __attribute__ ((packed));
+} __packed;
 
 enum wl1251_cmd_ps_mode {
 	STATION_ACTIVE_MODE,
@@ -333,7 +333,7 @@
 	u8 hang_over_period;
 	u16 null_data_rate;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1251_cmd_trigger_scan_to {
 	struct wl1251_cmd_header header;
@@ -411,7 +411,7 @@
 	u8 key[MAX_KEY_SIZE];
 	u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
 	u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
-} __attribute__ ((packed));
+} __packed;
 
 
 #endif /* __WL1251_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.h b/drivers/net/wireless/wl12xx/wl1251_event.h
index be0ac54..f48a2b6 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.h
+++ b/drivers/net/wireless/wl12xx/wl1251_event.h
@@ -82,7 +82,7 @@
 	u32 report_1;
 	u32 report_2;
 	u32 report_3;
-} __attribute__ ((packed));
+} __packed;
 
 struct event_mailbox {
 	u32 events_vector;
@@ -112,7 +112,7 @@
 	struct event_debug_report report;
 	u8 average_snr_level;
 	u8 padding[19];
-} __attribute__ ((packed));
+} __packed;
 
 int wl1251_event_unmask(struct wl1251 *wl);
 void wl1251_event_mbox_config(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 00b2428..c8f2689 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -124,7 +124,7 @@
 	}
 
 	wl->nvs_len = fw->size;
-	wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL);
+	wl->nvs = kmemdup(fw->data, wl->nvs_len, GFP_KERNEL);
 
 	if (!wl->nvs) {
 		wl1251_error("could not allocate memory for the nvs file");
@@ -132,8 +132,6 @@
 		goto out;
 	}
 
-	memcpy(wl->nvs, fw->data, wl->nvs_len);
-
 	ret = 0;
 
 out:
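
The wl1251_main.c hunk above folds the kmalloc()-then-memcpy() pair into a single kmemdup() call, so the NVS image is duplicated in one step and the error path only needs the NULL check that is already there. A user-space sketch of what such a helper does (minus the GFP flags):

#include <stdlib.h>
#include <string.h>

/* illustrative user-space equivalent of kmemdup() */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char nvs[] = "nvs bytes";
	char *copy = memdup(nvs, sizeof(nvs));
	int ret = copy ? 0 : 1;

	free(copy);
	return ret;
}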
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.h b/drivers/net/wireless/wl12xx/wl1251_rx.h
index 563a3fd..da4e534 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.h
@@ -117,7 +117,7 @@
 	s8 rssi; /* in dB */
 	u8 rcpi; /* in dB */
 	u8 snr; /* in dB */
-} __attribute__ ((packed));
+} __packed;
 
 void wl1251_rx(struct wl1251 *wl);
 
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index c561332..b901b61 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -37,11 +37,17 @@
 #define SDIO_DEVICE_ID_TI_WL1251	0x9066
 #endif
 
+struct wl1251_sdio {
+	struct sdio_func *func;
+	u32 elp_val;
+};
+
 static struct wl12xx_platform_data *wl12xx_board_data;
 
 static struct sdio_func *wl_to_func(struct wl1251 *wl)
 {
-	return wl->if_priv;
+	struct wl1251_sdio *wl_sdio = wl->if_priv;
+	return wl_sdio->func;
 }
 
 static void wl1251_sdio_interrupt(struct sdio_func *func)
@@ -90,10 +96,17 @@
 static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val)
 {
 	int ret = 0;
-	struct sdio_func *func = wl_to_func(wl);
+	struct wl1251_sdio *wl_sdio = wl->if_priv;
+	struct sdio_func *func = wl_sdio->func;
 
+	/*
+	 * The hardware only supports RAW (read after write) access for
+	 * reading, regular sdio_readb won't work here (it interprets
+	 * the unused bits of CMD52 as write data even if we send read
+	 * request).
+	 */
 	sdio_claim_host(func);
-	*val = sdio_readb(func, addr, &ret);
+	*val = sdio_writeb_readb(func, wl_sdio->elp_val, addr, &ret);
 	sdio_release_host(func);
 
 	if (ret)
@@ -103,7 +116,8 @@
 static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val)
 {
 	int ret = 0;
-	struct sdio_func *func = wl_to_func(wl);
+	struct wl1251_sdio *wl_sdio = wl->if_priv;
+	struct sdio_func *func = wl_sdio->func;
 
 	sdio_claim_host(func);
 	sdio_writeb(func, val, addr, &ret);
@@ -111,6 +125,8 @@
 
 	if (ret)
 		wl1251_error("sdio_writeb failed (%d)", ret);
+	else
+		wl_sdio->elp_val = val;
 }
 
 static void wl1251_sdio_reset(struct wl1251 *wl)
@@ -197,6 +213,7 @@
 	int ret;
 	struct wl1251 *wl;
 	struct ieee80211_hw *hw;
+	struct wl1251_sdio *wl_sdio;
 
 	hw = wl1251_alloc_hw();
 	if (IS_ERR(hw))
@@ -204,6 +221,12 @@
 
 	wl = hw->priv;
 
+	wl_sdio = kzalloc(sizeof(*wl_sdio), GFP_KERNEL);
+	if (wl_sdio == NULL) {
+		ret = -ENOMEM;
+		goto out_free_hw;
+	}
+
 	sdio_claim_host(func);
 	ret = sdio_enable_func(func);
 	if (ret)
@@ -213,7 +236,8 @@
 	sdio_release_host(func);
 
 	SET_IEEE80211_DEV(hw, &func->dev);
-	wl->if_priv = func;
+	wl_sdio->func = func;
+	wl->if_priv = wl_sdio;
 	wl->if_ops = &wl1251_sdio_ops;
 	wl->set_power = wl1251_sdio_set_power;
 
@@ -259,6 +283,8 @@
 	sdio_disable_func(func);
 release:
 	sdio_release_host(func);
+	kfree(wl_sdio);
+out_free_hw:
 	wl1251_free_hw(wl);
 	return ret;
 }
@@ -266,9 +292,11 @@
 static void __devexit wl1251_sdio_remove(struct sdio_func *func)
 {
 	struct wl1251 *wl = sdio_get_drvdata(func);
+	struct wl1251_sdio *wl_sdio = wl->if_priv;
 
 	if (wl->irq)
 		free_irq(wl->irq, wl);
+	kfree(wl_sdio);
 	wl1251_free_hw(wl);
 
 	sdio_claim_host(func);
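
The wl1251_sdio.c changes above stop storing the bare struct sdio_func in wl->if_priv and instead wrap it, together with a cached copy of the last value written to the ELP register, in the new struct wl1251_sdio. The cache exists because, as the added comment explains, the register only supports read-after-write access: the read path now issues sdio_writeb_readb() with the remembered value rather than a plain sdio_readb(). A compilable sketch of that wrap-the-handle-and-cache pattern; every name here is illustrative, not the driver's API:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct bus_handle { int id; };		/* stand-in for struct sdio_func */

struct if_priv {
	struct bus_handle *bus;		/* previously stored directly in if_priv */
	uint32_t elp_val;		/* last value written to the ELP register */
};

static void elp_write(struct if_priv *p, uint32_t val)
{
	/* real code: sdio_writeb(...); on success, remember what we wrote */
	p->elp_val = val;
}

static uint32_t elp_read(struct if_priv *p)
{
	/* real code: sdio_writeb_readb(func, p->elp_val, addr, &ret) --
	 * the cached value is replayed on the write half of the RAW access */
	return p->elp_val;		/* placeholder for the device's answer */
}

int main(void)
{
	struct bus_handle bus = { 1 };
	struct if_priv priv = { &bus, 0 };

	elp_write(&priv, 0x4);
	printf("ELP read back: 0x%" PRIx32 "\n", elp_read(&priv));
	return 0;
}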
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index 55856c6..65c4be8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -109,7 +109,7 @@
 	unsigned xfer_pad:1;
 
 	unsigned reserved:7;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct tx_double_buffer_desc {
@@ -156,7 +156,7 @@
 	u8 num_mem_blocks;
 
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 enum {
 	TX_SUCCESS              = 0,
@@ -208,7 +208,7 @@
 
 	/* See done_1 */
 	u8 done_2;
-} __attribute__ ((packed));
+} __packed;
 
 static inline int wl1251_tx_get_queue(int queue)
 {
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 6f1b6b5..ec09f0d 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -33,6 +33,7 @@
 #include <net/mac80211.h>
 
 #include "wl1271_conf.h"
+#include "wl1271_ini.h"
 
 #define DRIVER_NAME "wl1271"
 #define DRIVER_PREFIX DRIVER_NAME ": "
@@ -116,33 +117,6 @@
 #define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
 #define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
 
-/* NVS data structure */
-#define WL1271_NVS_SECTION_SIZE                  468
-
-#define WL1271_NVS_GENERAL_PARAMS_SIZE            57
-#define WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED \
-	(WL1271_NVS_GENERAL_PARAMS_SIZE + 1)
-#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE         17
-#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED \
-	(WL1271_NVS_STAT_RADIO_PARAMS_SIZE + 1)
-#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE          65
-#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED \
-	(WL1271_NVS_DYN_RADIO_PARAMS_SIZE + 1)
-#define WL1271_NVS_FEM_COUNT                       2
-#define WL1271_NVS_INI_SPARE_SIZE                124
-
-struct wl1271_nvs_file {
-	/* NVS section */
-	u8 nvs[WL1271_NVS_SECTION_SIZE];
-
-	/* INI section */
-	u8 general_params[WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED];
-	u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED];
-	u8 dyn_radio_params[WL1271_NVS_FEM_COUNT]
-			   [WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED];
-	u8 ini_spare[WL1271_NVS_INI_SPARE_SIZE];
-} __attribute__ ((packed));
-
 /*
  * Enable/disable 802.11a support for WL1273
  */
@@ -317,7 +291,7 @@
 	__le32 tx_released_blks[NUM_TX_QUEUES];
 	__le32 fw_localtime;
 	__le32 padding[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1271_rx_mem_pool_addr {
 	u32 addr;
@@ -325,6 +299,7 @@
 };
 
 struct wl1271_scan {
+	struct cfg80211_scan_request *req;
 	u8 state;
 	u8 ssid[IW_ESSID_MAX_SIZE+1];
 	size_t ssid_len;
@@ -375,6 +350,7 @@
 #define WL1271_FLAG_IRQ_PENDING        (9)
 #define WL1271_FLAG_IRQ_RUNNING       (10)
 #define WL1271_FLAG_IDLE              (11)
+#define WL1271_FLAG_IDLE_REQUESTED    (12)
 	unsigned long flags;
 
 	struct wl1271_partition_set part;
@@ -421,6 +397,7 @@
 
 	/* Pending TX frames */
 	struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
+	int tx_frames_cnt;
 
 	/* Security sequence number counters */
 	u8 tx_security_last_seq;
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 420e7e2..4c87e60 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -75,7 +75,7 @@
 
 	/* payload length (not including headers) */
 	__le16 len;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_error_counter {
 	struct acx_header header;
@@ -98,7 +98,7 @@
 	/* the number of missed sequence numbers in the sequential */
 	/* values of frame seq numbers */
 	__le32 seq_num_miss;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_revision {
 	struct acx_header header;
@@ -127,7 +127,7 @@
 	 * bits 24 - 31: Chip ID - The WiLink chip ID.
 	 */
 	__le32 hw_version;
-} __attribute__ ((packed));
+} __packed;
 
 enum wl1271_psm_mode {
 	/* Active mode */
@@ -149,7 +149,7 @@
 	/* 2 - ELP mode: Deep / Max sleep*/
 	u8  sleep_auth;
 	u8  padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 enum {
 	HOSTIF_PCI_MASTER_HOST_INDIRECT,
@@ -187,7 +187,7 @@
 	 * firmware discards the MSDU.
 	 */
 	__le32 lifetime;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * RX Config Options Table
@@ -275,13 +275,13 @@
 
 	__le32 config_options;
 	__le32 filter_options;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_packet_detection {
 	struct acx_header header;
 
 	__le32 threshold;
-} __attribute__ ((packed));
+} __packed;
 
 
 enum acx_slot_type {
@@ -299,7 +299,7 @@
 	u8 wone_index; /* Reserved */
 	u8 slot_time;
 	u8 reserved[6];
-} __attribute__ ((packed));
+} __packed;
 
 
 #define ACX_MC_ADDRESS_GROUP_MAX	(8)
@@ -312,21 +312,21 @@
 	u8 num_groups;
 	u8 pad[2];
 	u8 mac_table[ADDRESS_GROUP_MAX_LEN];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_rx_timeout {
 	struct acx_header header;
 
 	__le16 ps_poll_timeout;
 	__le16 upsd_timeout;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_rts_threshold {
 	struct acx_header header;
 
 	__le16 threshold;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_beacon_filter_option {
 	struct acx_header header;
@@ -342,7 +342,7 @@
 	 */
 	u8 max_num_beacons;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * ACXBeaconFilterEntry (not 221)
@@ -383,21 +383,21 @@
 	u8 num_ie;
 	u8 pad[3];
 	u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_conn_monit_params {
        struct acx_header header;
 
        __le32 synch_fail_thold; /* number of beacons missed */
        __le32 bss_lose_timeout; /* number of TU's from synch fail */
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_bt_wlan_coex {
 	struct acx_header header;
 
 	u8 enable;
 	u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_bt_wlan_coex_param {
 	struct acx_header header;
@@ -405,7 +405,7 @@
 	__le32 params[CONF_SG_PARAMS_MAX];
 	u8 param_idx;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_dco_itrim_params {
 	struct acx_header header;
@@ -413,7 +413,7 @@
 	u8 enable;
 	u8 padding[3];
 	__le32 timeout;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_energy_detection {
 	struct acx_header header;
@@ -422,7 +422,7 @@
 	__le16 rx_cca_threshold;
 	u8 tx_energy_detection;
 	u8 pad;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_beacon_broadcast {
 	struct acx_header header;
@@ -436,14 +436,14 @@
 	/* Consecutive PS Poll failures before updating the host */
 	u8 ps_poll_threshold;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_event_mask {
 	struct acx_header header;
 
 	__le32 event_mask;
 	__le32 high_event_mask; /* Unused */
-} __attribute__ ((packed));
+} __packed;
 
 #define CFG_RX_FCS		BIT(2)
 #define CFG_RX_ALL_GOOD		BIT(3)
@@ -488,14 +488,14 @@
 
 	__le32 options;
 	__le32 data_flow_options;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_current_tx_power {
 	struct acx_header header;
 
 	u8  current_tx_power;
 	u8  padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_wake_up_condition {
 	struct acx_header header;
@@ -503,7 +503,7 @@
 	u8 wake_up_event; /* Only one bit can be set */
 	u8 listen_interval;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_aid {
 	struct acx_header header;
@@ -513,7 +513,7 @@
 	 */
 	__le16 aid;
 	u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
 
 enum acx_preamble_type {
 	ACX_PREAMBLE_LONG = 0,
@@ -529,7 +529,7 @@
 	 */
 	u8 preamble;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 enum acx_ctsprotect_type {
 	CTSPROTECT_DISABLE = 0,
@@ -540,11 +540,11 @@
 	struct acx_header header;
 	u8 ctsprotect;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_tx_statistics {
 	__le32 internal_desc_overflow;
-}  __attribute__ ((packed));
+}  __packed;
 
 struct acx_rx_statistics {
 	__le32 out_of_mem;
@@ -555,14 +555,14 @@
 	__le32 xfr_hint_trig;
 	__le32 path_reset;
 	__le32 reset_counter;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_dma_statistics {
 	__le32 rx_requested;
 	__le32 rx_errors;
 	__le32 tx_requested;
 	__le32 tx_errors;
-}  __attribute__ ((packed));
+}  __packed;
 
 struct acx_isr_statistics {
 	/* host command complete */
@@ -621,7 +621,7 @@
 
 	/* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
 	__le32 low_rssi;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_wep_statistics {
 	/* WEP address keys configured */
@@ -643,7 +643,7 @@
 
 	/* WEP decrypt interrupts */
 	__le32 interrupt;
-} __attribute__ ((packed));
+} __packed;
 
 #define ACX_MISSED_BEACONS_SPREAD 10
 
@@ -703,12 +703,12 @@
 
 	/* the number of beacons in awake mode */
 	__le32 rcvd_awake_beacons;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_mic_statistics {
 	__le32 rx_pkts;
 	__le32 calc_failure;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_aes_statistics {
 	__le32 encrypt_fail;
@@ -717,7 +717,7 @@
 	__le32 decrypt_packets;
 	__le32 encrypt_interrupt;
 	__le32 decrypt_interrupt;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_event_statistics {
 	__le32 heart_beat;
@@ -728,7 +728,7 @@
 	__le32 oom_late;
 	__le32 phy_transmit_error;
 	__le32 tx_stuck;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_ps_statistics {
 	__le32 pspoll_timeouts;
@@ -738,7 +738,7 @@
 	__le32 pspoll_max_apturn;
 	__le32 pspoll_utilization;
 	__le32 upsd_utilization;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_rxpipe_statistics {
 	__le32 rx_prep_beacon_drop;
@@ -746,7 +746,7 @@
 	__le32 beacon_buffer_thres_host_int_trig_rx_data;
 	__le32 missed_beacon_host_int_trig_rx_data;
 	__le32 tx_xfr_host_int_trig_rx_data;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_statistics {
 	struct acx_header header;
@@ -762,7 +762,7 @@
 	struct acx_event_statistics event;
 	struct acx_ps_statistics ps;
 	struct acx_rxpipe_statistics rxpipe;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_rate_class {
 	__le32 enabled_rates;
@@ -780,7 +780,7 @@
 
 	__le32 rate_class_cnt;
 	struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_ac_cfg {
 	struct acx_header header;
@@ -790,7 +790,7 @@
 	u8 aifsn;
 	u8 reserved;
 	__le16 tx_op_limit;
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_tid_config {
 	struct acx_header header;
@@ -801,19 +801,19 @@
 	u8 ack_policy;
 	u8 padding[3];
 	__le32 apsd_conf[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_frag_threshold {
 	struct acx_header header;
 	__le16 frag_threshold;
 	u8 padding[2];
-} __attribute__ ((packed));
+} __packed;
 
 struct acx_tx_config_options {
 	struct acx_header header;
 	__le16 tx_compl_timeout;     /* msec */
 	__le16 tx_compl_threshold;   /* number of packets */
-} __attribute__ ((packed));
+} __packed;
 
 #define ACX_RX_MEM_BLOCKS     70
 #define ACX_TX_MIN_MEM_BLOCKS 40
@@ -828,7 +828,7 @@
 	u8 num_stations;
 	u8 num_ssid_profiles;
 	__le32 total_tx_descriptors;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1271_acx_mem_map {
 	struct acx_header header;
@@ -872,7 +872,7 @@
 	u8 *rx_cbuf;
 	__le32 rx_ctrl;
 	__le32 tx_ctrl;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1271_acx_rx_config_opt {
 	struct acx_header header;
@@ -882,7 +882,7 @@
 	__le16 timeout;
 	u8 queue_type;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct wl1271_acx_bet_enable {
@@ -891,7 +891,7 @@
 	u8 enable;
 	u8 max_consecutive;
 	u8 padding[2];
-} __attribute__ ((packed));
+} __packed;
 
 #define ACX_IPV4_VERSION 4
 #define ACX_IPV6_VERSION 6
@@ -905,7 +905,7 @@
 			       requests directed to this IP address will pass
 			       through. For IPv4, the first four bytes are
 			       used. */
-} __attribute__((packed));
+} __packed;
 
 struct wl1271_acx_pm_config {
 	struct acx_header header;
@@ -913,14 +913,14 @@
 	__le32 host_clk_settling_time;
 	u8 host_fast_wakeup_support;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1271_acx_keep_alive_mode {
 	struct acx_header header;
 
 	u8 enabled;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 enum {
 	ACX_KEEP_ALIVE_NO_TX = 0,
@@ -940,7 +940,7 @@
 	u8 tpl_validation;
 	u8 trigger;
 	u8 padding;
-} __attribute__ ((packed));
+} __packed;
 
 enum {
 	WL1271_ACX_TRIG_TYPE_LEVEL = 0,
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 19393e2..530678e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -212,8 +212,8 @@
 
 	gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
 
-	memcpy(gen_parms->params, wl->nvs->general_params,
-	       WL1271_NVS_GENERAL_PARAMS_SIZE);
+	memcpy(&gen_parms->general_params, &wl->nvs->general_params,
+	       sizeof(struct wl1271_ini_general_params));
 
 	ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
 	if (ret < 0)
@@ -238,13 +238,20 @@
 
 	radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
 
-	memcpy(radio_parms->stat_radio_params, wl->nvs->stat_radio_params,
-	       WL1271_NVS_STAT_RADIO_PARAMS_SIZE);
-	memcpy(radio_parms->dyn_radio_params,
-	       wl->nvs->dyn_radio_params[rparam->fem],
-	       WL1271_NVS_DYN_RADIO_PARAMS_SIZE);
+	/* 2.4GHz parameters */
+	memcpy(&radio_parms->static_params_2, &wl->nvs->stat_radio_params_2,
+	       sizeof(struct wl1271_ini_band_params_2));
+	memcpy(&radio_parms->dyn_params_2,
+	       &wl->nvs->dyn_radio_params_2[rparam->fem].params,
+	       sizeof(struct wl1271_ini_fem_params_2));
 
-	/* FIXME: current NVS is missing 5GHz parameters */
+	/* 5GHz parameters */
+	memcpy(&radio_parms->static_params_5,
+	       &wl->nvs->stat_radio_params_5,
+	       sizeof(struct wl1271_ini_band_params_5));
+	memcpy(&radio_parms->dyn_params_5,
+	       &wl->nvs->dyn_radio_params_5[rparam->fem].params,
+	       sizeof(struct wl1271_ini_fem_params_5));
 
 	wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
 		    radio_parms, sizeof(*radio_parms));
@@ -329,12 +336,6 @@
 	join->channel = wl->channel;
 	join->ssid_len = wl->ssid_len;
 	memcpy(join->ssid, wl->ssid, wl->ssid_len);
-	join->ctrl = WL1271_JOIN_CMD_CTRL_TX_FLUSH;
-
-	/* increment the session counter */
-	wl->session_counter++;
-	if (wl->session_counter >= SESSION_COUNTER_MAX)
-		wl->session_counter = 0;
 
 	join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
 
@@ -517,7 +518,7 @@
 	ps_params->send_null_data = send;
 	ps_params->retries = 5;
 	ps_params->hang_over_period = 1;
-	ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
+	ps_params->null_data_rate = cpu_to_le32(wl->basic_rate_set);
 
 	ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
 			      sizeof(*ps_params), 0);
@@ -567,7 +568,7 @@
 }
 
 int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
-		    const u8 *ie, size_t ie_len, u8 active_scan,
+		    struct cfg80211_scan_request *req, u8 active_scan,
 		    u8 high_prio, u8 band, u8 probe_requests)
 {
 
@@ -648,7 +649,7 @@
 	}
 
 	ret = wl1271_cmd_build_probe_req(wl, ssid, ssid_len,
-					 ie, ie_len, ieee_band);
+					 req->ie, req->ie_len, ieee_band);
 	if (ret < 0) {
 		wl1271_error("PROBE request template failed");
 		goto out;
@@ -684,7 +685,9 @@
 				memcpy(wl->scan.ssid, ssid, ssid_len);
 			} else
 				wl->scan.ssid_len = 0;
-		}
+			wl->scan.req = req;
+		} else
+			wl->scan.req = NULL;
 	}
 
 	ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index f2820b4..f5745d8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -42,7 +42,7 @@
 int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
 			   size_t len);
 int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
-		    const u8 *ie, size_t ie_len, u8 active_scan,
+		    struct cfg80211_scan_request *req, u8 active_scan,
 		    u8 high_prio, u8 band, u8 probe_requests);
 int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
 			    void *buf, size_t buf_len, int index, u32 rates);
@@ -136,14 +136,14 @@
 	__le16 status;
 	/* payload */
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 #define WL1271_CMD_MAX_PARAMS 572
 
 struct wl1271_command {
 	struct wl1271_cmd_header header;
 	u8  parameters[WL1271_CMD_MAX_PARAMS];
-} __attribute__ ((packed));
+} __packed;
 
 enum {
 	CMD_MAILBOX_IDLE		=  0,
@@ -196,7 +196,7 @@
 	   of this field is the Host in WRITE command or the Wilink in READ
 	   command. */
 	u8 value[MAX_READ_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 #define CMDMBOX_HEADER_LEN 4
 #define CMDMBOX_INFO_ELEM_HEADER_LEN 4
@@ -243,14 +243,14 @@
 	u8 ssid[IW_ESSID_MAX_SIZE];
 	u8 ctrl; /* JOIN_CMD_CTRL_* */
 	u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_enabledisable_path {
 	struct wl1271_cmd_header header;
 
 	u8 channel;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 #define WL1271_RATE_AUTOMATIC  0
 
@@ -266,7 +266,7 @@
 	u8 aflags;
 	u8 reserved;
 	u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 #define TIM_ELE_ID    5
 #define PARTIAL_VBM_MAX    251
@@ -278,7 +278,7 @@
 	u8 dtim_period;
 	u8 bitmap_ctrl;
 	u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */
-} __attribute__ ((packed));
+} __packed;
 
 enum wl1271_cmd_ps_mode {
 	STATION_ACTIVE_MODE,
@@ -298,7 +298,7 @@
 	  */
 	u8 hang_over_period;
 	__le32 null_data_rate;
-} __attribute__ ((packed));
+} __packed;
 
 /* HW encryption keys */
 #define NUM_ACCESS_CATEGORIES_COPY 4
@@ -348,7 +348,7 @@
 	u8 key[MAX_KEY_SIZE];
 	__le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
 	__le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
-} __attribute__ ((packed));
+} __packed;
 
 
 #define WL1271_SCAN_MAX_CHANNELS       24
@@ -385,7 +385,7 @@
 	u8 use_ssid_list;
 	u8 scan_tag;
 	u8 padding2;
-} __attribute__ ((packed));
+} __packed;
 
 struct basic_scan_channel_params {
 	/* Duration in TU to wait for frames on a channel for active scan */
@@ -400,25 +400,25 @@
 	u8 dfs_candidate;
 	u8 activity_detected;
 	u8 pad;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1271_cmd_scan {
 	struct wl1271_cmd_header header;
 
 	struct basic_scan_params params;
 	struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1271_cmd_trigger_scan_to {
 	struct wl1271_cmd_header header;
 
 	__le32 timeout;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1271_cmd_test_header {
 	u8 id;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 enum wl1271_channel_tune_bands {
 	WL1271_CHANNEL_TUNE_BAND_2_4,
@@ -439,25 +439,31 @@
 
 	struct wl1271_cmd_test_header test;
 
-	u8 params[WL1271_NVS_GENERAL_PARAMS_SIZE];
-	s8 reserved[23];
-} __attribute__ ((packed));
+	struct wl1271_ini_general_params general_params;
 
-#define WL1271_STAT_RADIO_PARAMS_5_SIZE    29
-#define WL1271_DYN_RADIO_PARAMS_5_SIZE    104
+	u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+	u8 sr_sen_n_p;
+	u8 sr_sen_n_p_gain;
+	u8 sr_sen_nrn;
+	u8 sr_sen_prn;
+	u8 padding[3];
+} __packed;
 
 struct wl1271_radio_parms_cmd {
 	struct wl1271_cmd_header header;
 
 	struct wl1271_cmd_test_header test;
 
-	u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE];
-	u8 stat_radio_params_5[WL1271_STAT_RADIO_PARAMS_5_SIZE];
+	/* Static radio parameters */
+	struct wl1271_ini_band_params_2 static_params_2;
+	struct wl1271_ini_band_params_5 static_params_5;
 
-	u8 dyn_radio_params[WL1271_NVS_DYN_RADIO_PARAMS_SIZE];
-	u8 reserved;
-	u8 dyn_radio_params_5[WL1271_DYN_RADIO_PARAMS_5_SIZE];
-} __attribute__ ((packed));
+	/* Dynamic radio parameters */
+	struct wl1271_ini_fem_params_2 dyn_params_2;
+	u8 padding2;
+	struct wl1271_ini_fem_params_5 dyn_params_5;
+	u8 padding3[2];
+} __packed;
 
 struct wl1271_cmd_cal_channel_tune {
 	struct wl1271_cmd_header header;
@@ -468,7 +474,7 @@
 	u8 channel;
 
 	__le16 radio_status;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1271_cmd_cal_update_ref_point {
 	struct wl1271_cmd_header header;
@@ -479,7 +485,7 @@
 	__le32 ref_detector;
 	u8  sub_band;
 	u8  padding[3];
-} __attribute__ ((packed));
+} __packed;
 
 #define MAX_TLV_LENGTH         400
 #define	MAX_NVS_VERSION_LENGTH 12
@@ -501,7 +507,7 @@
 
 	u8  sub_band_mask;
 	u8  padding2;
-} __attribute__ ((packed));
+} __packed;
 
 
 /*
@@ -529,6 +535,6 @@
 	u8  type;
 
 	u8  padding;
-} __attribute__ ((packed));
+} __packed;
 
 #endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index cf37aa6..ca52cde 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -43,11 +43,11 @@
 			clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
 			/* FIXME: ie missing! */
 			wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
-						NULL, 0,
-						wl->scan.active,
-						wl->scan.high_prio,
-						WL1271_SCAN_BAND_5_GHZ,
-						wl->scan.probe_requests);
+					wl->scan.req,
+					wl->scan.active,
+					wl->scan.high_prio,
+					WL1271_SCAN_BAND_5_GHZ,
+					wl->scan.probe_requests);
 		} else {
 			mutex_unlock(&wl->mutex);
 			ieee80211_scan_completed(wl->hw, false);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 5837100..43d5aea 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -85,7 +85,7 @@
 	__le32 report_1;
 	__le32 report_2;
 	__le32 report_3;
-} __attribute__ ((packed));
+} __packed;
 
 #define NUM_OF_RSSI_SNR_TRIGGERS 8
 
@@ -116,7 +116,7 @@
 	u8 ps_status;
 
 	u8 reserved_5[29];
-} __attribute__ ((packed));
+} __packed;
 
 int wl1271_event_unmask(struct wl1271 *wl);
 void wl1271_event_mbox_config(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ini.h b/drivers/net/wireless/wl12xx/wl1271_ini.h
new file mode 100644
index 0000000..2313047
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_ini.h
@@ -0,0 +1,123 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL1271_INI_H__
+#define __WL1271_INI_H__
+
+#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16
+
+struct wl1271_ini_general_params {
+	u8 ref_clock;
+	u8 settling_time;
+	u8 clk_valid_on_wakeup;
+	u8 dc2dc_mode;
+	u8 dual_mode_select;
+	u8 tx_bip_fem_auto_detect;
+	u8 tx_bip_fem_manufacturer;
+	u8 general_settings;
+	u8 sr_state;
+	u8 srf1[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+	u8 srf2[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+	u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+} __packed;
+
+#define WL1271_INI_RSSI_PROCESS_COMPENS_SIZE 15
+
+struct wl1271_ini_band_params_2 {
+	u8 rx_trace_insertion_loss;
+	u8 tx_trace_loss;
+	u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
+} __packed;
+
+#define WL1271_INI_RATE_GROUP_COUNT 6
+#define WL1271_INI_CHANNEL_COUNT_2 14
+
+struct wl1271_ini_fem_params_2 {
+	__le16 tx_bip_ref_pd_voltage;
+	u8 tx_bip_ref_power;
+	u8 tx_bip_ref_offset;
+	u8 tx_per_rate_pwr_limits_normal[WL1271_INI_RATE_GROUP_COUNT];
+	u8 tx_per_rate_pwr_limits_degraded[WL1271_INI_RATE_GROUP_COUNT];
+	u8 tx_per_rate_pwr_limits_extreme[WL1271_INI_RATE_GROUP_COUNT];
+	u8 tx_per_chan_pwr_limits_11b[WL1271_INI_CHANNEL_COUNT_2];
+	u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_2];
+	u8 tx_pd_vs_rate_offsets[WL1271_INI_RATE_GROUP_COUNT];
+	u8 tx_ibias[WL1271_INI_RATE_GROUP_COUNT];
+	u8 rx_fem_insertion_loss;
+	u8 degraded_low_to_normal_thr;
+	u8 normal_to_degraded_high_thr;
+} __packed;
+
+#define WL1271_INI_CHANNEL_COUNT_5 35
+#define WL1271_INI_SUB_BAND_COUNT_5 7
+
+struct wl1271_ini_band_params_5 {
+	u8 rx_trace_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
+	u8 tx_trace_loss[WL1271_INI_SUB_BAND_COUNT_5];
+	u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
+} __packed;
+
+struct wl1271_ini_fem_params_5 {
+	__le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5];
+	u8 tx_bip_ref_power[WL1271_INI_SUB_BAND_COUNT_5];
+	u8 tx_bip_ref_offset[WL1271_INI_SUB_BAND_COUNT_5];
+	u8 tx_per_rate_pwr_limits_normal[WL1271_INI_RATE_GROUP_COUNT];
+	u8 tx_per_rate_pwr_limits_degraded[WL1271_INI_RATE_GROUP_COUNT];
+	u8 tx_per_rate_pwr_limits_extreme[WL1271_INI_RATE_GROUP_COUNT];
+	u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_5];
+	u8 tx_pd_vs_rate_offsets[WL1271_INI_RATE_GROUP_COUNT];
+	u8 tx_ibias[WL1271_INI_RATE_GROUP_COUNT];
+	u8 rx_fem_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
+	u8 degraded_low_to_normal_thr;
+	u8 normal_to_degraded_high_thr;
+} __packed;
+
+
+/* NVS data structure */
+#define WL1271_INI_NVS_SECTION_SIZE		     468
+#define WL1271_INI_FEM_MODULE_COUNT                  2
+
+#define WL1271_INI_LEGACY_NVS_FILE_SIZE              800
+
+struct wl1271_nvs_file {
+	/* NVS section */
+	u8 nvs[WL1271_INI_NVS_SECTION_SIZE];
+
+	/* INI section */
+	struct wl1271_ini_general_params general_params;
+	u8 padding1;
+	struct wl1271_ini_band_params_2 stat_radio_params_2;
+	u8 padding2;
+	struct {
+		struct wl1271_ini_fem_params_2 params;
+		u8 padding;
+	} dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT];
+	struct wl1271_ini_band_params_5 stat_radio_params_5;
+	u8 padding3;
+	struct {
+		struct wl1271_ini_fem_params_5 params;
+		u8 padding;
+	} dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
+} __packed;
+
+#endif
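
The new wl1271_ini.h gives names and structure to what wl1271.h described as flat byte arrays; the packed field counts are arranged so the struct sizes still line up with the old array sizes (for example the 57-byte general parameters and the 17-byte 2.4 GHz static radio parameters that the removed WL1271_NVS_* constants specified earlier in this patch). A quick host-side check of that arithmetic, assuming the layout shown above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define __packed __attribute__((packed))

#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16
#define WL1271_INI_RSSI_PROCESS_COMPENS_SIZE 15

struct wl1271_ini_general_params {
	uint8_t ref_clock;
	uint8_t settling_time;
	uint8_t clk_valid_on_wakeup;
	uint8_t dc2dc_mode;
	uint8_t dual_mode_select;
	uint8_t tx_bip_fem_auto_detect;
	uint8_t tx_bip_fem_manufacturer;
	uint8_t general_settings;
	uint8_t sr_state;
	uint8_t srf1[WL1271_INI_MAX_SMART_REFLEX_PARAM];
	uint8_t srf2[WL1271_INI_MAX_SMART_REFLEX_PARAM];
	uint8_t srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM];
} __packed;

struct wl1271_ini_band_params_2 {
	uint8_t rx_trace_insertion_loss;
	uint8_t tx_trace_loss;
	uint8_t rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
} __packed;

int main(void)
{
	/* 57 and 17 are the flat-array sizes removed from wl1271.h above */
	assert(sizeof(struct wl1271_ini_general_params) == 57);
	assert(sizeof(struct wl1271_ini_band_params_2) == 17);
	printf("general: %zu, 2.4 GHz static: %zu\n",
	       sizeof(struct wl1271_ini_general_params),
	       sizeof(struct wl1271_ini_band_params_2));
	return 0;
}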
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index b7d9137..7a14da5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -566,14 +566,21 @@
 		return ret;
 	}
 
-	if (fw->size != sizeof(struct wl1271_nvs_file)) {
+	/*
+	 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band
+	 * configurations) can be removed when those NVS files stop floating
+	 * around.
+	 */
+	if (fw->size != sizeof(struct wl1271_nvs_file) &&
+	    (fw->size != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
+	     wl1271_11a_enabled())) {
 		wl1271_error("nvs size is not as expected: %zu != %zu",
 			     fw->size, sizeof(struct wl1271_nvs_file));
 		ret = -EILSEQ;
 		goto out;
 	}
 
-	wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+	wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
 
 	if (!wl->nvs) {
 		wl1271_error("could not allocate memory for the nvs file");
@@ -581,7 +588,7 @@
 		goto out;
 	}
 
-	memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file));
+	memcpy(wl->nvs, fw->data, fw->size);
 
 out:
 	release_firmware(fw);
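
The size check above now accepts two images: a full sizeof(struct wl1271_nvs_file), or a legacy WL1271_INI_LEGACY_NVS_FILE_SIZE image as long as 802.11a support is compiled out (a legacy image carries no 5 GHz tables). Since the condition is written in negated form, here is the same rule restated affirmatively as a standalone predicate; the full-size constant below is hypothetical, only the 800-byte legacy size comes from the header:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define FULL_NVS_SIZE	1024	/* hypothetical stand-in for sizeof(struct wl1271_nvs_file) */
#define LEGACY_NVS_SIZE	 800	/* WL1271_INI_LEGACY_NVS_FILE_SIZE */

static bool nvs_size_ok(size_t len, bool want_11a)
{
	if (len == FULL_NVS_SIZE)
		return true;
	/* a legacy image lacks the 5 GHz tables, so it is only usable
	 * when 802.11a support is disabled */
	return len == LEGACY_NVS_SIZE && !want_11a;
}

int main(void)
{
	printf("%d %d %d\n",
	       nvs_size_ok(FULL_NVS_SIZE, true),	/* 1 */
	       nvs_size_ok(LEGACY_NVS_SIZE, false),	/* 1 */
	       nvs_size_ok(LEGACY_NVS_SIZE, true));	/* 0 */
	return 0;
}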
@@ -1044,7 +1051,7 @@
 	mutex_lock(&wl->mutex);
 
 	/* let's notify MAC80211 about the remaining pending TX frames */
-	wl1271_tx_flush(wl);
+	wl1271_tx_reset(wl);
 	wl1271_power_off(wl);
 
 	memset(wl->bssid, 0, ETH_ALEN);
@@ -1241,6 +1248,42 @@
 	return rate;
 }
 
+static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
+{
+	int ret;
+
+	if (idle) {
+		if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+			ret = wl1271_unjoin(wl);
+			if (ret < 0)
+				goto out;
+		}
+		wl->rate_set = wl1271_min_rate_get(wl);
+		wl->sta_rate_set = 0;
+		ret = wl1271_acx_rate_policies(wl);
+		if (ret < 0)
+			goto out;
+		ret = wl1271_acx_keep_alive_config(
+			wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+			ACX_KEEP_ALIVE_TPL_INVALID);
+		if (ret < 0)
+			goto out;
+		set_bit(WL1271_FLAG_IDLE, &wl->flags);
+	} else {
+		/* increment the session counter */
+		wl->session_counter++;
+		if (wl->session_counter >= SESSION_COUNTER_MAX)
+			wl->session_counter = 0;
+		ret = wl1271_dummy_join(wl);
+		if (ret < 0)
+			goto out;
+		clear_bit(WL1271_FLAG_IDLE, &wl->flags);
+	}
+
+out:
+	return ret;
+}
+
 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
 {
 	struct wl1271 *wl = hw->priv;
@@ -1255,6 +1298,15 @@
 		     conf->power_level,
 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
 
+	/*
+	 * mac80211 will go to idle nearly immediately after transmitting some
+	 * frames, such as the deauth. To make sure those frames reach the air,
+	 * wait here until the TX queue is fully flushed.
+	 */
+	if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+	    (conf->flags & IEEE80211_CONF_IDLE))
+		wl1271_tx_flush(wl);
+
 	mutex_lock(&wl->mutex);
 
 	if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -1295,22 +1347,9 @@
 	}
 
 	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
-		if (conf->flags & IEEE80211_CONF_IDLE &&
-		    test_bit(WL1271_FLAG_JOINED, &wl->flags))
-			wl1271_unjoin(wl);
-		else if (!(conf->flags & IEEE80211_CONF_IDLE))
-			wl1271_dummy_join(wl);
-
-		if (conf->flags & IEEE80211_CONF_IDLE) {
-			wl->rate_set = wl1271_min_rate_get(wl);
-			wl->sta_rate_set = 0;
-			wl1271_acx_rate_policies(wl);
-			wl1271_acx_keep_alive_config(
-				wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
-				ACX_KEEP_ALIVE_TPL_INVALID);
-			set_bit(WL1271_FLAG_IDLE, &wl->flags);
-		} else
-			clear_bit(WL1271_FLAG_IDLE, &wl->flags);
+		ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE);
+		if (ret < 0)
+			wl1271_warning("idle mode change failed %d", ret);
 	}
 
 	if (conf->flags & IEEE80211_CONF_PS &&
@@ -1595,13 +1634,11 @@
 		goto out;
 
 	if (wl1271_11a_enabled())
-		ret = wl1271_cmd_scan(hw->priv, ssid, len,
-				      req->ie, req->ie_len, 1, 0,
-				      WL1271_SCAN_BAND_DUAL, 3);
+		ret = wl1271_cmd_scan(hw->priv, ssid, len, req,
+				      1, 0, WL1271_SCAN_BAND_DUAL, 3);
 	else
-		ret = wl1271_cmd_scan(hw->priv, ssid, len,
-				      req->ie, req->ie_len, 1, 0,
-				      WL1271_SCAN_BAND_2_4_GHZ, 3);
+		ret = wl1271_cmd_scan(hw->priv, ssid, len, req,
+				      1, 0, WL1271_SCAN_BAND_2_4_GHZ, 3);
 
 	wl1271_ps_elp_sleep(wl);
 
@@ -1991,7 +2028,7 @@
 };
 
 /* mapping to indexes for wl1271_rates */
-const static u8 wl1271_rate_to_idx_2ghz[] = {
+static const u8 wl1271_rate_to_idx_2ghz[] = {
 	/* MCS rates are used only with 11n */
 	CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
 	CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
@@ -2103,7 +2140,7 @@
 };
 
 /* mapping to indexes for wl1271_rates_5ghz */
-const static u8 wl1271_rate_to_idx_5ghz[] = {
+static const u8 wl1271_rate_to_idx_5ghz[] = {
 	/* MCS rates are used only with 11n */
 	CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
 	CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
@@ -2139,7 +2176,7 @@
 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
 };
 
-const static u8 *wl1271_band_rate_to_idx[] = {
+static const u8 *wl1271_band_rate_to_idx[] = {
 	[IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
 	[IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
 };
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index b89be47..13a2323 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -113,7 +113,7 @@
 	u8  process_id;
 	u8  pad_len;
 	u8  reserved;
-} __attribute__ ((packed));
+} __packed;
 
 void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
 u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/wl1271_sdio.c
index d3d6f30..7059b5c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1271_sdio.c
@@ -28,7 +28,7 @@
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/card.h>
-#include <plat/gpio.h>
+#include <linux/gpio.h>
 
 #include "wl1271.h"
 #include "wl12xx_80211.h"
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
index 554deb4..6e0952f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -199,7 +199,14 @@
 	buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
 	len = nla_len(tb[WL1271_TM_ATTR_DATA]);
 
-	if (len != sizeof(struct wl1271_nvs_file)) {
+	/*
+	 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band
+	 * configurations) can be removed when those NVS files stop floating
+	 * around.
+	 */
+	if (len != sizeof(struct wl1271_nvs_file) &&
+	    (len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
+	     wl1271_11a_enabled())) {
 		wl1271_error("nvs size is not as expected: %zu != %zu",
 			     len, sizeof(struct wl1271_nvs_file));
 		return -EMSGSIZE;
@@ -209,7 +216,7 @@
 
 	kfree(wl->nvs);
 
-	wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+	wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
 	if (!wl->nvs) {
 		wl1271_error("could not allocate memory for the nvs file");
 		ret = -ENOMEM;
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 62db795..c592cc2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -36,6 +36,7 @@
 	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
 		if (wl->tx_frames[i] == NULL) {
 			wl->tx_frames[i] = skb;
+			wl->tx_frames_cnt++;
 			return i;
 		}
 
@@ -73,8 +74,10 @@
 		wl1271_debug(DEBUG_TX,
 			     "tx_allocate: size: %d, blocks: %d, id: %d",
 			     total_len, total_blocks, id);
-	} else
+	} else {
 		wl->tx_frames[id] = NULL;
+		wl->tx_frames_cnt--;
+	}
 
 	return ret;
 }
@@ -358,6 +361,7 @@
 	/* return the packet to the stack */
 	ieee80211_tx_status(wl->hw, skb);
 	wl->tx_frames[result->id] = NULL;
+	wl->tx_frames_cnt--;
 }
 
 /* Called upon reception of a TX complete interrupt */
@@ -412,7 +416,7 @@
 }
 
 /* caller must hold wl->mutex */
-void wl1271_tx_flush(struct wl1271 *wl)
+void wl1271_tx_reset(struct wl1271 *wl)
 {
 	int i;
 	struct sk_buff *skb;
@@ -421,7 +425,7 @@
 /* 	control->flags = 0; FIXME */
 
 	while ((skb = skb_dequeue(&wl->tx_queue))) {
-		wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb);
+		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
 		ieee80211_tx_status(wl->hw, skb);
 	}
 
@@ -429,6 +433,32 @@
 		if (wl->tx_frames[i] != NULL) {
 			skb = wl->tx_frames[i];
 			wl->tx_frames[i] = NULL;
+			wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
 			ieee80211_tx_status(wl->hw, skb);
 		}
+	wl->tx_frames_cnt = 0;
+}
+
+#define WL1271_TX_FLUSH_TIMEOUT 500000
+
+/* caller must *NOT* hold wl->mutex */
+void wl1271_tx_flush(struct wl1271 *wl)
+{
+	unsigned long timeout;
+	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
+
+	while (!time_after(jiffies, timeout)) {
+		mutex_lock(&wl->mutex);
+		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
+			     wl->tx_frames_cnt);
+		if ((wl->tx_frames_cnt == 0) &&
+		    skb_queue_empty(&wl->tx_queue)) {
+			mutex_unlock(&wl->mutex);
+			return;
+		}
+		mutex_unlock(&wl->mutex);
+		msleep(1);
+	}
+
+	wl1271_warning("Unable to flush all TX buffers, timed out.");
 }
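
wl1271_tx_flush() is now a bounded wait rather than a forced cleanup (which moved to the new wl1271_tx_reset()): it polls the new tx_frames_cnt counter and the pending queue under wl->mutex, drops the mutex and sleeps a millisecond between polls so the completion path can drain frames, and warns if the 500 ms budget expires. Together with the IEEE80211_CONF_CHANGE_IDLE hook added in wl1271_main.c this gives frames such as the deauth a chance to reach the air before the interface goes idle. A portable restatement of the deadline-poll idiom (user space, illustrative only; the driver itself uses jiffies, time_after() and msleep()):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/*
 * Generic bounded-poll helper in the spirit of wl1271_tx_flush():
 * re-check a condition at a fixed interval, give up after a deadline.
 * Assumes poll_ms < 1000.
 */
static bool wait_for(bool (*cond)(void), long timeout_ms, long poll_ms)
{
	struct timespec start, now;
	struct timespec nap = { 0, poll_ms * 1000000L };

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (cond())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >= timeout_ms)
			return false;
		nanosleep(&nap, NULL);
	}
}

/* fake TX state: pretend one frame completes per poll */
static int pending = 2;
static bool tx_done(void) { return pending-- <= 0; }

int main(void)
{
	if (!wait_for(tx_done, 500, 1))
		fprintf(stderr, "unable to flush, timed out\n");
	else
		printf("tx queue drained\n");
	return 0;
}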
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 3b8b7ac..48bf926 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -80,7 +80,7 @@
 	/* Identifier of the remote STA in IBSS, 1 in infra-BSS */
 	u8 aid;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;
 
 enum wl1271_tx_hw_res_status {
 	TX_SUCCESS          = 0,
@@ -115,13 +115,13 @@
 	u8 rate_class_index;
 	/* for 4-byte alignment. */
 	u8 spare;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl1271_tx_hw_res_if {
 	__le32 tx_result_fw_counter;
 	__le32 tx_result_host_counter;
 	struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
-} __attribute__ ((packed));
+} __packed;
 
 static inline int wl1271_tx_get_queue(int queue)
 {
@@ -158,6 +158,7 @@
 
 void wl1271_tx_work(struct work_struct *work);
 void wl1271_tx_complete(struct wl1271 *wl);
+void wl1271_tx_reset(struct wl1271 *wl);
 void wl1271_tx_flush(struct wl1271 *wl);
 u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 055d7bc..1846280 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -66,41 +66,41 @@
 	u8 bssid[ETH_ALEN];
 	__le16 seq_ctl;
 	u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_ie_header {
 	u8 id;
 	u8 len;
-} __attribute__ ((packed));
+} __packed;
 
 /* IEs */
 
 struct wl12xx_ie_ssid {
 	struct wl12xx_ie_header header;
 	char ssid[IW_ESSID_MAX_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_ie_rates {
 	struct wl12xx_ie_header header;
 	u8 rates[MAX_SUPPORTED_RATES];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_ie_ds_params {
 	struct wl12xx_ie_header header;
 	u8 channel;
-} __attribute__ ((packed));
+} __packed;
 
 struct country_triplet {
 	u8 channel;
 	u8 num_channels;
 	u8 max_tx_power;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_ie_country {
 	struct wl12xx_ie_header header;
 	u8 country_string[COUNTRY_STRING_LEN];
 	struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
-} __attribute__ ((packed));
+} __packed;
 
 
 /* Templates */
@@ -115,30 +115,30 @@
 	struct wl12xx_ie_rates ext_rates;
 	struct wl12xx_ie_ds_params ds_params;
 	struct wl12xx_ie_country country;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_null_data_template {
 	struct ieee80211_header header;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_ps_poll_template {
 	__le16 fc;
 	__le16 aid;
 	u8 bssid[ETH_ALEN];
 	u8 ta[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_qos_null_data_template {
 	struct ieee80211_header header;
 	__le16 qos_ctl;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_probe_req_template {
 	struct ieee80211_header header;
 	struct wl12xx_ie_ssid ssid;
 	struct wl12xx_ie_rates rates;
 	struct wl12xx_ie_rates ext_rates;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct wl12xx_probe_resp_template {
@@ -151,6 +151,6 @@
 	struct wl12xx_ie_rates ext_rates;
 	struct wl12xx_ie_ds_params ds_params;
 	struct wl12xx_ie_country country;
-} __attribute__ ((packed));
+} __packed;
 
 #endif
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 8816e37..3fbfd19 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -231,12 +231,12 @@
 		  but sizeof(enum) > sizeof(u8) :-( */
 	u8 len;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct iw_mgmt_essid_pset {
 	struct iw_mgmt_info_element el;
 	u8 			    essid[IW_ESSID_MAX_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 /*
 * According to 802.11 Wireless Networks, the definitive guide - O'Reilly
@@ -247,12 +247,12 @@
 struct iw_mgmt_data_rset {
 	struct iw_mgmt_info_element el;
 	u8 			    data_rate_labels[IW_DATA_RATE_MAX_LABELS];
-} __attribute__ ((packed));
+} __packed;
 
 struct iw_mgmt_ds_pset {
 	struct iw_mgmt_info_element el;
 	u8 			    chan;
-} __attribute__ ((packed));
+} __packed;
 
 struct iw_mgmt_cf_pset {
 	struct iw_mgmt_info_element el;
@@ -260,12 +260,12 @@
 	u8 			    cfp_period;
 	u16 			    cfp_max_duration;
 	u16 			    cfp_dur_remaining;
-} __attribute__ ((packed));
+} __packed;
 
 struct iw_mgmt_ibss_pset {
 	struct iw_mgmt_info_element el;
 	u16 			    atim_window;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl3501_tx_hdr {
 	u16	tx_cnt;
@@ -544,12 +544,12 @@
 	u8	service;
 	u16	len;
 	u16	crc16;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl3501_80211_tx_hdr {
 	struct wl3501_80211_tx_plcp_hdr	pclp_hdr;
 	struct ieee80211_hdr		mac_hdr;
-} __attribute__ ((packed));
+} __packed;
 
 /*
    Reserve the beginning Tx space for descriptor use.
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index b0b6660..43307bd 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -42,7 +42,8 @@
 	{ ZD_REGDOMAIN_IC, "CA" },
 	{ ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
 	{ ZD_REGDOMAIN_JAPAN, "JP" },
-	{ ZD_REGDOMAIN_JAPAN_ADD, "JP" },
+	{ ZD_REGDOMAIN_JAPAN_2, "JP" },
+	{ ZD_REGDOMAIN_JAPAN_3, "JP" },
 	{ ZD_REGDOMAIN_SPAIN, "ES" },
 	{ ZD_REGDOMAIN_FRANCE, "FR" },
 };
@@ -855,7 +856,7 @@
 	if (skb == NULL)
 		return -ENOMEM;
 	if (need_padding) {
-		/* Make sure the the payload data is 4 byte aligned. */
+		/* Make sure the payload data is 4 byte aligned. */
 		skb_reserve(skb, 2);
 	}
 
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 630c298..a6d86b9 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -35,7 +35,7 @@
 	__le16 current_length;
 	u8     service;
 	__le16  next_frame_length;
-} __attribute__((packed));
+} __packed;
 
 #define ZD_CS_RESERVED_SIZE	25
 
@@ -106,7 +106,7 @@
 struct rx_length_info {
 	__le16 length[3];
 	__le16 tag;
-} __attribute__((packed));
+} __packed;
 
 #define RX_LENGTH_INFO_TAG		0x697e
 
@@ -117,7 +117,7 @@
 	u8 signal_quality_ofdm;
 	u8 decryption_type;
 	u8 frame_status;
-} __attribute__((packed));
+} __packed;
 
 /* rx_status field decryption_type */
 #define ZD_RX_NO_WEP	0
@@ -153,7 +153,7 @@
 	u8 mac[ETH_ALEN];
 	u8 retry;
 	u8 failure;
-} __attribute__((packed));
+} __packed;
 
 enum mac_flags {
 	MAC_FIXED_CHANNEL = 0x01,
@@ -212,8 +212,9 @@
 #define ZD_REGDOMAIN_ETSI	0x30
 #define ZD_REGDOMAIN_SPAIN	0x31
 #define ZD_REGDOMAIN_FRANCE	0x32
-#define ZD_REGDOMAIN_JAPAN_ADD	0x40
+#define ZD_REGDOMAIN_JAPAN_2	0x40
 #define ZD_REGDOMAIN_JAPAN	0x41
+#define ZD_REGDOMAIN_JAPAN_3	0x49
 
 enum {
 	MIN_CHANNEL24 = 1,
@@ -225,7 +226,7 @@
 struct ofdm_plcp_header {
 	u8 prefix[3];
 	__le16 service;
-} __attribute__((packed));
+} __packed;
 
 static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header)
 {
@@ -252,7 +253,7 @@
 	u8 service;
 	__le16 length;
 	__le16 crc16;
-} __attribute__((packed));
+} __packed;
 
 static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header)
 {
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index c257940..818e1480 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -844,7 +844,7 @@
  * @usb: a &struct zd_usb pointer
  * @urb: URB to be freed
  *
- * Frees the the transmission URB, which means to put it on the free URB
+ * Frees the transmission URB, which means to put it on the free URB
  * list.
  */
 static void free_tx_urb(struct zd_usb *usb, struct urb *urb)
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 049f8b9..1b1655c 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -79,17 +79,17 @@
 struct usb_req_read_regs {
 	__le16 id;
 	__le16 addr[0];
-} __attribute__((packed));
+} __packed;
 
 struct reg_data {
 	__le16 addr;
 	__le16 value;
-} __attribute__((packed));
+} __packed;
 
 struct usb_req_write_regs {
 	__le16 id;
 	struct reg_data reg_writes[0];
-} __attribute__((packed));
+} __packed;
 
 enum {
 	RF_IF_LE = 0x02,
@@ -106,7 +106,7 @@
 	/* RF2595: 24 */
 	__le16 bit_values[0];
 	/* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
-} __attribute__((packed));
+} __packed;
 
 /* USB interrupt */
 
@@ -123,12 +123,12 @@
 struct usb_int_header {
 	u8 type;	/* must always be 1 */
 	u8 id;
-} __attribute__((packed));
+} __packed;
 
 struct usb_int_regs {
 	struct usb_int_header hdr;
 	struct reg_data regs[0];
-} __attribute__((packed));
+} __packed;
 
 struct usb_int_retry_fail {
 	struct usb_int_header hdr;
@@ -136,7 +136,7 @@
 	u8 _dummy;
 	u8 addr[ETH_ALEN];
 	u8 ibss_wakeup_dest;
-} __attribute__((packed));
+} __packed;
 
 struct read_regs_int {
 	struct completion completion;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d504e2b..b50fedc 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1621,6 +1621,7 @@
 		if (xennet_connect(netdev) != 0)
 			break;
 		xenbus_switch_state(dev, XenbusStateConnected);
+		netif_notify_peers(netdev);
 		break;
 
 	case XenbusStateClosing:
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 188bc84..18dff43 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -355,12 +355,13 @@
 	rcu_read_lock();
 	for_each_netdev_rcu(&init_net, dev) {
 	    const struct net_device_stats *stats;
+	    struct rtnl_link_stats64 temp;
 	    struct in_device *in_dev = __in_dev_get_rcu(dev);
 	    if (!in_dev || !in_dev->ifa_list)
 		continue;
 	    if (ipv4_is_loopback(in_dev->ifa_list->ifa_local))
 		continue;
-	    stats = dev_get_stats(dev);
+	    stats = dev_get_stats(dev, &temp);
 	    rx_total += stats->rx_packets;
 	    tx_total += stats->tx_packets;
 	}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 7a44c38..d798927 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -40,11 +40,7 @@
  */
 enum qeth_dbf_names {
 	QETH_DBF_SETUP,
-	QETH_DBF_QERR,
-	QETH_DBF_TRACE,
 	QETH_DBF_MSG,
-	QETH_DBF_SENSE,
-	QETH_DBF_MISC,
 	QETH_DBF_CTRL,
 	QETH_DBF_INFOS	/* must be last element */
 };
@@ -71,7 +67,19 @@
 	debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
 
 #define QETH_DBF_TEXT_(name, level, text...) \
-	qeth_dbf_longtext(QETH_DBF_##name, level, text)
+	qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text)
+
+#define QETH_CARD_TEXT(card, level, text) \
+	debug_text_event(card->debug, level, text)
+
+#define QETH_CARD_HEX(card, level, addr, len) \
+	debug_event(card->debug, level, (void *)(addr), len)
+
+#define QETH_CARD_MESSAGE(card, level, text...) \
+	debug_sprintf_event(card->debug, level, text)
+
+#define QETH_CARD_TEXT_(card, level, text...) \
+	qeth_dbf_longtext(card->debug, level, text)
 
 #define SENSE_COMMAND_REJECT_BYTE 0
 #define SENSE_COMMAND_REJECT_FLAG 0x80
@@ -738,6 +746,7 @@
 	atomic_t force_alloc_skb;
 	struct service_level qeth_service_level;
 	struct qdio_ssqd_desc ssqd;
+	debug_info_t *debug;
 	struct mutex conf_mutex;
 };
 
@@ -857,9 +866,10 @@
 				struct ethtool_stats *, u64 *);
 void qeth_core_get_strings(struct net_device *, u32, u8 *);
 void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
-void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
+void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
 int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
 int qeth_set_access_ctrl_online(struct qeth_card *card);
+int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
 
 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 13ef46b..b701906 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -32,16 +32,8 @@
 	/*                   N  P  A    M  L  V                      H  */
 	[QETH_DBF_SETUP] = {"qeth_setup",
 				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
-	[QETH_DBF_QERR]  = {"qeth_qerr",
-				2, 1,   8, 2, &debug_hex_ascii_view, NULL},
-	[QETH_DBF_TRACE] = {"qeth_trace",
-				4, 1,   8, 3, &debug_hex_ascii_view, NULL},
 	[QETH_DBF_MSG]   = {"qeth_msg",
 				8, 1, 128, 3, &debug_sprintf_view,   NULL},
-	[QETH_DBF_SENSE] = {"qeth_sense",
-				2, 1,  64, 2, &debug_hex_ascii_view, NULL},
-	[QETH_DBF_MISC]	 = {"qeth_misc",
-				2, 1, 256, 2, &debug_hex_ascii_view, NULL},
 	[QETH_DBF_CTRL]  = {"qeth_control",
 		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
 };
@@ -65,48 +57,6 @@
 static int qeth_qdio_establish(struct qeth_card *);
 
 
-static inline void __qeth_fill_buffer_frag(struct sk_buff *skb,
-		struct qdio_buffer *buffer, int is_tso,
-		int *next_element_to_fill)
-{
-	struct skb_frag_struct *frag;
-	int fragno;
-	unsigned long addr;
-	int element, cnt, dlen;
-
-	fragno = skb_shinfo(skb)->nr_frags;
-	element = *next_element_to_fill;
-	dlen = 0;
-
-	if (is_tso)
-		buffer->element[element].flags =
-			SBAL_FLAGS_MIDDLE_FRAG;
-	else
-		buffer->element[element].flags =
-			SBAL_FLAGS_FIRST_FRAG;
-	dlen = skb->len - skb->data_len;
-	if (dlen) {
-		buffer->element[element].addr = skb->data;
-		buffer->element[element].length = dlen;
-		element++;
-	}
-	for (cnt = 0; cnt < fragno; cnt++) {
-		frag = &skb_shinfo(skb)->frags[cnt];
-		addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
-			frag->page_offset;
-		buffer->element[element].addr = (char *)addr;
-		buffer->element[element].length = frag->size;
-		if (cnt < (fragno - 1))
-			buffer->element[element].flags =
-				SBAL_FLAGS_MIDDLE_FRAG;
-		else
-			buffer->element[element].flags =
-				SBAL_FLAGS_LAST_FRAG;
-		element++;
-	}
-	*next_element_to_fill = element;
-}
-
 static inline const char *qeth_get_cardname(struct qeth_card *card)
 {
 	if (card->info.guestlan) {
@@ -232,7 +182,7 @@
 {
 	struct qeth_buffer_pool_entry *pool_entry, *tmp;
 
-	QETH_DBF_TEXT(TRACE, 5, "clwrklst");
+	QETH_CARD_TEXT(card, 5, "clwrklst");
 	list_for_each_entry_safe(pool_entry, tmp,
 			    &card->qdio.in_buf_pool.entry_list, list){
 			list_del(&pool_entry->list);
@@ -246,7 +196,7 @@
 	void *ptr;
 	int i, j;
 
-	QETH_DBF_TEXT(TRACE, 5, "alocpool");
+	QETH_CARD_TEXT(card, 5, "alocpool");
 	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
 		pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
 		if (!pool_entry) {
@@ -273,7 +223,7 @@
 
 int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
 {
-	QETH_DBF_TEXT(TRACE, 2, "realcbp");
+	QETH_CARD_TEXT(card, 2, "realcbp");
 
 	if ((card->state != CARD_STATE_DOWN) &&
 	    (card->state != CARD_STATE_RECOVER))
@@ -293,7 +243,7 @@
 	int rc;
 	struct qeth_cmd_buffer *iob;
 
-	QETH_DBF_TEXT(TRACE, 5, "issnxrd");
+	QETH_CARD_TEXT(card, 5, "issnxrd");
 	if (card->read.state != CH_STATE_UP)
 		return -EIO;
 	iob = qeth_get_buffer(&card->read);
@@ -305,7 +255,7 @@
 		return -ENOMEM;
 	}
 	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
-	QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
+	QETH_CARD_TEXT(card, 6, "noirqpnd");
 	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
 			      (addr_t) iob, 0, 0);
 	if (rc) {
@@ -364,7 +314,7 @@
 {
 	struct qeth_ipa_cmd *cmd = NULL;
 
-	QETH_DBF_TEXT(TRACE, 5, "chkipad");
+	QETH_CARD_TEXT(card, 5, "chkipad");
 	if (IS_IPA(iob->data)) {
 		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
 		if (IS_IPA_REPLY(cmd)) {
@@ -400,10 +350,10 @@
 			case IPA_CMD_MODCCID:
 				return cmd;
 			case IPA_CMD_REGISTER_LOCAL_ADDR:
-				QETH_DBF_TEXT(TRACE, 3, "irla");
+				QETH_CARD_TEXT(card, 3, "irla");
 				break;
 			case IPA_CMD_UNREGISTER_LOCAL_ADDR:
-				QETH_DBF_TEXT(TRACE, 3, "urla");
+				QETH_CARD_TEXT(card, 3, "urla");
 				break;
 			default:
 				QETH_DBF_MESSAGE(2, "Received data is IPA "
@@ -420,7 +370,7 @@
 	struct qeth_reply *reply, *r;
 	unsigned long flags;
 
-	QETH_DBF_TEXT(TRACE, 4, "clipalst");
+	QETH_CARD_TEXT(card, 4, "clipalst");
 
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
@@ -448,9 +398,9 @@
 			   buffer[4],
 			   ((buffer[4] == 0x22) ?
 			    " -- try another portname" : ""));
-		QETH_DBF_TEXT(TRACE, 2, "ckidxres");
-		QETH_DBF_TEXT(TRACE, 2, " idxterm");
-		QETH_DBF_TEXT_(TRACE, 2, "  rc%d", -EIO);
+		QETH_CARD_TEXT(card, 2, "ckidxres");
+		QETH_CARD_TEXT(card, 2, " idxterm");
+		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
 		if (buffer[4] == 0xf6) {
 			dev_err(&card->gdev->dev,
 			"The qeth device is not configured "
@@ -467,8 +417,8 @@
 {
 	struct qeth_card *card;
 
-	QETH_DBF_TEXT(TRACE, 4, "setupccw");
 	card = CARD_FROM_CDEV(channel->ccwdev);
+	QETH_CARD_TEXT(card, 4, "setupccw");
 	if (channel == &card->read)
 		memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
 	else
@@ -481,7 +431,7 @@
 {
 	__u8 index;
 
-	QETH_DBF_TEXT(TRACE, 6, "getbuff");
+	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
 	index = channel->io_buf_no;
 	do {
 		if (channel->iob[index].state == BUF_STATE_FREE) {
@@ -502,7 +452,7 @@
 {
 	unsigned long flags;
 
-	QETH_DBF_TEXT(TRACE, 6, "relbuff");
+	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
 	spin_lock_irqsave(&channel->iob_lock, flags);
 	memset(iob->data, 0, QETH_BUFSIZE);
 	iob->state = BUF_STATE_FREE;
@@ -553,9 +503,8 @@
 	int keep_reply;
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 4, "sndctlcb");
-
 	card = CARD_FROM_CDEV(channel->ccwdev);
+	QETH_CARD_TEXT(card, 4, "sndctlcb");
 	rc = qeth_check_idx_response(card, iob->data);
 	switch (rc) {
 	case 0:
@@ -563,6 +512,7 @@
 	case -EIO:
 		qeth_clear_ipacmd_list(card);
 		qeth_schedule_recovery(card);
+		/* fall through */
 	default:
 		goto out;
 	}
@@ -722,7 +672,7 @@
 
 void qeth_schedule_recovery(struct qeth_card *card)
 {
-	QETH_DBF_TEXT(TRACE, 2, "startrec");
+	QETH_CARD_TEXT(card, 2, "startrec");
 	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
 		schedule_work(&card->kernel_thread_starter);
 }
@@ -732,15 +682,17 @@
 {
 	int dstat, cstat;
 	char *sense;
+	struct qeth_card *card;
 
 	sense = (char *) irb->ecw;
 	cstat = irb->scsw.cmd.cstat;
 	dstat = irb->scsw.cmd.dstat;
+	card = CARD_FROM_CDEV(cdev);
 
 	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
 		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
 		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
-		QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
+		QETH_CARD_TEXT(card, 2, "CGENCHK");
 		dev_warn(&cdev->dev, "The qeth device driver "
 			"failed to recover an error on the device\n");
 		QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
@@ -753,23 +705,23 @@
 	if (dstat & DEV_STAT_UNIT_CHECK) {
 		if (sense[SENSE_RESETTING_EVENT_BYTE] &
 		    SENSE_RESETTING_EVENT_FLAG) {
-			QETH_DBF_TEXT(TRACE, 2, "REVIND");
+			QETH_CARD_TEXT(card, 2, "REVIND");
 			return 1;
 		}
 		if (sense[SENSE_COMMAND_REJECT_BYTE] &
 		    SENSE_COMMAND_REJECT_FLAG) {
-			QETH_DBF_TEXT(TRACE, 2, "CMDREJi");
+			QETH_CARD_TEXT(card, 2, "CMDREJi");
 			return 1;
 		}
 		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
-			QETH_DBF_TEXT(TRACE, 2, "AFFE");
+			QETH_CARD_TEXT(card, 2, "AFFE");
 			return 1;
 		}
 		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
-			QETH_DBF_TEXT(TRACE, 2, "ZEROSEN");
+			QETH_CARD_TEXT(card, 2, "ZEROSEN");
 			return 0;
 		}
-		QETH_DBF_TEXT(TRACE, 2, "DGENCHK");
+		QETH_CARD_TEXT(card, 2, "DGENCHK");
 			return 1;
 	}
 	return 0;
@@ -778,6 +730,10 @@
 static long __qeth_check_irb_error(struct ccw_device *cdev,
 		unsigned long intparm, struct irb *irb)
 {
+	struct qeth_card *card;
+
+	card = CARD_FROM_CDEV(cdev);
+
 	if (!IS_ERR(irb))
 		return 0;
 
@@ -785,17 +741,15 @@
 	case -EIO:
 		QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
 			dev_name(&cdev->dev));
-		QETH_DBF_TEXT(TRACE, 2, "ckirberr");
-		QETH_DBF_TEXT_(TRACE, 2, "  rc%d", -EIO);
+		QETH_CARD_TEXT(card, 2, "ckirberr");
+		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
 		break;
 	case -ETIMEDOUT:
 		dev_warn(&cdev->dev, "A hardware operation timed out"
 			" on the device\n");
-		QETH_DBF_TEXT(TRACE, 2, "ckirberr");
-		QETH_DBF_TEXT_(TRACE, 2, "  rc%d", -ETIMEDOUT);
+		QETH_CARD_TEXT(card, 2, "ckirberr");
+		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
 		if (intparm == QETH_RCD_PARM) {
-			struct qeth_card *card = CARD_FROM_CDEV(cdev);
-
 			if (card && (card->data.ccwdev == cdev)) {
 				card->data.state = CH_STATE_DOWN;
 				wake_up(&card->wait_q);
@@ -805,8 +759,8 @@
 	default:
 		QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
 			dev_name(&cdev->dev), PTR_ERR(irb));
-		QETH_DBF_TEXT(TRACE, 2, "ckirberr");
-		QETH_DBF_TEXT(TRACE, 2, "  rc???");
+		QETH_CARD_TEXT(card, 2, "ckirberr");
+		QETH_CARD_TEXT(card, 2, "  rc???");
 	}
 	return PTR_ERR(irb);
 }
@@ -822,8 +776,6 @@
 	struct qeth_cmd_buffer *iob;
 	__u8 index;
 
-	QETH_DBF_TEXT(TRACE, 5, "irq");
-
 	if (__qeth_check_irb_error(cdev, intparm, irb))
 		return;
 	cstat = irb->scsw.cmd.cstat;
@@ -833,15 +785,17 @@
 	if (!card)
 		return;
 
+	QETH_CARD_TEXT(card, 5, "irq");
+
 	if (card->read.ccwdev == cdev) {
 		channel = &card->read;
-		QETH_DBF_TEXT(TRACE, 5, "read");
+		QETH_CARD_TEXT(card, 5, "read");
 	} else if (card->write.ccwdev == cdev) {
 		channel = &card->write;
-		QETH_DBF_TEXT(TRACE, 5, "write");
+		QETH_CARD_TEXT(card, 5, "write");
 	} else {
 		channel = &card->data;
-		QETH_DBF_TEXT(TRACE, 5, "data");
+		QETH_CARD_TEXT(card, 5, "data");
 	}
 	atomic_set(&channel->irq_pending, 0);
 
@@ -857,12 +811,12 @@
 		goto out;
 
 	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
-		QETH_DBF_TEXT(TRACE, 6, "clrchpar");
+		QETH_CARD_TEXT(card, 6, "clrchpar");
 		/* we don't have to handle this further */
 		intparm = 0;
 	}
 	if (intparm == QETH_HALT_CHANNEL_PARM) {
-		QETH_DBF_TEXT(TRACE, 6, "hltchpar");
+		QETH_CARD_TEXT(card, 6, "hltchpar");
 		/* we don't have to handle this further */
 		intparm = 0;
 	}
@@ -963,7 +917,7 @@
 {
 	int i, j;
 
-	QETH_DBF_TEXT(TRACE, 2, "clearqdbf");
+	QETH_CARD_TEXT(card, 2, "clearqdbf");
 	/* clear outbound buffers to free skbs */
 	for (i = 0; i < card->qdio.no_out_queues; ++i)
 		if (card->qdio.out_qs[i]) {
@@ -978,7 +932,6 @@
 {
 	struct qeth_buffer_pool_entry *pool_entry, *tmp;
 	int i = 0;
-	QETH_DBF_TEXT(TRACE, 5, "freepool");
 	list_for_each_entry_safe(pool_entry, tmp,
 				 &card->qdio.init_pool.entry_list, init_list){
 		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
@@ -992,7 +945,6 @@
 {
 	int i, j;
 
-	QETH_DBF_TEXT(TRACE, 2, "freeqdbf");
 	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
 		QETH_QDIO_UNINITIALIZED)
 		return;
@@ -1089,7 +1041,7 @@
 	int rc = 0;
 
 	spin_lock_irqsave(&card->thread_mask_lock, flags);
-	QETH_DBF_TEXT_(TRACE, 4, "  %02x%02x%02x",
+	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
 			(u8) card->thread_start_mask,
 			(u8) card->thread_allowed_mask,
 			(u8) card->thread_running_mask);
@@ -1102,7 +1054,7 @@
 {
 	struct qeth_card *card = container_of(work, struct qeth_card,
 					kernel_thread_starter);
-	QETH_DBF_TEXT(TRACE , 2, "strthrd");
+	QETH_CARD_TEXT(card, 2, "strthrd");
 
 	if (card->read.state != CH_STATE_UP &&
 	    card->write.state != CH_STATE_UP)
@@ -1229,8 +1181,8 @@
 	struct qeth_card *card;
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "clearch");
 	card = CARD_FROM_CDEV(channel->ccwdev);
+	QETH_CARD_TEXT(card, 3, "clearch");
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
 	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1253,8 +1205,8 @@
 	struct qeth_card *card;
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "haltch");
 	card = CARD_FROM_CDEV(channel->ccwdev);
+	QETH_CARD_TEXT(card, 3, "haltch");
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
 	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1274,7 +1226,7 @@
 {
 	int rc1 = 0, rc2 = 0, rc3 = 0;
 
-	QETH_DBF_TEXT(TRACE, 3, "haltchs");
+	QETH_CARD_TEXT(card, 3, "haltchs");
 	rc1 = qeth_halt_channel(&card->read);
 	rc2 = qeth_halt_channel(&card->write);
 	rc3 = qeth_halt_channel(&card->data);
@@ -1289,7 +1241,7 @@
 {
 	int rc1 = 0, rc2 = 0, rc3 = 0;
 
-	QETH_DBF_TEXT(TRACE, 3, "clearchs");
+	QETH_CARD_TEXT(card, 3, "clearchs");
 	rc1 = qeth_clear_channel(&card->read);
 	rc2 = qeth_clear_channel(&card->write);
 	rc3 = qeth_clear_channel(&card->data);
@@ -1304,8 +1256,7 @@
 {
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 3, "clhacrd");
-	QETH_DBF_HEX(TRACE, 3, &card, sizeof(void *));
+	QETH_CARD_TEXT(card, 3, "clhacrd");
 
 	if (halt)
 		rc = qeth_halt_channels(card);
@@ -1318,7 +1269,7 @@
 {
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 3, "qdioclr");
+	QETH_CARD_TEXT(card, 3, "qdioclr");
 	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
 		QETH_QDIO_CLEANING)) {
 	case QETH_QDIO_ESTABLISHED:
@@ -1329,7 +1280,7 @@
 			rc = qdio_shutdown(CARD_DDEV(card),
 				QDIO_FLAG_CLEANUP_USING_CLEAR);
 		if (rc)
-			QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
+			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
 		qdio_free(CARD_DDEV(card));
 		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
 		break;
@@ -1340,7 +1291,7 @@
 	}
 	rc = qeth_clear_halt_card(card, use_halt);
 	if (rc)
-		QETH_DBF_TEXT_(TRACE, 3, "2err%d", rc);
+		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
 	card->state = CARD_STATE_DOWN;
 	return rc;
 }
@@ -1440,6 +1391,7 @@
 				QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
 		break;
 	case QETH_CARD_TYPE_OSD:
+	case QETH_CARD_TYPE_OSN:
 		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
 		break;
 	default:
@@ -1637,15 +1589,18 @@
 				"host\n");
 			break;
 		case QETH_IDX_ACT_ERR_AUTH:
+		case QETH_IDX_ACT_ERR_AUTH_USER:
 			dev_err(&card->read.ccwdev->dev,
 				"Setting the device online failed because of "
-				"insufficient LPAR authorization\n");
+				"insufficient authorization\n");
 			break;
 		default:
 			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
 				" negative reply\n",
 				dev_name(&card->read.ccwdev->dev));
 		}
+		QETH_CARD_TEXT_(card, 2, "idxread%c",
+			QETH_IDX_ACT_CAUSE_CODE(iob->data));
 		goto out;
 	}
 
@@ -1705,7 +1660,7 @@
 	unsigned long timeout, event_timeout;
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 2, "sendctl");
+	QETH_CARD_TEXT(card, 2, "sendctl");
 
 	reply = qeth_alloc_reply(card);
 	if (!reply) {
@@ -1732,7 +1687,7 @@
 		event_timeout = QETH_TIMEOUT;
 	timeout = jiffies + event_timeout;
 
-	QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
+	QETH_CARD_TEXT(card, 6, "noirqpnd");
 	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
 	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
 			      (addr_t) iob, 0, 0);
@@ -1741,7 +1696,7 @@
 		QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
 			"ccw_device_start rc = %i\n",
 			dev_name(&card->write.ccwdev->dev), rc);
-		QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
+		QETH_CARD_TEXT_(card, 2, " err%d", rc);
 		spin_lock_irqsave(&card->lock, flags);
 		list_del_init(&reply->list);
 		qeth_put_reply(reply);
@@ -1978,7 +1933,7 @@
 		card->info.link_type = link_type;
 	} else
 		card->info.link_type = 0;
-	QETH_DBF_TEXT_(SETUP, 2, "link%d", link_type);
+	QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
 	QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
 	return 0;
 }
@@ -2335,7 +2290,7 @@
 {
 	struct qeth_buffer_pool_entry *entry;
 
-	QETH_DBF_TEXT(TRACE, 5, "inwrklst");
+	QETH_CARD_TEXT(card, 5, "inwrklst");
 
 	list_for_each_entry(entry,
 			    &card->qdio.init_pool.entry_list, init_list) {
@@ -2522,7 +2477,7 @@
 	int rc;
 	char prot_type;
 
-	QETH_DBF_TEXT(TRACE, 4, "sendipa");
+	QETH_CARD_TEXT(card, 4, "sendipa");
 
 	if (card->options.layer2)
 		if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -2582,7 +2537,7 @@
 {
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 4, "defadpcb");
+	QETH_CARD_TEXT(card, 4, "defadpcb");
 
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (cmd->hdr.return_code == 0)
@@ -2597,7 +2552,7 @@
 {
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 3, "quyadpcb");
+	QETH_CARD_TEXT(card, 3, "quyadpcb");
 
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
@@ -2633,7 +2588,7 @@
 	int rc;
 	struct qeth_cmd_buffer *iob;
 
-	QETH_DBF_TEXT(TRACE, 3, "queryadp");
+	QETH_CARD_TEXT(card, 3, "queryadp");
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
 				   sizeof(struct qeth_ipacmd_setadpparms));
 	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
@@ -2645,13 +2600,12 @@
 		unsigned int qdio_error, const char *dbftext)
 {
 	if (qdio_error) {
-		QETH_DBF_TEXT(TRACE, 2, dbftext);
-		QETH_DBF_TEXT(QERR, 2, dbftext);
-		QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
+		QETH_CARD_TEXT(card, 2, dbftext);
+		QETH_CARD_TEXT_(card, 2, " F15=%02X",
 			       buf->element[15].flags & 0xff);
-		QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
+		QETH_CARD_TEXT_(card, 2, " F14=%02X",
 			       buf->element[14].flags & 0xff);
-		QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
+		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
 		if ((buf->element[15].flags & 0xff) == 0x12) {
 			card->stats.rx_dropped++;
 			return 0;
@@ -2717,8 +2671,7 @@
 		if (rc) {
 			dev_warn(&card->gdev->dev,
 				"QDIO reported an error, rc=%i\n", rc);
-			QETH_DBF_TEXT(TRACE, 2, "qinberr");
-			QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+			QETH_CARD_TEXT(card, 2, "qinberr");
 		}
 		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
 					  QDIO_MAX_BUFFERS_PER_Q;
@@ -2731,7 +2684,7 @@
 {
 	int sbalf15 = buffer->buffer->element[15].flags & 0xff;
 
-	QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
+	QETH_CARD_TEXT(card, 6, "hdsnderr");
 	if (card->info.type == QETH_CARD_TYPE_IQD) {
 		if (sbalf15 == 0) {
 			qdio_err = 0;
@@ -2747,9 +2700,8 @@
 	if ((sbalf15 >= 15) && (sbalf15 <= 31))
 		return QETH_SEND_ERROR_RETRY;
 
-	QETH_DBF_TEXT(TRACE, 1, "lnkfail");
-	QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
-	QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
+	QETH_CARD_TEXT(card, 1, "lnkfail");
+	QETH_CARD_TEXT_(card, 1, "%04x %02x",
 		       (u16)qdio_err, (u8)sbalf15);
 	return QETH_SEND_ERROR_LINK_FAILURE;
 }
@@ -2764,7 +2716,7 @@
 		if (atomic_read(&queue->used_buffers)
 		    >= QETH_HIGH_WATERMARK_PACK){
 			/* switch non-PACKING -> PACKING */
-			QETH_DBF_TEXT(TRACE, 6, "np->pack");
+			QETH_CARD_TEXT(queue->card, 6, "np->pack");
 			if (queue->card->options.performance_stats)
 				queue->card->perf_stats.sc_dp_p++;
 			queue->do_pack = 1;
@@ -2787,7 +2739,7 @@
 		if (atomic_read(&queue->used_buffers)
 		    <= QETH_LOW_WATERMARK_PACK) {
 			/* switch PACKING -> non-PACKING */
-			QETH_DBF_TEXT(TRACE, 6, "pack->np");
+			QETH_CARD_TEXT(queue->card, 6, "pack->np");
 			if (queue->card->options.performance_stats)
 				queue->card->perf_stats.sc_p_dp++;
 			queue->do_pack = 0;
@@ -2896,9 +2848,8 @@
 		/* ignore temporary SIGA errors without busy condition */
 		if (rc == QDIO_ERROR_SIGA_TARGET)
 			return;
-		QETH_DBF_TEXT(TRACE, 2, "flushbuf");
-		QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
-		QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card));
+		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
+		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
 
 		/* this must not happen under normal circumstances. if it
 		 * happens something is really wrong -> recover */
@@ -2960,10 +2911,9 @@
 	int i;
 	unsigned qeth_send_err;
 
-	QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
+	QETH_CARD_TEXT(card, 6, "qdouhdl");
 	if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
-		QETH_DBF_TEXT(TRACE, 2, "achkcond");
-		QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+		QETH_CARD_TEXT(card, 2, "achkcond");
 		netif_stop_queue(card->dev);
 		qeth_schedule_recovery(card);
 		return;
@@ -3033,13 +2983,11 @@
 int qeth_get_elements_no(struct qeth_card *card, void *hdr,
 		     struct sk_buff *skb, int elems)
 {
-	int elements_needed = 0;
+	int dlen = skb->len - skb->data_len;
+	int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
+		PFN_DOWN((unsigned long)skb->data);
 
-	if (skb_shinfo(skb)->nr_frags > 0)
-		elements_needed = (skb_shinfo(skb)->nr_frags + 1);
-	if (elements_needed == 0)
-		elements_needed = 1 + (((((unsigned long) skb->data) %
-				PAGE_SIZE) + skb->len) >> PAGE_SHIFT);
+	elements_needed += skb_shinfo(skb)->nr_frags;
 	if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
 			"(Number=%d / Length=%d). Discarded.\n",
@@ -3050,15 +2998,35 @@
 }
 EXPORT_SYMBOL_GPL(qeth_get_elements_no);
 
+int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len)
+{
+	int hroom, inpage, rest;
+
+	if (((unsigned long)skb->data & PAGE_MASK) !=
+	    (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
+		hroom = skb_headroom(skb);
+		inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
+		rest = len - inpage;
+		if (rest > hroom)
+			return 1;
+		memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
+		skb->data -= rest;
+		QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
+
 static inline void __qeth_fill_buffer(struct sk_buff *skb,
 	struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
 	int offset)
 {
-	int length = skb->len;
+	int length = skb->len - skb->data_len;
 	int length_here;
 	int element;
 	char *data;
-	int first_lap ;
+	int first_lap, cnt;
+	struct skb_frag_struct *frag;
 
 	element = *next_element_to_fill;
 	data = skb->data;
@@ -3081,10 +3049,14 @@
 		length -= length_here;
 		if (!length) {
 			if (first_lap)
-				buffer->element[element].flags = 0;
+				if (skb_shinfo(skb)->nr_frags)
+					buffer->element[element].flags =
+						SBAL_FLAGS_FIRST_FRAG;
+				else
+					buffer->element[element].flags = 0;
 			else
 				buffer->element[element].flags =
-				    SBAL_FLAGS_LAST_FRAG;
+				    SBAL_FLAGS_MIDDLE_FRAG;
 		} else {
 			if (first_lap)
 				buffer->element[element].flags =
@@ -3097,6 +3069,18 @@
 		element++;
 		first_lap = 0;
 	}
+
+	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+		frag = &skb_shinfo(skb)->frags[cnt];
+		buffer->element[element].addr = (char *)page_to_phys(frag->page)
+			+ frag->page_offset;
+		buffer->element[element].length = frag->size;
+		buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG;
+		element++;
+	}
+
+	if (buffer->element[element - 1].flags)
+		buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG;
 	*next_element_to_fill = element;
 }
 
@@ -3137,20 +3121,16 @@
 		buf->next_element_to_fill++;
 	}
 
-	if (skb_shinfo(skb)->nr_frags == 0)
-		__qeth_fill_buffer(skb, buffer, large_send,
-				(int *)&buf->next_element_to_fill, offset);
-	else
-		__qeth_fill_buffer_frag(skb, buffer, large_send,
-					(int *)&buf->next_element_to_fill);
+	__qeth_fill_buffer(skb, buffer, large_send,
+		(int *)&buf->next_element_to_fill, offset);
 
 	if (!queue->do_pack) {
-		QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
+		QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
 		/* set state to PRIMED -> will be flushed */
 		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
 		flush_cnt = 1;
 	} else {
-		QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
+		QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
 		if (queue->card->options.performance_stats)
 			queue->card->perf_stats.skbs_sent_pack++;
 		if (buf->next_element_to_fill >=
@@ -3210,7 +3190,7 @@
 			rc = dev_queue_xmit(skb);
 		} else {
 			dev_kfree_skb_any(skb);
-			QETH_DBF_TEXT(QERR, 2, "qrdrop");
+			QETH_CARD_TEXT(card, 2, "qrdrop");
 		}
 	}
 	return 0;
@@ -3312,14 +3292,14 @@
 	struct qeth_ipa_cmd *cmd;
 	struct qeth_ipacmd_setadpparms *setparms;
 
-	QETH_DBF_TEXT(TRACE, 4, "prmadpcb");
+	QETH_CARD_TEXT(card, 4, "prmadpcb");
 
 	cmd = (struct qeth_ipa_cmd *) data;
 	setparms = &(cmd->data.setadapterparms);
 
 	qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
 	if (cmd->hdr.return_code) {
-		QETH_DBF_TEXT_(TRACE, 4, "prmrc%2.2x", cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code);
 		setparms->data.mode = SET_PROMISC_MODE_OFF;
 	}
 	card->info.promisc_mode = setparms->data.mode;
@@ -3333,7 +3313,7 @@
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 4, "setprom");
+	QETH_CARD_TEXT(card, 4, "setprom");
 
 	if (((dev->flags & IFF_PROMISC) &&
 	     (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
@@ -3343,7 +3323,7 @@
 	mode = SET_PROMISC_MODE_OFF;
 	if (dev->flags & IFF_PROMISC)
 		mode = SET_PROMISC_MODE_ON;
-	QETH_DBF_TEXT_(TRACE, 4, "mode:%x", mode);
+	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
 
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
 			sizeof(struct qeth_ipacmd_setadpparms));
@@ -3360,9 +3340,9 @@
 
 	card = dev->ml_priv;
 
-	QETH_DBF_TEXT(TRACE, 4, "chgmtu");
+	QETH_CARD_TEXT(card, 4, "chgmtu");
 	sprintf(dbf_text, "%8x", new_mtu);
-	QETH_DBF_TEXT(TRACE, 4, dbf_text);
+	QETH_CARD_TEXT(card, 4, dbf_text);
 
 	if (new_mtu < 64)
 		return -EINVAL;
@@ -3382,7 +3362,7 @@
 
 	card = dev->ml_priv;
 
-	QETH_DBF_TEXT(TRACE, 5, "getstat");
+	QETH_CARD_TEXT(card, 5, "getstat");
 
 	return &card->stats;
 }
@@ -3393,7 +3373,7 @@
 {
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 4, "chgmaccb");
+	QETH_CARD_TEXT(card, 4, "chgmaccb");
 
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (!card->options.layer2 ||
@@ -3413,7 +3393,7 @@
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 4, "chgmac");
+	QETH_CARD_TEXT(card, 4, "chgmac");
 
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
 				   sizeof(struct qeth_ipacmd_setadpparms));
@@ -3435,7 +3415,7 @@
 	struct qeth_set_access_ctrl *access_ctrl_req;
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 4, "setaccb");
+	QETH_CARD_TEXT(card, 4, "setaccb");
 
 	cmd = (struct qeth_ipa_cmd *) data;
 	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
@@ -3533,7 +3513,7 @@
 	struct qeth_ipa_cmd *cmd;
 	struct qeth_set_access_ctrl *access_ctrl_req;
 
-	QETH_DBF_TEXT(TRACE, 4, "setacctl");
+	QETH_CARD_TEXT(card, 4, "setacctl");
 
 	QETH_DBF_TEXT_(SETUP, 2, "setacctl");
 	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -3555,7 +3535,7 @@
 {
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 4, "setactlo");
+	QETH_CARD_TEXT(card, 4, "setactlo");
 
 	if ((card->info.type == QETH_CARD_TYPE_OSD ||
 	     card->info.type == QETH_CARD_TYPE_OSX) &&
@@ -3583,8 +3563,8 @@
 {
 	struct qeth_card *card;
 
-	QETH_DBF_TEXT(TRACE, 4, "txtimeo");
 	card = dev->ml_priv;
+	QETH_CARD_TEXT(card, 4, "txtimeo");
 	card->stats.tx_errors++;
 	qeth_schedule_recovery(card);
 }
@@ -3663,7 +3643,7 @@
 {
 	u16 s1, s2;
 
-	QETH_DBF_TEXT(TRACE, 4, "sendsnmp");
+	QETH_CARD_TEXT(card, 4, "sendsnmp");
 
 	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
 	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
@@ -3688,7 +3668,7 @@
 	unsigned char *data;
 	__u16 data_len;
 
-	QETH_DBF_TEXT(TRACE, 3, "snpcmdcb");
+	QETH_CARD_TEXT(card, 3, "snpcmdcb");
 
 	cmd = (struct qeth_ipa_cmd *) sdata;
 	data = (unsigned char *)((char *)cmd - reply->offset);
@@ -3696,13 +3676,13 @@
 	snmp = &cmd->data.setadapterparms.data.snmp;
 
 	if (cmd->hdr.return_code) {
-		QETH_DBF_TEXT_(TRACE, 4, "scer1%i", cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code);
 		return 0;
 	}
 	if (cmd->data.setadapterparms.hdr.return_code) {
 		cmd->hdr.return_code =
 			cmd->data.setadapterparms.hdr.return_code;
-		QETH_DBF_TEXT_(TRACE, 4, "scer2%i", cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code);
 		return 0;
 	}
 	data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
@@ -3713,13 +3693,13 @@
 
 	/* check if there is enough room in userspace */
 	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
-		QETH_DBF_TEXT_(TRACE, 4, "scer3%i", -ENOMEM);
+		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
 		cmd->hdr.return_code = -ENOMEM;
 		return 0;
 	}
-	QETH_DBF_TEXT_(TRACE, 4, "snore%i",
+	QETH_CARD_TEXT_(card, 4, "snore%i",
 		       cmd->data.setadapterparms.hdr.used_total);
-	QETH_DBF_TEXT_(TRACE, 4, "sseqn%i",
+	QETH_CARD_TEXT_(card, 4, "sseqn%i",
 		cmd->data.setadapterparms.hdr.seq_no);
 	/*copy entries to user buffer*/
 	if (cmd->data.setadapterparms.hdr.seq_no == 1) {
@@ -3733,9 +3713,9 @@
 	}
 	qinfo->udata_offset += data_len;
 	/* check if all replies received ... */
-		QETH_DBF_TEXT_(TRACE, 4, "srtot%i",
+		QETH_CARD_TEXT_(card, 4, "srtot%i",
 			       cmd->data.setadapterparms.hdr.used_total);
-		QETH_DBF_TEXT_(TRACE, 4, "srseq%i",
+		QETH_CARD_TEXT_(card, 4, "srseq%i",
 			       cmd->data.setadapterparms.hdr.seq_no);
 	if (cmd->data.setadapterparms.hdr.seq_no <
 	    cmd->data.setadapterparms.hdr.used_total)
@@ -3752,7 +3732,7 @@
 	struct qeth_arp_query_info qinfo = {0, };
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 3, "snmpcmd");
+	QETH_CARD_TEXT(card, 3, "snmpcmd");
 
 	if (card->info.guestlan)
 		return -EOPNOTSUPP;
@@ -3766,7 +3746,7 @@
 		return -EFAULT;
 	ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
 	if (!ureq) {
-		QETH_DBF_TEXT(TRACE, 2, "snmpnome");
+		QETH_CARD_TEXT(card, 2, "snmpnome");
 		return -ENOMEM;
 	}
 	if (copy_from_user(ureq, udata,
@@ -4120,13 +4100,8 @@
 		skb_len -= data_len;
 		if (skb_len) {
 			if (qeth_is_last_sbale(element)) {
-				QETH_DBF_TEXT(TRACE, 4, "unexeob");
-				QETH_DBF_TEXT_(TRACE, 4, "%s",
-					CARD_BUS_ID(card));
-				QETH_DBF_TEXT(QERR, 2, "unexeob");
-				QETH_DBF_TEXT_(QERR, 2, "%s",
-					CARD_BUS_ID(card));
-				QETH_DBF_HEX(MISC, 4, buffer, sizeof(*buffer));
+				QETH_CARD_TEXT(card, 4, "unexeob");
+				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
 				dev_kfree_skb_any(skb);
 				card->stats.rx_errors++;
 				return NULL;
@@ -4147,8 +4122,7 @@
 	return skb;
 no_mem:
 	if (net_ratelimit()) {
-		QETH_DBF_TEXT(TRACE, 2, "noskbmem");
-		QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+		QETH_CARD_TEXT(card, 2, "noskbmem");
 	}
 	card->stats.rx_dropped++;
 	return NULL;
@@ -4164,17 +4138,17 @@
 	}
 }
 
-void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...)
+void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
 {
 	char dbf_txt_buf[32];
 	va_list args;
 
-	if (level > (qeth_dbf[dbf_nix].id)->level)
+	if (level > id->level)
 		return;
 	va_start(args, fmt);
 	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
 	va_end(args);
-	debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf);
+	debug_text_event(id, level, dbf_txt_buf);
 }
 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
 
@@ -4282,6 +4256,7 @@
 	struct device *dev;
 	int rc;
 	unsigned long flags;
+	char dbf_name[20];
 
 	QETH_DBF_TEXT(SETUP, 2, "probedev");
 
@@ -4297,6 +4272,17 @@
 		rc = -ENOMEM;
 		goto err_dev;
 	}
+
+	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
+		dev_name(&gdev->dev));
+	card->debug = debug_register(dbf_name, 2, 1, 8);
+	if (!card->debug) {
+		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
+		rc = -ENOMEM;
+		goto err_card;
+	}
+	debug_register_view(card->debug, &debug_hex_ascii_view);
+
 	card->read.ccwdev  = gdev->cdev[0];
 	card->write.ccwdev = gdev->cdev[1];
 	card->data.ccwdev  = gdev->cdev[2];
@@ -4309,12 +4295,12 @@
 	rc = qeth_determine_card_type(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
-		goto err_card;
+		goto err_dbf;
 	}
 	rc = qeth_setup_card(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
-		goto err_card;
+		goto err_dbf;
 	}
 
 	if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -4322,7 +4308,7 @@
 	else
 		rc = qeth_core_create_device_attributes(dev);
 	if (rc)
-		goto err_card;
+		goto err_dbf;
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_OSN:
 	case QETH_CARD_TYPE_OSM:
@@ -4352,6 +4338,8 @@
 		qeth_core_remove_osn_attributes(dev);
 	else
 		qeth_core_remove_device_attributes(dev);
+err_dbf:
+	debug_unregister(card->debug);
 err_card:
 	qeth_core_free_card(card);
 err_dev:
@@ -4375,6 +4363,7 @@
 	} else {
 		qeth_core_remove_device_attributes(&gdev->dev);
 	}
+	debug_unregister(card->debug);
 	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
 	list_del(&card->list);
 	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index f9ed24d..e37dd8c 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -616,8 +616,9 @@
 #define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
 #define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
 #define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
-#define QETH_IDX_ACT_ERR_EXCL	0x19
-#define QETH_IDX_ACT_ERR_AUTH	0x1E
+#define QETH_IDX_ACT_ERR_EXCL		0x19
+#define QETH_IDX_ACT_ERR_AUTH		0x1E
+#define QETH_IDX_ACT_ERR_AUTH_USER	0x20
 
 #define PDU_ENCAPSULATION(buffer) \
 	(buffer + *(buffer + (*(buffer + 0x0b)) + \
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d43f57a..32d07c2 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -79,7 +79,7 @@
 		rc = -EOPNOTSUPP;
 	}
 	if (rc)
-		QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
+		QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
 	return rc;
 }
 
@@ -130,7 +130,7 @@
 	struct qeth_ipa_cmd *cmd;
 	__u8 *mac;
 
-	QETH_DBF_TEXT(TRACE, 2, "L2Sgmacb");
+	QETH_CARD_TEXT(card, 2, "L2Sgmacb");
 	cmd = (struct qeth_ipa_cmd *) data;
 	mac = &cmd->data.setdelmac.mac[0];
 	/* MAC already registered, needed in couple/uncouple case */
@@ -147,7 +147,7 @@
 
 static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
 {
-	QETH_DBF_TEXT(TRACE, 2, "L2Sgmac");
+	QETH_CARD_TEXT(card, 2, "L2Sgmac");
 	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
 					  qeth_l2_send_setgroupmac_cb);
 }
@@ -159,7 +159,7 @@
 	struct qeth_ipa_cmd *cmd;
 	__u8 *mac;
 
-	QETH_DBF_TEXT(TRACE, 2, "L2Dgmacb");
+	QETH_CARD_TEXT(card, 2, "L2Dgmacb");
 	cmd = (struct qeth_ipa_cmd *) data;
 	mac = &cmd->data.setdelmac.mac[0];
 	if (cmd->hdr.return_code)
@@ -170,7 +170,7 @@
 
 static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
 {
-	QETH_DBF_TEXT(TRACE, 2, "L2Dgmac");
+	QETH_CARD_TEXT(card, 2, "L2Dgmac");
 	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
 					  qeth_l2_send_delgroupmac_cb);
 }
@@ -262,15 +262,14 @@
 {
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 2, "L2sdvcb");
+	QETH_CARD_TEXT(card, 2, "L2sdvcb");
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (cmd->hdr.return_code) {
 		QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. "
 			  "Continuing\n", cmd->data.setdelvlan.vlan_id,
 			  QETH_CARD_IFNAME(card), cmd->hdr.return_code);
-		QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command);
-		QETH_DBF_TEXT_(TRACE, 2, "L2%s", CARD_BUS_ID(card));
-		QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
+		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
 	}
 	return 0;
 }
@@ -281,7 +280,7 @@
 	struct qeth_ipa_cmd *cmd;
 	struct qeth_cmd_buffer *iob;
 
-	QETH_DBF_TEXT_(TRACE, 4, "L2sdv%x", ipacmd);
+	QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
 	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	cmd->data.setdelvlan.vlan_id = i;
@@ -292,7 +291,7 @@
 static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
 {
 	struct qeth_vlan_vid *id;
-	QETH_DBF_TEXT(TRACE, 3, "L2prcvln");
+	QETH_CARD_TEXT(card, 3, "L2prcvln");
 	spin_lock_bh(&card->vlanlock);
 	list_for_each_entry(id, &card->vid_list, list) {
 		if (clear)
@@ -310,13 +309,13 @@
 	struct qeth_card *card = dev->ml_priv;
 	struct qeth_vlan_vid *id;
 
-	QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
+	QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
 	if (card->info.type == QETH_CARD_TYPE_OSM) {
-		QETH_DBF_TEXT(TRACE, 3, "aidOSM");
+		QETH_CARD_TEXT(card, 3, "aidOSM");
 		return;
 	}
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
-		QETH_DBF_TEXT(TRACE, 3, "aidREC");
+		QETH_CARD_TEXT(card, 3, "aidREC");
 		return;
 	}
 	id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
@@ -334,13 +333,13 @@
 	struct qeth_vlan_vid *id, *tmpid = NULL;
 	struct qeth_card *card = dev->ml_priv;
 
-	QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
 	if (card->info.type == QETH_CARD_TYPE_OSM) {
-		QETH_DBF_TEXT(TRACE, 3, "kidOSM");
+		QETH_CARD_TEXT(card, 3, "kidOSM");
 		return;
 	}
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
-		QETH_DBF_TEXT(TRACE, 3, "kidREC");
+		QETH_CARD_TEXT(card, 3, "kidREC");
 		return;
 	}
 	spin_lock_bh(&card->vlanlock);
@@ -456,7 +455,7 @@
 			/* else unknown */
 		default:
 			dev_kfree_skb_any(skb);
-			QETH_DBF_TEXT(TRACE, 3, "inbunkno");
+			QETH_CARD_TEXT(card, 3, "inbunkno");
 			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
 			continue;
 		}
@@ -474,7 +473,7 @@
 	struct qeth_ipa_cmd *cmd;
 	struct qeth_cmd_buffer *iob;
 
-	QETH_DBF_TEXT(TRACE, 2, "L2sdmac");
+	QETH_CARD_TEXT(card, 2, "L2sdmac");
 	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
@@ -488,10 +487,10 @@
 {
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 2, "L2Smaccb");
+	QETH_CARD_TEXT(card, 2, "L2Smaccb");
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (cmd->hdr.return_code) {
-		QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
 		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
 		switch (cmd->hdr.return_code) {
 		case IPA_RC_L2_DUP_MAC:
@@ -523,7 +522,7 @@
 
 static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 {
-	QETH_DBF_TEXT(TRACE, 2, "L2Setmac");
+	QETH_CARD_TEXT(card, 2, "L2Setmac");
 	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
 					  qeth_l2_send_setmac_cb);
 }
@@ -534,10 +533,10 @@
 {
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 2, "L2Dmaccb");
+	QETH_CARD_TEXT(card, 2, "L2Dmaccb");
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (cmd->hdr.return_code) {
-		QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
 		cmd->hdr.return_code = -EIO;
 		return 0;
 	}
@@ -548,7 +547,7 @@
 
 static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
 {
-	QETH_DBF_TEXT(TRACE, 2, "L2Delmac");
+	QETH_CARD_TEXT(card, 2, "L2Delmac");
 	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
 		return 0;
 	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
@@ -594,23 +593,22 @@
 	struct qeth_card *card = dev->ml_priv;
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 3, "setmac");
+	QETH_CARD_TEXT(card, 3, "setmac");
 
 	if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
-		QETH_DBF_TEXT(TRACE, 3, "setmcINV");
+		QETH_CARD_TEXT(card, 3, "setmcINV");
 		return -EOPNOTSUPP;
 	}
 
 	if (card->info.type == QETH_CARD_TYPE_OSN ||
 	    card->info.type == QETH_CARD_TYPE_OSM ||
 	    card->info.type == QETH_CARD_TYPE_OSX) {
-		QETH_DBF_TEXT(TRACE, 3, "setmcTYP");
+		QETH_CARD_TEXT(card, 3, "setmcTYP");
 		return -EOPNOTSUPP;
 	}
-	QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card));
-	QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN);
+	QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
-		QETH_DBF_TEXT(TRACE, 3, "setmcREC");
+		QETH_CARD_TEXT(card, 3, "setmcREC");
 		return -ERESTARTSYS;
 	}
 	rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
@@ -627,7 +625,7 @@
 	if (card->info.type == QETH_CARD_TYPE_OSN)
 		return ;
 
-	QETH_DBF_TEXT(TRACE, 3, "setmulti");
+	QETH_CARD_TEXT(card, 3, "setmulti");
 	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
 	    (card->state != CARD_STATE_UP))
 		return;
@@ -714,10 +712,13 @@
 		goto tx_drop;
 	}
 
-	if (card->info.type != QETH_CARD_TYPE_IQD)
+	if (card->info.type != QETH_CARD_TYPE_IQD) {
+		if (qeth_hdr_chk_and_bounce(new_skb,
+		    sizeof(struct qeth_hdr_layer2)))
+			goto tx_drop;
 		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
 					 elements);
-	else
+	} else
 		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
 					elements, data_offset, hd_len);
 	if (!rc) {
@@ -771,11 +772,10 @@
 		card->perf_stats.inbound_start_time = qeth_get_micros();
 	}
 	if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
-		QETH_DBF_TEXT(TRACE, 1, "qdinchk");
-		QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
-		QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
+		QETH_CARD_TEXT(card, 1, "qdinchk");
+		QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element,
 				count);
-		QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
+		QETH_CARD_TEXT_(card, 1, "%04X", queue);
 		qeth_schedule_recovery(card);
 		return;
 	}
@@ -799,13 +799,13 @@
 {
 	struct qeth_card *card = dev->ml_priv;
 
-	QETH_DBF_TEXT(TRACE, 4, "qethopen");
+	QETH_CARD_TEXT(card, 4, "qethopen");
 	if (card->state != CARD_STATE_SOFTSETUP)
 		return -ENODEV;
 
 	if ((card->info.type != QETH_CARD_TYPE_OSN) &&
 	     (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
-		QETH_DBF_TEXT(TRACE, 4, "nomacadr");
+		QETH_CARD_TEXT(card, 4, "nomacadr");
 		return -EPERM;
 	}
 	card->data.state = CH_STATE_UP;
@@ -822,7 +822,7 @@
 {
 	struct qeth_card *card = dev->ml_priv;
 
-	QETH_DBF_TEXT(TRACE, 4, "qethstop");
+	QETH_CARD_TEXT(card, 4, "qethstop");
 	netif_tx_disable(dev);
 	if (card->state == CARD_STATE_UP)
 		card->state = CARD_STATE_SOFTSETUP;
@@ -1074,11 +1074,10 @@
 	int rc = 0;
 
 	card = (struct qeth_card *) ptr;
-	QETH_DBF_TEXT(TRACE, 2, "recover1");
-	QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+	QETH_CARD_TEXT(card, 2, "recover1");
 	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
 		return 0;
-	QETH_DBF_TEXT(TRACE, 2, "recover2");
+	QETH_CARD_TEXT(card, 2, "recover2");
 	dev_warn(&card->gdev->dev,
 		"A recovery process has been started for the device\n");
 	card->use_hard_stop = 1;
@@ -1181,12 +1180,12 @@
 	unsigned long flags;
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 5, "osndctrd");
+	QETH_CARD_TEXT(card, 5, "osndctrd");
 
 	wait_event(card->wait_q,
 		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
 	qeth_prepare_control_data(card, len, iob);
-	QETH_DBF_TEXT(TRACE, 6, "osnoirqp");
+	QETH_CARD_TEXT(card, 6, "osnoirqp");
 	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
 	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
 			      (addr_t) iob, 0, 0);
@@ -1194,7 +1193,7 @@
 	if (rc) {
 		QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
 			   "ccw_device_start rc = %i\n", rc);
-		QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
+		QETH_CARD_TEXT_(card, 2, " err%d", rc);
 		qeth_release_buffer(iob->channel, iob);
 		atomic_set(&card->write.irq_pending, 0);
 		wake_up(&card->wait_q);
@@ -1207,7 +1206,7 @@
 {
 	u16 s1, s2;
 
-	QETH_DBF_TEXT(TRACE, 4, "osndipa");
+	QETH_CARD_TEXT(card, 4, "osndipa");
 
 	qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
 	s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
@@ -1225,12 +1224,12 @@
 	struct qeth_card *card;
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 2, "osnsdmc");
 	if (!dev)
 		return -ENODEV;
 	card = dev->ml_priv;
 	if (!card)
 		return -ENODEV;
+	QETH_CARD_TEXT(card, 2, "osnsdmc");
 	if ((card->state != CARD_STATE_UP) &&
 	    (card->state != CARD_STATE_SOFTSETUP))
 		return -ENODEV;
@@ -1247,13 +1246,13 @@
 {
 	struct qeth_card *card;
 
-	QETH_DBF_TEXT(TRACE, 2, "osnreg");
 	*dev = qeth_l2_netdev_by_devno(read_dev_no);
 	if (*dev == NULL)
 		return -ENODEV;
 	card = (*dev)->ml_priv;
 	if (!card)
 		return -ENODEV;
+	QETH_CARD_TEXT(card, 2, "osnreg");
 	if ((assist_cb == NULL) || (data_cb == NULL))
 		return -EINVAL;
 	card->osn_info.assist_cb = assist_cb;
@@ -1266,12 +1265,12 @@
 {
 	struct qeth_card *card;
 
-	QETH_DBF_TEXT(TRACE, 2, "osndereg");
 	if (!dev)
 		return;
 	card = dev->ml_priv;
 	if (!card)
 		return;
+	QETH_CARD_TEXT(card, 2, "osndereg");
 	card->osn_info.assist_cb = NULL;
 	card->osn_info.data_cb = NULL;
 	return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 61adae2..61d348e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -287,7 +287,7 @@
 				addr->users += add ? 1 : -1;
 			if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
 			    qeth_l3_is_addr_covered_by_ipato(card, addr)) {
-				QETH_DBF_TEXT(TRACE, 2, "tkovaddr");
+				QETH_CARD_TEXT(card, 2, "tkovaddr");
 				addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
 			}
 			list_add_tail(&addr->entry, card->ip_tbd_list);
@@ -301,13 +301,13 @@
 	unsigned long flags;
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 4, "delip");
+	QETH_CARD_TEXT(card, 4, "delip");
 
 	if (addr->proto == QETH_PROT_IPV4)
-		QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
+		QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
 	else {
-		QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
-		QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+		QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
 	}
 	spin_lock_irqsave(&card->ip_lock, flags);
 	rc = __qeth_l3_insert_ip_todo(card, addr, 0);
@@ -320,12 +320,12 @@
 	unsigned long flags;
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 4, "addip");
+	QETH_CARD_TEXT(card, 4, "addip");
 	if (addr->proto == QETH_PROT_IPV4)
-		QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
+		QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
 	else {
-		QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
-		QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+		QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
 	}
 	spin_lock_irqsave(&card->ip_lock, flags);
 	rc = __qeth_l3_insert_ip_todo(card, addr, 1);
@@ -353,10 +353,10 @@
 	struct qeth_ipaddr *iptodo;
 	unsigned long flags;
 
-	QETH_DBF_TEXT(TRACE, 4, "delmc");
+	QETH_CARD_TEXT(card, 4, "delmc");
 	iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
 	if (!iptodo) {
-		QETH_DBF_TEXT(TRACE, 2, "dmcnomem");
+		QETH_CARD_TEXT(card, 2, "dmcnomem");
 		return;
 	}
 	iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
@@ -457,8 +457,8 @@
 	unsigned long flags;
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 2, "sdiplist");
-	QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+	QETH_CARD_TEXT(card, 2, "sdiplist");
+	QETH_CARD_HEX(card, 2, &card, sizeof(void *));
 
 	if (card->options.sniffer)
 		return;
@@ -466,7 +466,7 @@
 	tbd_list = card->ip_tbd_list;
 	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
 	if (!card->ip_tbd_list) {
-		QETH_DBF_TEXT(TRACE, 0, "silnomem");
+		QETH_CARD_TEXT(card, 0, "silnomem");
 		card->ip_tbd_list = tbd_list;
 		spin_unlock_irqrestore(&card->ip_lock, flags);
 		return;
@@ -517,7 +517,7 @@
 	struct qeth_ipaddr *addr, *tmp;
 	unsigned long flags;
 
-	QETH_DBF_TEXT(TRACE, 4, "clearip");
+	QETH_CARD_TEXT(card, 4, "clearip");
 	if (recover && card->options.sniffer)
 		return;
 	spin_lock_irqsave(&card->ip_lock, flags);
@@ -577,7 +577,7 @@
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 4, "setdelmc");
+	QETH_CARD_TEXT(card, 4, "setdelmc");
 
 	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -615,8 +615,8 @@
 	struct qeth_ipa_cmd *cmd;
 	__u8 netmask[16];
 
-	QETH_DBF_TEXT(TRACE, 4, "setdelip");
-	QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags);
+	QETH_CARD_TEXT(card, 4, "setdelip");
+	QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
 
 	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -645,7 +645,7 @@
 	struct qeth_ipa_cmd *cmd;
 	struct qeth_cmd_buffer *iob;
 
-	QETH_DBF_TEXT(TRACE, 4, "setroutg");
+	QETH_CARD_TEXT(card, 4, "setroutg");
 	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	cmd->data.setrtg.type = (type);
@@ -689,7 +689,7 @@
 {
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "setrtg4");
+	QETH_CARD_TEXT(card, 3, "setrtg4");
 
 	qeth_l3_correct_routing_type(card, &card->options.route4.type,
 				  QETH_PROT_IPV4);
@@ -709,7 +709,7 @@
 {
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 3, "setrtg6");
+	QETH_CARD_TEXT(card, 3, "setrtg6");
 #ifdef CONFIG_QETH_IPV6
 
 	if (!qeth_is_supported(card, IPA_IPV6))
@@ -753,7 +753,7 @@
 	unsigned long flags;
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 2, "addipato");
+	QETH_CARD_TEXT(card, 2, "addipato");
 	spin_lock_irqsave(&card->ip_lock, flags);
 	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
 		if (ipatoe->proto != new->proto)
@@ -778,7 +778,7 @@
 	struct qeth_ipato_entry *ipatoe, *tmp;
 	unsigned long flags;
 
-	QETH_DBF_TEXT(TRACE, 2, "delipato");
+	QETH_CARD_TEXT(card, 2, "delipato");
 	spin_lock_irqsave(&card->ip_lock, flags);
 	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
 		if (ipatoe->proto != proto)
@@ -806,11 +806,11 @@
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
 		if (proto == QETH_PROT_IPV4) {
-			QETH_DBF_TEXT(TRACE, 2, "addvipa4");
+			QETH_CARD_TEXT(card, 2, "addvipa4");
 			memcpy(&ipaddr->u.a4.addr, addr, 4);
 			ipaddr->u.a4.mask = 0;
 		} else if (proto == QETH_PROT_IPV6) {
-			QETH_DBF_TEXT(TRACE, 2, "addvipa6");
+			QETH_CARD_TEXT(card, 2, "addvipa6");
 			memcpy(&ipaddr->u.a6.addr, addr, 16);
 			ipaddr->u.a6.pfxlen = 0;
 		}
@@ -841,11 +841,11 @@
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
 		if (proto == QETH_PROT_IPV4) {
-			QETH_DBF_TEXT(TRACE, 2, "delvipa4");
+			QETH_CARD_TEXT(card, 2, "delvipa4");
 			memcpy(&ipaddr->u.a4.addr, addr, 4);
 			ipaddr->u.a4.mask = 0;
 		} else if (proto == QETH_PROT_IPV6) {
-			QETH_DBF_TEXT(TRACE, 2, "delvipa6");
+			QETH_CARD_TEXT(card, 2, "delvipa6");
 			memcpy(&ipaddr->u.a6.addr, addr, 16);
 			ipaddr->u.a6.pfxlen = 0;
 		}
@@ -870,11 +870,11 @@
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
 		if (proto == QETH_PROT_IPV4) {
-			QETH_DBF_TEXT(TRACE, 2, "addrxip4");
+			QETH_CARD_TEXT(card, 2, "addrxip4");
 			memcpy(&ipaddr->u.a4.addr, addr, 4);
 			ipaddr->u.a4.mask = 0;
 		} else if (proto == QETH_PROT_IPV6) {
-			QETH_DBF_TEXT(TRACE, 2, "addrxip6");
+			QETH_CARD_TEXT(card, 2, "addrxip6");
 			memcpy(&ipaddr->u.a6.addr, addr, 16);
 			ipaddr->u.a6.pfxlen = 0;
 		}
@@ -905,11 +905,11 @@
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
 		if (proto == QETH_PROT_IPV4) {
-			QETH_DBF_TEXT(TRACE, 2, "addrxip4");
+			QETH_CARD_TEXT(card, 2, "addrxip4");
 			memcpy(&ipaddr->u.a4.addr, addr, 4);
 			ipaddr->u.a4.mask = 0;
 		} else if (proto == QETH_PROT_IPV6) {
-			QETH_DBF_TEXT(TRACE, 2, "addrxip6");
+			QETH_CARD_TEXT(card, 2, "addrxip6");
 			memcpy(&ipaddr->u.a6.addr, addr, 16);
 			ipaddr->u.a6.pfxlen = 0;
 		}
@@ -929,15 +929,15 @@
 	int cnt = 3;
 
 	if (addr->proto == QETH_PROT_IPV4) {
-		QETH_DBF_TEXT(TRACE, 2, "setaddr4");
-		QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
+		QETH_CARD_TEXT(card, 2, "setaddr4");
+		QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
 	} else if (addr->proto == QETH_PROT_IPV6) {
-		QETH_DBF_TEXT(TRACE, 2, "setaddr6");
-		QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
-		QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+		QETH_CARD_TEXT(card, 2, "setaddr6");
+		QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
 	} else {
-		QETH_DBF_TEXT(TRACE, 2, "setaddr?");
-		QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
+		QETH_CARD_TEXT(card, 2, "setaddr?");
+		QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
 	}
 	do {
 		if (addr->is_multicast)
@@ -946,10 +946,10 @@
 			rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP,
 					addr->set_flags);
 		if (rc)
-			QETH_DBF_TEXT(TRACE, 2, "failed");
+			QETH_CARD_TEXT(card, 2, "failed");
 	} while ((--cnt > 0) && rc);
 	if (rc) {
-		QETH_DBF_TEXT(TRACE, 2, "FAILED");
+		QETH_CARD_TEXT(card, 2, "FAILED");
 		qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
 		dev_warn(&card->gdev->dev,
 			"Registering IP address %s failed\n", buf);
@@ -963,15 +963,15 @@
 	int rc = 0;
 
 	if (addr->proto == QETH_PROT_IPV4) {
-		QETH_DBF_TEXT(TRACE, 2, "deladdr4");
-		QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
+		QETH_CARD_TEXT(card, 2, "deladdr4");
+		QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
 	} else if (addr->proto == QETH_PROT_IPV6) {
-		QETH_DBF_TEXT(TRACE, 2, "deladdr6");
-		QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
-		QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+		QETH_CARD_TEXT(card, 2, "deladdr6");
+		QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
 	} else {
-		QETH_DBF_TEXT(TRACE, 2, "deladdr?");
-		QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
+		QETH_CARD_TEXT(card, 2, "deladdr?");
+		QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
 	}
 	if (addr->is_multicast)
 		rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
@@ -979,7 +979,7 @@
 		rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
 					addr->del_flags);
 	if (rc)
-		QETH_DBF_TEXT(TRACE, 2, "failed");
+		QETH_CARD_TEXT(card, 2, "failed");
 
 	return rc;
 }
@@ -1012,7 +1012,7 @@
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 4, "adpmode");
+	QETH_CARD_TEXT(card, 4, "adpmode");
 
 	iob = qeth_get_adapter_cmd(card, command,
 				   sizeof(struct qeth_ipacmd_setadpparms));
@@ -1027,7 +1027,7 @@
 {
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 4, "adphstr");
+	QETH_CARD_TEXT(card, 4, "adphstr");
 
 	if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
 		rc = qeth_l3_send_setadp_mode(card,
@@ -1093,7 +1093,7 @@
 {
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 4, "defadpcb");
+	QETH_CARD_TEXT(card, 4, "defadpcb");
 
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (cmd->hdr.return_code == 0) {
@@ -1106,13 +1106,13 @@
 	if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
 	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
 		card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
-		QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask);
+		QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
 	}
 	if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
 	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
 		card->info.tx_csum_mask =
 			cmd->data.setassparms.data.flags_32bit;
-		QETH_DBF_TEXT_(TRACE, 3, "tcsu:%d", card->info.tx_csum_mask);
+		QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
 	}
 
 	return 0;
@@ -1125,7 +1125,7 @@
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 4, "getasscm");
+	QETH_CARD_TEXT(card, 4, "getasscm");
 	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
 
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1147,7 +1147,7 @@
 	int rc;
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 4, "sendassp");
+	QETH_CARD_TEXT(card, 4, "sendassp");
 
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	if (len <= sizeof(__u32))
@@ -1166,7 +1166,7 @@
 	int rc;
 	struct qeth_cmd_buffer *iob;
 
-	QETH_DBF_TEXT(TRACE, 4, "simassp6");
+	QETH_CARD_TEXT(card, 4, "simassp6");
 	iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
 				       0, QETH_PROT_IPV6);
 	rc = qeth_l3_send_setassparms(card, iob, 0, 0,
@@ -1182,7 +1182,7 @@
 	int length = 0;
 	struct qeth_cmd_buffer *iob;
 
-	QETH_DBF_TEXT(TRACE, 4, "simassp4");
+	QETH_CARD_TEXT(card, 4, "simassp4");
 	if (data)
 		length = sizeof(__u32);
 	iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
@@ -1196,7 +1196,7 @@
 {
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "ipaarp");
+	QETH_CARD_TEXT(card, 3, "ipaarp");
 
 	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
 		dev_info(&card->gdev->dev,
@@ -1218,7 +1218,7 @@
 {
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "ipaipfrg");
+	QETH_CARD_TEXT(card, 3, "ipaipfrg");
 
 	if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
 		dev_info(&card->gdev->dev,
@@ -1243,7 +1243,7 @@
 {
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "stsrcmac");
+	QETH_CARD_TEXT(card, 3, "stsrcmac");
 
 	if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
 		dev_info(&card->gdev->dev,
@@ -1265,7 +1265,7 @@
 {
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 3, "strtvlan");
+	QETH_CARD_TEXT(card, 3, "strtvlan");
 
 	if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
 		dev_info(&card->gdev->dev,
@@ -1289,7 +1289,7 @@
 {
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "stmcast");
+	QETH_CARD_TEXT(card, 3, "stmcast");
 
 	if (!qeth_is_supported(card, IPA_MULTICASTING)) {
 		dev_info(&card->gdev->dev,
@@ -1349,7 +1349,7 @@
 {
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "softipv6");
+	QETH_CARD_TEXT(card, 3, "softipv6");
 
 	if (card->info.type == QETH_CARD_TYPE_IQD)
 		goto out;
@@ -1395,7 +1395,7 @@
 {
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 3, "strtipv6");
+	QETH_CARD_TEXT(card, 3, "strtipv6");
 
 	if (!qeth_is_supported(card, IPA_IPV6)) {
 		dev_info(&card->gdev->dev,
@@ -1412,7 +1412,7 @@
 {
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "stbrdcst");
+	QETH_CARD_TEXT(card, 3, "stbrdcst");
 	card->info.broadcast_capable = 0;
 	if (!qeth_is_supported(card, IPA_FILTERING)) {
 		dev_info(&card->gdev->dev,
@@ -1512,7 +1512,7 @@
 {
 	int rc = 0;
 
-	QETH_DBF_TEXT(TRACE, 3, "strtcsum");
+	QETH_CARD_TEXT(card, 3, "strtcsum");
 
 	if (card->options.checksum_type == NO_CHECKSUMMING) {
 		dev_info(&card->gdev->dev,
@@ -1569,7 +1569,7 @@
 {
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "sttso");
+	QETH_CARD_TEXT(card, 3, "sttso");
 
 	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
 		dev_info(&card->gdev->dev,
@@ -1596,7 +1596,7 @@
 
 static int qeth_l3_start_ipassists(struct qeth_card *card)
 {
-	QETH_DBF_TEXT(TRACE, 3, "strtipas");
+	QETH_CARD_TEXT(card, 3, "strtipas");
 
 	qeth_set_access_ctrl_online(card);	/* go on*/
 	qeth_l3_start_ipa_arp_processing(card);	/* go on*/
@@ -1619,7 +1619,7 @@
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
 
-	QETH_DBF_TEXT(TRACE, 2, "puniqeid");
+	QETH_CARD_TEXT(card, 2, "puniqeid");
 
 	if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
 		UNIQUE_ID_NOT_BY_CARD)
@@ -1723,7 +1723,7 @@
 	cmd = (struct qeth_ipa_cmd *)data;
 	rc = cmd->hdr.return_code;
 	if (rc)
-		QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc);
+		QETH_CARD_TEXT_(card, 2, "dxter%x", rc);
 	switch (cmd->data.diagass.action) {
 	case QETH_DIAGS_CMD_TRACE_QUERY:
 		break;
@@ -1800,7 +1800,7 @@
 	struct ip_mc_list *im4;
 	char buf[MAX_ADDR_LEN];
 
-	QETH_DBF_TEXT(TRACE, 4, "addmc");
+	QETH_CARD_TEXT(card, 4, "addmc");
 	for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
 		qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
 		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1820,7 +1820,7 @@
 	struct vlan_group *vg;
 	int i;
 
-	QETH_DBF_TEXT(TRACE, 4, "addmcvl");
+	QETH_CARD_TEXT(card, 4, "addmcvl");
 	if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
 		return;
 
@@ -1844,7 +1844,7 @@
 {
 	struct in_device *in4_dev;
 
-	QETH_DBF_TEXT(TRACE, 4, "chkmcv4");
+	QETH_CARD_TEXT(card, 4, "chkmcv4");
 	in4_dev = in_dev_get(card->dev);
 	if (in4_dev == NULL)
 		return;
@@ -1862,7 +1862,7 @@
 	struct ifmcaddr6 *im6;
 	char buf[MAX_ADDR_LEN];
 
-	QETH_DBF_TEXT(TRACE, 4, "addmc6");
+	QETH_CARD_TEXT(card, 4, "addmc6");
 	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
 		ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
 		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
@@ -1883,7 +1883,7 @@
 	struct vlan_group *vg;
 	int i;
 
-	QETH_DBF_TEXT(TRACE, 4, "admc6vl");
+	QETH_CARD_TEXT(card, 4, "admc6vl");
 	if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
 		return;
 
@@ -1907,7 +1907,7 @@
 {
 	struct inet6_dev *in6_dev;
 
-	QETH_DBF_TEXT(TRACE, 4, "chkmcv6");
+	QETH_CARD_TEXT(card, 4, "chkmcv6");
 	if (!qeth_is_supported(card, IPA_IPV6))
 		return ;
 	in6_dev = in6_dev_get(card->dev);
@@ -1928,7 +1928,7 @@
 	struct in_ifaddr *ifa;
 	struct qeth_ipaddr *addr;
 
-	QETH_DBF_TEXT(TRACE, 4, "frvaddr4");
+	QETH_CARD_TEXT(card, 4, "frvaddr4");
 
 	in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid));
 	if (!in_dev)
@@ -1954,7 +1954,7 @@
 	struct inet6_ifaddr *ifa;
 	struct qeth_ipaddr *addr;
 
-	QETH_DBF_TEXT(TRACE, 4, "frvaddr6");
+	QETH_CARD_TEXT(card, 4, "frvaddr6");
 
 	in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
 	if (!in6_dev)
@@ -1989,7 +1989,7 @@
 	struct qeth_card *card = dev->ml_priv;
 	unsigned long flags;
 
-	QETH_DBF_TEXT(TRACE, 4, "vlanreg");
+	QETH_CARD_TEXT(card, 4, "vlanreg");
 	spin_lock_irqsave(&card->vlanlock, flags);
 	card->vlangrp = grp;
 	spin_unlock_irqrestore(&card->vlanlock, flags);
@@ -2005,9 +2005,9 @@
 	struct qeth_card *card = dev->ml_priv;
 	unsigned long flags;
 
-	QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
-		QETH_DBF_TEXT(TRACE, 3, "kidREC");
+		QETH_CARD_TEXT(card, 3, "kidREC");
 		return;
 	}
 	spin_lock_irqsave(&card->vlanlock, flags);
@@ -2162,7 +2162,7 @@
 			break;
 		default:
 			dev_kfree_skb_any(skb);
-			QETH_DBF_TEXT(TRACE, 3, "inbunkno");
+			QETH_CARD_TEXT(card, 3, "inbunkno");
 			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
 			continue;
 		}
@@ -2229,7 +2229,8 @@
 		card = vlan_dev_real_dev(dev)->ml_priv;
 	if (card && card->options.layer2)
 		card = NULL;
-	QETH_DBF_TEXT_(TRACE, 4, "%d", rc);
+	if (card)
+		QETH_CARD_TEXT_(card, 4, "%d", rc);
 	return card ;
 }
 
@@ -2307,10 +2308,10 @@
 	} else if (card->options.sniffer &&	/* HiperSockets trace */
 		   qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
 		if (dev->flags & IFF_PROMISC) {
-			QETH_DBF_TEXT(TRACE, 3, "+promisc");
+			QETH_CARD_TEXT(card, 3, "+promisc");
 			qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
 		} else {
-			QETH_DBF_TEXT(TRACE, 3, "-promisc");
+			QETH_CARD_TEXT(card, 3, "-promisc");
 			qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
 		}
 	}
@@ -2320,7 +2321,7 @@
 {
 	struct qeth_card *card = dev->ml_priv;
 
-	QETH_DBF_TEXT(TRACE, 3, "setmulti");
+	QETH_CARD_TEXT(card, 3, "setmulti");
 	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
 	    (card->state != CARD_STATE_UP))
 		return;
@@ -2365,7 +2366,7 @@
 	int tmp;
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "arpstnoe");
+	QETH_CARD_TEXT(card, 3, "arpstnoe");
 
 	/*
 	 * currently GuestLAN only supports the ARP assist function
@@ -2417,17 +2418,17 @@
 	int uentry_size;
 	int i;
 
-	QETH_DBF_TEXT(TRACE, 4, "arpquecb");
+	QETH_CARD_TEXT(card, 4, "arpquecb");
 
 	qinfo = (struct qeth_arp_query_info *) reply->param;
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (cmd->hdr.return_code) {
-		QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 4, "qaer1%i", cmd->hdr.return_code);
 		return 0;
 	}
 	if (cmd->data.setassparms.hdr.return_code) {
 		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
-		QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 4, "qaer2%i", cmd->hdr.return_code);
 		return 0;
 	}
 	qdata = &cmd->data.setassparms.data.query_arp;
@@ -2449,14 +2450,14 @@
 	/* check if there is enough room in userspace */
 	if ((qinfo->udata_len - qinfo->udata_offset) <
 			qdata->no_entries * uentry_size){
-		QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM);
+		QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
 		cmd->hdr.return_code = -ENOMEM;
 		goto out_error;
 	}
-	QETH_DBF_TEXT_(TRACE, 4, "anore%i",
+	QETH_CARD_TEXT_(card, 4, "anore%i",
 		       cmd->data.setassparms.hdr.number_of_replies);
-	QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
-	QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries);
+	QETH_CARD_TEXT_(card, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
+	QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
 
 	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
 		/* strip off "media specific information" */
@@ -2492,7 +2493,7 @@
 			unsigned long),
 		void *reply_param)
 {
-	QETH_DBF_TEXT(TRACE, 4, "sendarp");
+	QETH_CARD_TEXT(card, 4, "sendarp");
 
 	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
 	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
@@ -2508,7 +2509,7 @@
 	int tmp;
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "arpquery");
+	QETH_CARD_TEXT(card, 3, "arpquery");
 
 	if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
 			       IPA_ARP_PROCESSING)) {
@@ -2551,7 +2552,7 @@
 	int tmp;
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "arpadent");
+	QETH_CARD_TEXT(card, 3, "arpadent");
 
 	/*
 	 * currently GuestLAN only supports the ARP assist function
@@ -2590,7 +2591,7 @@
 	int tmp;
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 3, "arprment");
+	QETH_CARD_TEXT(card, 3, "arprment");
 
 	/*
 	 * currently GuestLAN only supports the ARP assist function
@@ -2626,7 +2627,7 @@
 	int rc;
 	int tmp;
 
-	QETH_DBF_TEXT(TRACE, 3, "arpflush");
+	QETH_CARD_TEXT(card, 3, "arpflush");
 
 	/*
 	 * currently GuestLAN only supports the ARP assist function
@@ -2734,7 +2735,7 @@
 		rc = -EOPNOTSUPP;
 	}
 	if (rc)
-		QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
+		QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
 	return rc;
 }
 
@@ -2903,19 +2904,11 @@
 	unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
 		tcp_hdr(skb)->doff * 4;
 	int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
-	int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
+	int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
 	elements += skb_shinfo(skb)->nr_frags;
 	return elements;
 }
 
-static inline int qeth_l3_tso_check(struct sk_buff *skb)
-{
-	int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
-		(unsigned long)skb->data;
-	return (((unsigned long)skb->data & PAGE_MASK) !=
-		(((unsigned long)skb->data + len) & PAGE_MASK));
-}
-
 static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	int rc;
@@ -3015,8 +3008,6 @@
 	    (cast_type == RTN_UNSPEC)) {
 		hdr = (struct qeth_hdr *)skb_push(new_skb,
 						sizeof(struct qeth_hdr_tso));
-		if (qeth_l3_tso_check(new_skb))
-			QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
 		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
 		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
 		qeth_tso_fill_header(card, hdr, new_skb);
@@ -3047,10 +3038,20 @@
 	elements_needed += elems;
 	nr_frags = skb_shinfo(new_skb)->nr_frags;
 
-	if (card->info.type != QETH_CARD_TYPE_IQD)
+	if (card->info.type != QETH_CARD_TYPE_IQD) {
+		int len;
+		if (large_send == QETH_LARGE_SEND_TSO)
+			len = ((unsigned long)tcp_hdr(new_skb) +
+				tcp_hdr(new_skb)->doff * 4) -
+				(unsigned long)new_skb->data;
+		else
+			len = sizeof(struct qeth_hdr_layer3);
+
+		if (qeth_hdr_chk_and_bounce(new_skb, len))
+			goto tx_drop;
 		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
 					 elements_needed);
-	else
+	} else
 		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
 					elements_needed, data_offset, 0);
 
@@ -3103,7 +3104,7 @@
 {
 	struct qeth_card *card = dev->ml_priv;
 
-	QETH_DBF_TEXT(TRACE, 4, "qethopen");
+	QETH_CARD_TEXT(card, 4, "qethopen");
 	if (card->state != CARD_STATE_SOFTSETUP)
 		return -ENODEV;
 	card->data.state = CH_STATE_UP;
@@ -3119,7 +3120,7 @@
 {
 	struct qeth_card *card = dev->ml_priv;
 
-	QETH_DBF_TEXT(TRACE, 4, "qethstop");
+	QETH_CARD_TEXT(card, 4, "qethstop");
 	netif_tx_disable(dev);
 	if (card->state == CARD_STATE_UP)
 		card->state = CARD_STATE_SOFTSETUP;
@@ -3312,11 +3313,10 @@
 		card->perf_stats.inbound_start_time = qeth_get_micros();
 	}
 	if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
-		QETH_DBF_TEXT(TRACE, 1, "qdinchk");
-		QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
-		QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
+		QETH_CARD_TEXT(card, 1, "qdinchk");
+		QETH_CARD_TEXT_(card, 1, "%04X%04X",
 				first_element, count);
-		QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
+		QETH_CARD_TEXT_(card, 1, "%04X", queue);
 		qeth_schedule_recovery(card);
 		return;
 	}
@@ -3522,11 +3522,11 @@
 	int rc = 0;
 
 	card = (struct qeth_card *) ptr;
-	QETH_DBF_TEXT(TRACE, 2, "recover1");
-	QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+	QETH_CARD_TEXT(card, 2, "recover1");
+	QETH_CARD_HEX(card, 2, &card, sizeof(void *));
 	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
 		return 0;
-	QETH_DBF_TEXT(TRACE, 2, "recover2");
+	QETH_CARD_TEXT(card, 2, "recover2");
 	dev_warn(&card->gdev->dev,
 		"A recovery process has been started for the device\n");
 	card->use_hard_stop = 1;
@@ -3624,8 +3624,8 @@
 	if (dev_net(dev) != &init_net)
 		return NOTIFY_DONE;
 
-	QETH_DBF_TEXT(TRACE, 3, "ipevent");
 	card = qeth_l3_get_card_from_dev(dev);
 	if (!card)
 		return NOTIFY_DONE;
+	QETH_CARD_TEXT(card, 3, "ipevent");
 
@@ -3671,11 +3671,11 @@
 	struct qeth_ipaddr *addr;
 	struct qeth_card *card;
 
-	QETH_DBF_TEXT(TRACE, 3, "ip6event");
 
 	card = qeth_l3_get_card_from_dev(dev);
 	if (!card)
 		return NOTIFY_DONE;
+	QETH_CARD_TEXT(card, 3, "ip6event");
 	if (!qeth_is_supported(card, IPA_IPV6))
 		return NOTIFY_DONE;
 
@@ -3714,7 +3714,7 @@
 {
 	int rc;
 
-	QETH_DBF_TEXT(TRACE, 5, "regnotif");
+	QETH_DBF_TEXT(SETUP, 5, "regnotif");
 	rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
 	if (rc)
 		return rc;
@@ -3733,7 +3733,7 @@
 static void qeth_l3_unregister_notifiers(void)
 {
 
-	QETH_DBF_TEXT(TRACE, 5, "unregnot");
+	QETH_DBF_TEXT(SETUP, 5, "unregnot");
 	BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
 #ifdef CONFIG_QETH_IPV6
 	BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
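The qeth_l3 hunks above all make the same substitution: trace entries that used to go to the driver-global TRACE debug area via QETH_DBF_TEXT/QETH_DBF_HEX are redirected to a per-card area via QETH_CARD_TEXT/QETH_CARD_HEX, so a trace record can be attributed to the card that produced it. The snippet below is only a rough sketch of what such per-card helpers look like, assuming the card keeps its debug area in a field named card->debug; the real definitions live in the qeth headers and may differ.

#include <asm/debug.h>

/* Sketch only: per-card trace helpers built on the s390 debug facility.
 * The card->debug field name is an assumption made for illustration. */
#define CARD_DBF(card)		((card)->debug)

#define QETH_CARD_TEXT(card, level, text) \
	debug_text_event(CARD_DBF(card), level, text)

#define QETH_CARD_HEX(card, level, addr, len) \
	debug_event(CARD_DBF(card), level, (void *)(addr), len)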
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 7049127..65e1cf1 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -47,6 +47,7 @@
 
 static DEFINE_SPINLOCK(smsg_list_lock);
 static LIST_HEAD(smsg_list);
+static int iucv_path_connected;
 
 static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
 static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
@@ -142,8 +143,10 @@
 #ifdef CONFIG_PM_DEBUG
 	printk(KERN_WARNING "smsg_pm_freeze\n");
 #endif
-	if (smsg_path)
+	if (smsg_path && iucv_path_connected) {
 		iucv_path_sever(smsg_path, NULL);
+		iucv_path_connected = 0;
+	}
 	return 0;
 }
 
@@ -154,7 +157,7 @@
 #ifdef CONFIG_PM_DEBUG
 	printk(KERN_WARNING "smsg_pm_restore_thaw\n");
 #endif
-	if (smsg_path) {
+	if (smsg_path && iucv_path_connected) {
 		memset(smsg_path, 0, sizeof(*smsg_path));
 		smsg_path->msglim = 255;
 		smsg_path->flags = 0;
@@ -165,6 +168,8 @@
 			printk(KERN_ERR
 			       "iucv_path_connect returned with rc %i\n", rc);
 #endif
+		if (!rc)
+			iucv_path_connected = 1;
 		cpcmd("SET SMSG IUCV", NULL, 0, NULL);
 	}
 	return 0;
@@ -214,6 +219,8 @@
 			       NULL, NULL, NULL);
 	if (rc)
 		goto out_free_path;
+	else
+		iucv_path_connected = 1;
 	smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
 	if (!smsg_dev) {
 		rc = -ENOMEM;
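The smsgiucv change above adds an iucv_path_connected flag so that the suspend/resume callbacks only sever a path that was actually established and only reconnect after a real sever. A minimal sketch of that guard pattern, using hypothetical names rather than the driver's own:

#include <net/iucv/iucv.h>

static struct iucv_path *example_path;		/* hypothetical path */
static int example_path_connected;		/* 1 after a successful connect */

static int example_pm_freeze(void)
{
	/* Tear the path down only if it is really up, then remember that. */
	if (example_path && example_path_connected) {
		iucv_path_sever(example_path, NULL);
		example_path_connected = 0;
	}
	return 0;
}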
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index a175be9..3b6a06e 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1587,7 +1587,7 @@
 
 	err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
 	if (!err)
-		return (&rt->u.dst)->dev;
+		return (&rt->dst)->dev;
 
 	return NULL;
 }
@@ -1649,7 +1649,7 @@
 		c3cn->saddr.sin_addr.s_addr = rt->rt_src;
 
 	/* now commit destination to connection */
-	c3cn->dst_cache = &rt->u.dst;
+	c3cn->dst_cache = &rt->dst;
 
 	/* try to establish an offloaded connection */
 	dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
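Both cxgb3i hunks above follow the routing-cache layout change in which struct rtable embeds its dst_entry directly, so callers use rt->dst instead of the old rt->u.dst union member. A tiny sketch under that assumption:

#include <net/route.h>

/* Illustrative helper, not part of the patch: return the egress device of a
 * resolved route using the flattened rt->dst layout. */
static struct net_device *example_route_dev(struct rtable *rt)
{
	return rt->dst.dev;
}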
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 44a0759..1a429ed 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2653,6 +2653,7 @@
 	u32 lfc, vlfc, mdac;
 	struct fcoe_dev_stats *devst;
 	struct fcoe_fc_els_lesb *lesb;
+	struct rtnl_link_stats64 temp;
 	struct net_device *netdev = fcoe_netdev(lport);
 
 	lfc = 0;
@@ -2669,7 +2670,7 @@
 	lesb->lesb_link_fail = htonl(lfc);
 	lesb->lesb_vlink_fail = htonl(vlfc);
 	lesb->lesb_miss_fka = htonl(mdac);
-	lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors);
+	lesb->lesb_fcs_error = htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
 }
 
 /**
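The fcoe hunk above, like the batman-adv and rndis hunks further down, adapts to the dev_get_stats() interface change: the caller now passes a struct rtnl_link_stats64 for the core to fill and uses the returned pointer, instead of reading device-owned 32-bit counters. A short usage sketch, assuming that signature:

#include <linux/netdevice.h>

/* Illustrative only: read one 64-bit counter through the new convention. */
static u64 example_rx_crc_errors(struct net_device *netdev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(netdev, &temp);

	return stats->rx_crc_errors;
}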
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 59ae76b..7c031fd 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -209,6 +209,24 @@
 	}
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */
+static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc)
+{
+	struct ssb_bus *bus = cc->dev->bus;
+
+	switch (bus->chip_id) {
+	case 0x4312:
+	case 0x4322:
+	case 0x4328:
+		return 7000;
+	case 0x4325:
+		/* TODO: */
+	default:
+		return 15000;
+	}
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */
 static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
 {
 	struct ssb_bus *bus = cc->dev->bus;
@@ -218,6 +236,12 @@
 
 	if (bus->bustype != SSB_BUSTYPE_PCI)
 		return;
+
+	if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
+		cc->fast_pwrup_delay = pmu_fast_powerup_delay(cc);
+		return;
+	}
+
 	if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
 		return;
 
@@ -235,6 +259,7 @@
 		return; /* We don't have a ChipCommon */
 	if (cc->dev->id.revision >= 11)
 		cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
+	ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
 	ssb_pmu_init(cc);
 	chipco_powercontrol_init(cc);
 	ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 3d55124..5732bb2 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -502,9 +502,9 @@
 		chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
 void ssb_pmu_init(struct ssb_chipcommon *cc)
 {
-	struct ssb_bus *bus = cc->dev->bus;
 	u32 pmucap;
 
 	if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU))
@@ -516,15 +516,12 @@
 	ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
 		    cc->pmu.rev, pmucap);
 
-	if (cc->pmu.rev >= 1) {
-		if ((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) {
-			chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
-				      ~SSB_CHIPCO_PMU_CTL_NOILPONW);
-		} else {
-			chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
-				     SSB_CHIPCO_PMU_CTL_NOILPONW);
-		}
-	}
+	if (cc->pmu.rev == 1)
+		chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
+			      ~SSB_CHIPCO_PMU_CTL_NOILPONW);
+	else
+		chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
+			     SSB_CHIPCO_PMU_CTL_NOILPONW);
 	ssb_pmu_pll_init(cc);
 	ssb_pmu_resources_init(cc);
 }
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 51275aa..7cee7f4 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -486,6 +486,7 @@
 #ifdef CONFIG_SSB_PCIHOST
 			sdev->irq = bus->host_pci->irq;
 			dev->parent = &bus->host_pci->dev;
+			sdev->dma_dev = dev->parent;
 #endif
 			break;
 		case SSB_BUSTYPE_PCMCIA:
@@ -501,6 +502,7 @@
 			break;
 		case SSB_BUSTYPE_SSB:
 			dev->dma_mask = &dev->coherent_dma_mask;
+			sdev->dma_dev = dev;
 			break;
 		}
 
@@ -1226,80 +1228,6 @@
 }
 EXPORT_SYMBOL(ssb_dma_translation);
 
-int ssb_dma_set_mask(struct ssb_device *dev, u64 mask)
-{
-#ifdef CONFIG_SSB_PCIHOST
-	int err;
-#endif
-
-	switch (dev->bus->bustype) {
-	case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-		err = pci_set_dma_mask(dev->bus->host_pci, mask);
-		if (err)
-			return err;
-		err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask);
-		return err;
-#endif
-	case SSB_BUSTYPE_SSB:
-		return dma_set_mask(dev->dev, mask);
-	default:
-		__ssb_dma_not_implemented(dev);
-	}
-	return -ENOSYS;
-}
-EXPORT_SYMBOL(ssb_dma_set_mask);
-
-void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp_flags)
-{
-	switch (dev->bus->bustype) {
-	case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-		if (gfp_flags & GFP_DMA) {
-			/* Workaround: The PCI API does not support passing
-			 * a GFP flag. */
-			return dma_alloc_coherent(&dev->bus->host_pci->dev,
-						  size, dma_handle, gfp_flags);
-		}
-		return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle);
-#endif
-	case SSB_BUSTYPE_SSB:
-		return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags);
-	default:
-		__ssb_dma_not_implemented(dev);
-	}
-	return NULL;
-}
-EXPORT_SYMBOL(ssb_dma_alloc_consistent);
-
-void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
-			     void *vaddr, dma_addr_t dma_handle,
-			     gfp_t gfp_flags)
-{
-	switch (dev->bus->bustype) {
-	case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-		if (gfp_flags & GFP_DMA) {
-			/* Workaround: The PCI API does not support passing
-			 * a GFP flag. */
-			dma_free_coherent(&dev->bus->host_pci->dev,
-					  size, vaddr, dma_handle);
-			return;
-		}
-		pci_free_consistent(dev->bus->host_pci, size,
-				    vaddr, dma_handle);
-		return;
-#endif
-	case SSB_BUSTYPE_SSB:
-		dma_free_coherent(dev->dev, size, vaddr, dma_handle);
-		return;
-	default:
-		__ssb_dma_not_implemented(dev);
-	}
-}
-EXPORT_SYMBOL(ssb_dma_free_consistent);
-
 int ssb_bus_may_powerdown(struct ssb_bus *bus)
 {
 	struct ssb_chipcommon *cc;
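The ssb/main.c hunks above point each SSB device at a struct device suitable for DMA (sdev->dma_dev) and delete the ssb_dma_set_mask()/ssb_dma_alloc_consistent()/ssb_dma_free_consistent() wrappers, so client drivers are expected to call the generic DMA API on that device directly. A hedged sketch of the resulting caller pattern; the helper name and the 32-bit mask are made up for illustration:

#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

/* Illustrative only: allocate a coherent buffer for an SSB device via the
 * generic DMA API instead of the removed ssb_dma_* wrappers. */
static void *example_alloc_coherent(struct ssb_device *sdev, size_t size,
				    dma_addr_t *handle)
{
	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(32)))
		return NULL;
	return dma_alloc_coherent(sdev->dma_dev, size, handle, GFP_KERNEL);
}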
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 6dcda86..6e88d2b 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -626,11 +626,22 @@
 		return -ENODEV;
 	}
 	if (bus->chipco.dev) {	/* can be unavailable! */
-		bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
-			SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
+		/*
+		 * Get the SPROM offset: SSB_SPROM_BASE1, except when the
+		 * chipcommon rev is >= 31, or when the chip ID is 0x4312 and
+		 * (chipcommon status & 3) == 2, which need SSB_SPROM_BASE31.
+		 */
+		if (bus->chipco.dev->id.revision >= 31)
+			bus->sprom_offset = SSB_SPROM_BASE31;
+		else if (bus->chip_id == 0x4312 &&
+			 (bus->chipco.status & 0x03) == 2)
+			bus->sprom_offset = SSB_SPROM_BASE31;
+		else
+			bus->sprom_offset = SSB_SPROM_BASE1;
 	} else {
 		bus->sprom_offset = SSB_SPROM_BASE1;
 	}
+	ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset);
 
 	buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
 	if (!buf)
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 7a582e8..96c86c8 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -71,7 +71,7 @@
 #endif
 
 	/* Device is being bridged */
-	/* if (net_dev->br_port != NULL)
+	/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
 		return 0; */
 
 	return 1;
@@ -440,6 +440,7 @@
 	struct batman_packet *batman_packet;
 	struct batman_if *batman_if;
 	struct net_device_stats *stats;
+	struct rtnl_link_stats64 temp;
 	int ret;
 
 	skb = skb_share_check(skb, GFP_ATOMIC);
@@ -468,7 +469,7 @@
 	if (batman_if->if_status != IF_ACTIVE)
 		goto err_free;
 
-	stats = (struct net_device_stats *)dev_get_stats(skb->dev);
+	stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp);
 	if (stats) {
 		stats->rx_packets++;
 		stats->rx_bytes += skb->len;
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 5c0d06c..020fa5a 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -171,7 +171,8 @@
 	int			i, count;
 	rndis_query_cmplt_type	*resp;
 	struct net_device	*net;
-	const struct net_device_stats	*stats;
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *stats;
 
 	if (!r) return -ENOMEM;
 	resp = (rndis_query_cmplt_type *) r->buf;
@@ -194,7 +195,7 @@
 	resp->InformationBufferOffset = cpu_to_le32 (16);
 
 	net = rndis_per_dev_params[configNr].dev;
-	stats = dev_get_stats(net);
+	stats = dev_get_stats(net, &temp);
 
 	switch (OID) {
 
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2406377..107af9e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -527,13 +527,12 @@
 
 	/* start polling new socket */
 	oldsock = vq->private_data;
-	if (sock == oldsock)
-		goto done;
+	if (sock != oldsock) {
+		vhost_net_disable_vq(n, vq);
+		rcu_assign_pointer(vq->private_data, sock);
+		vhost_net_enable_vq(n, vq);
+	}
 
-	vhost_net_disable_vq(n, vq);
-	rcu_assign_pointer(vq->private_data, sock);
-	vhost_net_enable_vq(n, vq);
-done:
 	if (oldsock) {
 		vhost_net_flush_vq(n, index);
 		fput(oldsock->file);
@@ -634,7 +633,7 @@
 }
 #endif
 
-const static struct file_operations vhost_net_fops = {
+static const struct file_operations vhost_net_fops = {
 	.owner          = THIS_MODULE,
 	.release        = vhost_net_release,
 	.unlocked_ioctl = vhost_net_ioctl,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 0b99783..248ed2d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -237,8 +237,8 @@
 {
 	int i;
 
-        if (!mem)
-                return 0;
+	if (!mem)
+		return 0;
 
 	for (i = 0; i < mem->nregions; ++i) {
 		struct vhost_memory_region *m = mem->regions + i;
diff --git a/firmware/Makefile b/firmware/Makefile
index 243409f..6e0dd3e 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -42,7 +42,7 @@
 fw-shipped-$(CONFIG_COMPUTONE) += intelliport2.bin
 fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \
 				   cxgb3/t3c_psram-1.1.0.bin \
-				   cxgb3/t3fw-7.4.0.bin \
+				   cxgb3/t3fw-7.10.0.bin \
 				   cxgb3/ael2005_opt_edc.bin \
 				   cxgb3/ael2005_twx_edc.bin \
 				   cxgb3/ael2020_twx_edc.bin
diff --git a/firmware/cxgb3/t3fw-7.10.0.bin.ihex b/firmware/cxgb3/t3fw-7.10.0.bin.ihex
new file mode 100644
index 0000000..96399d8
--- /dev/null
+++ b/firmware/cxgb3/t3fw-7.10.0.bin.ihex
@@ -0,0 +1,1935 @@
+:1000000060007400200380002003700000001000D6
+:1000100000002000E100028400070000E1000288E7
+:1000200000010000E0000000E00000A0010000006E
+:1000300044444440E3000183200200002001E0002A
+:100040002001FF101FFFD0001FFFC000E300043C91
+:100050000200000020006C841FFFC2A020006CCCB6
+:100060001FFFC2A420006D0C1FFFC2A820006D80DE
+:100070001FFFC2AC200003C0C00000E43100EA3121
+:1000800000A13100A03103020002ED306E2A05000C
+:10009000ED3100020002160012FFDBC03014FFDA5F
+:1000A000D30FD30FD30F03431F244C107249F0D347
+:1000B0000FD30FD30F12FFD5230A00240A00D30F4A
+:1000C000D30FD30F03431F244C107249F0D30FD327
+:1000D0000FD30F14FFCE03421F14FFCB03421F1296
+:1000E000FFCCC0302D37302D37342D37382D373CED
+:1000F000233D017233ED00020012FFC4C0302F37E0
+:10010000002F37102F37202F3730233D017233ED6A
+:1001100000020012FFBEC0302737002737102737F4
+:1001200020273730233D017233ED03020012FFB95F
+:1001300013FFBA0C0200932012FFB913FFB90C028F
+:1001400000932012FFB8C0319320822012FFB71312
+:10015000FFB7932012FFB715FFB316FFB6C030D715
+:100160002005660160001B00000000000000000088
+:10017000043605000200D30FD30F05330C6E3B1479
+:100180000747140704437631E604360505330C6F40
+:100190003BED00020012FFA615FFA3230A00D720A3
+:1001A000070443043E0505330C0747146F3BF00377
+:1001B000020012FFA1C03014FFA1D30FD30FD30F41
+:1001C0009340B4447249F2D30FD30FD30F14FF9B63
+:1001D000834014FF9B834012FF9B230A0014FF9A65
+:1001E000D30FD30FD30F9340B4447249F2D30FD33C
+:1001F0000FD30F14FF95834012FF95C92F832084DE
+:10020000218522BC22743B0F8650B4559630B433FE
+:100210007433F463FFE60000653FE1655FDE12FFC3
+:100220007C230A0028374028374428374828374C91
+:10023000233D017233ED03020000020012FF7AC079
+:1002400032032E0503020012FF7813FF819320C0B2
+:1002500011014931004831010200C00014FF7E0441
+:10026000D23115FF7D945014FF7D04D33115FF7CEE
+:10027000945014FF7C04D43115FF7C24560014FFE5
+:100280007B04D53115FF7B24560010FF7A03000054
+:10029000000000000000000000000000000000005E
+:1002A000000000000000000000000000000000004E
+:1002B000000000000000000000000000000000003E
+:1002C000000000000000000000000000000000002E
+:1002D000000000000000000000000000000000001E
+:1002E000000000000000000000000000000000000E
+:1002F00000000000000000000000000000000000FE
+:1003000000000000000000000000000000000000ED
+:1003100000000000000000000000000000000000DD
+:1003200000000000000000000000000000000000CD
+:1003300000000000000000000000000000000000BD
+:1003400000000000000000000000000000000000AD
+:10035000000000000000000000000000000000009D
+:10036000000000000000000000000000000000008D
+:10037000000000000000000000000000000000007D
+:10038000000000000000000000000000000000006D
+:10039000000000000000000000000000000000005D
+:1003A000000000000000000000000000000000004D
+:1003B000000000000000000000000000000000003D
+:1003C000000000000000000000000000000000002D
+:1003D000000000000000000000000000000000001D
+:1003E000000000000000000000000000000000000D
+:1003F00000000000000000000000000000000000FD
+:1004000000000000000000000000000000000000EC
+:1004100000000000000000000000000000000000DC
+:1004200063FFFC000000000000000000000000006E
+:100430000000000000000000000000001FFC0000A1
+:100440001FFC0000E30005C81FFC00001FFC0000AB
+:10045000E30005C81FFC00001FFC0000E30005C806
+:100460001FFFC0001FFFC000E30005C81FFFC00042
+:100470001FFFC018E30005C81FFFC0181FFFC018EA
+:10048000E30005E01FFFC0181FFFC294E30005E072
+:100490001FFFC2941FFFC294E300085C1FFFC2A0AD
+:1004A0001FFFC59CE300085C200000002000016ADB
+:1004B000E3000B582000018020000180E3000CC401
+:1004C0002000020020000203E3000CC42000021CF4
+:1004D00020000220E3000CC8200002202000022699
+:1004E000E3000CCC2000023C20000240E3000CD4CE
+:1004F0002000024020000249E3000CD82000024CFA
+:1005000020000250E3000CE42000025020000259B9
+:10051000E3000CE82000025C20000260E3000CF421
+:100520002000026020000269E3000CF82000026C49
+:1005300020000270E3000D04200002702000027908
+:10054000E3000D082000028C2000028CE3000D1453
+:100550002000029020000293E3000D14200002AC62
+:10056000200002B0E3000D18200002D0200002F2AB
+:10057000E3000D1C200003B0200003B0E3000D4099
+:10058000200003B0200003B0E3000D40200003B0C2
+:10059000200003B0E3000D40200003B0200003B0B2
+:1005A000E3000D40200003B020006EA4E3000D40E6
+:1005B00020006EA420006EA4E30078340000000048
+:1005C00000000000000000001FFC00001FFC0000F5
+:1005D0001FFFC5A01FFFC69020006EA820006EA8B8
+:1005E000DEFFFE000000080CDEADBEEF1FFFC2B054
+:1005F0001FFCFE001FFFC0A41FFFC5D0300000007D
+:10060000003FFFFF8040000010000000080FFFFFC8
+:100610001FFFC27D000FFFFF804FFFFF8000000023
+:1006200000000880B000000560500000600000007D
+:1006300040000011350000004100000010000001E2
+:100640002000000000001000400000000500000035
+:10065000800000190400000000000800E100020012
+:1006600010000005806000007000000020000009FC
+:10067000001FF8008000001EA0000000F80000002D
+:1006800007FFFFFF080000001800000001008001C4
+:10069000420000001FFFC22D1FFFC0EC00010080C0
+:1006A000604000001A0000000C0000001000000A6A
+:1006B000000030000001000080000018FC00000075
+:1006C0008000000100004000600008008000001C65
+:1006D0008000001A030000008000040004030403EB
+:1006E00050000003FFFFBFFF1FFFC3E400000FFF28
+:1006F000FFFFF000000016D00000FFF7A50000008B
+:100700001FFFC4C01FFFC4710001000800000B20C0
+:10071000202FFF801FFFC46500002C00FFFEFFF8A4
+:1007200000FFFFFF1FFFC58800002000FFFFDFFF65
+:100730000000FFEF010011001FFFC3E21FFFC5A073
+:10074000FFFFEFFF0000FFFB1FFFC6501FFFBEB003
+:10075000FFFFF7FF1FFFC0740000FFFD1FFFC64033
+:100760000001FBD01FFFC5C01FFFC6801FFFC5A132
+:10077000E0FFFE001FFFC5B0000080001FFFC54C5A
+:100780001FFFC5C41FFFC0781FFFC4E41FFCFFD8B4
+:10079000000100817FFFFFFFE1000600000027103D
+:1007A0001FFCFE301FFCFE701FFFC5481FFFC56009
+:1007B0000003D0901FFFC5742B5063802B507980AD
+:1007C0002B5090802B50A6801FFFC4790100110F81
+:1007D000202FFE0020300080202FFF000000FFFFB0
+:1007E0000001FFF82B50B2002B50B208000100109E
+:1007F0002B50B1802B50B2802B50BA000001001159
+:100800002B50BD282B50BC802B50BDA020300000A9
+:10081000DFFFFE005000000200C0000002000000E8
+:10082000FFFFF7F41FFFC07C000FF800044000003A
+:10083000001000000C4000001C400000E00000A080
+:100840001FFFC5501FFD00081FFFC5641FFFC578AF
+:100850001FFFC58CE1000690E10006EC00000000DF
+:100860000000000000000000000000000100000087
+:100870000000000000000000000000002010004008
+:10088000201000402010004020140080200C0000A8
+:10089000200C0000200C00002010004020140080DC
+:1008A0002014008020140080201800C0201C0100AB
+:1008B000201C0100201C010020200140201800C045
+:1008C000201800C0201800C0201C0100201800C003
+:1008D000201800C0201800C0201C0100202001406A
+:1008E00020200140202001402020094020200940F4
+:1008F000202009402020094020240980FFFFFFFF1D
+:10090000FFFFFFFFFFFFFFFF0000000000000000EF
+:1009100000000000000000000000000020005588DA
+:1009200020005458200055882000558820005394FA
+:100930002000539420005394200051D4200051D41F
+:10094000200051CC2000513820004FE020004DC045
+:1009500020004B94000000000000000020005558CB
+:1009600020005424200054C8200054C82000527C89
+:100970002000527C2000527C2000527C2000527CBF
+:10098000200051C42000527C20004F0020004D70F8
+:1009900020004B40000000000000000020000BF091
+:1009A00020003ADC200004C02000473020000BE883
+:1009B000200041F4200003F0200046F020004B1CF2
+:1009C00020003F0020003E1C20003A58200038E85C
+:1009D00020003658200031B820003C7820002DD06F
+:1009E0002000286420006828200023F0200020D068
+:1009F0002000207C20001D68200018602000158841
+:100A000020000E5420000C3420001134200013204C
+:100A1000200043EC20003EB420000BF8200004C06E
+:100A200000000000000000000000000000000000C6
+:100A300000000000000000000000000000000000B6
+:100A400000000000000000000000000000000000A6
+:100A50000000000000000000000000000000000096
+:100A60000000000000000000000000000000000086
+:100A70000000000000000000000000000000000076
+:100A80000000000000000000000000000000000066
+:100A90000000000000000000000000000000000056
+:100AA0003264000000000000326400006400640052
+:100AB00064006400640064006400640000000000DE
+:100AC0000000000000000000000000000000000026
+:100AD0000000000000000000000000000000000016
+:100AE0000000000000000000000000000000000006
+:100AF00000000000000000000000000000000000F6
+:100B000000000000000010000000000000000000D5
+:100B100000000000000000000000000000001000C5
+:100B200000000000000000000000000000000000C5
+:100B300000432380000000000000000000000000CF
+:100B400000000000000000000000000000000000A5
+:100B50000000000000000000005C94015D94025E53
+:100B600094035F94004300000000000000000000B8
+:100B70000000000000000000000000000000000075
+:100B80000000000000000000000000000000000065
+:100B90000000000000000000005C90015D90025E1B
+:100BA00090035F9000530000000000000000000070
+:100BB0000000000000000000000000000000000035
+:100BC0000000000000000000000000000000000025
+:100BD0000000000000000000009C94001D90019D9A
+:100BE00094029E94039F94040894050994060A9421
+:100BF000070B94004300000000000000000000000C
+:100C000000000000000000000000000000000000E4
+:100C10000000000000000000009C90019D90029EDA
+:100C200090071D90039F90047890057990067A9024
+:100C3000077B90005300000000000000000000004F
+:100C400000000000000000000000000000000000A4
+:100C5000000000000000000000DC94001D9001DD99
+:100C60009402DE9403DF940404940505940606942C
+:100C70000707940808940909940A0A940B0B940036
+:100C80004300000000000000000000000000000021
+:100C9000000000000000000000DC9001DD9002DE9A
+:100CA000900B1D9003DF9004B49005B59006B690AC
+:100CB00007B79008B89009B9900ABA900BBB90009A
+:100CC0005300000063FFFC0020006C6010FFFF0A6F
+:100CD0000000000020006C8400D23110FFFE0A00EA
+:100CE0000000000020006CCC00D33110FFFE0A0091
+:100CF0000000000020006D0C00D43110FFFE0A003F
+:100D00000000000020006D8000D53110FFFE0A00B9
+:100D10000000000063FFFC00E00000A012FFF7826B
+:100D200020028257C82163FFFC12FFF303E830045E
+:100D3000EE3005C03093209421952263FFFC000023
+:100D40001FFFD000000400201FFFC5A01FFFC6909A
+:100D5000200A0011FFFB13FFFB03E631010200161E
+:100D6000FFFA17FFFAD30F776B069060B4667763CC
+:100D7000F85415F3541AA50F140063FFF90000008E
+:100D80006C1004C020D10F006C1004C0C71AEF060D
+:100D9000D830BC2BD72085720D4211837105450BCD
+:100DA000957202330C2376017B3B04233D0893713B
+:100DB000A32D12EEFE19EEFEA2767D632C2E0A0004
+:100DC000088202280A01038E380E0E42C8EE29A6B8
+:100DD0007E6D4A0500208800308C8271D10FC0F0F2
+:100DE000028F387FC0EA63FFE400C0F1C050037E89
+:100DF0000CA2EE0E3D1208820203F538050542CB27
+:100E00005729A67E2FDC100F4F366DFA050020887B
+:100E100000308CBC75C03008E208280A0105833810
+:100E2000030342C93E29A67E0D480CD30F6D8A05E7
+:100E300000208800B08C8271D10FC05008F5387541
+:100E4000C0C163FFBBC06002863876C0DA63FFD4DE
+:100E50006C101216EED8C1F9C1E8C1C72B221E28AA
+:100E6000221DC0D07B81352920060BB702299CFAB0
+:100E7000655008282072288CFF2824726491642A07
+:100E8000B0000CA80C64816F0EA90C6492BB7FA10A
+:100E90003FC1CE7CA13669AC336000370029200603
+:100EA000D7D0299CFACC57282072288CFF2824728E
+:100EB0006491392AD0000CA80C6481680EA90C64D6
+:100EC000931F7FA10BC1CE7CA10268AC06C020D1CC
+:100ED0000F2D25028A32C0900A6E5065E5B529248F
+:100EE00067090F4765F5B12C200C1FEEB30CCE112E
+:100EF000AFEE29E286B44879830260058219EEAF2D
+:100F000009C90A2992A36890078F2009FF0C65F58B
+:100F10006E2FE28564F56865559628221D7B810554
+:100F2000D9B060000200C0908B9417EEA50B881416
+:100F300087740B0B47A87718EEA309BB100877023C
+:100F400097F018EEA117EEA208A8010B8802074738
+:100F5000021BEE9E97F10B880298F22790232B90AC
+:100F60002204781006BB1007471208BB0228902104
+:100F70000777100C88100788020B880217EE968BF3
+:100F80003307BB0187340B880298F3979997F48B4A
+:100F90009587399BF588968B3898F688979BF897B4
+:100FA000F998F717EE8D28E28507C7082D74CF084A
+:100FB000480B28E68565550F2B221E28221D7B89AC
+:100FC000022B0A0064BF052CB00728B000DA200607
+:100FD000880A28824CC0D10B8000DBA065AFE76394
+:100FE000FEEA0000292072659E946004E72A2072C0
+:100FF00065AEBF6004DE00002EB0032C2067D4E095
+:1010000065C1058A328C330AFF500C4554BC5564C7
+:10101000F4EB19EE72882A09A90109880C64821F71
+:10102000C0926000DD2ED0032A2067D4E065A0D8EE
+:101030008A328B330AFC500B4554BC5564C4BE192C
+:10104000EE67882A09A9017989D50BEA5064A4E3DF
+:101050000CEE11C0F02F16132E16168AE78CE82A14
+:1010600016128EE9DFC0AAEA7EAB01B1CF0BA85001
+:101070006583468837DBC0AE89991E789B022BCCEE
+:10108000012B161B29120E2B0A0029161A7FC307E3
+:101090007FC9027EAB01C0B165B49D8B352F0A00BC
+:1010A0002A0A007AC30564C3CB2F0A0165F4892B91
+:1010B00012162B1619005104C0C100CC1A2CCCFFFB
+:1010C0002C16170CFC132C16182B121A2A121BDCC8
+:1010D000505819B6C0D0C0902E5CF42C12172812AC
+:1010E000182F121B2A121A08FF010CAA01883407B4
+:1010F0004C0AAB8B2812192BC6162F86082A860994
+:101100002E74102924672E70038975B1EA2A74039E
+:10111000B09909490C659DB42B20672D250265B354
+:10112000FA2B221E2C221D7BC901C0B064BD9D2C50
+:10113000B00728B000DA2006880A28824CC0D10BFC
+:101140008000DBA065AFE763FD8289BAB199659045
+:101150009788341CEE2398BA8F331EEE1C0F4F5421
+:101160002FB42C8D2A8A320EDD020CAC017DC966AB
+:101170000A49516F92608A3375A65B2CB0130AED51
+:10118000510DCD010D0D410C0C417DC9492EB01200
+:10119000B0EE65E3C6C0D08E378CB88A368FB97C86
+:1011A000A3077AC9027EFB01C0D1CED988350AAD2A
+:1011B000020E8E0878EB022DAC0189B7DAC0AF9B26
+:1011C00079BB01B1CADCB0C0B07DA3077AD9027C7B
+:1011D000EB01C0B164B161C091292467C020D10F77
+:1011E00000008ADAB1AA64A0C02C20672D25026510
+:1011F000C3111DEDF68A321EEDFB0DAD010EDD0CA7
+:1012000065D28A0A4E516FE202600281C0902924A1
+:1012100067090F4765F2F828221D7B89022B0A0017
+:1012200064BCA92CB00728B000DA2006880A2882FE
+:101230004CC0D10B8000DBA065AFE763FC8E0000E3
+:101240000CE9506492ED0CEF11C080281611AFBF6D
+:101250002F16198EF88BF7DAE08FF92B1610ABFBEF
+:101260007FBB01B1EA0CA8506580D68837DCE0AFBF
+:1012700089991C789B022CEC012C161B29120C2C32
+:101280000A0029161A7AE3077AE9027FBB01C0C176
+:1012900065C2A58B352C0A002A0A007AE30564E1B1
+:1012A000CA2C0A0164CE0D60028E88341BEDCD98E5
+:1012B000DA8F331EEDC60F4F542FD42C8C2A8A326E
+:1012C0000ECC020BAB010CBB0C65BF0A0A49516E78
+:1012D000920263FF018A330AAB5064BEF92CD0132B
+:1012E0000AEE510ECE010E0E410C0C410ECC0C65D7
+:1012F000CEE42FD012B0FF65F26EC0B08E378CD81E
+:101300008A362FD2097CA3077AC9027EFB01C0B1BD
+:1013100065BEC38835DBA0AE8E78EB01B1AB89D753
+:10132000DAC0AF9D79DB01B1CAC0C07BA3077AB92F
+:10133000027DEB01C0C165CE9DC090292467C0200D
+:10134000D10F88378C3698140CE90C29161408F83C
+:101350000C981D78FB07281214B088281614891DD4
+:101360009F159B16C0F02B121429161A2B161B8BD7
+:10137000147AE30B7AE90688158E1678EB01C0F132
+:1013800065F1BA29121A2F12118A352E121B9A1AD8
+:10139000AFEE2F1210C0A0AF9F79FB01B1EE9F11ED
+:1013A000881AC0F098107AE30A7EA9052A12017AF9
+:1013B0008B01C0F164F08160018389368B37991706
+:1013C0000BE80C981F09C90C29161578EB07281291
+:1013D00015B088281615D9C09A199E188A1F2E1282
+:1013E000152A161A2E161BDAC0C0E08C177F930B35
+:1013F0007FA90688188F1978FB01C0E165E13E29B5
+:10140000121A2F12138A352E121B9A1BAFEE2F12AF
+:1014100012C0A0AF9F79FB01B1EE9F13881BC0F0F3
+:1014200098127AE30A7EA9052A12037A8B01C0F189
+:1014300065F10A2E12162E16192A121B005104C02D
+:10144000E100EE1AB0EE2E16170EFF132F16180F2E
+:10145000CC01ACAA2F121A0EBC01ACFC7FCB01B19F
+:10146000AA2A161B2C161A63FC5E00007FB30263C7
+:10147000FE3163FE2B7EB30263FC3063FC2A000066
+:101480006450C0DA20DBC058168AC020D10FC0914A
+:1014900063FD7A00C09163FA44DA20DB70C0D12E7C
+:1014A0000A80C09A2924682C7007581575D2A0D1DB
+:1014B0000F03470B18ED4DDB70A8287873022B7DC6
+:1014C000F8D9B063FA6100002A2C74DB40580EEEA4
+:1014D00063FAE4000029221D2D25027B9901C0B08A
+:1014E000C9B62CB00728B000DA2006880A28824C3A
+:1014F000C0D10B8000DBA065AFE7C020D10FC09149
+:1015000063FBFF00022A0258024C0AA202060000F6
+:10151000022A025802490AA202060000DB70DA2001
+:10152000C0D12E0A80C09E2924682C7007581554FB
+:10153000C020D10FC09463FBC9C09663FBC4C096A2
+:1015400063FBBF002A2C74DB30DC405BFE0FDBA0AA
+:10155000C2A02AB4002C200C63FF27008D358CB765
+:101560007DCB0263FDD263FC6D8F358ED77FEB029E
+:1015700063FDC563FC6000006C1004C020D10F0047
+:101580006C1004C020D10F006C10042B221E2822E6
+:101590001DC0A0C0942924062A25027B8901DBA056
+:1015A000C9B913ED04DA2028B0002CB00703880A6B
+:1015B00028824CC0D10B8000DBA065AFE7C020D1F2
+:1015C0000F0000006C10042C20062A210268C805B8
+:1015D00028CCF965812E0A094C6591048F30C1B879
+:1015E0000F8F147FB00528212365812716ECF3297E
+:1015F000629E6F98026000F819ECEF2992266890BD
+:10160000078A2009AA0C65A0E72A629D64A0E12B45
+:10161000200C0CB911A6992D92866FD9026000DBBF
+:101620001DECE70DBD0A2DD2A368D0078E200DEE6C
+:101630000C65E0C7279285C0E06470BF1DECEC68C4
+:10164000434E1CECEB8A2B0CAA029A708920089955
+:10165000110D99029971882A98748F329F752821EB
+:1016600004088811987718ECDC0CBF11A6FF2DF246
+:1016700085A8B82E84CF2DDC282DF685C85A2A2CB3
+:1016800074DB40580E81D2A0D10FC020D10F0000D2
+:101690000029CCF96490B12C20668931B1CC0C0CB6
+:1016A000472C24666EC60260008509F85065807F6D
+:1016B0001CECD18A2B0F08400B881008AA020CAA38
+:1016C000029A7089200899110D99029971883398AE
+:1016D000738C329C728A2A9A748934997563FF7D5F
+:1016E00000CC57DA20DB30DC4058155FC020D10F2A
+:1016F00000DA20C0B65815EE63FFE500DA20581571
+:10170000EC63FFDC00DA20DB30DC40DD5058167A79
+:10171000D2A0D10FC858DA20DB305814C72A2102D2
+:1017200065AFBDC09409A90229250263FFB200007C
+:101730002B21045814731DECADC0E02E24668F30AD
+:101740002B200C0F8F1463FF66292138C088798302
+:101750001F8C310CFC5064CF562B2104C0C0581490
+:10176000681DECA2C0E08F302B200C0F8F1463FF9C
+:101770003E2C20662B2104B1CC0C0C472C2466583F
+:1017800014601DEC9AC0E02E24668F302B200C0FC5
+:101790008F1463FF1A0000006C1004C0B7C0A116BC
+:1017A000EC9615EC88D720D840B822C04005350209
+:1017B0009671957002A438040442C94B1AEC7B1947
+:1017C000EC7C29A67EC140D30F6D4A0500808800BD
+:1017D000208C220A88A272D10FC05008A53875B09B
+:1017E000E363FFD76C10069313941129200665520A
+:1017F00088C0716898052A9CF965A29816EC6F2933
+:1018000021028A1309094C6590CD8AA00A6A512ADF
+:10181000ACFD65A0C2CC5FDB30DA208C115815120C
+:10182000C0519A13C7BF9BA98E132EE20968E060CE
+:101830002F629E1DEC606FF8026000842DD2266836
+:10184000D0052F22007DF9782C629DC79064C0706E
+:101850009C108A132B200C2AA0200CBD11A6DD0A97
+:101860004F14BFA809880129D286AF88288C09792E
+:101870008B591FEC520FBF0A2FF2A368F0052822E4
+:10188000007F894729D285D4906590756000430018
+:10189000002B200C1FEC4A0CBD11A6DD29D2860FAF
+:1018A000BF0A6E96102FF2A368F00488207F890586
+:1018B00029D285659165DA2058157DC95C6001FFE4
+:1018C00000DA20C0B658157A60000C00C09063FFA3
+:1018D000B50000DA205815766551E48D138C11DBC4
+:1018E000D08DD0022A020D6D515813E39A1364A1D2
+:1018F000CEC75F8FA195A9C0510F0F479F1163FEFF
+:10190000FD00C091C0F12820062C2066288CF9A784
+:10191000CC0C0C472C24666FC6098D138DD170DE5C
+:1019200002290A00099D02648159C9D38A102B211A
+:10193000045813F38A13C0B02B24662EA2092AA0E0
+:10194000200E28141CEC298D1315EC1DC1700A778C
+:101950003685562DDC28AC2C9C12DED0A8557CD3C5
+:10196000022EDDF8D3E0DA40055B02DC305BFF8A53
+:10197000D4A028200CB455C0D02B0A882F0A800C84
+:101980008C11A6CC29C285AF3FAB9929C6851CEC2A
+:1019900012DEF0AC882D84CF28120229120378F3CE
+:1019A000022EFDF8289020D3E007880CC1700808AB
+:1019B00047289420087736657FAB891313EC10898C
+:1019C00090C0F47797491BEC0EC1CA2821048513F7
+:1019D000099E4006EE11875304881185520E880235
+:1019E0000C88029BA09FA18F2B9DA598A497A795DB
+:1019F000A603FF029FA22C200C1EEBF7AECE0CCC50
+:101A00001106CC082BC2852DE4CF2BBC202BC6851C
+:101A10002A2C748B11580D9CD2A0D10F28203DC0C8
+:101A2000E07C877F2E24670E0A4765A07B1AEBF5C2
+:101A300088201EEBE38F138EE48FF40888110A8848
+:101A4000020F8F14AFEE1FEBF098910FEE029E90F5
+:101A50001EEBEFC0801AEBE02CD285AABAB8CC28D6
+:101A6000A4CF2CD6852C21022F20720ECC02B1FFE0
+:101A70002F24722C2502C020D10F871387700707EF
+:101A80004763FD6E282138C099798B0263FE9ADD89
+:101A9000F063FE9500DA20DB308C11DD505815968E
+:101AA000D2A0D10FC0E163FF7A8B138C11DD50C03F
+:101AB000AA2E0A802A2468DA205813F1D2A0D10F66
+:101AC000C020D10F6C1006292102C0D07597102AB2
+:101AD00032047FA70A8B357FBF052D25020DD90261
+:101AE000090C4C65C18216EBB41EEBB228629EC095
+:101AF000FA78F30260018829E2266890078A2009B3
+:101B0000AA0C65A17A2A629DDFA064A1772B200C24
+:101B10000CBC11A6CC29C286C08C79830260015707
+:101B200019EBA709B90A2992A368900788200988A8
+:101B30000C65814327C2851CEBA964713A89310980
+:101B40008B140CBB016FB11D2C20669F10B1CC0C07
+:101B50000C472C24666EC60260014009FF5065F1F7
+:101B60003A8A102AAC188934C0C47F973C18EBA974
+:101B70001BEBA88F359C719B708B209D7408BB025A
+:101B80009B72C08298751BEBA40F08409B730F8853
+:101B90001198777FF70B2F2102284A0008FF022FA8
+:101BA0002502C0B4600004000000C0B07E97048F1E
+:101BB000362F25227D970488372825217C9736C02B
+:101BC000F1C0900AF9382F3C200909426490861927
+:101BD000EB7618EB7728967E00F08800A08C00F05A
+:101BE0008800A08C00F08800A08C2A629D2DE4A2C1
+:101BF0002AAC182A669D89307797388F338A321835
+:101C0000EB8007BE0B2C2104B4BB04CC1198E0C0C0
+:101C10008498E1882B9DE59AE69FE71AEB78099F67
+:101C20004006FF110FCC020A880298E2C1FC0FCCDB
+:101C3000022CE604C9B82C200C1EEB670CCA11AEAE
+:101C4000CC06AA0829A2852DC4CF09B90B29A685DF
+:101C5000CF5CC020D10FC081C0900F8938C0877978
+:101C6000880263FF7263FF6600CC57DA20DB30DC4A
+:101C7000405813FDC020D10FDA2058148D63FFE8BF
+:101C8000C0A063FE82DA20C0B658148963FFD90071
+:101C9000DB402A2C74580CFCD2A0D10F8A102B21C7
+:101CA000045813171EEB44C0D02D246663FEB10008
+:101CB0006C1006D62019EB3F1EEB4128610217EB92
+:101CC0003E08084C65805F8A300A6A5169A3572B29
+:101CD000729E6EB83F2A922668A0048C607AC9343E
+:101CE0002A729D2C4CFECAAB2B600CB64F0CBD115A
+:101CF000A7DD28D2860EBE0A78FB269C112EE2A311
+:101D00002C160068E0052F62007EF91522D285CFDF
+:101D10002560000D00DA60C0B6581465C85A60012D
+:101D20000F00DA60581462655106DC40DB308D30FC
+:101D3000DA600D6D515812D0D3A064A0F384A1C015
+:101D40005104044763FF6D00C0B02C60668931B157
+:101D5000CC0C0C472C64666FC60270960A2B61048B
+:101D60005812E7C0B02B64666550B42A3C10C0E737
+:101D7000DC20C0D1C0F002DF380F0F4264F09019B0
+:101D8000EB0A18EB0B28967E8D106DDA0500A08803
+:101D900000C08CC0A089301DEB1A77975388328C15
+:101DA000108F3302CE0BC02492E12261049DE00427
+:101DB00022118D6B9BE59FE798E61FEB1009984079
+:101DC0000688110822020FDD02C18D9DE208220261
+:101DD00092E4B4C22E600C1FEB000CE811A7882C13
+:101DE0008285AFEE0C220B2BE4CF228685D2A0D1C8
+:101DF0000F28600CD2A08C1119EAF80C8D11A9885B
+:101E0000A7DD2ED2852B84CF0ECC0B2CD685D10FFF
+:101E1000C0F00ADF387FE80263FF6C63FF600000F8
+:101E20002A6C74C0B2DC20DD405812C5C0B063FF1C
+:101E300063C020D10F0000006C10042920062A2264
+:101E40001EC0392C221D232468C0307AC107DDA0B2
+:101E5000600004000000C0D06E9738C08F2E0A804A
+:101E60002B2014C0962924060EBB022E21022B24FF
+:101E7000147E8004232502DE307AC10EC8ABDBD08D
+:101E8000DA202C0A00580B062E21020E0F4CC8FE39
+:101E90006000690068956528210208084C65805C2F
+:101EA0001AEAC61EEAC42BA29EC09A7B9B5E2BE256
+:101EB0002668B0048C207BC95329A29D1FEAC16407
+:101EC000904A9390C0C31DEAD52B21049D9608BB70
+:101ED000110CBB029B979B911CEAD2C08523E4A204
+:101EE0002BA29D2824068DFA282102B0DD2BBC30C0
+:101EF0002BA69D9DFA0C8802282502C8D2C020D1AD
+:101F00000F8EF912EAC82E2689C020D10FDA20C020
+:101F1000B65813E7C020D10F6C10062A2006941083
+:101F200068A80528ACF965825029210209094C6589
+:101F3000920ACC5FDB30DA208C1058134BC051D39F
+:101F4000A0C7AF9A3AC0D01CEA9D14EAA31EEA9C2F
+:101F50008F3A16EA99B1FB64B13128629E6F88020C
+:101F60006001ED294C332992266890078A2009AA3E
+:101F70000C65A1DC2A629DC08E64A1D42B200C0CC0
+:101F8000B7110677082972867983026001CD0CB9F2
+:101F90000A2992A36890082C220009CC0C65C1BBC9
+:101FA0002772856471B5282006288CF96481E52C98
+:101FB00020668931B1CC0C0C472C24666EC60260B9
+:101FC00001A109F85065819B2A21048CE488361E02
+:101FD000EA7D088914A9CC08084709881019EA92F3
+:101FE0000ECC029C7099718C2A1EEA9008CC020ECD
+:101FF000CC029C722E302C293013283012049910F8
+:102000000688100CEE109F740EAE0209880208EECE
+:10201000029E738C3704AA119C758938C0F4997696
+:102020008839C0C1987718EA828E359C7B9E780EDD
+:102030008E1408EE029E7A8E301CEA7177E73088A3
+:102040003289339C7C9F7D0E9C4006CC118F2B29BE
+:1020500076132D76112876120CAA0218EA68C1C9E7
+:102060000CAA022A761008FF029F7EC0AA60000117
+:10207000C0A6A4BC0CB911A6992892852DC4CF087E
+:10208000A80B289685655100C020D10F2B200C0C81
+:10209000B7110677082A72860CB90A6FA902600187
+:1020A000182992A36890082A220009AA0C65A109A0
+:1020B0002A728564A1032C203D0C2C4064C08C8CBA
+:1020C000350C8C1464C0848FE57CF37F8C360C8CCB
+:1020D0001464C0777CF374283013C0FC78F86CC0AB
+:1020E00090292467090C4765C0D719EA4718EA45C3
+:1020F0008F208C3508FF110C8C1408FF0288E49F98
+:10210000A1AC8C09CC029CA08C369FA30C8C14AC87
+:102110008809880298A218EA3DA4BC2F72852DC4B4
+:10212000CF2FFC102F76852F210229207208FF0265
+:10213000B2992924722F2502C020D10F00CC57DA82
+:1021400020DB308C105812C8C020D10FC09163FF23
+:102150008FDA20C0B658135663FFE100DA20581317
+:102160005463FFD82B21045811E61EEA152B200CCE
+:10217000C0D02D24668F3A63FE4DDA20DB30DC4080
+:10218000DD505813DDD2A0D10F2A2C748B10580BC0
+:10219000BED2A0D10F292138C08879832E8C310C72
+:1021A000FC5064CE222B2104C0C05811D5C0D01ED3
+:1021B000EA048F3A2B200C63FE0DDA2058133C639F
+:1021C000FF7ADA205BFF1CD2A0D10F002C20662BF7
+:1021D0002104B1CC0C0C472C24665811C91EE9F817
+:1021E0002B200CC0D02D24668F3A63FDDA0000004E
+:1021F0006C10089514C061C1B0D9402A203DC04080
+:102200000BAA010A64382A200629160568A8052C9D
+:10221000ACF965C33F1DE9EA6440052F120464F27E
+:10222000A02621021EE9E606064C6562E615E9E2F3
+:102230006440D98A352930039A130A990C6490CCEA
+:102240002C200C8B139C100CCC11A5CC9C112CC2F7
+:1022500086B4BB7CB3026002D78F100EFE0A2EE25A
+:10226000A368E0098620D30F0E660C6562C2881150
+:102270002882856482BA891364905EDA80D9308CB2
+:10228000201EE9E01FE9E11DE9CE8B138DD4D4B007
+:102290007FB718B88A293C10853608C6110E660229
+:1022A0009681058514A5D50F550295800418146DE7
+:1022B0008927889608CB110888140EBB02A8D82954
+:1022C0009C200F88029BA198A088929BA308881449
+:1022D000A8D80F880298A22AAC1019E9CCC0C08FE8
+:1022E000131EE9BD86118D10286285AEDD08FF0B37
+:1022F0002CD4CF2821022F66858B352A207209889D
+:1023000002ABAA2825022A2472C020D10F29529E8E
+:1023100018E9A96F980260020B28822668800829B4
+:10232000220008990C6591FC2A529DC1CE9A126434
+:10233000A1F22B200C2620060CB8110588082D824E
+:10234000860EBE0A7DC3026002052EE2A368E00885
+:102350002F22000EFF0C65F1F6288285D780DE80E3
+:102360006482009816266CF96462012C206688311C
+:102370002CCC010C0C472C24666EC6026001BC08F4
+:10238000FD5065D1B61DE9AB1CE98F19E9962A21EC
+:10239000048B2D2830102F211D0C88100BFB090AEF
+:1023A00088020988020CBB026441529B709D71989F
+:1023B00072C04D8D35D9E064D06ED730DBD0D830C7
+:1023C0007FD714273C10BCE92632168C3996E69C40
+:1023D000E78A37B4382AE6080B131464304A2A8295
+:1023E0001686799A9696978C778A7D9C982B821779
+:1023F0002C7C209A9A2A9C189B99867BB03B298C2E
+:10240000086DB9218BC996A52692162AAC18B899E1
+:102410009BA196A08BC786CD9BA22B921596A49BC1
+:10242000A386CB2CCC2026A605C0346BD4200D3B34
+:102430000C0DD8090E880A7FB705C0909988BC8812
+:10244000C0900B1A126DAA069988998B288C18C017
+:10245000D01BE97A1CE97916E96EB1FF2A211C2309
+:10246000E6130F0F4F26E6122F251D7FA906C0F099
+:10247000C08028251D05F6111AE9678F202BE61567
+:102480002CE6162DE61726E6180AFA022AE6142983
+:102490002006299CF96490F829200C8D14C0801A1C
+:1024A000E94E0C9C11AA99A5CCDA202BC285289460
+:1024B000CF0B4B0B2BC685C0B08C155811BBD2A0CF
+:1024C000D10F8A356FA546D8308BD56DA90C8A8679
+:1024D0000A8A14CBA77AB335288C10C080282467C9
+:1024E000080B4765B10BDA20DB302C12055811DEE2
+:1024F000D3A0C0C1C0D02DA4039C1463FD22863696
+:102500006461059B709D719872C04D63FEA4C0818B
+:1025100063FFC9008814CC87DA20DB308C15581192
+:10252000D2C020D10FDA20C0B658126163FFE40098
+:1025300000DA208B1058125E63FFD8009E178A12B3
+:102540002B21045810EF8E17C09029246663FE34A7
+:10255000C08063FE06DA20DB308C15DD505812E6B1
+:10256000D2A0D10FDA2058125263FFA7002B2138D6
+:10257000C0A87BAB026001048C310CFC5064CE041B
+:102580008A122B2104C0C098175810DD8E1763FDE6
+:10259000F32D21382DDCFF0D0D4F2D253865DEF78D
+:1025A00028206A7F87050826416460A3C09016E949
+:1025B000141CE9232A200723E61BB1AA0CFD0226DE
+:1025C000E61A2B200A29E61D2DE61E0CBB022BE67F
+:1025D0001C8B260A0A472BE6208B282AE53E2BE691
+:1025E000212924072820062A2064688346B44463EE
+:1025F000FEA5DB30DA208C158D142E0A80C08E28C3
+:10260000246858111FD2A0D10F2E7C4819E8ED2A5A
+:1026100032162B76129D712D761328761489960A20
+:102620002A14AA990C9902997069ED71C14663FD4B
+:102630008100000064AFB51DE8E22C20168DD20A9F
+:10264000CC0C00D10400CC1AACBC9C2963FF9D00CB
+:102650002B21046EB81E2C2066B8CC0C0C472C2401
+:1026600066C9C09E178A125810A68E17C0348F20D4
+:10267000C0D02D2466C06826240663FF2E8A122B44
+:1026800021042C20669817B1CC0C0C472C246658DA
+:10269000109C8E178716C0D02D246663FCE68D35FE
+:1026A000C08064D04AD9E0DC30DBE0DF301AE8E5F6
+:1026B000B188B4FF16E8E584C92D9DFF87C82CCCEE
+:1026C0001027D63006460127D6320A440117E8DF24
+:1026D00024D631A74727D63324F21596B794B68D62
+:1026E000C3BCBB9DB58D35299C107D83C22F211D98
+:1026F000C14663FD330000006C1006292006289CAB
+:10270000F86582BF2921022B200C09094C6590E154
+:1027100016E8AA0CBA11A6AA2DA2862C0A127DC30D
+:102720000260028C19E8A609B90A2992A3689007E9
+:102730008C2009CC0C65C27829A2856492722D6226
+:102740009E1AE89C6FD80260026E2AA22629160102
+:1027500068A0082B22000ABB0C65B25C29629DC1EF
+:102760008C6492542A21200A806099102C203CC746
+:10277000EF000F3E010B3EB1BD0FDB390BBB098FE4
+:10278000260DBD112DDC1C0D0D410EDD038E27B174
+:10279000DD0D0D410FEE0C0DBB0B2BBC1C0BB7025E
+:1027A0007EC71C2C21257BCB162D1AFC0CBA0C0DD8
+:1027B000A16000093E01073EB1780987390B770A0D
+:1027C00077EB0260020A2C2123282121B1CC0C0CCA
+:1027D0004F2C25237C8B29B0CD2D2523C855DA20FD
+:1027E000DB30581095292102CC96C0E80E9E022EAF
+:1027F0002502CC57DA20DB30DC4058111BC020D139
+:102800000F2C20668931B1CC0C0C472C24666EC687
+:10281000026001D309FD5065D1CD2F0A012E301180
+:1028200029221464E01128221B090C4400C1040071
+:10283000FA1A0A880228261B2E3010C0A0C0B094B5
+:102840001295131CE85F88302CC022088D147787FE
+:1028500004C0F10CFA38C041C0F225203CC0840805
+:1028600058010F5F010F4B3805354007BB10C0F012
+:10287000084F3808FF100FBB0228ECFEC0F0084FCD
+:1028800038842B0BA8100AFF102A21200F88020B76
+:10289000880208440218E86E8F1108440228212596
+:1028A0000A2A140828140488110A88022A21049488
+:1028B000F08B2004E41008BB1104BB02C04A04BB27
+:1028C000029BF1842A08AB110BEB0294F40A541119
+:1028D0000B44020555100D1B4094F707BB100B5518
+:1028E00002085502C08195F68433C05094F3B19428
+:1028F0008B3295F898F99BF2C080C1BC24261499BC
+:10290000FA9BF598FB853895FC843A94FD8B3B9BAC
+:10291000FE883998FF853525F6108436851324F610
+:10292000118B3784122BF612C0B064C07E893077C9
+:1029300097438D3288332E30108F111CE83109995E
+:10294000400699112CF614C0C42CF6158C2B2DF6CC
+:102950001A28F61B2BF61904A81109880208EE02A2
+:1029600019E827C18008EE0209C90229F6162EF6D9
+:1029700018C09E600001C09A2F200C18E8170CFEAA
+:1029800011A8FFA6EE2DE2852BF4CF0D9D0B2DE6B1
+:1029900085C87F8A268929A7AA9A260A990C090937
+:1029A00048292525655050C020D10F00C09A63FFEB
+:1029B000C6DA2058113F63FE38DA20C0B658113C01
+:1029C00063FE2E0068973C2B9CFD64BE24C020D182
+:1029D0000FDA20DB705810F8C0C0C0D10ADA390A0B
+:1029E000DC3865CDE063FE098A102B2104580FC442
+:1029F000C0B02B246663FE21DB402A2C745809A248
+:102A0000D2A0D10FDA20580FC963FCF76C1004C0B4
+:102A100020D10F006C1004290A801EE80E1FE80E5A
+:102A20001CE7E60C2B11ACBB2C2CFC2DB2850FCC7B
+:102A3000029ED19CD0C051C07013E80A14E8091856
+:102A4000E8072AB285A82804240A234691A986B853
+:102A5000AA2AB685A98827849F25649FD10F0000E4
+:102A60006C100AD630283010292006288CF9648290
+:102A70009B68980B2A9CF965A1B2022A02580FABF9
+:102A800089371BE7CFC89164520E2A21020A0C4CE9
+:102A900065C2588D3019E7C874D7052E212365E229
+:102AA0009E2F929E1AE7C46FF8026002532AA22654
+:102AB00068A0082C22000ACC0C65C2442A929D64AE
+:102AC000A23E9A151FE7BE8D67C1E6C8DD2B6206E0
+:102AD00018E7BC64B0052880217B8B432B200C18A1
+:102AE000E7B60CBC11A8CC29C28679EB460FBE0A0A
+:102AF0002EE2A368E0052F22007EF9372CC2859CC8
+:102B00001864C2332B212F87660B7B360B790C6F31
+:102B10009D266ED2462C203D7BC740CE5560001EC0
+:102B20002A200CC1B28C205811229A1864A2458D1B
+:102B30006763FFCFC0C063FFC5D7B063FFD300C0DA
+:102B4000E06000022E60030EDB0C6EB20EDC700C37
+:102B5000EA11AA6A2AAC20580199D7A0DA20DB70C2
+:102B6000C1C82D21205810BC8C268B279A160CBB6F
+:102B70000C7AB3348F18896399F3886298F28E6562
+:102B80009EF82D60108A189D1768D729C0D09DA97E
+:102B90002C22182B22139CAB9BAA97A58E667E73C2
+:102BA00002600097CF5860001FDA208B1658108201
+:102BB00065A13863FFBDC081C0908F18C0A29AF98B
+:102BC00099FB98FA97F563FFD2DB30DA20DC4058A6
+:102BD0001026C051D6A0C0C02BA0102CA4039B1758
+:102BE0002C1208022A02066B02DF702D60038E177A
+:102BF0009D149E100CDD11C0E0AD6D2DDC20580140
+:102C0000188C148B16ACAC2C64038A268929ABAAC9
+:102C10000A990C9A26886609094829252507880CEF
+:102C200098662F2218A7FF2F261863FE96DA20DB5E
+:102C300030DC40DD50581130D2A0D10FC0302C20F4
+:102C4000668961B1CC0C0C472C24666EC60260000C
+:102C5000D2C03009FD5065D0CA8E6764E0696470E7
+:102C600066DB608C18DF70DA202D60038E170CDDB8
+:102C7000119E10AD6D2DDC201EE7755800F923263E
+:102C800018DA208B16DC402F2213DD50B1FF2F26DF
+:102C900013580FC5D2A0D10F0028203D0848406529
+:102CA0008DE76F953EDA308DB56D990C8CA80C8C44
+:102CB00014CACF7CD32D2AAC10C090292467090DEB
+:102CC0004764DDC5600092002C1208066B022D6C73
+:102CD00020077F028E17DA209E101EE75C58007DC9
+:102CE00063FF9A00C09163FFD1000000655081DA54
+:102CF00020DB60DC40580FDCC020C0F02FA403D1E3
+:102D00000FDA20C0B658106A63FFE000006F95022A
+:102D100063FD6CDA20DB30DC40DD50C4E0580F5836
+:102D2000D2A0D10F8A152B2104580EF52324662832
+:102D30006010981763FF2100DA2058105D63FFAB25
+:102D4000C858DB30DA20580F3C2A210265AF9CC0FE
+:102D50009409A90229250263FF91DB30DC40DD5094
+:102D6000C0A32E0A802A2468DA20580F45D2A0D1A9
+:102D70000FC020D10FDA202B200C58107263FF6B8C
+:102D80006C1004282006C062288CF8658125C0508C
+:102D9000C7DF2B221BC0E12A206B29212300A104BD
+:102DA000B099292523B1AA00EC1A0BC4010A0A44E0
+:102DB0002A246B04E4390DCC030CBB012B261B64C5
+:102DC000406929200C1BE6FC0C9A110BAA082FA2C3
+:102DD000861BE6FA6FF9026000B60B9B0A2BB2A3C2
+:102DE00068B0082C22000BCC0C65C0A42BA2851D5A
+:102DF000E71E64B09B8C2B2421040DCC029CB08870
+:102E000020C0C50888110C880298B1882A0844118E
+:102E100098B48F3494B79FB5C0401EE6EF2DA285BD
+:102E20000E9E0825E4CF2DDC282DA6852921020938
+:102E3000094C68941A689820C9402A210265A00BA1
+:102E40002A221E2B221D7AB10265A079C020D10F43
+:102E50002C212365CFDE6000082E21212D21237E29
+:102E6000DBD52B221E2F221D2525027BF901C0B0A8
+:102E700064BFC413E6D02CB00728B000DA20038862
+:102E80000A28824CC0D10B8000DBA065AFE763FF4E
+:102E9000A62A2C74C0B02C0A02580E2F1CE6F49CF3
+:102EA000A08B2008BB1106BB029BA1893499A263A9
+:102EB000FF790000262468DA20DB30DC40DD505842
+:102EC000108ED2A0D10FDA202B200C580FF9C02081
+:102ED000D10F00006C1006073D14C080DC30DB40D1
+:102EE000DA20C047C02123BC3003283808084277C5
+:102EF0004001B1DD64815A1EE6AC19E6AD29E67EDB
+:102F0000D30F6DDA0500508800308CC0E0C020255A
+:102F1000A03C14E6ABB6D38FC0C0D00F87142440BA
+:102F2000220F8940941077F704C081048238C0F1E1
+:102F30000B2810C044C02204540104FD3802520181
+:102F400002FE3808DD10821C07EE100E6E020EDD48
+:102F500002242CFEC0E004FE380AEE100E88020D9A
+:102F600088028DAB1EE69B08D8020E880298B0C07E
+:102F7000E80428100E5E0184A025A125084411084C
+:102F80004402052514045511043402C0810E8E3903
+:102F900094B18FAA84109FB475660C26A11FC0F24D
+:102FA000062614600009000026A120C0F20626149F
+:102FB0000565020F770107873905E61007781008C5
+:102FC000660206550295B625A1040AE611085811B5
+:102FD00008280208660296B7C060644056649053A1
+:102FE000067E11C0F489C288C30B340B96459847FE
+:102FF000994618E6829F410459110E99021FE680F6
+:10300000020E4708D80298420E99029F40C1E00E76
+:10301000990299442FA00CB4380CF91114E66F1ED4
+:10302000E666A4FFAE992E928526F4CF0E880B2873
+:103030009685D10F2BA00C1FE6601CE6670CBE1115
+:10304000ACBBAFEE2DE28526B4CF0D3D0B2DE68552
+:10305000D10FC08005283878480263FEA263FE962F
+:103060006C1006C0C06570F18830C03008871477D6
+:103070008712C0B0C0A619E652299022C030CC9762
+:10308000C031600003C0B0C0A6C0E0C091C0D4C0D1
+:103090008225203C0B3F109712831CC070085801FA
+:1030A0000D5D01089738C0800B98380777100488A9
+:1030B00010086802087702C0800D98382D3CFE0881
+:1030C00088100D9E388D2B0AEE1008EE0207EE02D6
+:1030D0000CB8100FDD02053B400EDD029D4089203B
+:1030E000043D100899110D99022D210409A9020827
+:1030F000DD119941872A05B9100D3D020ABB110D5A
+:10310000BB02087702974428212587120828140457
+:103110008811071E4007EE100E99027566092621D8
+:103120001F062614600006002621200626140868C3
+:10313000029B47098802984629200CD2C0C0800C07
+:103140009E111BE6251FE61CAB99AFEE2DE28528EC
+:1031500094CF0DAD0B2DE685D10FDD40C0A6C0B0DC
+:103160008E51CAE0B2AAB1BB2DDC108F500E78365A
+:10317000981008770C9FD898D989538F5299119934
+:10318000DB9FDA7E8309B1CC255C10C97763FFCF62
+:1031900088108D1108E70C9751AD8DD7F078DB01C1
+:1031A000B1F79D5397528830C03008871408884083
+:1031B000648ED565BEC963FEBC0000006C1004D7E8
+:1031C00020B03A8820C0308221CAA0742B1E2972F8
+:1031D000046D080FC980C9918575B133A2527A3B3D
+:1031E0000B742B0863FFE900649FECD10FD240D130
+:1031F0000F0000006C100AD6302E3027D950DA406C
+:1032000015E5F02430269A1529160464E00264932B
+:10321000732920062A9CF865A3CE2A2102270A04D6
+:103220000A0B4C65B3978C3074C7052D212365D4E8
+:10323000A0C0A62B0A032C2200580F3664A3B9178E
+:10324000E5DE8E389A1664E3BA2F6027285021C92C
+:10325000F37E8311C2B08C202A200C580F55D7A0C2
+:10326000CDA16004A200C2B08C202A200C580F29E6
+:10327000D7A064A4862F212E8B680FBF360FB90C00
+:103280006F9D54296027D5B06E920528203D7B8F15
+:103290004CDA20DB50C1C42D211F580EEF8B269A2B
+:1032A000189A1989272AAC380B990C7A9353896399
+:1032B000C08099738F6298789F728E659E798D67B2
+:1032C0009D7B8C6695759C7A8E687E53026000B1FA
+:1032D0008B1465B050600038DBF063FFA5008A14E2
+:1032E000C9A92E60030E9B0C6EB2A5DC500CEA112E
+:1032F000AA6A2AAC285BFFB1D5A063FF93C0E06344
+:10330000FFE2DA208B18580EAC65A2B163FF9E0075
+:1033100000DA20DB308C15580E54D6A0C0C0C0D1C6
+:103320002D16042CA403DC70DA20DB60DF502D6046
+:1033300003C0E09E109D171EE5B90CDD110D6D0850
+:103340002DDC285BFF478E668F678817AF5FA8A8C4
+:1033500028640375FB01B1EE8A189E669F67892673
+:103360008829AA9909880C99268E6808084805EECC
+:103370000C28252515E5939E6865EECC63FEE600D6
+:103380000000C9432F21232B21212FFC010F0F4FB8
+:103390002F25237FBB026003142C20668961B1CCEA
+:1033A0000C0C472C24666EC60260022809FD50658D
+:1033B000D22264E1B62E602764E1B0DC70DF50DA1F
+:1033C00020DB601EE5AB2D6003C08098100CDD1182
+:1033D000AD6D2DDC285BFF22644181C0442B0A00C7
+:1033E0008C202A200C580ECB0AA70265A00FC0B073
+:1033F0002C22002A200C580EC7D7A064AFEFDA2089
+:10340000C1BCC1C82D21208F188E268929AFEE9E00
+:10341000260E990C090948292525580E8FC090C001
+:1034200050C0C288609A191EE566C0A12EE022082D
+:103430008F14778704C0810E8938C0800B93102DBC
+:10344000203C2921200CDC0104DB010929140BA8F4
+:10345000380CA5380D3D401CE57E8B2B08881007E5
+:1034600055100855020533022821250F154003BBCE
+:10347000020CBB0207551005D3100828140ADD11F1
+:103480000488110988020533022921040833029BAC
+:1034900070C0808A201BE57708AA110BAA029A71D6
+:1034A000C0A1852A9376957408931103DD020ADD85
+:1034B000029D778C63C1DC9C738B6298789A799BB0
+:1034C00072232214C0C0B1352526149C7B9D7593B0
+:1034D0007A2B621A9B7C2A621C9A7D28621D987E38
+:1034E00025621B957F2362172376102D62182D7697
+:1034F000112C62192C761264E0B98E6077E73DC01A
+:10350000FE13E53E1DE53FC1818A628B6304951180
+:103510000E9C4006CC110C5502247615085502C0AD
+:10352000802D76148D2B2B761B2A761A287619255A
+:10353000761803DD022D76166000030000C0FA2E17
+:10354000200C19E52518E51CA9E90CEE11A8EEC020
+:10355000802DE2852894CF0DFD0B2DE685DA208B9A
+:10356000198C158D14580D90D2A0D10FDC70DF503E
+:10357000DB602D6C28C0A01EE53E9A10DA205BFEB1
+:103580005563FE53002B203D0B4B4065BC826FE51D
+:1035900027DA308F556DE90C8EAA0E8E14C9E87E9D
+:1035A000F3162AAC10C090292467090F4764FC6009
+:1035B00060015F00C0FA63FF85C09163FFE8881473
+:1035C000658168DA20DB608C15580DA7C020C0909B
+:1035D00029A403D10F8A162B2104580CC9C0A02A94
+:1035E00024668E6863FDCA00002B9CF965B0FDDA85
+:1035F00020580CCE63FC220000DA20C0B6580E2CF6
+:1036000063FFBA002B200C0CBE11A7EE2DE286C181
+:10361000C27DC30260011819E4E909B90A2992A31D
+:103620006890082A220009AA0C65A10326E2856495
+:1036300060FD2C20668931B1CC0C0C472C24666FC0
+:10364000C60270960C8A162B2104580CADC0D02DE2
+:1036500024668E3077E74D1CE4E91BE4E98F32885D
+:1036600033C0A42D21040E994006991104DD1109DF
+:10367000DD029A61C19009DD029B60C0908B2B9D99
+:10368000649F66986799650CBB029B6228200C1AA0
+:10369000E4D2AA8A0C8811A7882F828529A4CF2F6B
+:1036A000FC202F86858A1465A0A6C020D10FB0FC0F
+:1036B0008B142C2523C8B7022A02066B02580CDE95
+:1036C0002A210265AEF7C0D80DAD022D250263FE9A
+:1036D000EC008E14C8E8DA20DB30580CD72A21021F
+:1036E00065AEDA07AF022F250263FED100DA20DBD8
+:1036F000308C158D14580E80D2A0D10FDA202B20DB
+:103700000C580DEB63FEB600DA202B200C580E0D82
+:1037100063FEAADA20DB308C152D12042E0A8028D5
+:103720000A00282468580CD663FAE500C020D10F9F
+:10373000DA20580DDF8914CD92DA20DB308C155851
+:103740000D4ADBA0C020C0A02AB403D10FC020D1F5
+:103750000F2A2C748B1558064CD2A0D10F000000F4
+:103760006C100E28210224160108084C6583A91F3D
+:10377000E49229F29E6F98026003AD1EE48E29E266
+:10378000266890082A220009AA0C65A39B24F29DB2
+:103790006443952A31160A4B412B240BB4BB0B0B07
+:1037A000472B240C0CB611AF66286286C1CC78C3B7
+:1037B0000260037F19E48209B90A2992A36890077D
+:1037C0008C2009CC0C65C36B276285647365293135
+:1037D00009C0D02D24668C3599139C2A88369C14F8
+:1037E000982B8E3798159E169E2C8C38C0E10C5C59
+:1037F000149C179C2D88392925042E251D28251C4D
+:103800002C3028C0822C243C2930290C0C4708C8B5
+:103810000129243D29311598189912090841089960
+:103820000C299CEC29251F7EC725921C8212282A70
+:1038300000082060991B01023E00093EB128098260
+:1038400039891B0E221102990C821C29251F821C0A
+:10385000941D951E24211F15E4880451609A10C1FF
+:10386000802B1610252014961F05054301063E00E7
+:103870000D3EB16B0DB6398B3C2D9CFC08663606AF
+:10388000441C893D2E26132E26142E26152E246B1D
+:1038900025241406D61CC05025261825261B2524B1
+:1038A000672524682832112525232525242525254B
+:1038B00025252C2925222D25202B252124252E26A2
+:1038C000252F14E46F16E46D1BE45298192D211C6A
+:1038D000C08498719B70892095759577957F967CAB
+:1038E000967E98799B7894731BE46714E4680C388F
+:1038F000400288100C064015E464016610947D9B1C
+:1039000074841D1BE444086602957B18E431851E0F
+:103910000B99029972997A0866022B121096768694
+:103920001F6FD2026001C8C0A0991A6D080AB1AA1F
+:1039300000A10400E81A7D8B0263FFEE891AC0E043
+:10394000961F1DE43E2B1610951E941D28203D2920
+:10395000761A297612C040C051C0B22D76130806DF
+:10396000408D170B8801065E380AEE101BE44A08EA
+:103970005438B0A609661188140B44102B761B042A
+:10398000EE028B1614E44308DA1406EE020D8810DA
+:103990002A761E86131AE41C04EE020D66110866D0
+:1039A000022E76160D14141EE41A0D44110BD814B1
+:1039B0000866020A44022E76182E76102476172600
+:1039C000761FC084287619287611C76F0C24400F03
+:1039D00044111CE3FB26761D26761C2676152676DA
+:1039E000148A262676242676252976222E762028E5
+:1039F00076218E1888150DB91016E4278BC70D880F
+:103A0000110E5E39ADBB851904EE022676230988B6
+:103A100002861F89102876260A04480544110505E8
+:103A2000480E551105440204EE02851E841D2E76B3
+:103A3000272820069B2D29246A2E31172B12102EA1
+:103A40002538CC83C0D02D2407C0D7090840648016
+:103A50008E9A290928416480AA64E0B42D2406C006
+:103A60009809E9362D0AA02A628501C404ADAA2D61
+:103A700021042A668508DD11883F8E3E2732100812
+:103A8000EA1800C40408E8180088110ECE5308771D
+:103A900002C08308DD029D4118E401090D4E9840E3
+:103AA00088209A4397449D4517E3FE1DE3CB058884
+:103AB0001108EE02ADBDC08007EE029E4228D4CFB1
+:103AC0002AF29D87CA2AAC18B1772AF69D1AE3B963
+:103AD00097CA28A4A268711C655060C020D10F004D
+:103AE0002D2406C080C09809E9360E893863FF731B
+:103AF000C0A063FE481BE3CB1AE3EB2AB68963FF41
+:103B0000D600000065EF54C098C0D82D240663FF8E
+:103B1000522D2406C09063FF4ACC57DA20DB308C4C
+:103B200011580C51C020D10F00DA20C0B6580CE05B
+:103B300063FFE500DA20580CDE63FFDC2A2C748B6F
+:103B400011580551D2A0D10F6C10062820068A33D7
+:103B50006F8202600161C05013E39729210216E3CE
+:103B600096699204252502D9502C20159A2814E331
+:103B7000948F2627200B0AFE0C0477092B712064F2
+:103B8000E1398E428D436FBC0260016F00E104B0E9
+:103B9000C800881A08A80808D80298272B200668A9
+:103BA000B32ECE972B221E2C221D0111027BC901A0
+:103BB000C0B064B0172CB00728B000DA2003880A20
+:103BC00028824CC0D10B8000DBA065AFE7C020D1BC
+:103BD0000F2D206464DFCA8B29C0F10BAB0C66BFCC
+:103BE000C02B200C0CBC11A6CC28C2862E0A08784B
+:103BF000EB611EE3720EBE0A2EE2A368E0052822E6
+:103C0000007E894F29C2851EE37E6490461FE38CA7
+:103C10009E90C084989128200A95930F88029892CC
+:103C20008E200FEE029E942F200788262F950A984B
+:103C3000969A972E200625240768E3432921022A15
+:103C4000C2851DE3652AAC20ADBD25D4CF2AC6852B
+:103C500063FF4E002E2065CBEDC082282465C9F697
+:103C600005E4310002002A62821BE36D2941020B48
+:103C7000AA022A668209E43129210263FF23000097
+:103C800064DFB88F422E201600F1040DEE0C00EE1A
+:103C90001AAEAE9E2963FFA38A202B3221B1AA9AC5
+:103CA000B0293221283223B4992936217989A92BC8
+:103CB00032222B362163FFA0C020D10F9F2725245D
+:103CC00015ACB82875202B2006C0C12EBCFE64E0C0
+:103CD000AB68B7772DBCFD65DEC72D2064C0F064EE
+:103CE000D0868E290EAE0C66E089C0F128205A28B5
+:103CF0008CFE08CF3865FEE863FF580000E00493AF
+:103D000010C0810AF30C038339C78F08D80308A8B1
+:103D10000108F80C080819A83303C80CA8B82875BE
+:103D200020030B472B24158310CBB700E104B0BC54
+:103D300000CC1AACAC0CDC029C27659E5EC0B20BBA
+:103D4000990209094F29250263FE50002D206A0DB2
+:103D50002D4165DF7EDA20C0B0580CA864AF18C0D2
+:103D6000F163FEEF9F2763FFD02E221F65EE3263C3
+:103D7000FF79000028221F658E2763FF6E25240629
+:103D800029210263FE1B00006C10066571332B4C69
+:103D900018C0C7293C18C0A1C08009A8380808422B
+:103DA0006481101CE3011AE3022AC67E2A5CFDD35B
+:103DB0000F6DAA0500B08800908C8940C0A00988CA
+:103DC000471FE32B080B47094C50090D5304DD1026
+:103DD000B4CC04CC100D5D029D310CBB029B30882D
+:103DE000438E2098350FEE029E328D26D850A6DDE8
+:103DF0009D268E40C0900E5E5064E0971CE3111E1D
+:103E0000E300038B0BC0F49FB19EB02D200A99B341
+:103E10000CDD029DB28F200CFF029FB48E262D2058
+:103E2000079EB68C282DB50A9CB72924072F20069B
+:103E30002B206469F339CBB61DE2E22320168DD224
+:103E40000B330C00D10400331AB48DA3C393292281
+:103E5000200C13E2E11FE2D80C2E11AFEEA32229B1
+:103E600024CF2FE285D2A00FDD0B2DE685D10F00E8
+:103E70002E200CB48C0CEB111FE2D81DE2CFAFEE5C
+:103E8000ADBB22B28529E4CF02C20B22B685D2A0F7
+:103E9000D10F00002E200C1CE2C81FE2CF0CEB114A
+:103EA000AFEEACBB22B28529E4CF02820B22B685ED
+:103EB000D2A0D10FC0D00BAD387DC80263FEEC6339
+:103EC000FEE08E40272C747BEE12DA70C0B32C3CDF
+:103ED00018DD50580A9B8940C08063FEE3066E02DD
+:103EE000022A02DB30DC40DD505800049A10DB501F
+:103EF000DA70580465881063FEF700006C100692B3
+:103F0000121EE2B98C40AE2D0C8C472E3C1804CA10
+:103F10000BD9A07DA30229ADF875C302600084C04F
+:103F2000B0C023C0A09D106D0844B89F0EB80A8D84
+:103F3000900EB70BB8770D6D36ADAA9D800D660C4F
+:103F4000D8F000808800708C879068B124B2227706
+:103F5000D3278891C0D0CB879890279C1000708879
+:103F600000F08C9D91CB6FC08108BB0375CB36638D
+:103F7000FFB4B1222EEC1863FFD485920D770C8626
+:103F8000939790A6D67D6B01B1559693959260005C
+:103F900016B3CC2D9C188810D9D078D3C729DDF85A
+:103FA00063FFC100C0238A421BE2C000CD322D4412
+:103FB000029B3092318942854379A1051EE2BC0EF5
+:103FC000550187121BE2AB897095350B9902993226
+:103FD00088420A880C98428676A6A696768F44AFC9
+:103FE000AF9F44D10F0000006C10089311D63088A9
+:103FF00030C0910863510808470598389812282165
+:1040000002293CFD08084C6581656591628A630A56
+:104010002B5065B18B0A6F142E0AFF7CA60A2C2048
+:104020005ACCC42D0A022D245A7FE0026002158961
+:104030002888261FE29F09880C65820F2E200B0F0F
+:10404000EE0B2DE0FE2EE0FF08DD110EDD021EE27C
+:1040500099AEDD1EE2991CE2990EDD010DCC37C14F
+:1040600080084837B88DB488981089601AE2557B6B
+:1040700096218B622AA0219C147BA3179D132A20D2
+:104080000C8B108C20580BCA8C148D13DBA0CEAC7B
+:104090006001C4002E200C1BE2480CEA110BAA0898
+:1040A0002BA2861FE2467BDB3B0FEF0A2FF2A368B1
+:1040B000F0052822007F892C2BA28564B0AA876294
+:1040C0008826DE700C7936097A0C6FAD1C8F279B21
+:1040D0001508FF0C77F3197E7B729D139C149B15BA
+:1040E000CF56600025C0B063FFD0D79063FFDD00DE
+:1040F000009D139C14DA20DB70580B2F8B158C1449
+:104100008D1365A06A8E6263FFCC00DA208B11DC10
+:1041100040580AD5D6A08B15C051DE70DA20DC607D
+:10412000DD405BFF768D138C14D9A02E200C1BE292
+:10413000221FE2290CEA11AFEFC0E0ABAA2BA28547
+:104140002EF4CF0B990B29A68563FF1D00DA20DC26
+:1041500060DD40DE708912282007DF50A9882824FE
+:10416000075BFF09D2A0D10F00DBE0DA20580B502B
+:104170006550EF2A20140A3A4065A0EBDB60DC4072
+:10418000DD30022A025809BCD6A064A0D584A183E0
+:10419000A00404470305479512036351C05163FE11
+:1041A0005C2C2006D30F28CCFD6480A568C704C012
+:1041B000932924062C2006C0B18D641FE2019D279F
+:1041C0009D289D298FF29D2600F10400BB1A00F066
+:1041D00004B0BE0EDD01C0F0ADBB8D652F24070D10
+:1041E0000E5E01EE11AEBB2E0AFEB0BB0B0B190E1C
+:1041F000BB36C0E20B0B470EBB372B241618E1F978
+:104200000A09450D0B422B240B29240AB4BE2E2487
+:104210000C7D88572920162FCCFDB09D0A5C520DCD
+:10422000CC362C246465FDEC0C0C4764CDE618E11B
+:10423000E48E2888820C9F0C00810400FF1AAFEEE8
+:104240009E2963FDCF1CE21163FE13001CE20B6389
+:10425000FE0C8D6563FFA500DA202B200C580B396E
+:10426000645F0FC020D10F00C020D10FC09329245C
+:1042700016C09363FFA000006C1004C06017E1CD6E
+:104280001DE1D0C3812931012A300829240A78A1EF
+:1042900008C3B27BA172D260D10FC0C16550512654
+:1042A00025022AD0202F200B290AFB2B20142E2098
+:1042B0001526241509BB010DFF0928F1202B241414
+:1042C000A8EE2EF52064A0A92B221E28221D011184
+:1042D000027B8901DB6064B0172CB00728B000DADC
+:1042E0002007880A28824CC0D10B8000DBA065AF74
+:1042F000E7DB30DC40DD50DA205800DE29210209FE
+:104300000B4CCAB2D2A0D10F00CC5A2C30087BC1C2
+:10431000372ED02064E02D022A02033B02DC40DD70
+:10432000505800D4D2A0D10F2B2014B0BB2B241492
+:104330000B0F4164F0797CB7CAC0C10C9C022C25DC
+:1043400002D2A0D10FC020D10F2E200669E2C126D3
+:1043500024062B221E2F221D29200B2820150D9903
+:10436000092A9120262415AA882895207BF14960E6
+:104370000048B0BB2B24140B0A4164A0627CB70236
+:104380002C25022B221E2C221DD30F7BC901C0B06D
+:10439000C9B62CB00728B000DA2007880A28824C5A
+:1043A000C0D10B8000DBA065AFE7C020D10F0000BB
+:1043B000262406D2A0D10F0000DB601DE18164BF7E
+:1043C0004F2CB00728B000DA2007880A28824CC09A
+:1043D000D10B8000DBA065AFE71DE17963FF310001
+:1043E00026240663FF9C00006C1004282006260A81
+:1043F000046F856364502A2920147D9724022A02C1
+:10440000DB30DC40DD50580019292102090A4CC874
+:10441000A2C020D10FC0B10B9B022B2502C020D11E
+:104420000F00022A02033B022C0A015800D1C9AA3C
+:10443000DA20DB30DC40580A0C29A011D3A07E978B
+:10444000082C0AFD0C9C012CA411C0512D2014062F
+:10445000DD022D241463FFA4DA20DB30DC40DD50C4
+:10446000C0E0580987D2A0D10F0000006C100616DA
+:10447000E1521CE152655157C0E117E14E2821027B
+:104480002D220008084C6580932B32000B695129BE
+:104490009CFD6590872A629E6EA84C2A722668A0B1
+:1044A000027AD9432A629DCBAD7CBE502B200C0CE6
+:1044B000BD11A6DD28D2862F4C0478FB160CBF0A4E
+:1044C0002FF2A368F0052822007F89072DD285D31B
+:1044D0000F65D0742A210419E17AD30F7A9B2EDA62
+:1044E00020580883600035002D21041BE1757DBB39
+:1044F00024DA20C0B658087ECA546001030B2B5042
+:104500002B240BB4BB0B0B472B240C63FFA0DA202E
+:10451000580A67600006DA20C0B6580A656550E0A0
+:10452000DC40DB302D3200022A020D6D515808D2DA
+:104530001CE123D3A064A0C8C05184A18EA00404B0
+:10454000470E0E4763FF3500002B2104C08B8931D5
+:10455000C070DF7009F950098F386EB8172C2066CB
+:10456000AECC0C0C472C24667CFB099D105808E44B
+:104570008D1027246694D11EE126B8DC9ED06550AC
+:1045800056C0D7B83AC0B1C0F00CBF380F0F42CBFD
+:10459000F119E10518E10728967EB04BD30F6DBAEB
+:1045A0000500A08800C08C2C200CC0201DE10B0C45
+:1045B000CF11A6FF2EF285ADCC27C4CF0E4E0B2E09
+:1045C000F685D10FC0800AB83878D0CD63FFC1001E
+:1045D0008E300E0E4763FEA12A2C742B0A01044D67
+:1045E000025808D72F200C12E0FC0CF911A699A252
+:1045F000FF27F4CF289285D2A008480B289685D1B2
+:104600000FC020D10F0000006C1004C060CB55DB40
+:1046100030DC40055D02022A025BFF942921020979
+:10462000084CC882D2A0D10F2B2014B0BB2B24146D
+:104630000B0C41CBC57DB7EBC0C10C9C022C2502F5
+:10464000D2A0D10F0000022A02033B02066C02C076
+:10465000D0C7F72E201428310126250228240A0F5E
+:10466000EE012E241458010E63FFA300262406D267
+:10467000A0D10F006C1006282102D62008084C6536
+:10468000809D2B200C12E0CC0CB811A2882A8286C7
+:10469000B5497A930260009719E0C909B90A2992CD
+:1046A000A36890082A620009AA0C65A08228828566
+:1046B0001CE0D46480799C80B887B14B9B819B10AF
+:1046C000655074C0A7D970280A01C0D0078D380D75
+:1046D0000D42CBDE1FE0B51EE0B62EF67ED830D3FD
+:1046E0000F6D4A0500808800908C2E3008C0A00015
+:1046F000EE322E740028600C19E0B80C8D11A2DD8A
+:10470000A988C0202CD2852284CFD2A00CBC0B2C2F
+:10471000D685D10FC0F0038F387FA0C063FFB400EF
+:10472000CC582A6C74DB30DC4058080BC020D10F09
+:10473000DA605809DF63FFE7DD402A6C74C0B0DC43
+:104740007058087F2E30088B1000EE322E7400282F
+:10475000600C19E0A10C8D11A2DDA988C0202CD21B
+:10476000852284CFD2A00CBC0B2CD685D10F0000A3
+:104770006C1004292014282006B19929241468817A
+:1047800024C0AF2C0A012B21022C24067BA004C0DC
+:10479000D02D2502022A02033B02044C02C0D0584D
+:1047A00000C0D2A0D10FC020D10F00006C1004298E
+:1047B0003101C2B429240A2A3011C28378A16C7B4A
+:1047C000A1696450472C2006C0686FC562CA572D86
+:1047D00020147CD722DA20DB30DC40DD505BFFA5E3
+:1047E000292102090E4CC8E2C020D10FC0F10F9F51
+:1047F000022F2502C020D10FDA20DB30C0C05BFFC2
+:10480000DC28201406880228241463FFC7292015F9
+:104810001BE06C2A200BC0C09C240BAA092BA120F2
+:104820002C2415AB9929A52063FF9900C020D10F36
+:10483000DA20DB30DC40DD50C0E0580891D2A0D156
+:104840000F0000006C1004CB5513E06725221F0DEC
+:10485000461106550CA32326221E25261F06440BAF
+:1048600024261E734B1DC852D240D10F280A80C087
+:104870004024261FA82828261E28261DD240D10FF6
+:10488000C020D10F244DF824261E63FFD80000005D
+:104890006C1004D620282006C0706E85026000D4FB
+:1048A0001DE04E19E04612E0442A8CFC64A1302B36
+:1048B0006102B44C0B0B4C65B0A22B600C8A600CEF
+:1048C000B8110288082E828609B90A7EC3026000E8
+:1048D0009A2992A368900509AA0C65A08E28828562
+:1048E000648088B8891BE04A94819B80655155C0DB
+:1048F000B7B8382A0A01C0C009AC380C0C4264C0F1
+:10490000421FE0291EE02B2EF67EB04AD30F6DAA7F
+:104910000500808800908CC0A029600C0C9C11A21E
+:10492000CC2BC285AD990B4B0B2BC6852860062777
+:1049300094CF6881222D6015D2A0C9D2C0E22E6426
+:1049400006D10F00C0F008AF387FB0BD63FFB100E3
+:10495000276406D2A0D10F00D2A0D10F00CC57DA25
+:1049600060DB30DC405808C0C020D10FDA60580945
+:104970005063FFE80028221E29221DD30F789901D9
+:10498000C080C1D6C1C11BE018C122AB6B6480429C
+:1049900078913F2A80000CAE0C64E0BB02AF0C643F
+:1049A000F0B52EACEC64E0AF0DAF0C64F0A92EAC0A
+:1049B000E864E0A32FACE764F09D2EACE664E097DA
+:1049C0002F800708F80BDA807B83022A8DF8D8A0A5
+:1049D00065AFBC28612308D739D97060007B00001F
+:1049E0002B600C0CB811A2882C82862A0A087CAB9A
+:1049F0007E09BA0A2AA2A368A0052C62007AC96FB0
+:104A00002A828564A0691FDFFE276504C0E3C0C455
+:104A10002E64069CA11CE02B9FA02E600A97A30C7D
+:104A2000EE029EA28F600CFF029FA42E60147AEF0C
+:104A30004627A417ADBC2F828527C4CF2FFC202F7B
+:104A4000868563FE692A6C74C0B1DC90DD4058072E
+:104A5000BC1DDFE163FEC100D9A0DA60DB30C2D04B
+:104A6000C1E0DC4009DE39DD50580805D2A0D10F85
+:104A7000DA6058090F63FEE4290A0129A4170DBF63
+:104A8000082E828527F4CF2EEC202E868564500BCD
+:104A90002A6C74DB4058017CD2A0D10FC020D10F0A
+:104AA0006C10062B221E28221D93107B8901C0B09A
+:104AB000C0C9C03BC1F20406401DDFCBC0E2C074D8
+:104AC0000747010E4E01AD2D9E11C0402E0A146401
+:104AD000B06E6D084428221D7B81652AB0007EA13E
+:104AE0003B7FA1477B51207CA14968A91768AA1484
+:104AF00073A111C09F79A10CC18B78A107C1AE2908
+:104B00000A1E29B4007CA12B2AB0070BAB0BDAB02C
+:104B10007DB3022ABDF8DBA0CAA563FFB428B0109C
+:104B200089116987BB649FB863FFDC00647FB4634D
+:104B3000FFD50000646FD0C041C1AE2AB40063FF4E
+:104B4000C62B2102CEBE2A221D2B221E7AB12A8C10
+:104B5000107CB1217AB901C0B0C9B913DF96DA204F
+:104B600028B0002CB00703880A28824CC0D10B80E3
+:104B700000DBA065AFE7D240D10F8910659FD463F9
+:104B8000FFF300006C1008C0D0C8598C30292102F6
+:104B90000C0C4760000C8E300E1E5065E19E2921E2
+:104BA00002C0C116DF85090B4C65B0908A300A6ED1
+:104BB0005168E3026000852F629E1BDF7E6EF85312
+:104BC0002BB22668B0052E22007BE94727629DB7ED
+:104BD00048CB7F97102B200CB04E0CBF11A6FF299D
+:104BE000F2869E12798B4117DF7507B70A2772A3E9
+:104BF000687004882077893029F285DF90D7906526
+:104C000090652A210419DFAE7A9B22DA205806B873
+:104C1000600029002C21041BDFAA7CBB18DA20C00D
+:104C2000B65806B3C95860014CC09063FFCCDA2077
+:104C300058089F600006DA20C0B658089D655135B7
+:104C4000DC40DB308D30DA200D6D5158070BC0D0C1
+:104C5000D3A064A120292102C05184A18CA0040406
+:104C6000470C0C4763FF3E00C09B8831DBD008F83F
+:104C700050089B3828210498116E8823282066ACA0
+:104C80008C0C0C472C24667CBB159F139E148A1039
+:104C90008B1158071B8E148F13C0D02D24668A30B9
+:104CA000C092C1C81BDF5B7FA6099BF099F12CF471
+:104CB0000827FC106550A4B83ADF70C051C08007C7
+:104CC000583808084264806718DF3819DF392986A8
+:104CD0007E6A420AD30F6DE90500A08800F08CC0FF
+:104CE000A08930B4E37F9628C0F207E90B2C940822
+:104CF0009B909F912F200C12DF380CF811A6882969
+:104D00008285A2FF2DF4CFD2A009330B238685D153
+:104D10000F22200C891218DF300C2B11A6BBA82201
+:104D20002D24CF2CB285D2A00C990B29B685D10F9A
+:104D3000C087C0900A593879809663FF8ADB30DAE1
+:104D400020C0C1C0D05BFF56292102C0D02A9CFEE2
+:104D500065AE4D2D2502C09063FE45009E142A2CA1
+:104D600074C0B1DC70DD405806F68E14C0D01BDF75
+:104D700028C1C863FF6AC020D10F00006C1006284C
+:104D8000210217DF0D08084C65824929729E6F9831
+:104D90000260025019DF082A922668A0078B200AB9
+:104DA000BB0C65B23F2A729DC0CB64A2371DDF04E5
+:104DB000C0602B3008C0F164B0712E0AFFB0B86437
+:104DC00081512DBCFE64D0F364505C2A2C74044BDA
+:104DD000025800AD0AA2020600000000001ADF0817
+:104DE0002C20076EBB0260022218DEFE13DF081BB8
+:104DF000DF36C0E229200A9AD09ED1ABCB039902BC
+:104E000099D223B08026B480B13308330293D318EB
+:104E1000DEF20CFD11A7DD2CD285A8F82684CF0C7C
+:104E2000EC0B2CD685655FA2C020D10F2B21048806
+:104E300031DE6008F85008CE386EB8102C2066B10C
+:104E4000CC0C0C472C24667CEB026001AF2E30109A
+:104E50002930112C301300993200CB3264E1452AFD
+:104E600030141EDF1A00AA3278CF050E9C092BC41D
+:104E70007F1CDF1766A0050E98092A8480B4A71846
+:104E8000DF15C76F009104AC9CDBC000AE1A00F3C5
+:104E90001A6EC1048BD00BCB0C1CDF0F08B81C069C
+:104EA0003303AC882A848B2CD03627848C03CC0126
+:104EB0000ECC022CD4365801AD63FF0B2F200C0C06
+:104EC000FB11A7BB2DB286C0987D9302600121190A
+:104ED000DEBB09F90A2992A36890082D220009DD9A
+:104EE0000C65D10C2DB285DE6064D10488312B2194
+:104EF0000408F85008CE386FB80263FEDF2C206635
+:104F0000B1CC0C0C472C24667CE30263FECE9D10D2
+:104F100060013100293108292504283014B0886443
+:104F200080A62B31092B240AC0812B30162FD423C5
+:104F30002B240BB4BC2C240C8D378B36292504DE96
+:104F4000D00D8E39DCB00B8C390ECC0264CE7808D3
+:104F50009C1101C4048F380DBE1800C4040DB8188C
+:104F600000881108FF02C08308CC0218DECC9CA187
+:104F700098A018DECB8C209EA39FA405CC110BCF4C
+:104F800053C1E09EA50CFF0208FF029FA218DE8914
+:104F90002624662C729D2684A22CCC182C769D6328
+:104FA000FE250000002D30121CDECD00DA3278DF45
+:104FB000050C9E0B2AE47F66B0050C9F0B2BF4803A
+:104FC0002A301100AA3263FEEC2E240A2B31099BF1
+:104FD0002B63FF5300CC57DA20DB30DC405807222C
+:104FE000C020D10F00DA20C0B65807B163FFE5003A
+:104FF00000DBF0DA205807AE63FFD9000058064006
+:105000001DDE70C0F126246663FE41008B20280A55
+:10501000FFB1CE23200A2C21040E0E472E24077840
+:1050200031359AD02CD50A96D319DEA62ED416C0C7
+:105030008398D1C0E309B80298D409390299D226DD
+:10504000240763FDC958062E8D102624662B2104E3
+:105050002F200C63FD86000008B81119DE6808EEE9
+:1050600002882B9ED59AD0C0EF09880298D204C935
+:10507000110E990299D4C0E49ED163FFC1000000D3
+:105080006C1004C020D10F006C100485210D381164
+:1050900014DE478622A42408660C962205330B935F
+:1050A00021743B13C862D230D10FC030BC29992182
+:1050B00099209322D230D10F233DF8932163FFE34F
+:1050C0006C100AD620941817DE3CD930B8389819DD
+:1050D0009914655256C0E1D2E02E61021DDE390EF0
+:1050E0000E4C65E1628F308E190F6F512FFCFD65FC
+:1050F000F1558EE129D0230E8F5077E66B8F181E65
+:10510000DE78B0FF0FF4110F1F146590CE18DE7516
+:105110008C60A8CCC0B119DE2728600B09CC0B0D20
+:10512000880929812028811E2A0A0009880C08BACA
+:10513000381BDE6B0CA90A2992947B9B0260008CC1
+:105140002B600C94160CBD11A7DD29D286B84879C6
+:1051500083026000D219DE1909B80A2882A39817C1
+:105160006880026000A36000A51ADE5F84180AEE62
+:1051700001CA981BDE108C192BB0008CC06EB313C3
+:105180001DDE0D0C1C520DCC0B2DC295C0A17EDB7B
+:10519000AE6000380C0C5360000900000018DE51AE
+:1051A0008C60A8CCC0B119DE0328600B09CC0B0DB4
+:1051B000880929812028811E2A0A0009880C08BA3A
+:1051C000380CA90A2992947E930263FF72DA60C0B8
+:1051D000BA58073764507360026A00001ADDF68C13
+:1051E000192AA0008CC06EA31A18DDF20C1C5208FC
+:1051F000CC0B18DE3B2BC295C0A178B30263FF3FF6
+:1052000063FFC9000C0C5363FF0989607899182962
+:10521000D285C9922B729E1DDDE76EB8232DD22652
+:10522000991369D00B60000DDA60580721600017F0
+:105230000088607D890A9A1A29729D9C129915CF5F
+:1052400095DA60C0B658071A6551F98D148C18DBD1
+:10525000D08DD0066A020D6D51580587D3A09A14DF
+:1052600064A1E182A085A1B8AF9F1905054702029C
+:10527000479518C05163FE602B6104C08B8931C013
+:10528000A009F950098A386EB81F2C6066A2CC0CB0
+:105290000C472C64667CAB119F119E1B8A15580528
+:1052A000988E1B8F11C0A02A64669F1164F0E58957
+:1052B0001388190FFD022E0A006DD9172F810300E4
+:1052C000908DAEFE0080889F9200908C008088B800
+:1052D0009900908C65514E8A10851A8B301FDDC85D
+:1052E000881229600708580A2C82942D61040ECC7C
+:1052F0000C2C86946FDB3C1CDDF4AC9C29C0800B2D
+:105300005D50A29909094729C48065D0DA2E600C46
+:10531000C0D01FDDB10CE811AFEEA7882282852D29
+:10532000E4CF02420B228685D2A0D10F8E300E0E22
+:105330004763FDA2A29C0C0C472C64077AB6CD8B68
+:10534000602E600A280AFF08E80C64810E18DDDD73
+:1053500083168213B33902330B2C34162D350AC051
+:105360002392319F30C020923308B20208E80292A3
+:10537000349832C0802864072B600CD2A01CDD96C4
+:105380000CBE11A7EE2DE285ACBB28B4CF0D9D0B52
+:105390002DE685D10F8B1888138D30B88C0D8F4773
+:1053A0000D4950B4990499100D0D5F04DD1009FFEB
+:1053B000029F800DBB029B8165508D851AB83AC053
+:1053C000F1C0800CF83808084264806B1BDD771947
+:1053D000DD7829B67E8D18B0DD6DDA0500A0880075
+:1053E000C08CC0A063FEF30082138B161DDD8828DD
+:1053F000600AC0E02EC4800D880202B20B99239F80
+:1054000020C0D298229D2122600CB2BB0C2D11A786
+:10541000DD28D28508BB0B18DD702BD685A8222E7F
+:1054200024CFD2A0D10F9E1B851A2A6C748B185BD7
+:10543000FF168E1B63FEA300C087C0900AF938795F
+:10544000809263FF86C020D10F9E1B2A6C74C0B16E
+:105450008D1858053B8E1B851A63FE7E886B821360
+:10546000891608BE110ECE0202920B9E25B4991E1B
+:10547000DD639F200E88029822C0EF04D8110E88A9
+:10548000029824C0E49E21C080D2A02B600C286426
+:10549000071CDD510CBE11A7EE2DE285ACBB28B474
+:1054A000CF0D9D0B2DE685D10F0000006C1004C0C0
+:1054B00020D10F006C10048633C071C03060000131
+:1054C000B13300310400741A0462017460F1D10F29
+:1054D0006C1004022A02033B025BFFF61CDD391B41
+:1054E000DD83C79F88B009A903098A019AB0798032
+:1054F0001EC0F00FE4311DDD300002002BD2821EF1
+:10550000DD7C2AC1020EBB022BD6820AE431D10F08
+:1055100028C102C19009880208084F28C50208E482
+:1055200031D10F006C1004C0C00CE43112DD251A1B
+:10553000DD2200020029A28218DD701BDD6E26210B
+:10554000020B990108660129A68226250206E4318C
+:1055500014DD6B15DD66236A9023261685502426FC
+:1055600015252617222C50D10F0000006C1008D6EC
+:10557000102B0A64291AB41ADD0F0D23111CDD103B
+:105580000F2511B81898130E551118DD5DAC55A8EC
+:1055900038AA332C80FF2A80FEA933288D01298068
+:1055A0000108AA112880000CAA02088811098802A3
+:1055B00008AA1C288C0828160458086814DD010A5B
+:1055C000A70224411A2A30802B120407AA2858085F
+:1055D00063B1338B13B4559A6004AC28B4662C566F
+:1055E0002B7B69E016DD3A9412C050C0D017DCF472
+:1055F0009D15D370D4102F60802E60829F169E1749
+:10560000881672891A8D128C402A607F0DCC282B47
+:105610003A200CAA28580851C0B10ABE372E354886
+:105620008F1772F91A8D128C402A60810DCC282BAD
+:105630003A200CAA28580849C0B10ABE372E354A6C
+:10564000B233B444B1556952B6B466C0508F15B880
+:1056500077D370B2FF9F156EF899D10F6C1004C00C
+:1056600021D10F006C1004270A001CDCD31FDCE4DE
+:105670001EDCE71DDCD01ADD141BDD22C02824B09F
+:10568000006D2A75AA48288080C09164806100411D
+:105690000415DCCBC03125503600361A06550105FD
+:1056A00095390C56110C66082962966E974D0D5966
+:1056B0000A29922468900812DD0602420872993B7A
+:1056C00023629512DCC8CB349F300282020E440262
+:1056D000C092993194329233AD52246295C0902495
+:1056E0004C1024669524B0002924A0AA42292480C5
+:1056F000B177B14404044224B400D10FD10FD10FCB
+:105700006C10041ADCAC2AA00058021C5BFFD50206
+:105710002A02033B025BFFD11BDCAAC9A12CB10208
+:10572000C0D40DCC020C0C4F2CB5020CE431D10FBF
+:10573000C0A00AE43118DCA00002002F828219DC2C
+:10574000B32EB10209FF022F86820EE431D10F0081
+:105750006C1004C02002E43114DC9A16DC970002BD
+:1057600000226282234102732F0603E431C020D15C
+:105770000F19DCE61ADCE52841020A2A0109880132
+:105780002A668228450208E43115DCDC12DCE125BA
+:105790004621D10F6C1004292006289CF96480A0B2
+:1057A0002A9CFD65A0968A288D262F0A087AD9049E
+:1057B0002B221FC8BD2C206464C0812E22090EAE8E
+:1057C0000C66E0782B200C1EDC7C0CBC11AECC28C7
+:1057D000C28619DC7A78F3026000AD09B90A299211
+:1057E000A36890082E220009EE0C65E09B29C28573
+:1057F0001FDC846490929F90C0E41FDC919E9128EE
+:10580000200AC0E09E930F8802989288200F880299
+:1058100098942F20079A979D962F950A2E24072853
+:10582000200629206468833328C28512DC6B288C0B
+:1058300020A2B22E24CF28C685C020D10FC020D1EF
+:105840000F2A206A0111020A2A4165AF52DA20C0EC
+:10585000B05805EA64AFE5C021D10F00649FC81FAE
+:10586000DC582D20168FF209DD0C00F10400DD1A42
+:10587000ADAD9D2912DC5928C285A2B22E24CF28B5
+:105880008C2028C685C020D10FC021D10F00000078
+:105890006C1004260A001BDC9F15DC4928206517C4
+:1058A000DC46288CFE6480940C4D110DBD082CD272
+:1058B000F52BD2F42ED2F77CB13DB4BB2BD6F47BC2
+:1058C000E9052BD2F62BD6F47CB92C2AD2F62AD6AF
+:1058D000F52AD6F406E4310002002872822AFAFF83
+:1058E000004104290A012F510200991A0A9903095B
+:1058F00088012876820FE4312624652BD2F48E5C51
+:105900002CD2F5B0EE9E5C7BCB1629D2F62FD2F7C7
+:105910000CB80C09FF0C08FF0C0F2F14C8F960001D
+:10592000320BCA0C0A2A14CEA92B5102C0C20CBBDE
+:10593000020B0B4F2B55020BE431D10F00DB30DA99
+:10594000205BFF941BDC7464AF5D0C4D11ADBD6337
+:10595000FFA8000006E4310002002F728218DC303C
+:105960002E510208FF022F76820EE431D10F000083
+:105970006C1004C03003E43116DC1015DC11000299
+:105980000024628274472118DC64875C084801287F
+:105990006682CD7319DC620C2A11AA99229283299E
+:1059A00092847291038220CC292B51020BE431C0E6
+:1059B00020D10F001FDC5B2E51020FEE012E55028D
+:1059C0000EE431B02DB17C9C5C12DC5608DD112D4B
+:1059D000561DD10F6C10061BDBF71EDBF922B00041
+:1059E0001ADC526F23721DDC39C04818DC511FDCF1
+:1059F0004FDC10D5C083F000808600508A6D4A4F7E
+:105A00000F35110D34092440800B560A296294B1D8
+:105A1000330E55092251480F44110C440A8740099E
+:105A2000A80C02883622514907883608770CA899B5
+:105A30002966949740296295874109A80C02883607
+:105A400007883608770CA899296695974103034281
+:105A5000B13808084298F0D10F1CDC3613DC372728
+:105A6000B0002332B5647057C091C0D016DC351534
+:105A7000DC33C0402AC00003884328C4006D793C51
+:105A8000004104B14400971A7780148E502FB295CC
+:105A90002DB695AFEE2EED2006EE369E5060001826
+:105AA00077A00983509D5023B69560000223B295DC
+:105AB000223D2006223622B695B455B8BBD10F0040
+:105AC00003884328C400D10F6C1004C04004E431A3
+:105AD00015DC1D000200885013DC1CCB815BFFBD70
+:105AE0001CDC1B0C2D11ADCC2BC2822AC28394501E
+:105AF0007BAB142EC28429C2850ABD0C0E990C0DF5
+:105B0000990C0929146000050BA90C092914993076
+:105B100015DBAC2A51020AE4312A2CFC58004B2B2D
+:105B200032000AA2022BBCFF9B30CCB6C8A4D2A084
+:105B3000D10F000004E4311EDBA00002002DE28240
+:105B40002FBAFF2C51020FDD012DE6820CE431D17A
+:105B50000F0000006C1004D10F0000006C1004C096
+:105B600020D10F006C100413DBFAC0D103230923EA
+:105B7000318FC0A06F340260008D19DB8F1BDB906A
+:105B800017DBF30C2811A8772672832572822CFA72
+:105B9000FF76514788502E7285255C0425768275E4
+:105BA000E9052572842576827659292E72842E760F
+:105BB000822E76830AE431000200239282002104BF
+:105BC0002FB10200D61A0C66030633012396820F0A
+:105BD000E43126728325728260000200D8A07659D3
+:105BE000220AE43100020023928200210400D21A2A
+:105BF0002FB1020C22030232012296820FE431D22D
+:105C000080D10F00D280D10FC020D10F6C1004DBE7
+:105C100030862015DB68280A00282502DA2028B003
+:105C2000002CB00705880A28824C2D0A010B800041
+:105C3000DBA065AFE61ADB610A4A0A29A2A3C7BF47
+:105C4000769101D10F2BA6A3D10F00006C1004C0D8
+:105C5000D1C7CF1BDB5B19DB5817DB560C2811A80B
+:105C60007786758574C0A076516288508E77B4555A
+:105C7000957475E903857695747659278F769F75A7
+:105C80009F740AE431000200239282B42E2FB102E5
+:105C900000E10400D61A0C66030633012396820F36
+:105CA000E431867583747639280AE4310002002EC7
+:105CB0009282B42200210424B10200DF1A0CFF03F7
+:105CC0000FEE012E968204E431D280D10FD8A07657
+:105CD00051D6D280D10F00006C1004290A801EDB3F
+:105CE0005D1FDB5D1CDB350C2B11ACBB2C2CFC2DA4
+:105CF000B2850FCC029ED19CD0C051C07013DB592D
+:105D000014DB5818DB562AB285A82804240A234637
+:105D100091A986B8AA2AB685A98827849F25649F59
+:105D2000D10F00006C100419DB8B0C2A11A9A98972
+:105D300090C484798B761BDB79ABAC2AC2832CC2EE
+:105D4000847AC1688AA02BBC30D3A064A05E0B2BE0
+:105D50000A2CB2A319DB4268C0071DDB7FD30F7D7D
+:105D6000C94AA929299D0129901F68913270A6036B
+:105D7000D3A0CA9E689210C7AF2AB6A32A2CFC5B98
+:105D8000FFB3D230D10F000013DB7503A3018C31B8
+:105D90001DDB130C8C140DCC012CB6A363FFDC00AF
+:105DA000C020D10FDA205BFFCCC020D10FC020D1A2
+:105DB0000F0000006C1004DB30C0D019DAFEDA20CE
+:105DC00028300022300708481209880A28824CDC53
+:105DD000200B80001BDAF90C4A11ABAA29A2840916
+:105DE000290B29A684D10F006C1004C04118DAF2E7
+:105DF00017DAF40C2611A727277038A866256286C3
+:105E0000007104A35500441A75414822628415DBD1
+:105E10001502320BC922882117DAF10884140744CD
+:105E200001754905C834C020D10FD10F0809471D9D
+:105E3000DB4AC0B28E201FDADF0E0E43AFEC2BC45C
+:105E4000A00FEE0A2DE6242A6284C0200A990B29AD
+:105E50006684D10FC020D10F6C1004DB30C0D01885
+:105E6000DAD5DA2025300022300708580A28824C7B
+:105E7000DC200B80008931709E121BDACF0C4A1196
+:105E8000ABAA29A28409290B29A684D10F09C952DA
+:105E900068532600910418DACAC0A12F811600AAFF
+:105EA0001A0AFF022F85161EDAC40C4D11AEDD2C26
+:105EB000D2840C2C0B2CD684D10FC0811FDAC1B830
+:105EC0009A0A0A472EF11600A10400881A08EE0269
+:105ED0002EF5161DDAB90C4C11ADCC2BC2840B2B50
+:105EE0000B2BC684D10F00006C1004DB30C0D0191E
+:105EF000DAB1DA2028300022300709880A28824CDB
+:105F0000DC200B80001CDAAC0C4B11ACBB2AB28439
+:105F10000A2A0B2AB684D10F6C1004C04118DAA6E5
+:105F200016DAA80C2711A626266038A87225228624
+:105F3000006104A35500441A7541082222840232EC
+:105F40000BD10F00C020D10F6C100415DB050249E6
+:105F5000142956112452120208430F8811C07300ED
+:105F6000810400361A008104C78F00771A0877036E
+:105F7000074401064402245612D10F006C10066E2D
+:105F800023026000AC6420A7C0A0851013DADD16E0
+:105F9000DAF4C040A6AA2BA2AE0B19416490666841
+:105FA000915D68925268933C2AA2AA283C7F288C73
+:105FB0007F0A0A4D2980012880002AACF208881146
+:105FC0000988027589462B3D0129B0002BB00108D4
+:105FD00099110B99027A9934B8332A2A00B1447284
+:105FE00049B160004A7FBF0715DADF63FFB90000DF
+:105FF000253AE863FFB10000253AE863FFA90000F5
+:10600000250A6463FFA1C05A63FF9C0000705F080B
+:106010002534FF058C142C34FE70AF0B0A8D142E22
+:106020003D012AE4012DE400DA405BFD5063FFA747
+:10603000D10FD10F6C10041ADA6219DA5F1CDACAB8
+:106040001BDACBC080C07160000D00000022A438B4
+:10605000B1AA299C107B915F26928679C2156E6247
+:1060600062C0206D080AB12200210400741A764B28
+:10607000DB63FFEE2292850D6311032514645FCF6D
+:10608000D650032D436DD9039820B4220644146DD5
+:106090004922982098219822982398249825982678
+:1060A000982798289829982A982B982C982D982EDC
+:1060B000982F222C4063FF971EDA4027E68027E6C0
+:1060C00081D10F00C02063FF830000006C1004C06A
+:1060D00062C04112DA3B1ADA3713DA522AA00023DF
+:1060E000322D19DA9F2BACFE2992AE6EA30260000E
+:1060F0008E090E402D1AC2C2CD0EDC392C251A6431
+:10610000B0895BFF9E15DA9A1ADA952B3AE80A3ABB
+:10611000015805922B211A0ABB28D3A09B50580581
+:10612000A92B52000ABB082A0A005805A815DA91C3
+:106130002D21022C3AE80C3C2804DD022D25029C7E
+:10614000505805A08B50AABBC0A15805A01CDA8AE4
+:106150002D21020C3C2806DD0213DA882D25029C35
+:10616000305805988B30AABBC0A25805982A210246
+:10617000C0B40BAA020A0A4F2A25025805ACD10F57
+:10618000242423C3CC2C251A63FF760018DA801C44
+:10619000DA7C19DA7D1BDA7B17DA4F85202E0AFDAF
+:1061A0001FDA7C2D203624F47A24F47E24F4820E27
+:1061B000DD0124F4862E0AF707552806DD02C07596
+:1061C0000EDD01050506AB5BA959C0E8AC5C24C433
+:1061D000AB0EDD0227C4AC2E0ADFA85527B4EC0EA7
+:1061E000DD0124B4EBC2E027942C0EDD0224942BB5
+:1061F0002E0A800D0D4627546C24546B0EDD022DA3
+:10620000243663FEFC0000006C10042A0A302B0ABE
+:10621000035BFF4D12DA53C390292616C3A1C0B306
+:10622000C08A2826175BFF48C03CC3B12B26161A2C
+:10623000D9E42AA02023261764A079C3A2C0B15BA9
+:10624000FF42C3A2C0B15BFF40C3C22C2616C2AF3F
+:10625000C0B12326175BFF3CC28F282616C0FE2F35
+:106260002617C2E22E26162A0AA1C0B1C0D82D26B2
+:10627000175BFF352A0AA12A2616C3A6C0B3C1920E
+:106280002926175BFF31C3C62C2616C1B32A0AA2E2
+:106290002B2617C0B35BFF2C290AA2292616C1851D
+:1062A000282617C2FB2F2616C0E72E26171DDA391F
+:1062B0002D2610D10FC3A2C0B35BFF2363FF820062
+:1062C0006C10041CDA031BD9ED18DA3317DA341614
+:1062D000DA3415DA34C0E0C0D414D9FF1FD9B9C0FC
+:1062E000288FF06D2A36DAC0D9C07C5B020FC90C4A
+:1062F0001CD9F90C9C28A8C3A6C22A36802A25845A
+:10630000A4C2A7CC2D248C2B248A2B24872E248B4B
+:10631000B1BB2E369F2C369E2C369DB1AC1CD9D7E6
+:106320001BDA22C0286D2A33DAC0D9C07C5B020F89
+:10633000C90C1CD9E80C9C28A8C3A6C22A36802BFD
+:106340002584A4C2B1BBA7CC2D248C2E248B2A2457
+:106350008A2E369F2C369E2C369DB1ACC07919D929
+:10636000D81BDA1413DA121ADA1218DA1314D9D97C
+:1063700016DA1304F42812DA1204660C040506A2D5
+:1063800052A858AA5AA3539B3029A50027848AC033
+:1063900091C0A52A848C29848B17DA0B18DA0AA7F6
+:1063A0005726361D26361E2E361F16DA0813DA0833
+:1063B000A65504330C2826C82E75002D54AC2E5437
+:1063C000AB2E54AA2326E62326E52E26E7D10F007E
+:1063D0006C100613D99417D9E224723D2232937FB0
+:1063E0002F0B6D08052832937F8F0263FFF3C0C423
+:1063F000C0B01AD973C051D94004593929A4206EAC
+:1064000044020BB502C3281ED96EDDB025E4220577
+:106410002D392DE421C0501ED9EF19D9DF18D9DF4D
+:1064200016D9E11DD9ED94102A724517D9AB6DA983
+:106430004BD450B3557A5B17DF50756B071FD9608B
+:106440008FF00F5F0C12D9A302F228AE2222D68160
+:10645000D54013D9A0746B0715D95A855005450C42
+:10646000035328B145A73FA832A93322369D2236CF
+:106470009E2436802B369F2BF48B2CF48C14D969F8
+:1064800024424DC030041414C84C6D0806B13304C6
+:106490001414C84263FFF20015D947C44000310408
+:1064A0001AD948C0D193A200DD1AC138B0DD9DA32E
+:1064B00018D95D2B824D29824E29A5202882537A36
+:1064C000871E2C54008E106FE45D12D93D2F2121C0
+:1064D0002321202F251F04330C23252023251ED103
+:1064E0000FC06218D99F88807E87D98910265400F2
+:1064F0006F94191BD9332AB1200A1A1404AA0C2A42
+:10650000B5202AB5212AB51E2AB51FD10F1BD92CBB
+:106510002AB1200A1A1403AA0C2AB5202AB5212A66
+:10652000B51E2AB51FD10F001CD9262BC1212DC1A4
+:10653000202BC51F03DD0C2DC5202DC51ED10F003E
+:106540006C100619D91F14D98612D93615D9A3C7CC
+:106550003FC0E02E56A82E56A92E56AA2E56AB2383
+:10656000262918D946DB101CD99DC0D42A42452DB6
+:1065700016012C160000B0890A880C98905BFF94D5
+:106580002C22E318D90F0C5C149C842B22E48C84FD
+:10659000B1BB0B5B140CBB0C9B852A22E50A5A1479
+:1065A0002A86062922CD0959142986072F22892FE8
+:1065B00086095BFF435BFF1423463BC1B01ED90035
+:1065C0001DD9602AE1022D463A0BAA020A0A4F2A77
+:1065D000E5025804965BFEBD5BFE96C050C0B01647
+:1065E000D8F614D8FE17D96FC0C0C73E93122C2618
+:1065F0002DC0306000440000007F9F0FB155091985
+:1066000014659FF4C0500AA9027FA7EF18D8EADAF0
+:106610005008580A28822C2B0A000B8000005104D5
+:10662000D2A0C091C7AF00991A0A99039912CE3827
+:1066300064206BD3202B20072516032C12022A621C
+:10664000827CA86318D8DC01110208580A28822C21
+:10665000DA500B8000D2A0643FD58A310A8A140434
+:10666000AA01C82A2B22010B8B1404BB017BA9456C
+:10667000DDA07A7B081DD8D22DD2000DAD0CDB3009
+:1066800019D8CD1AD91488130ADA28DC801DD951FB
+:1066900009880A28823C0DAA080B8000652F93D335
+:1066A00020C0B063FF9400007FAF34B155005004A8
+:1066B0000A091963FF42DAB07B7B081AD8C12AA203
+:1066C000000ABA0C1BD9048C310BAB280C8A141CA1
+:1066D000D941ACBB1CD94104AA012BC68163FF8FF1
+:1066E000645F60C050C0B0C7CE9C1263FF5500000D
+:1066F0006C100427221EC08008E4311BD8AF0002B2
+:10670000002AB28219D8AF003104C06100661A298C
+:1067100091020A6A022AB68209E43115D90C0C38B2
+:1067200011A8532832822432842A8CFC7841102903
+:1067300021022A368297A0096902292502D10F0079
+:106740002B21022C32850B6B022CCCFC2C36829731
+:10675000C02B2502D10F00006C1004C0E71DD89299
+:106760001CD8940D4911D7208B228A200B4B0BD2B9
+:10677000A007A80C9B72288CF4C8346F8E026000AE
+:10678000A31FD88AA298AF7B78B334C93DC081C01B
+:10679000F0028F380F0F42C9FA2CD67ED5206D4AF1
+:1067A0000500308800508C887008980878B16DD248
+:1067B000A09870D10FC0F0038F387FE0DE63FFD860
+:1067C000027B0CAFBB0B990C643047D830C0F1C0D2
+:1067D0005002F5380505426450792CD67E0B3612EE
+:1067E0002F6C100F4F366DFA0500808800208C0644
+:1067F000440CC081C05003B208237C0C03853805CB
+:10680000054264505A2CD67ED30F6D4A050020886D
+:1068100000308CD2A0A798BC889870D10FD2A0BCB1
+:10682000799970D10FD2302BAD08C0F1C0500BF563
+:1068300038050542CB542CD67E083F14260A100F8B
+:10684000660C0646366D6A0500208800B08C8270A2
+:1068500063FF2D00C05003F53875E08063FF7A00B8
+:10686000C06002863876E09F63FF9900C05003F550
+:106870003875E0C463FFBE006C1004D62068520F68
+:10688000695324DA20DB30DC405800F7D2A0D10F66
+:10689000DA20DB30DC405800F49A2424240EC02196
+:1068A00022640FC020D10F00B83BB04C2A2C748951
+:1068B000242D200E2E200FA4DDB1EE2E240FB0DDEE
+:1068C0002D240E2890072D9003A488B088B1DD2DCB
+:1068D00094032894075BFFA069511DC0E082242A1D
+:1068E000600F18D8BF2A240329600E8F202924079F
+:1068F00008FF029F209E64D10FC020D10F0000002E
+:106900006C1004942319D8B7C0B3083A110BAA022B
+:10691000992019D8299A2116D827C05028929D2548
+:1069200064A2288C1828969DD10F00006C100428B2
+:106930002066C038232406B788282466D10F0000BB
+:106940006C10060D3C111AD819D820035B0C862256
+:106950000D55118221AA8902320B928105630C9395
+:10696000820C550C792B54CB531CD8111DD80FC059
+:10697000F7A256C031C0A0043A380A0A42769343BF
+:10698000044302C9AB2CD67ED30F6DBA0500208814
+:1069900000308C8281A25272917D92818382C83EA6
+:1069A000D10FC071C06002763876F0DB63FFD5008E
+:1069B000C020BC89998199809282D10F222DF892B2
+:1069C0008163FFA219D7FA02860CA9669611D940F5
+:1069D000063612961006BB0C64A0442CD67E8A1094
+:1069E000D30F6DAA0500208800908CBC828311C053
+:1069F000E0A433240A01034E380E0E42CAEC2CD612
+:106A00007E6DBA0500208800308C821102520CA2E3
+:106A100082BC22928163FF83BC82928163FF7C00EF
+:106A2000C06002363876F0B563FFAF00C070024731
+:106A30003877F0CC63FFC6006C100414D7EBC1525A
+:106A4000A424CA3128221D73811C292102659016B5
+:106A50002A300075A912022A02033B022C3007C01B
+:106A6000D25801D5653FDCD10F2B300703BB0B0B90
+:106A7000BA0274B3022ABDF8D3A063FFC4000000B9
+:106A80006C1004292006C0706E9741292102C08F26
+:106A90002A2014C0B62B240606AA022A24147980C0
+:106AA000022725022A221E2C221D7AC10EC8ABDA2B
+:106AB00020DB302C0A00033D025BF7F96450892D7E
+:106AC00021020D0D4CC9D3C020D10F00002E9CFB1C
+:106AD00064E0962F21020F0F4C65F0A51AD7B71E60
+:106AE000D7B529A29EC08A798B712BE22668B004A3
+:106AF0008C207BC96629A29D1FD7B264905D9790B8
+:106B0000C0C31DD7C62B21049D9608BB110CBB0228
+:106B10009B919B971CD7C3C08527E4A22BA29D28DD
+:106B200024068DFA282102B0DD2BBC302BA69D9DBA
+:106B3000FA0C8802282502C8D2C020D10F8EF91283
+:106B4000D7B92E2689C020D10F283000688938DABD
+:106B500020DB30DC4058004463FF6300022A022B34
+:106B60000A065800D3220A00D10F655010293000C0
+:106B7000689924022A02033B02DC4058003BC020F3
+:106B8000D10FD270D10F00002A2C74033B02044CA9
+:106B9000025BFEF163FF2700DB30DC402A2C745BD4
+:106BA000FEEEC020D10F00006C1004C83F8926887B
+:106BB00029A399992609880C080848282525CC522C
+:106BC000C020D10FDB402A2C745BF92FD2A0D10F4B
+:106BD0006C1004D820D73082220D451105220C926A
+:106BE0008264207407420B13D771D420A3837323CC
+:106BF00002242DF8858074514CBC82C0906D08161B
+:106C000000408800708C773903D720C0918680744B
+:106C10003901D42074610263FFE2CA98C097C04171
+:106C20001BD7F2C0A00B8B0C0B4A380A0A42C9AA28
+:106C30001DD75E1CD75F2CD67EC140D30F6D4A0591
+:106C400000208800308C9780D270D10FBC8FC0E0BC
+:106C50000F4E387E90E263FFD6BC8292819280C054
+:106C6000209282D10F0000006C1006C0D71CD74EB6
+:106C70001BD7500D4911D7202E221F28221D0E4E42
+:106C80000BD280078A0C2E761F2AAC80C8346FAED8
+:106C9000026000CB2F0A801AD754A29EAA7A7EA344
+:106CA0003FC93FC0E1C05002E538050542CA552B37
+:106CB000C67EDB20D30F6D4A0500308800B08C2ED5
+:106CC000721DAE9E0EA50C645086D2802E761DC01D
+:106CD00091298403D10FC05003E53875D0D363FFE9
+:106CE000CD15D741027E0CA5EE643051C0A1250A16
+:106CF0000002A538033A020505426450922BC67E75
+:106D00000E35129510255C10054536D30F6D5A05CA
+:106D100000A08800208CC0A1A3E2C05023FA800309
+:106D2000730C03A538AF730505426450722BC67E01
+:106D3000851005450C6D5A0500208800308CD280E6
+:106D4000C0A10E9B0CAB7BAFBB2B761D2A8403D15D
+:106D50000FD280C0C1AF7D2D761D2C8403D10F00D2
+:106D6000D2302E8D08C0F1C0500EF538050542CB4B
+:106D7000592BC67E0A3F14C1600F660C064636D3F7
+:106D80000F6D6A0500208800E08C22721D63FF03EE
+:106D9000C061C05003653875D80263FF6263FF5C51
+:106DA000C05002A53875D08763FF8100C06003F62C
+:106DB0003876D0BF63FFB9006C10042A2015292053
+:106DC0001614D6FF0A990CCB9D2E200B04ED092B2F
+:106DD000D1208F2809BC36ACAA0CBB0C2BD5200ABD
+:106DE0000A472A2415CAAF8B438942B0A8009104F0
+:106DF00000881AA8FF0FBB029B278F260FB80C78BC
+:106E00003B1AC020D10F0000292102C0A20A99021A
+:106E1000292502C021D10F008B2763FFDC2BD12055
+:106E20000CAA0C0A0A472A2415ACBB2BD520C9AEE4
+:106E30008B438C288F42B0AD00F10400DD1AADCC3D
+:106E40000CBB029B27DA20B7EB580019C021D10FE9
+:106E50009F2763FFEF0000006C100428203C643083
+:106E60004705306000073E01053EB156076539050C
+:106E70004928C77FA933030641076603B1660606A2
+:106E800041A6337E871E222125291AFC732B150269
+:106E9000380C09816000063E01023EB124064239E9
+:106EA00003220AD10FD230D10FC05163FFC00000BE
+:106EB0006C100427221EC08008E4311DD6BF0002DA
+:106EC000002CD2821BD6BF003104C06100661A2B91
+:106ED000B1020C6C022CD6820BE43119D7440C3A67
+:106EE00011AA932832829780253282243284B455A5
+:106EF00025368275410A292102096902292502D114
+:106F00000F2A21022B32830A6A022B36822A25029B
+:106F1000D10F00006C100418D6A80C2711087708B0
+:106F2000267286253C04765B1315D6A405220A2218
+:106F300022A3682002742904227285D10FC020D1B7
+:106F40000F0000006C100419D6A727221EC080096C
+:106F5000770208E4311DD6980002002CD2821BD69D
+:106F600098003104C06100661A2BB1020C6C022C2F
+:106F7000D6820BE43119D71D0C3A11AA932832821C
+:106F80009780253282243284B45525368275410B90
+:106F90002A21020A6A022A2502D10F002B21022C83
+:106FA00032830B6B022C36822B2502D10F0000009E
+:106FB0006C10041BD6810C2A11ABAA29A286B43806
+:106FC000798B221BD67E19D6A50B2B0A2BB2A309CF
+:106FD000290868B00274B90D299D0129901F6E928D
+:106FE0000822A285D10FC020D10FC892C020D10F96
+:106FF000DA205BEE88C020D10F0000006C10041472
+:10700000D66E28429E19D66B6F88026000BA29920C
+:10701000266890078A2009AA0C65A0AC2A429DC068
+:10702000DC64A0A42B200C19D6650CBC11A4CC2EBA
+:10703000C28609B90A7ED30260009A2992A3689099
+:10704000078D2009DD0C65D08C25C2856450862D06
+:107050002104C0306ED80D2C2066B8CC0C0C472C07
+:10706000246665C07B1CD6E218D66B1AD66219D688
+:10707000731DD667C0E49E519D508F209357935542
+:1070800099539A569A5408FF021AD6839F5288261B
+:107090009F5A9E599D58935E9C5D935C9A5B08082D
+:1070A00048058811985FC0D81FD64C0CB911A49917
+:1070B000289285AFBF23F4CF288C402896858E2652
+:1070C0002D24069E29C020D10FCA33DA20C0B65B1A
+:1070D000FF78C72FD10FC93ADA205BFF75C72FD1D0
+:1070E0000FDBD05BFE072324662B200C63FF7500AB
+:1070F000C72FD10FC72FD10F6C1004C85B292006F2
+:1071000068941C689607C020D10FC020D10FDA20E8
+:10711000DB30DC40DD502E0A005BFE59D2A0D10FDF
+:107120002E200C18D6250CEF11A8FF29F286C08856
+:10713000798B791AD6220AEA0A2AA2A368A0048BBC
+:10714000207AB96823F2856430621BD62C290A8024
+:107150002C20682820672D21040B881104DD1108DC
+:10716000DD020DCC02C0842D4A100DCC021DD624A8
+:1071700098319D308A2B99379C340BAA02C0C09C51
+:10718000359C369A322A2C74DB4028F285C0D328ED
+:107190008C2028F6852C25042D24061FD60FDD40D3
+:1071A000AFEE2CE4CF5BFDE6D2A0D10F00DA20DBFE
+:1071B000E05BFF3FC020D10F6C100AD6302A2006BA
+:1071C00024160128ACF86583862B2122C0F22A21DF
+:1071D00024CC572AAC010A0A4F2A25247ABB026024
+:1071E000037F2C21020C0C4C65C3192E22158D3205
+:1071F000C0910EDD0C65D39088381ED5EF64836B8B
+:107200008C37C0B8C0960CB9399914B49A9A120D3B
+:10721000991199138F6718D5EAC9FB2880217F83BC
+:10722000168B142C22002A200C5BFF61D4A064A3CF
+:10723000B38F6760002800002B200C89120CBA1154
+:10724000AEAA2CA2861DD5DD7C9B3E0DBD0A2DD29B
+:10725000A368D00488207D893024A28564436427F4
+:10726000212E07F73607F90C6F9D01D7F0DA20DBE6
+:1072700070C1C42D211F5BFEF889268827DDA00977
+:10728000880C7A8B179A10600006C04063FFCC0010
+:1072900000DA208B105BFEC88D1065A267C0E09EEF
+:1072A000488C649C498B658A669B4A9A4B97458FAC
+:1072B000677F7302600120CD529D10DA20DB302CF5
+:1072C00012015BFE698D10C051D6A08FA7C0C08A85
+:1072D00068974D9A4C8869896A984E994F8E6A8A48
+:1072E00069AE7E77EB01B1AA9E6A9A698B60C0A0F5
+:1072F0000B8E1477B701C0A1C091C08493159D1760
+:107300009516C0D025203CC030085801089338C0DD
+:1073100082083310085B010535400B9D3807DD10EE
+:107320000BAB100E19402A211F07991003DD020D27
+:10733000BB020553100933020A55112921250A2AD7
+:10734000140929140499110A99020933028A2B2974
+:1073500021040BAA021BD6270899110955020855CA
+:10736000020BAA029A408920881408991109880200
+:1073700019D5A61DD62109880298418B2A934695D6
+:107380004783150DBB0285168D179B448A65896658
+:10739000AACAA97C77CB01B1AA07FB0C9C669A65A7
+:1073A00088268E29AD87972607EE0C0E0E482E25CF
+:1073B000259B672B200C87131ED5800CB911AE9925
+:1073C000289285A78828968517D584C090A7BB29C1
+:1073D000B4CF871863FE3C008C60C0E0C091C0F061
+:1073E000C034C0B82A210428203C08AA110B8B0104
+:1073F000038301039F380B9B39C03208FF100388B9
+:1074000001089E380C881407EE100FEE0203880165
+:1074100008983905BF1029211F0ABB1107881008D9
+:10742000FF020BAA0218D57809291403AA022B21FE
+:107430002583200B2B1404BB110833110FBB020B47
+:1074400099028B148F2A0B33020833028B2B647042
+:10745000868868974D984C8769886A9341994697C2
+:107460004E984FC07077C701C0719A4718D5E30B8B
+:107470007C100CEC0208F802984418D5E00CBC0211
+:1074800008CC029C402A200C295CFEC0801FD54AF3
+:107490001CD5520CAE112B2124ACAAAFEEB0BB8F81
+:1074A000132CE28528A4CFAFCC2CE6852A22152BFD
+:1074B0002524B1AA2A26156490DBC9D28F262E2254
+:1074C000090DFF082F26060FEE0C0E0E482E25255F
+:1074D0006550E4C020D10F00C07093419F4499468D
+:1074E0009A4777C70A1CD5362CC022C0810C873832
+:1074F0001CD5C40B781008E80208B8020C88029862
+:107500004063FF8000CC57DA20DB608C115BFDD636
+:10751000292102689806689403C020D10F2B221EEF
+:10752000C0A029221D2A25027B9901C0B064BFE8B2
+:1075300013D5212CB00728B000DA2003880A28824E
+:107540004CC0D10B8000DBA065AFE763FFCA000031
+:1075500068A779DA20DB30DC40DD505BFEE7D2A0A3
+:10756000D10FC16DC19D29252C60000429252CD681
+:10757000902624672F2468DA20DB308C11DD502E12
+:107580000A805BFD3FD2A0D10FC168C1A82A252C7B
+:1075900063FFDD000000C8DF8C268B29ADCC9C2664
+:1075A0000CBB0C0B0B482B25252A2C74DB602C12F2
+:1075B000015BFD87D2A0D10F2A2C748B115BF6B230
+:1075C000D2A0D10FDA205BFE3A63FF3800DA20C088
+:1075D000B15BFE8A64ABF1655F352D2124B1DD2DF1
+:1075E000252463FF1FDA202B200C5BFE5663FF145B
+:1075F00012D5858220028257C82163FFFC12D581F3
+:1076000003E83004EE3005B13093209421952263D5
+:10761000FFFC000010D57D910092019302940311AC
+:10762000D554821001EA30A21101F031C04004E4C7
+:107630001600020011D5768210234A00032202921E
+:107640001011D540C021921004E4318403830282DA
+:1076500001810000D23001230000000010D56D919F
+:107660000092019302940311D543821001EA30A2E3
+:107670001101F131C04004E41600020011D564820A
+:107680001013D4E7032202921004E431840383022E
+:107690008201810000D330013300000010D55E91DB
+:1076A00000810165104981026510448103CF1F925A
+:1076B000019302940311D531821001EA30A2110125
+:1076C000F231C04004E41600020011D550821013BC
+:1076D000D4CF032202921004E43184038302820196
+:1076E000C010910391029101810000D43001430048
+:1076F00012D500C03028374028374428374828376B
+:107700004C233D017233ED03020063FFFC000000D7
+:1077100010D542910092019302940311D54082103A
+:10772000921011D4F28310032202921011D53D124F
+:10773000D5049210C04004E41600020011D5348232
+:107740001013D4EB032202921004E4318403830269
+:107750008201810000D53001530000006C10026EE0
+:10776000322FD620056F04043F04745B2A05440CB5
+:1077700000410400331A220A006D490D73630403AB
+:10778000660CB1220F2211031314736302222C0121
+:10779000D10FC83BD10F000073630CC021D10F0083
+:1077A0000000000044495630C020D10F6C10020088
+:1077B00040046B4C07032318020219D10F0203196E
+:1077C000C020D10F6C100202EA30D10F6C1002CC35
+:1077D0002503F03160000F006F220503F1316000D6
+:1077E000056F230503F231000200D10F6C1002CCAB
+:1077F0002502F030D10F00006F220402F130D10FCA
+:107800006F230402F230D10FC020D10F6C1002227E
+:107810000A20230A006D280E2837402837442837CD
+:107820004828374C233D01030200D10F6C1002029F
+:10783000E431D10F0A0000004368656C73696F2062
+:1078400046572044454255473D3020284275696CD3
+:1078500074204D6F6E204D61722020382031373AF0
+:1078600032383A3135205053542032303130206F85
+:107870006E20636C656F70617472612E61736963F1
+:1078800064657369676E6572732E636F6D3A2F68F6
+:107890006F6D652F66656C69782F772F66775F3718
+:1078A0002E392D6977617270292C205665727369A3
+:1078B0006F6E2054337878203030372E30612E3080
+:1078C00030202D20313030373061303010070A0041
+:0478D0000BDFE8756D
+:00000001FF
diff --git a/firmware/cxgb3/t3fw-7.4.0.bin.ihex b/firmware/cxgb3/t3fw-7.4.0.bin.ihex
deleted file mode 100644
index 38dda94..0000000
--- a/firmware/cxgb3/t3fw-7.4.0.bin.ihex
+++ /dev/null
@@ -1,1917 +0,0 @@
-:1000000060007400200380002003700000001000D6
-:1000100000002000E100028400070000E1000288E7
-:1000200000010000E0000000E00000A0010000006E
-:1000300044444440E3000183200200002001E0002A
-:100040002001FF101FFFD0001FFFC000E300043C91
-:100050000200000020006B741FFFC29020006BBCE8
-:100060001FFFC29420006BFC1FFFC29820006C7021
-:100070001FFFC29C200003C0C00000E43100EA3131
-:1000800000A13100A03103020002ED306E2A05000C
-:10009000ED3100020002160012FFDBC03014FFDA5F
-:1000A000D30FD30FD30F03431F244C107249F0D347
-:1000B0000FD30FD30F12FFD5230A00240A00D30F4A
-:1000C000D30FD30F03431F244C107249F0D30FD327
-:1000D0000FD30F14FFCE03421F14FFCB03421F1296
-:1000E000FFCCC0302D37302D37342D37382D373CED
-:1000F000233D017233ED00020012FFC4C0302F37E0
-:10010000002F37102F37202F3730233D017233ED6A
-:1001100000020012FFBEC0302737002737102737F4
-:1001200020273730233D017233ED03020012FFB95F
-:1001300013FFBA0C0200932012FFB913FFB90C028F
-:1001400000932012FFB8C0319320822012FFB71312
-:10015000FFB7932012FFB715FFB316FFB6C030D715
-:100160002005660160001B00000000000000000088
-:10017000043605000200D30FD30F05330C6E3B1479
-:100180000747140704437631E604360505330C6F40
-:100190003BED00020012FFA615FFA3230A00D720A3
-:1001A000070443043E0505330C0747146F3BF00377
-:1001B000020012FFA1C03014FFA1D30FD30FD30F41
-:1001C0009340B4447249F2D30FD30FD30F14FF9B63
-:1001D000834014FF9B834012FF9B230A0014FF9A65
-:1001E000D30FD30FD30F9340B4447249F2D30FD33C
-:1001F0000FD30F14FF95834012FF95C92F832084DE
-:10020000218522BC22743B0F8650B4559630B433FE
-:100210007433F463FFE60000653FE1655FDE12FFC3
-:100220007C230A0028374028374428374828374C91
-:10023000233D017233ED03020000020012FF7AC079
-:1002400032032E0503020012FF7813FF819320C0B2
-:1002500011014931004831010200C00014FF7E0441
-:10026000D23115FF7D945014FF7D04D33115FF7CEE
-:10027000945014FF7C04D43115FF7C24560014FFE5
-:100280007B04D53115FF7B24560010FF7A03000054
-:10029000000000000000000000000000000000005E
-:1002A000000000000000000000000000000000004E
-:1002B000000000000000000000000000000000003E
-:1002C000000000000000000000000000000000002E
-:1002D000000000000000000000000000000000001E
-:1002E000000000000000000000000000000000000E
-:1002F00000000000000000000000000000000000FE
-:1003000000000000000000000000000000000000ED
-:1003100000000000000000000000000000000000DD
-:1003200000000000000000000000000000000000CD
-:1003300000000000000000000000000000000000BD
-:1003400000000000000000000000000000000000AD
-:10035000000000000000000000000000000000009D
-:10036000000000000000000000000000000000008D
-:10037000000000000000000000000000000000007D
-:10038000000000000000000000000000000000006D
-:10039000000000000000000000000000000000005D
-:1003A000000000000000000000000000000000004D
-:1003B000000000000000000000000000000000003D
-:1003C000000000000000000000000000000000002D
-:1003D000000000000000000000000000000000001D
-:1003E000000000000000000000000000000000000D
-:1003F00000000000000000000000000000000000FD
-:1004000000000000000000000000000000000000EC
-:1004100000000000000000000000000000000000DC
-:1004200063FFFC000000000000000000000000006E
-:100430000000000000000000000000001FFC0000A1
-:100440001FFC0000E30005C81FFC00001FFC0000AB
-:10045000E30005C81FFC00001FFC0000E30005C806
-:100460001FFFC0001FFFC000E30005C81FFFC00042
-:100470001FFFC018E30005C81FFFC0181FFFC018EA
-:10048000E30005E01FFFC0181FFFC290E30005E076
-:100490001FFFC2901FFFC290E30008581FFFC290C9
-:1004A0001FFFC58CE3000858200000002000016AEF
-:1004B000E3000B542000018020000180E3000CC009
-:1004C0002000020020000203E3000CC02000021CF8
-:1004D00020000220E3000CC420000220200002269D
-:1004E000E3000CC82000023C20000240E3000CD0D6
-:1004F0002000024020000249E3000CD42000024CFE
-:1005000020000250E3000CE02000025020000259BD
-:10051000E3000CE42000025C20000260E3000CF029
-:100520002000026020000269E3000CF42000026C4D
-:1005300020000270E3000D0020000270200002790C
-:10054000E3000D042000028C2000028CE3000D105B
-:100550002000029020000293E3000D10200002AC66
-:10056000200002B0E3000D14200002D0200002F2AF
-:10057000E3000D18200003B0200003B0E3000D3CA1
-:10058000200003B0200003B0E3000D3C200003B0C6
-:10059000200003B0E3000D3C200003B0200003B0B6
-:1005A000E3000D3C200003B020006D94E3000D3CFF
-:1005B00020006D9420006D94E3007720000000007F
-:1005C00000000000000000001FFC00001FFC0000F5
-:1005D0001FFFC5901FFFC67020006D9820006D980A
-:1005E000DEFFFE000000080CDEADBEEF1FFFC2A064
-:1005F0001FFCFE001FFFC0941FFFC5C0300000009D
-:10060000003FFFFF8040000010000000080FFFFFC8
-:100610001FFFC26D000FFFFF804FFFFF8000000033
-:1006200000000880B000000560500000600000007D
-:1006300040000011350000004100000010000001E2
-:100640002000000000001000400000000500000035
-:1006500080000019040000000000080010000005E0
-:10066000806000007000000020000009001FF800FA
-:100670008000001EA0000000F800000007FFFFFF40
-:100680000800000018000000010080014200000086
-:100690001FFFC21D1FFFC0DC000100806040000082
-:1006A0001A0000000C0000001000000A00003000DA
-:1006B000600008008000001C000100008000001A9B
-:1006C00080000018FC0000008000000100004000D5
-:1006D000030000008000040050000003FFFFBFFF84
-:1006E0001FFFC3D400000FFFFFFFF000000016D073
-:1006F0000000FFF7A50000001FFFC4B01FFFC4618A
-:100700000001000800000B20202FFF801FFFC455B0
-:1007100000002C00FFFEFFF800FFFFFF1FFFC57861
-:1007200000002000FFFFDFFF0000FFEF01001100CD
-:100730001FFFC3D21FFFC590FFFFEFFF0000FFFBAD
-:100740001FFFC6301FFFBEA0FFFFF7FF1FFFC064E3
-:100750000000FFFD1FFFC6200001FBD01FFFC5B03A
-:100760001FFFC6601FFFC591E0FFFE001FFFC5A071
-:10077000000080001FFFC53C1FFFC5B41FFFC068FD
-:100780001FFFC4D01FFCFFD8000100817FFFFFFFC7
-:10079000E1000600000027101FFCFE301FFCFE7069
-:1007A000E10002001FFFC5381FFFC5500003D090B5
-:1007B0001FFFC5642B5063802B5079802B50908095
-:1007C0002B50A6801FFFC4690100110F202FFE00CF
-:1007D00020300080202FFF000000FFFF0001FFF805
-:1007E0002B50B2002B50B208000100102B50B180EA
-:1007F0002B50B2802B50BA00000100112B50BD28A5
-:100800002B50BC802B50BDA020300000DFFFFE002D
-:100810005000000200C0000002000000FFFFF7F4DB
-:100820001FFFC06C000FF800044000000010000023
-:100830000C4000001C400000E00000A01FFFC5406D
-:100840001FFD00081FFFC5541FFFC5681FFFC57CA3
-:10085000E1000690E10006EC00000000000000004E
-:100860000000000000000000010000000000000087
-:100870000000000000000000201000402010004098
-:100880002010004020140080200C0000200C0000EC
-:10089000200C000020100040201400802014008054
-:1008A00020140080201800C0201C0100201C010022
-:1008B000201C010020200140201800C0201800C08A
-:1008C000201800C0201C0100201800C0201800C003
-:1008D000201800C0201C01002020014020200140E1
-:1008E00020200140202009402020094020200940EC
-:1008F0002020094020240980FFFFFFFFFFFFFFFFAA
-:10090000FFFFFFFF000000000000000000000000EB
-:100910000000000000000000200054902000536000
-:1009200020005490200054902000529C2000529CA3
-:100930002000529C200050DC200050DC200050D4CD
-:100940002000504020004EE820004CC820004A9C67
-:100950000000000000000000200054602000532C24
-:10096000200053D0200053D0200051842000518417
-:10097000200051842000518420005184200050CC5C
-:100980002000518420004E0820004C7820004A4866
-:10099000000000000000000020000BE820003A30BA
-:1009A000200004C02000463C20000BE0200041480D
-:1009B000200003F0200045FC20004A2420003E5483
-:1009C00020003D70200039AC2000383C200035ACC0
-:1009D0002000310C20003BCC20002D6C2000280092
-:1009E000200067182000238C2000206C2000201895
-:1009F00020001D04200018182000154820000E2C8F
-:100A000020000C2C2000110C200012F82000434084
-:100A100020003E0820000BF0200004C00000000071
-:100A200000000000000000000000000000000000C6
-:100A300000000000000000000000000000000000B6
-:100A400000000000000000000000000000000000A6
-:100A50000000000000000000000000000000000096
-:100A60000000000000000000000000000000000086
-:100A70000000000000000000000000000000000076
-:100A80000000000000000000000000000000000066
-:100A900000000000000000000000000032640000C0
-:100AA0000000000032640000640064006400640020
-:100AB00064006400640064000000000000000000A6
-:100AC0000000000000000000000000000000000026
-:100AD0000000000000000000000000000000000016
-:100AE0000000000000000000000000000000000006
-:100AF00000000000000000000000000000000000F6
-:100B000000001000000000000000000000000000D5
-:100B100000000000000000000000100000000000C5
-:100B200000000000000000000000000000432380DF
-:100B300000000000000000000000000000000000B5
-:100B400000000000000000000000000000000000A5
-:100B500000000000005C94015D94025E94035F94C9
-:100B60000043000000000000000000000000000042
-:100B70000000000000000000000000000000000075
-:100B80000000000000000000000000000000000065
-:100B900000000000005C90015D90025E90035F9099
-:100BA00000530000000000000000000000000000F2
-:100BB0000000000000000000000000000000000035
-:100BC0000000000000000000000000000000000025
-:100BD00000000000009C94001D90019D94029E94D2
-:100BE000039F94040894050994060A94070B940043
-:100BF00043000000000000000000000000000000B2
-:100C000000000000000000000000000000000000E4
-:100C100000000000009C90019D90029E90071D9096
-:100C2000039F90047890057990067A90077B900056
-:100C30005300000000000000000000000000000061
-:100C400000000000000000000000000000000000A4
-:100C50000000000000DC94001D9001DD9402DE9491
-:100C600003DF94040494050594060694070794088A
-:100C700008940909940A0A940B0B9400430000009D
-:100C80000000000000000000000000000000000064
-:100C90000000000000DC9001DD9002DE900B1D9052
-:100CA00003DF9004B49005B59006B69007B790089E
-:100CB000B89009B9900ABA900BBB9000530000009D
-:100CC00063FFFC0020006B5010FFFF0A00000000D3
-:100CD00020006B7400D23110FFFE0A0000000000FB
-:100CE00020006BBC00D33110FFFE0A0000000000A2
-:100CF00020006BFC00D43110FFFE0A000000000051
-:100D000020006C7000D53110FFFE0A0000000000CA
-:100D100063FFFC00E00000A012FFF7822002825770
-:100D2000C82163FFFC12FFF303E83004EE3005C076
-:100D30003093209421952263FFFC00001FFFD00018
-:100D4000000400201FFFC5901FFFC670200A00117D
-:100D5000FFFB13FFFB03E63101020016FFFA17FF4A
-:100D6000FAD30F776B069060B4667763F85415B5C5
-:100D7000541A610F140063FFF90000006C1004C0E6
-:100D800020D10F006C1004C0C71AEF06D830BC2B5E
-:100D9000D72085720D4211837105450B9572023380
-:100DA0000C2376017B3B04233D089371A32D12EEA7
-:100DB000FE19EEFEA2767D632C2E0A000882022820
-:100DC0000A01038E380E0E42C8EE29A67E6D4A0532
-:100DD00000208800308C8271D10FC0F0028F387FE4
-:100DE000C0EA63FFE400C0F1C050037E0CA2EE0E27
-:100DF0003D1208820203F538050542CB5729A67E2D
-:100E00002FDC100F4F366DFA0500208800308CBCA7
-:100E100075C03008E208280A01058338030342C977
-:100E20003E29A67E0D480CD30F6D8A050020880050
-:100E3000B08C8271D10FC05008F53875C0C163FF06
-:100E4000BBC06002863876C0DA63FFD46C1012161D
-:100E5000EED8C1F9C1E8C1C72B221E28221DC0D07F
-:100E60007B81312920060BB702299CFA655008289E
-:100E70002072288CFF28247264915C2AB0000CA890
-:100E80000C6481670EA90C6492B37FA13769AC2F03
-:100E90006000340000282006D7D0288CFACC572ACE
-:100EA00020722AACFF2A24726481352AD0000CA952
-:100EB0000C6491640EAC0C64C31B7FA10768AC0783
-:100EC000C020D10F002D25028A32C0900A6E5065D5
-:100ED000E5B5292467090F4765F5B12C200C1FEEF5
-:100EE000B50CCE11AFEE29E286B4487983026005D5
-:100EF0008219EEB109C90A2992A36890078F2009C7
-:100F0000FF0C65F56E2FE28564F56865559628221D
-:100F10001D7B8105D9B060000200C0908B9417EE54
-:100F2000A70B881487740B0B47A87718EEA509BB8D
-:100F30001008770297F018EEA317EEA408A8010B8B
-:100F400088020747021BEEA097F10B880298F22750
-:100F500090232B902204781006BB1007471208BB81
-:100F6000022890210777100C88100788020B88024E
-:100F700017EE988B3307BB0187340B880298F397E1
-:100F80009997F48B9587399BF588968B3898F688D6
-:100F90009797F99BF898F717EE8F28E28507C7080F
-:100FA0002D74CF08480B28E68565550F2B221E2887
-:100FB000221D7B89022B0A0064BF042CB00728B0D5
-:100FC00000DA2006880A28824CC0D10B8000DBA002
-:100FD00065AFE763FEE90000292072659E9C60040E
-:100FE000E72A207265AEC36004DE00002EB0032C39
-:100FF0002067D4E065C1058A328C330AFF500C4566
-:1010000054BC5564F4EB19EE74882A09A9010988C7
-:101010000C64821FC0926000DD2ED0032A2067D4AA
-:10102000E065A0D88A328B330AFC500B4554BC557E
-:1010300064C4BE19EE69882A09A9017989D50BEA29
-:101040005064A4E30CEE11C0F02F16132E16168A6E
-:10105000E78CE82A16128EE9DFC0AAEA7EAB01B15E
-:10106000CF0BA8506583468837DBC0AE89991E78C0
-:101070009B022BCC012B161B29120E2B0A002916C2
-:101080001A7FC3077FC9027EAB01C0B165B49D8BD7
-:10109000352F0A002A0A007AC30564C3CB2F0A0140
-:1010A00065F4892B12162B1619005104C0C100CC0F
-:1010B0001A2CCCFF2C16170CFC132C16182B121AFA
-:1010C0002A121BDC50581974C0D0C0902E5CF42C2E
-:1010D00012172812182F121B2A121A08FF010CAA25
-:1010E000018834074C0AAB8B2812192BC6162F86A1
-:1010F000082A86092E74102924672E70038975B179
-:10110000EA2A7403B09909490C659DB32B20672D19
-:10111000250265B3FA2B221E2C221D7BC901C0B00B
-:1011200064BD9C2CB00728B000DA2006880A28820B
-:101130004CC0D10B8000DBA065AFE763FD8189BAAD
-:10114000B19965909788341CEE2598BA8F331EEEBE
-:101150001E0F4F542FB42C8D2A8A320EDD020CAC98
-:10116000017DC9660A49516F92608A3375A65B2C6E
-:10117000B0130AED510DCD010D0D410C0C417DC98F
-:10118000492EB012B0EE65E3C6C0D08E378CB88A57
-:10119000368FB97CA3077AC9027EFB01C0D1CED9B4
-:1011A00088350AAD020E8E0878EB022DAC0189B7A6
-:1011B000DAC0AF9B79BB01B1CADCB0C0B07DA30778
-:1011C0007AD9027CEB01C0B164B161C09129246776
-:1011D000C020D10F00008ADAB1AA64A0C02C206719
-:1011E0002D250265C3111DEDF88A321EEDFD0DADF2
-:1011F000010EDD0C65D28A0A4E516FE20260028157
-:10120000C090292467090F4765F2F828221D7B89C1
-:10121000022B0A0064BCA82CB00728B000DA200614
-:10122000880A28824CC0D10B8000DBA065AFE76341
-:10123000FC8D00000CE9506492ED0CEF11C0802889
-:101240001611AFBF2F16198EF88BF7DAE08FF92B36
-:101250001610ABFB7FBB01B1EA0CA8506580D688A5
-:1012600037DCE0AF89991C789B022CEC012C161B13
-:1012700029120C2C0A0029161A7AE3077AE9027F50
-:10128000BB01C0C165C2A58B352C0A002A0A007AB1
-:10129000E30564E1CA2C0A0164CE0D60028E883435
-:1012A0001BEDCF98DA8F331EEDC80F4F542FD42C7F
-:1012B0008C2A8A320ECC020BAB010CBB0C65BF0A28
-:1012C0000A49516E920263FF018A330AAB5064BE31
-:1012D000F92CD0130AEE510ECE010E0E410C0C412A
-:1012E0000ECC0C65CEE42FD012B0FF65F26EC0B00C
-:1012F0008E378CD88A362FD2097CA3077AC9027E12
-:10130000FB01C0B165BEC38835DBA0AE8E78EB01B2
-:10131000B1AB89D7DAC0AF9D79DB01B1CAC0C07B60
-:10132000A3077AB9027DEB01C0C165CE9DC09029AB
-:101330002467C020D10F88378C3698140CE90C290B
-:10134000161408F80C981D78FB07281214B088288A
-:101350001614891D9F159B16C0F02B121429161AFE
-:101360002B161B8B147AE30B7AE90688158E1678F8
-:10137000EB01C0F165F1BA29121A2F12118A352E2C
-:10138000121B9A1AAFEE2F1210C0A0AF9F79FB016B
-:10139000B1EE9F11881AC0F098107AE30A7EA90571
-:1013A0002A12017A8B01C0F164F0816001838936D1
-:1013B0008B3799170BE80C981F09C90C291615785B
-:1013C000EB07281215B088281615D9C09A199E184F
-:1013D0008A1F2E12152A161A2E161BDAC0C0E08C90
-:1013E000177F930B7FA90688188F1978FB01C0E13E
-:1013F00065E13E29121A2F12138A352E121B9A1BF1
-:10140000AFEE2F1212C0A0AF9F79FB01B1EE9F1378
-:10141000881BC0F098127AE30A7EA9052A12037A83
-:101420008B01C0F165F10A2E12162E16192A121B15
-:10143000005104C0E100EE1AB0EE2E16170EFF1395
-:101440002F16180FCC01ACAA2F121A0EBC01ACFC3F
-:101450007FCB01B1AA2A161B2C161A63FC5E000072
-:101460007FB30263FE3163FE2B7EB30263FC306305
-:10147000FC2A00006450C0DA20DBC0581648C020A7
-:10148000D10FC09163FD7A00C09163FA44DA20DB8A
-:1014900070C0D12E0A80C09A2924682C7007581574
-:1014A00038D2A0D10F03470B18ED4FDB70A8287876
-:1014B00073022B7DF8D9B063FA6100002A2C74DB2B
-:1014C00040580EB363FAE4000029221D2D25027B4B
-:1014D0009901C0B0C9B62CB00728B000DA20068840
-:1014E0000A28824CC0D10B8000DBA065AFE7C0208A
-:1014F000D10FC09163FBFF00022A025802440AA2E6
-:1015000002060000022A025802410AA20206000056
-:10151000DB70DA20C0D12E0A80C09E2924682C708E
-:1015200007581517C020D10FC09463FBC9C096633C
-:10153000FBC4C09663FBBF002A2C74DB30DC405B2D
-:10154000FE11DBA0C2A02AB4002C200C63FF2700F0
-:101550008D358CB77DCB0263FDD263FC6D8F358EEC
-:10156000D77FEB0263FDC563FC6000006C1004C014
-:1015700020D10F006C1004C020D10F006C10042B80
-:10158000221E28221DC0A0C0942924062A25027BE1
-:101590008901DBA0C9B913ED06DA2028B0002CB010
-:1015A0000703880A28824CC0D10B8000DBA065AFFE
-:1015B000E7C020D10F0000006C10042C20062A2167
-:1015C0000268C80528CCF965812E0A094C6591048A
-:1015D0008F30C1B80F8F147FB00528212365812774
-:1015E00016ECF529629E6F98026000F819ECF1295B
-:1015F00092266890078A2009AA0C65A0E72A629DB6
-:1016000064A0E12B200C0CB911A6992D92866FD9FC
-:10161000026000DB1DECE90DBD0A2DD2A368D007E6
-:101620008E200DEE0C65E0C7279285C0E06470BF88
-:101630001DECEE68434E1CECED8A2B0CAA029A704E
-:1016400089200899110D99029971882A98748F320E
-:101650009F75282104088811987718ECDE0CBF11BB
-:10166000A6FF2DF285A8B82E84CF2DDC282DF68577
-:10167000C85A2A2C74DB40580E46D2A0D10FC02085
-:10168000D10F00000029CCF96490B12C206689317B
-:10169000B1CC0C0C472C24666EC60260008509F89C
-:1016A0005065807F1CECD38A2B0F08400B881008F4
-:1016B000AA020CAA029A7089200899110D99029920
-:1016C00071883398738C329C728A2A9A74893499FF
-:1016D0007563FF7D00CC57DA20DB30DC4058151DE8
-:1016E000C020D10F00DA20C0B65815AC63FFE5006A
-:1016F000DA205815AA63FFDC00DA20DB30DC40DD9D
-:1017000050581638D2A0D10FC858DA20DB30581400
-:101710008A2A210265AFBDC09409A9022925026366
-:10172000FFB200002B21045814351DECAFC0E02E91
-:1017300024668F302B200C0F8F1463FF662921380D
-:10174000C08879830263FF5B2C20662B2104B1CC17
-:101750000C0C472C24665814291DECA3C0E02E2441
-:10176000668F302B200C0F8F1463FF376C1004C072
-:10177000B7C0A116ECA015EC92D720D840B822C073
-:10178000400535029671957002A438040442C94B95
-:101790001AEC8519EC8629A67EC140D30F6D4A0547
-:1017A00000808800208C220A88A272D10FC05008C5
-:1017B000A53875B0E363FFD76C1006931394112915
-:1017C0002006655288C0716898052A9CF965A29820
-:1017D00016EC792921028A1309094C6590CD8AA05B
-:1017E0000A6A512AACFD65A0C2CC5FDB30DA208CDE
-:1017F000115814D8C0519A13C7BF9BA98E132EE25B
-:101800000968E0602F629E1DEC6A6FF80260008438
-:101810002DD22668D0052F22007DF9782C629DC735
-:101820009064C0709C108A132B200C2AA0200CBD41
-:1018300011A6DD0A4F14BFA809880129D286AF88F6
-:10184000288C09798B591FEC5C0FBF0A2FF2A36813
-:10185000F0052822007F894729D285D490659075AC
-:1018600060004300002B200C1FEC540CBD11A6DDC2
-:1018700029D2860FBF0A6E96102FF2A368F0048853
-:10188000207F890529D285659165DA20581543C9DD
-:101890005C6001FF00DA20C0B658154060000C0003
-:1018A000C09063FFB50000DA2058153C6551E48D07
-:1018B000138C11DBD08DD0022A020D6D515813AD5F
-:1018C0009A1364A1CEC75F8FA195A9C0510F0F478E
-:1018D0009F1163FEFD00C091C0F12820062C2066F8
-:1018E000288CF9A7CC0C0C472C24666FC6098D13E5
-:1018F0008DD170DE02290A00099D02648159C9D385
-:101900008A102B21045813BD8A13C0B02B24662ED5
-:10191000A2092AA0200E28141CEC338D1315EC27E5
-:10192000C1700A773685562DDC28AC2C9C12DED08F
-:10193000A8557CD3022EDDF8D3E0DA40055B02DC4B
-:10194000305BFF8AD4A028200CB455C0D02B0A8865
-:101950002F0A800C8C11A6CC29C285AF3FAB9929E8
-:10196000C6851CEC1CDEF0AC882D84CF2812022921
-:10197000120378F3022EFDF8289020D3E007880C9C
-:10198000C170080847289420087736657FAB891313
-:1019900013EC1A8990C0F47797491BEC18C1CA2838
-:1019A00021048513099E4006EE1187530488118592
-:1019B000520E88020C88029BA09FA18F2B9DA59898
-:1019C000A497A795A603FF029FA22C200C1EEC0152
-:1019D000AECE0CCC1106CC082BC2852DE4CF2BBC8F
-:1019E000202BC6852A2C748B11580D69D2A0D10FDB
-:1019F00028203DC0E07C877F2E24670E0A4765A023
-:101A00007B1AEBFF88201EEBED8F138EE48FF4081A
-:101A100088110A88020F8F14AFEE1FEBFA98910F0E
-:101A2000EE029E901EEBF9C0801AEBEA2CD285AA3A
-:101A3000BAB8CC28A4CF2CD6852C21022F20720E28
-:101A4000CC02B1FF2F24722C2502C020D10F8713A6
-:101A5000877007074763FD6E282138C099798B028C
-:101A600063FE9ADDF063FE9500DA20DB308C11DD39
-:101A70005058155CD2A0D10FC0E163FF7A8B138C54
-:101A800011DD50C0AA2E0A802A2468DA205813BC1F
-:101A9000D2A0D10FC020D10F6C1006292102C0D0D6
-:101AA0007597102A32047FA70A8B357FBF052D2535
-:101AB000020DD902090C4C65C18216EBBE1EEBBCAF
-:101AC00028629EC0FA78F30260018829E2266890B5
-:101AD000078A2009AA0C65A17A2A629DDFA064A169
-:101AE000772B200C0CBC11A6CC29C286C08C798324
-:101AF0000260015719EBB109B90A2992A36890074E
-:101B0000882009880C65814327C2851CEBB364716A
-:101B10003A8931098B140CBB016FB11D2C20669FD3
-:101B200010B1CC0C0C472C24666EC6026001400933
-:101B3000FF5065F13A8A102AAC188934C0C47F97E7
-:101B40003C18EBB31BEBB28F359C719B708B209DC7
-:101B50007408BB029B72C08298751BEBAE0F0840E5
-:101B60009B730F881198777FF70B2F2102284A006B
-:101B700008FF022F2502C0B4600004000000C0B0BE
-:101B80007E97048F362F25227D97048837282521BC
-:101B90007C9736C0F1C0900AF9382F3C20090942E1
-:101BA00064908619EB8018EB8128967E00F08800FF
-:101BB000A08C00F08800A08C00F08800A08C2A6225
-:101BC0009D2DE4A22AAC182A669D89307797388F1C
-:101BD000338A3218EB8A07BE0B2C2104B4BB04CC29
-:101BE0001198E0C08498E1882B9DE59AE69FE71A5A
-:101BF000EB82099F4006FF110FCC020A880298E28F
-:101C0000C1FC0FCC022CE604C9B82C200C1EEB71D1
-:101C10000CCA11AECC06AA0829A2852DC4CF09B9D9
-:101C20000B29A685CF5CC020D10FC081C0900F8941
-:101C300038C08779880263FF7263FF6600CC57DA89
-:101C400020DB30DC405813C3C020D10FDA205814F9
-:101C50005363FFE8C0A063FE82DA20C0B658144F79
-:101C600063FFD900DB402A2C74580CC9D2A0D10FD5
-:101C70008A102B21045812E11EEB4EC0D02D246691
-:101C800063FEB1006C1006D62019EB491EEB4B2801
-:101C9000610217EB4808084C65805F8A300A6A5178
-:101CA00069A3572B729E6EB83F2A922668A0048CB7
-:101CB000607AC9342A729D2C4CFECAAB2B600CB6DC
-:101CC0004F0CBD11A7DD28D2860EBE0A78FB269CDC
-:101CD000112EE2A32C160068E0052F62007EF91594
-:101CE00022D285CF2560000D00DA60C0B658142BD3
-:101CF000C85A60010F00DA60581428655106DC40AC
-:101D0000DB308D30DA600D6D5158129AD3A064A08B
-:101D1000F384A1C05104044763FF6D00C0B02C6080
-:101D2000668931B1CC0C0C472C64666FC602709684
-:101D30000A2B61045812B1C0B02B64666550B42AF6
-:101D40003C10C0E7DC20C0D1C0F002DF380F0F42EA
-:101D500064F09019EB1418EB1528967E8D106DDA4F
-:101D60000500A08800C08CC0A089301DEB247797A7
-:101D70005388328C108F3302CE0BC02492E1226143
-:101D8000049DE00422118D6B9BE59FE798E61FEB15
-:101D90001A0998400688110822020FDD02C18D9DA4
-:101DA000E208220292E4B4C22E600C1FEB0A0CE897
-:101DB00011A7882C8285AFEE0C220B2BE4CF228654
-:101DC00085D2A0D10F28600CD2A08C1119EB020C87
-:101DD0008D11A988A7DD2ED2852B84CF0ECC0B2C9C
-:101DE000D685D10FC0F00ADF387FE80263FF6C634D
-:101DF000FF6000002A6C74C0B2DC20DD4058128FF6
-:101E0000C0B063FF63C020D10F0000006C10042C31
-:101E1000221D2A221EC049D320293006243468C03E
-:101E2000407AC105DDA060000200C0D06E9738C0C6
-:101E30008F2E0A802B3014C0962934060EBB022E3A
-:101E400031022B34147E8004243502DE407AC10E28
-:101E5000C8ABDBD0DA302C0A00580AE52E31020E6E
-:101E60000F4CC8FEC020D10F6895F8283102080831
-:101E70004C658FEF1AEAD01CEACE2BA29EC09A7B4B
-:101E80009B462BC22668B0048D307BD93B29A29D8E
-:101E9000C0E3CB9394901BEAE02D31049B9608DDC0
-:101EA000110EDD029D979D9112EADDC0E524C4A2CA
-:101EB0002E34062F310228A29D02FF02288C3028E2
-:101EC000A69D2F3502C020D10FDA30C0B65813B30B
-:101ED000C020D10F6C1006292006689805289CF9AF
-:101EE00065825D29210209094C659210CD51DB30D4
-:101EF000DA20044C02581317C051D3A0C7AF2A36BA
-:101F00000AC0E019EAAD1DEAB31FEAAC8A3A16EA44
-:101F1000A9B1AC64C13528629E6F88026001F129C5
-:101F2000DC332992266890078B2009BB0C65B1E051
-:101F300027629DC08E6471D82B200C0CBC11A6CCDE
-:101F400029C2867983026001D219EA9B09B90A295C
-:101F500092A3971068900828220009880C6581BB1D
-:101F600027C2856471B5292006299CF96491EC2C5F
-:101F700020668931B1CC0C0C472C24666EC60260F9
-:101F800001A109F85065819B883689F4088C14AC4E
-:101F9000991CEA8B0C99022C2104997019EAA1086A
-:101FA00008479971892A09881008990218EA9E0839
-:101FB000990299722830132930120488100699105A
-:101FC00008990228302C9A740C881008C8020988D5
-:101FD00002987389379975883898768A39C0819ABA
-:101FE000771AEA918935987B99780989140A9902B8
-:101FF000997A8A30893277A73618EA808F33987CAD
-:10200000C084987D882B2E76112976122F7613198D
-:10201000EA7A0A9F4006FF1104CA110988020FAA32
-:1020200002987EC1F90FAA022A7610C0AA600001A8
-:10203000C0A6ADBF0CBC11A6CC29C2852EF4CF0919
-:10204000A90B29C685655107C020D10F2B200C0C88
-:10205000BC1106CC0828C28609B90A6F8902600142
-:102060002E2992A36890082A220009AA0C65A11FB4
-:102070002AC28564A11928203D08284064808C84E8
-:102080003504841464408485F574537F8436048455
-:1020900014644077745374293013C08C79886CC0F1
-:1020A000902924670908476580ED882089F48435E4
-:1020B0001FEA55048414A4940F440294A014EA5017
-:1020C00008881104880298A1843698A3048414A473
-:1020D000990F990299A219EA4CADB428C2852E44F1
-:1020E000CF288C1028C6852821022F20720988024B
-:1020F000B2FF2F2472282502C020D10F00CC57DA5E
-:1021000020DB30DC40581293C020D10FC09163FF18
-:102110008FDA20C0B658132163FFE100DA2058138C
-:102120001F63FFD88A102B21045811B41DEA2A1FFF
-:10213000EA232B200CC0E02E24668A3A63FE480076
-:1021400000DA20DB30DC40DD505813A6D2A0D10FDE
-:102150002A2C74DB40580B8ED2A0D10F292138C015
-:102160008879830263FE202A12002C20662B21042A
-:102170002CCC010C0C472C24665811A01DEA161F0C
-:10218000EA0F2B200CC0E02E24668A3A63FDF8008B
-:10219000DA2058130263FF64DA205BFF1CD2A0D15F
-:1021A0000F0000006C10089515C061C1B0D9402A1D
-:1021B000203DC0400BAA010A64382A2006291606D1
-:1021C00068A8052CACF965C33B1DE9FC6440052FEC
-:1021D000120564F29C2621021EE9F806064C65628F
-:1021E000E315E9F46440D98A352930039A140A9931
-:1021F0000C6490CC2C200C8B149C110CCC11A5CC15
-:102200009C122CC286B4BB7CB3026002D38F110E29
-:10221000FE0A2EE2A368E0098620D30F0E660C6545
-:1022200062BE88122882856482B6891464905EDA60
-:1022300080D9308C201EE9F21FE9F31DE9E08B14F0
-:102240008DD4D4B07FB718B88A293C10853608C61B
-:10225000110E66029681058514A5D50F550295804D
-:102260000418146D8927889608CB110888140EBBB2
-:1022700002A8D8299C200F88029BA198A088929B35
-:10228000A3088814A8D80F880298A22AAC1019E9CC
-:10229000DEC0C08F141EE9CF86128D11286285AE74
-:1022A000DD08FF0B2CD4CF2821022F66858B352A21
-:1022B0002072098802ABAA2825022A2472C020D1E4
-:1022C0000F29529E18E9BB6F9802600208288226E7
-:1022D00068800829220008990C6591F92A529DC14D
-:1022E000CA9A1364A1EF2B200C2620060CB811A566
-:1022F000882D82860EBE0A7DC3026002022EE2A3F2
-:1023000068E0082F22000EFF0C65F1F3288285DEBD
-:10231000806481FF9810266CF96461FF2C20668828
-:1023200031B1CC0C0C472C24666EC6026001BC088F
-:10233000FD5065D1B617E9BD19E9A21AE9A92C210A
-:10234000048B2D2830102F211D0C88100BFB090C3D
-:1023500088020A880209BB026441528910C04D9B61
-:1023600090979198928D35D9E064D06CD730DBD0BE
-:10237000D8307FD713273C10BCE92632168C39960B
-:10238000E69CE78A37B4389AE80B13146430492A7C
-:10239000821686799A9696978C778A7D9C982B825E
-:1023A000172C7C209A9A2A9C189B99867BB03BB864
-:1023B000896DB9218BC996A52692162AAC18B899B1
-:1023C0009BA196A08BC786CD9BA22B921596A49B12
-:1023D000A386CB2CCC2026A605C0346BD4200D3B85
-:1023E0000C0DD8090E880A7FB705C0909988BC8863
-:1023F000C0900B1A126DAA069988998B288C18C068
-:10240000D01BE98C1CE98B16E981B1FF2A211C2322
-:10241000E6130F0F4F26E6122F251D7FA906C0F0E9
-:10242000C08028251D05F6111AE97A8F202BE615A4
-:102430002CE6162DE61726E6180AFA022AE61429D3
-:102440002006299CF96490FF29200C8D15C0801A64
-:10245000E9610C9C11AA99A5CCDA202BC28528949D
-:10246000CF0B4B0B2BC685C0B08C1658118AD2A04F
-:10247000D10F8A356FA548D8308BD56DA90C8A86C7
-:102480000A8A14CBA97AB337288C10C08028246715
-:10249000080B4765B112DA20DB302C12065811AD5B
-:1024A000D3A0C0C1C0D02DA4039C1563FD268636E1
-:1024B00064610C8910C04D9B909791989263FEA423
-:1024C000C08163FFC78A15CCA7DA20DB308C165891
-:1024D00011A1C020D10FDA20C0B658123063FFE43A
-:1024E00000DA208B1158122D63FFD9009E178A1332
-:1024F0002B21045810C28E17C0B02B246663FE3403
-:10250000C08063FE09DA20DB308C16DD505812B52E
-:10251000D2A0D10FDA2058122163FFA82D2138C094
-:10252000C87DC30263FE0D8A132B21042C206698FC
-:1025300017B1CC0C0C472C24665810B08E17C0D0A5
-:102540002D246663FDEE0000262138B06606064F96
-:10255000262538656EF128206A7F870508294164A1
-:1025600090A5C0D01BE92619E93426200723E61BD5
-:10257000B16609FA022BE61A28200A2DE61D2AE682
-:102580001E09880228E61C882606064728E6202B16
-:10259000220826E53E2BE6212D24072C20062A20A2
-:1025A0006468C347B44463FE9EDB30DA208D15C0F7
-:1025B000CE2E0A802C24688C165810F1D2A0D10F90
-:1025C0008E102A321616E8FD0A2A1486662BE612A9
-:1025D00097E127E61328E614AA6609660296E02E1C
-:1025E000EC4869ED50C14663FD7A000064AFB41950
-:1025F000E8F328201689920A880C00910400881AB2
-:10260000A8B8982963FF9C002B21046EB81E2C20CB
-:1026100066B8CC0C0C472C2466C9C09E178A135888
-:1026200010778E17C0348F20C0D02D2466C0682646
-:10263000240663FF2C008D35C08064D04AD9E0DCCD
-:1026400030DBE0DF301AE8FDB188B4FF17E8FD8623
-:10265000C9249DFF8DC82CCC102D46300767012D55
-:1026600046320A66011DE8F7264631AD6D2D463328
-:1026700026F21597B796B684C3BCBB94B58D3529A1
-:102680009C107D83C22F211DC14663FD4B000000BD
-:102690006C1006292006289CF86582BF2921022B90
-:1026A000200C09094C6590E116E8C30CBA11A6AAE2
-:1026B0002DA2862C0A127DC30260028C19E8BF0984
-:1026C000B90A2992A36890078C2009CC0C65C278BE
-:1026D00029A2856492722D629E1AE8B56FD80260B5
-:1026E000026E2AA22629160168A0082B22000ABB26
-:1026F0000C65B25C29629DC18C6492542A21200A27
-:10270000806099102C203CC7EF000F3E010B3EB1BA
-:10271000BD0FDB390BBB098F260DBD112DDC1C0D48
-:102720000D410EDD038E27B1DD0D0D410FEE0C0DB9
-:10273000BB0B2BBC1C0BB7027EC71C2C21257BCBF3
-:10274000162D1AFC0CBA0C0DA16000093E01073EC3
-:10275000B1780987390B770A77EB0260020A2C21DE
-:1027600023282121B1CC0C0C4F2C25237C8B29B0A4
-:10277000CD2D2523C855DA20DB3058106F292102D2
-:10278000CC96C0E80E9E022E2502CC57DA20DB3014
-:10279000DC405810F0C020D10F2C20668931B1CC1C
-:1027A0000C0C472C24666EC6026001D309FD5065EF
-:1027B000D1CD2F0A012E301129221464E0112822D4
-:1027C0001B090C4400C10400FA1A0A880228261BBF
-:1027D0002E3010C0A0C0B0941295131CE878883039
-:1027E0002CC022088D14778704C0F10CFA38C04140
-:1027F000C0F225203CC0840858010F5F010F4B3800
-:1028000005354007BB10C0F0084F3808FF100FBB5C
-:102810000228ECFEC0F0084F38842B0BA8100AFFEA
-:10282000102A21200F88020B880208440218E8862B
-:102830008F110844022821250A2A14082814048824
-:10284000110A88022A210494F08B2004E41008BBAA
-:102850001104BB02C04A04BB029BF1842A08AB11DD
-:102860000BEB0294F40A54110B44020555100D1B96
-:102870004094F707BB100B5502085502C08195F62E
-:102880008433C05094F3B1948B3295F898F99BF24D
-:10289000C080C1BC24261499FA9BF598FB85389515
-:1028A000FC843A94FD8B3B9BFE883998FF85352547
-:1028B000F6108436851324F6118B3784122BF6120A
-:1028C000C0B064C07E89307797438D3288332E3014
-:1028D000108F111CE84A0999400699112CF614C072
-:1028E000C42CF6158C2B2DF61A28F61B2BF6190482
-:1028F000A81109880208EE0219E840C18008EE021A
-:1029000009C90229F6162EF618C09E600001C09A69
-:102910002F200C18E8300CFE11A8FFA6EE2DE28542
-:102920002BF4CF0D9D0B2DE685C87F8A268929A71C
-:10293000AA9A260A990C090948292525655050C0EC
-:1029400020D10F00C09A63FFC6DA2058111463FE2D
-:1029500038DA20C0B658111163FE2E0068973C2B60
-:102960009CFD64BE24C020D10FDA20DB705810CD4E
-:10297000C0C0C0D10ADA390ADC3865CDE063FE098F
-:102980008A102B2104580F9DC0B02B246663FE21B2
-:10299000DB402A2C7458097ED2A0D10FDA20580FC0
-:1029A000A263FCF76C1004C020D10F006C10042946
-:1029B0000A801EE8261FE8261CE7FF0C2B11ACBB83
-:1029C0002C2CFC2DB2850FCC029ED19CD0C051C0C6
-:1029D0007013E82214E82118E81F2AB285A82804F9
-:1029E000240A234691A986B8AA2AB685A9882784ED
-:1029F0009F25649FD10F00006C100AD6302830103C
-:102A0000292006288CF964829B68980B2A9CF9651A
-:102A1000A1B2022A02580F8489371BE7E8C89164E3
-:102A2000520E2A21020A0C4C65C2588D3019E7E17A
-:102A300074D7052E212365E29E2F929E1AE7DD6F43
-:102A4000F8026002532AA22668A0082C22000ACCB1
-:102A50000C65C2442A929D64A23E9A151FE7D78D49
-:102A600067C1E6C8DD2B620618E7D564B00528808B
-:102A7000217B8B432B200C18E7CF0CBC11A8CC2951
-:102A8000C28679EB460FBE0A2EE2A368E0052F222C
-:102A9000007EF9372CC2859C1864C2332B212F8706
-:102AA000660B7B360B790C6F9D266ED2462C203D33
-:102AB0007BC740CE5560001E2A200CC1B28C205826
-:102AC00010F79A1864A2458D6763FFCFC0C063FFFB
-:102AD000C5D7B063FFD300C0E06000022E60030ED4
-:102AE000DB0C6EB20EDC700CEA11AA6A2AAC20581C
-:102AF0000199D7A0DA20DB70C1C82D212058109190
-:102B00008C268B279A160CBB0C7AB3348F188963EA
-:102B100099F3886298F28E659EF82D60108A189D50
-:102B20001768D729C0D09DA92C22182B22139CAB43
-:102B30009BAA97A58E667E7302600097CF586000AF
-:102B40001FDA208B1658105765A13863FFBDC0816E
-:102B5000C0908F18C0A29AF999FB98FA97F563FF75
-:102B6000D2DB30DA20DC40580FFBC051D6A0C0C009
-:102B70002BA0102CA4039B172C1208022A02066B10
-:102B800002DF702D60038E179D149E100CDD11C0A6
-:102B9000E0AD6D2DDC205801188C148B16ACAC2CDC
-:102BA00064038A268929ABAA0A990C9A26886609A1
-:102BB000094829252507880C98662F2218A7FF2F7A
-:102BC000261863FE96DA20DB30DC40DD5058110514
-:102BD000D2A0D10FC0302C20668961B1CC0C0C473B
-:102BE0002C24666EC6026000D2C03009FD5065D04C
-:102BF000CA8E6764E069647066DB608C18DF70DA27
-:102C0000202D60038E170CDD119E10AD6D2DDC2084
-:102C10001EE78D5800F9232618DA208B16DC402F8A
-:102C20002213DD50B1FF2F2613580F9AD2A0D10FD7
-:102C30000028203D084840658DE76F953EDA308DCD
-:102C4000B56D990C8CA80C8C14CACF7CD32D2AACF2
-:102C500010C090292467090D4764DDC5600092000B
-:102C60002C1208066B022D6C20077F028E17DA20CB
-:102C70009E101EE77458007D63FF9A00C09163FFA9
-:102C8000D1000000655081DA20DB60DC40580FB1D4
-:102C9000C020C0F02FA403D10FDA20C0B658103FD7
-:102CA00063FFE000006F950263FD6CDA20DB30DC2F
-:102CB00040DD50C4E0580F32D2A0D10F8A152B212D
-:102CC00004580ECE232466286010981763FF210055
-:102CD000DA2058103263FFABC858DB30DA20580FC7
-:102CE000162A210265AF9CC09409A9022925026316
-:102CF000FF91DB30DC40DD50C0A32E0A802A24681F
-:102D0000DA20580F1FD2A0D10FC020D10FDA202B0C
-:102D1000200C58104763FF6B6C1004282006C0621B
-:102D2000288CF8658125C050C7DF2B221BC0E12A03
-:102D3000206B29212300A104B099292523B1AA00E1
-:102D4000EC1A0BC4010A0A442A246B04E4390DCCA2
-:102D5000030CBB012B261B64406929200C1BE715C3
-:102D60000C9A110BAA082FA2861BE7136FF90260B9
-:102D700000B60B9B0A2BB2A368B0082C22000BCC28
-:102D80000C65C0A42BA2851DE73664B09B8C2B2458
-:102D900021040DCC029CB08820C0C50888110C8885
-:102DA0000298B1882A08441198B48F3494B79FB51B
-:102DB000C0401EE7082DA2850E9E0825E4CF2DDC1D
-:102DC000282DA68529210209094C68941A689820A3
-:102DD000C9402A210265A00B2A221E2B221D7AB18E
-:102DE0000265A079C020D10F2C212365CFDE6000C1
-:102DF000082E21212D21237EDBD52B221E2F221DE3
-:102E00002525027BF901C0B064BFC413E6E92CB0EC
-:102E10000728B000DA2003880A28824CC0D10B8032
-:102E200000DBA065AFE763FFA62A2C74C0B02C0AB4
-:102E300002580E081CE70C9CA08B2008BB1106BB97
-:102E4000029BA1893499A263FF790000262468DAE5
-:102E500020DB30DC40DD50581063D2A0D10FDA20E7
-:102E60002B200C580FCEC020D10F00006C1006078D
-:102E70003D14C080DC30DB40DA20C047C02123BCD9
-:102E800030032838080842774001B1DD64815A1EBA
-:102E9000E6C519E6C629E67ED30F6DDA050050882F
-:102EA00000308CC0E0C02025A03C14E6C4B6D38F0F
-:102EB000C0C0D00F87142440220F8940941077F7A8
-:102EC00004C081048238C0F10B2810C044C0220421
-:102ED000540104FD3802520102FE3808DD10821C44
-:102EE00007EE100E6E020EDD02242CFEC0E004FE82
-:102EF000380AEE100E88020D88028DAB1EE6B4086B
-:102F0000D8020E880298B0C0E80428100E5E018432
-:102F1000A025A125084411084402052514045511D3
-:102F2000043402C0810E8E3994B18FAA84109FB4EC
-:102F300075660C26A11FC0F2062614600009000069
-:102F400026A120C0F20626140565020F7701078727
-:102F50003905E61007781008660206550295B62571
-:102F6000A1040AE61108581108280208660296B75B
-:102F7000C060644056649053067E11C0F489C288D4
-:102F8000C30B340B96459847994618E69B9F41041E
-:102F900059110E99021FE699020E4708D80298426D
-:102FA0000E99029F40C1E00E990299442FA00CB4E3
-:102FB000380CF91114E6881EE67FA4FFAE992E9214
-:102FC0008526F4CF0E880B289685D10F2BA00C1FD9
-:102FD000E6791CE6800CBE11ACBBAFEE2DE2852677
-:102FE000B4CF0D3D0B2DE685D10FC0800528387874
-:102FF000480263FEA263FE966C1006C0C06570F1C5
-:103000008830C030088714778712C0B0C0A619E690
-:103010006B299022C030CC97C031600003C0B0C093
-:10302000A6C0E0C091C0D4C08225203C0B3F1097C1
-:1030300012831CC0700858010D5D01089738C080CC
-:103040000B9838077710048810086802087702C0C8
-:10305000800D98382D3CFE0888100D9E388D2B0A67
-:10306000EE1008EE0207EE020CB8100FDD02053B71
-:10307000400EDD029D408920043D100899110D99F4
-:10308000022D210409A90208DD119941872A05B9F9
-:10309000100D3D020ABB110DBB02087702974428B0
-:1030A00021258712082814048811071E4007EE10F6
-:1030B0000E990275660926211F0626146000060077
-:1030C0002621200626140868029B47098802984694
-:1030D00029200CD2C0C0800C9E111BE63E1FE63595
-:1030E000AB99AFEE2DE2852894CF0DAD0B2DE68583
-:1030F000D10FDD40C0A6C0B08E51CAE0B2AAB1BBAC
-:103100002DDC108F500E7836981008770C9FD898C9
-:10311000D989538F52991199DB9FDA7E8309B1CCFB
-:10312000255C10C97763FFCF88108D1108E70C97D5
-:1031300051AD8DD7F078DB01B1F79D5397528830B0
-:10314000C030088714088840648ED565BEC963FE08
-:10315000BC0000006C1004D720B03A8820C0308238
-:1031600021CAA0742B1E2972046D080FC980C99151
-:103170008575B133A2527A3B0B742B0863FFE900CB
-:10318000649FECD10FD240D10F0000006C100AD622
-:10319000302E3027D950DA4015E6092430269A150A
-:1031A00029160464E0026493732920062A9CF865BA
-:1031B000A3CE2A2102270A040A0B4C65B3978C3050
-:1031C00074C7052D212365D4A0C0A62B0A032C2289
-:1031D00000580F0B64A3B917E5F78E389A1664E30D
-:1031E000BA2F6027285021C9F37E8311C2B08C20EA
-:1031F0002A200C580F2AD7A0CDA16004A200C2B08B
-:103200008C202A200C580EFED7A064A4862F212ED5
-:103210008B680FBF360FB90C6F9D54296027D5B04E
-:103220006E920528203D7B8F4CDA20DB50C1C42DE7
-:10323000211F580EC48B269A189A1989272AAC3850
-:103240000B990C7A93538963C08099738F62987835
-:103250009F728E659E798D679D7B8C6695759C7A35
-:103260008E687E53026000B18B1465B050600038E8
-:10327000DBF063FFA5008A14C9A92E60030E9B0C26
-:103280006EB2A5DC500CEA11AA6A2AAC285BFFB129
-:10329000D5A063FF93C0E063FFE2DA208B18580EDD
-:1032A0008165A2B163FF9E0000DA20DB308C1558E7
-:1032B0000E29D6A0C0C0C0D12D16042CA403DC70EA
-:1032C000DA20DB60DF502D6003C0E09E109D171EEA
-:1032D000E5D20CDD110D6D082DDC285BFF478E66F5
-:1032E0008F678817AF5FA8A828640375FB01B1EE4C
-:1032F0008A189E669F6789268829AA9909880C9949
-:10330000268E6808084805EE0C28252515E5AC9E94
-:103310006865EECC63FEE6000000C9432F21232B35
-:1033200021212FFC010F0F4F2F25237FBB026003AC
-:10333000142C20668961B1CC0C0C472C24666EC617
-:103340000260022809FD5065D22264E1B62E602792
-:1033500064E1B0DC70DF50DA20DB601EE5C32D6075
-:1033600003C08098100CDD11AD6D2DDC285BFF22B1
-:10337000644181C0442B0A008C202A200C580EA0E6
-:103380000AA70265A00FC0B02C22002A200C580EFC
-:103390009CD7A064AFEFDA20C1BCC1C82D21208F1B
-:1033A000188E268929AFEE9E260E990C0909482908
-:1033B0002525580E64C090C050C0C288609A191E5E
-:1033C000E57FC0A12EE022088F14778704C0810E0C
-:1033D0008938C0800B93102D203C2921200CDC0162
-:1033E00004DB010929140BA8380CA5380D3D401C3D
-:1033F000E5968B2B088810075510085502053302F7
-:103400002821250F154003BB020CBB0207551005F0
-:10341000D3100828140ADD11048811098802053325
-:10342000022921040833029B70C0808A201BE58F8B
-:1034300008AA110BAA029A71C0A1852A93769574E5
-:1034400008931103DD020ADD029D778C63C1DC9CC9
-:10345000738B6298789A799B72232214C0C0B1351D
-:103460002526149C7B9D75937A2B621A9B7C2A627D
-:103470001C9A7D28621D987E25621B957F2362170A
-:103480002376102D62182D76112C62192C76126479
-:10349000E0B98E6077E73DC0FE13E5571DE558C1E2
-:1034A000818A628B630495110E9C4006CC110C55E9
-:1034B00002247615085502C0802D76148D2B2B76AC
-:1034C0001B2A761A28761925761803DD022D761622
-:1034D0006000030000C0FA2E200C19E53E18E53507
-:1034E000A9E90CEE11A8EEC0802DE2852894CF0D3D
-:1034F000FD0B2DE685DA208B198C158D14580D6582
-:10350000D2A0D10FDC70DF50DB602D6C28C0A01E74
-:10351000E5569A10DA205BFE5563FE53002B203DE2
-:103520000B4B4065BC826FE527DA308F556DE90C97
-:103530008EAA0E8E14C9E87EF3162AAC10C090290C
-:103540002467090F4764FC6060015F00C0FA63FFF5
-:1035500085C09163FFE88814658168DA20DB608CA0
-:1035600015580D7CC020C09029A403D10F8A162BBA
-:103570002104580CA2C0A02A24668E6863FDCA00EC
-:10358000002B9CF965B0FDDA20580CA763FC2200E3
-:1035900000DA20C0B6580E0163FFBA002B200C0CD5
-:1035A000BE11A7EE2DE286C1C27DC30260011819CB
-:1035B000E50209B90A2992A36890082A220009AAFB
-:1035C0000C65A10326E2856460FD2C20668931B17B
-:1035D000CC0C0C472C24666FC60270960C8A162BF6
-:1035E0002104580C86C0D02D24668E3077E74D1C00
-:1035F000E5021BE5028F328833C0A42D21040E9909
-:103600004006991104DD1109DD029A61C19009DDBE
-:10361000029B60C0908B2B9D649F66986799650C98
-:10362000BB029B6228200C1AE4EBAA8A0C8811A723
-:10363000882F828529A4CF2FFC202F86858A1465A8
-:10364000A0A6C020D10FB0FC8B142C2523C8B70234
-:103650002A02066B02580CB82A210265AEF7C0D8C0
-:103660000DAD022D250263FEEC008E14C8E8DA20B1
-:10367000DB30580CB12A210265AEDA07AF022F25E4
-:103680000263FED100DA20DB308C158D14580E5504
-:10369000D2A0D10FDA202B200C580DC063FEB6004B
-:1036A000DA202B200C580DE263FEAADA20DB308CE6
-:1036B000152D12042E0A80280A00282468580CB000
-:1036C00063FAE500C020D10FDA20580DB48914CD7B
-:1036D00092DA20DB308C15580D1FDBA0C020C0A073
-:1036E0002AB403D10FC020D10F2A2C748B15580691
-:1036F00028D2A0D10F0000006C100C2821029410D9
-:1037000008084C6583621FE4AB29F29E6F98026043
-:1037100003661DE4A729D2266890082A220009AA78
-:103720000C65A3542CF29D64C34E2B200C0CB611D7
-:10373000AF66286286C1EC78E30260034619E49E16
-:1037400009B90A2992A36890078A2009AA0C65A3DF
-:103750003224628564432CC0E12A3109C0702724D9
-:103760006689359A11992A88369912982B89379843
-:1037700013992C883899140858149815982D89395C
-:103780002A25042E251D29251C283028C0922824EE
-:103790003C2A302908084798160989012A243D2A1D
-:1037A000311599170A094109A90C299CEC29251FF3
-:1037B0007E87192D2A000DA06000083E010A3EB147
-:1037C000AD08DA390EAA110A990C29251F2A211FE2
-:1037D00018E4A80A8160C1D0941A951B01083E0024
-:1037E000053EB184054839843C259CFC0D8836296A
-:1037F000201408AA1C8D3D2726182E26132E2614C9
-:103800002E261527261B2E246B27246727246808BD
-:10381000581C0909432924142932112A252E282548
-:103820002F27252427252527252C27252325252037
-:103830002425212D2522841A2D211C851B6FD202BF
-:10384000600209C0A099186D080AB1AA00A104007D
-:10385000E91A7D9B0263FFEE8918C080C0E1C07049
-:10386000C0D29B1D951B961C9C1E16E4722C203DFD
-:1038700015E4820C0B400DCC010BE7381DE4640A03
-:1038800077100CE8380B8810C0C49C410877029D63
-:1038900040B0A80988118B209C499D48954B9643C0
-:1038A000087702861418E47315E45A08770205BBFA
-:1038B000029B4A9B4297468812871108DA149A4E57
-:1038C0000D88100D77110877021AE44E06D8140DF2
-:1038D0006610087702974FC78F984D984C98458788
-:1038E0001598440715140D55110A5502954715E40E
-:1038F000638A262D46102D46182D46202C46112C65
-:1039000046192C46212B46122B461A2846142846C7
-:10391000152B462288162546242546268B170A0C89
-:1039200048090D4885130EDD1105CC110839400BEF
-:10393000EB390299101EE4520DCC020D5511082DE1
-:10394000400655022E461316E41D0FDD11254616BE
-:10395000080840851B0188100DBB0286671DE449DD
-:103960000988020CBB0219E4191CE4472B46172DE9
-:10397000461BA7661BE446C0702C461C0988028CB7
-:103980001E28461E2B4623C0908B1D29461D294606
-:103990001F18E43F2946272846252931162E2006E0
-:1039A00029246A243117962D242538861CCCE1273A
-:1039B0002407C0D7090E4064E0829A29092841648F
-:1039C000809164409B2D2406C098094936280AA09E
-:1039D00024628501C404A84428210424668508883B
-:1039E000118E3F8A3E2D32100EA41800C4040EAE74
-:1039F0001800EE110ACA530EDD02C0E30E880298C9
-:103A0000C11EE42409084E9EC08E2094C398C59D13
-:103A1000C418E3F01DE42105EE110EAA020DAA025E
-:103A2000A8B82784CF9AC21EE3E224F29D27E4A21D
-:103A3000244C1824F69D655052C020D10F2D240629
-:103A4000C0A0C09809493604A93863FF7FC0A063AD
-:103A5000FE070000654F6DC098C0A82A240663FFCA
-:103A60006B2D2406C09063FF63CC57DA20DB308CCB
-:103A700010580C38C020D10F00DA20C0B6580CC73F
-:103A800063FFE500DA20580CC563FFDC2A2C748B39
-:103A90001058053FD2A0D10F6C10062820068A339B
-:103AA0006F8202600161C05013E3C229210216E354
-:103AB000C1699204252502D9502C20159A2814E3B7
-:103AC000BF8F2627200B0AFE0C0477092B711C647C
-:103AD000E1398E428D436FBC0260016F00E104B09A
-:103AE000C800881A08A80808D80298272B2006685A
-:103AF000B32ECE972B221E2C221D0111027BC90151
-:103B0000C0B064B0172CB00728B000DA2003880AD0
-:103B100028824CC0D10B8000DBA065AFE7C020D16C
-:103B20000F2D206464DFCA8B29C0F10BAB0C66BF7C
-:103B3000C02B200C0CBC11A6CC28C2862E0A0878FB
-:103B4000EB611EE39D0EBE0A2EE2A368E00528226B
-:103B5000007E894F29C2851EE3A96490461FE3B603
-:103B60009E90C084989128200A95930F880298927D
-:103B70008E200FEE029E942F200788262F950A98FC
-:103B8000969A972E200625240768E3432921022AC6
-:103B9000C2851DE3902AAC20ADBD25D4CF2AC685B1
-:103BA00063FF4E002E2065CBEDC082282465C9F648
-:103BB00005E4310002002A62821BE3982941020BCE
-:103BC000AA022A668209E43129210263FF23000048
-:103BD00064DFB88F422E201600F1040DEE0C00EECB
-:103BE0001AAEAE9E2963FFA38A202B3221B1AA9A76
-:103BF000B0293221283223B4992936217989A92B79
-:103C000032222B362163FFA0C020D10F9F2725240D
-:103C100015ACB828751C2B2006C0C12EBCFE64E074
-:103C2000AB68B7772DBCFD65DEC72D2064C0F0649E
-:103C3000D0868E290EAE0C66E089C0F128205A2865
-:103C40008CFE08CF3865FEE863FF580000E004935F
-:103C500010C0810AF30C038339C78F08D80308A862
-:103C60000108F80C080819A83303C80CA8B828756F
-:103C70001C030B472B24158310CBB700E104B0BC09
-:103C800000CC1AACAC0CDC029C27659E5EC0B20B6B
-:103C9000990209094F29250263FE50002D206A0D63
-:103CA0002D4165DF7EDA20C0B0580C8F64AF18C09C
-:103CB000F163FEEF9F2763FFD02E221F65EE326374
-:103CC000FF79000028221F658E2763FF6E252406DA
-:103CD00029210263FE1B00006C10066571332B4C1A
-:103CE00018C0C7293C18C0A1C08009A838080842DC
-:103CF0006481101CE32C1AE32D2AC67E2A5CFDD3B6
-:103D00000F6DAA0500B08800908C8940C0A009887A
-:103D1000471FE355080B47094C50090D5304DD10AC
-:103D2000B4CC04CC100D5D029D310CBB029B3088DD
-:103D3000438E2098350FEE029E328D26D850A6DD98
-:103D40009D268E40C0900E5E5064E0971CE33B1EA3
-:103D5000E32B038B0BC0F49FB19EB02D200A99B3C7
-:103D60000CDD029DB28F200CFF029FB48E262D2009
-:103D7000079EB68C282DB50A9CB72924072F20064C
-:103D80002B206469F339CBB61DE30D2320168DD2A9
-:103D90000B330C00D10400331AB48DA3C393292232
-:103DA000200C13E30C1FE3030C2E11AFEEA322290A
-:103DB00024CF2FE285D2A00FDD0B2DE685D10F0099
-:103DC0002E200CB48C0CEB111FE3031DE2FAAFEEB6
-:103DD000ADBB22B28529E4CF02C20B22B685D2A0A8
-:103DE000D10F00002E200C1CE2F31FE2FA0CEB11A5
-:103DF000AFEEACBB22B28529E4CF02820B22B6859E
-:103E0000D2A0D10FC0D00BAD387DC80263FEEC63E9
-:103E1000FEE08E40272C747BEE12DA70C0B32C3C8F
-:103E200018DD50580A868940C08063FEE3066E02A2
-:103E3000022A02DB30DC40DD505800049A10DB50CF
-:103E4000DA70580453881063FEF700006C10069275
-:103E5000121EE2E48C40AE2D0C8C472E3C1804CA96
-:103E60000BD9A07DA30229ADF875C302600084C000
-:103E7000B0C023C0A09D106D0844B89F0EB80A8D35
-:103E8000900EB70BB8770D6D36ADAA9D800D660C00
-:103E9000D8F000808800708C879068B124B22277B7
-:103EA000D3278891C0D0CB879890279C100070882A
-:103EB00000F08C9D91CB6FC08108BB0375CB36633E
-:103EC000FFB4B1222EEC1863FFD485920D770C86D7
-:103ED000939790A6D67D6B01B1559693959260000D
-:103EE00016B3CC2D9C188810D9D078D3C729DDF80B
-:103EF00063FFC100C0238A421BE2E900CD322D449A
-:103F0000029B3092318942854379A1051EE2E50E7C
-:103F1000550187121BE2D5897095350B99029932AC
-:103F200088420A880C98428676A6A696768F44AF79
-:103F3000AF9F44D10F0000006C10089311D6308859
-:103F400030C0910863510808470598389812282115
-:103F500002293CFD08084C6581656591628A630A07
-:103F60002B5065B18B0A6F142E0AFF7CA60A2C20F9
-:103F70005ACCC42D0A022D245A7FE0026002158912
-:103F80002888261FE2C809880C65820F2E200B0F97
-:103F9000EE0B2DE0FE2EE0FF08DD110EDD021EE22D
-:103FA000C2AEDD1EE2C21CE2C20EDD010DCC37C185
-:103FB00080084837B88DB488981089601AE2807BF1
-:103FC00096218B622AA0219C147BA3179D132A2083
-:103FD0000C8B108C20580BB18C148D13DBA0CEAC45
-:103FE0006001C4002E200C1BE2730CEA110BAA081E
-:103FF0002BA2861FE2717BDB3B0FEF0A2FF2A36837
-:10400000F0052822007F892C2BA28564B0AA876244
-:104010008826DE700C7936097A0C6FAD1C8F279BD1
-:104020001508FF0C77F3197E7B729D139C149B156A
-:10403000CF56600025C0B063FFD0D79063FFDD008E
-:10404000009D139C14DA20DB70580B168B158C1412
-:104050008D1365A06A8E6263FFCC00DA208B11DCC1
-:1040600040580ABCD6A08B15C051DE70DA20DC6047
-:10407000DD405BFF768D138C14D9A02E200C1BE243
-:104080004D1FE2540CEA11AFEFC0E0ABAA2BA285A2
-:104090002EF4CF0B990B29A68563FF1D00DA20DCD7
-:1040A00060DD40DE708912282007DF50A9882824AF
-:1040B000075BFF09D2A0D10F00DBE0DA20580B37F5
-:1040C0006550EF2A20140A3A4065A0EBDB60DC4023
-:1040D000DD30022A025809A7D6A064A0D584A183A6
-:1040E000A00404470305479512036351C05163FEC2
-:1040F0005C2C2006D30F28CCFD6480A568C704C0C3
-:10410000932924062C2006C0B18D641FE22C9D2724
-:104110009D289D298FF29D2600F10400BB1A00F016
-:1041200004B0BE0EDD01C0F0ADBB8D652F24070DC0
-:104130000E5E01EE11AEBB2E0AFEB0BB0B0B190ECC
-:10414000BB36C0E20B0B470EBB372B241618E224FC
-:104150000A09450D0B422B240B29240AB4BE2E2438
-:104160000C7D88572920162FCCFDB09D0A5C520D7E
-:10417000CC362C246465FDEC0C0C4764CDE618E2CB
-:104180000F8E2888820C9F0C00810400FF1AAFEE6E
-:104190009E2963FDCF1CE23E63FE13001CE23563E3
-:1041A000FE0C8D6563FFA500DA202B200C580B2038
-:1041B000645F0FC020D10F00C020D10FC09329240D
-:1041C00016C09363FFA000006C1004C06017E1F8F4
-:1041D0001DE1FBC3812931012A300829240A78A175
-:1041E00008C3B27BA172D260D10FC0C16550512605
-:1041F00025022AD0202F200B290AFB2B20142E2049
-:104200001526241509BB010DFF0928F11C2B2414C8
-:10421000A8EE2EF51C64A0A92B221E28221D011138
-:10422000027B8901DB6064B0172CB00728B000DA8C
-:104230002007880A28824CC0D10B8000DBA065AF24
-:10424000E7DB30DC40DD50DA205800DE29210209AE
-:104250000B4CCAB2D2A0D10F00CC5A2C30087BC173
-:10426000372ED02064E02D022A02033B02DC40DD21
-:10427000505800D4D2A0D10F2B2014B0BB2B241443
-:104280000B0F4164F0797CB7CAC0C10C9C022C258D
-:1042900002D2A0D10FC020D10F2E200669E2C12684
-:1042A00024062B221E2F221D29200B2820150D99B4
-:1042B000092A911C262415AA8828951C7BF149609F
-:1042C0000048B0BB2B24140B0A4164A0627CB702E7
-:1042D0002C25022B221E2C221DD30F7BC901C0B01E
-:1042E000C9B62CB00728B000DA2007880A28824C0B
-:1042F000C0D10B8000DBA065AFE7C020D10F00006C
-:10430000262406D2A0D10F0000DB601DE1AC64BF03
-:104310004F2CB00728B000DA2007880A28824CC04A
-:10432000D10B8000DBA065AFE71DE1A463FF310086
-:1043300026240663FF9C00006C1004282006260A31
-:10434000046F856364502A2920147D9724022A0271
-:10435000DB30DC40DD50580019292102090A4CC825
-:10436000A2C020D10FC0B10B9B022B2502C020D1CF
-:104370000F00022A02033B022C0A015800D1C9AAED
-:10438000DA20DB30DC405809F329A011D3A07E9756
-:10439000082C0AFD0C9C012CA411C0512D201406E0
-:1043A000DD022D241463FFA4DA20DB30DC40DD5075
-:1043B000C0E0580973D2A0D10F0000006C1006169F
-:1043C000E17D1CE17D655157C0E117E179282102AB
-:1043D0002D220008084C6580932B32000B6951296F
-:1043E0009CFD6590872A629E6EA84C2A722668A062
-:1043F000027AD9432A629DCBAD7CBE502B200C0C97
-:10440000BD11A6DD28D2862F4C0478FB160CBF0AFE
-:104410002FF2A368F0052822007F89072DD285D3CB
-:104420000F65D0742A210419E1A3D30F7A9B2EDAE9
-:104430002058086E600035002D21041BE19E7DBBD5
-:1044400024DA20C0B6580869CA546001030B2B5007
-:104450002B240BB4BB0B0B472B240C63FFA0DA20DF
-:10446000580A4E600006DA20C0B6580A4C6550E083
-:10447000DC40DB302D3200022A020D6D515808BDA0
-:104480001CE14ED3A064A0C8C05184A18EA0040436
-:10449000470E0E4763FF3500002B2104C08C893185
-:1044A000C070DF7009F950098F386EB8172C20667C
-:1044B000AECC0C0C472C24667CFB099D105808CF11
-:1044C0008D1027246694D11EE151B8DC9ED0655032
-:1044D00056C0D7B83AC0B1C0F00CBF380F0F42CBAE
-:1044E000F119E13018E13228967EB04BD30F6DBA46
-:1044F0000500A08800C08C2C200CC0201DE1360CCB
-:10450000CF11A6FF2EF285ADCC27C4CF0E4E0B2EB9
-:10451000F685D10FC0800AB83878D0CD63FFC100CE
-:104520008E300E0E4763FEA12A2C742B0A01044D17
-:10453000025808C22F200C12E1270CF911A699A2EB
-:10454000FF27F4CF289285D2A008480B289685D162
-:104550000FC020D10F0000006C1004C060CB55DBF1
-:1045600030DC40055D02022A025BFF94292102092A
-:10457000084CC882D2A0D10F2B2014B0BB2B24141E
-:104580000B0C41CBC57DB7EBC0C10C9C022C2502A6
-:10459000D2A0D10F0000022A02033B02066C02C027
-:1045A000D0C7F72E201428310126250228240A0F0F
-:1045B000EE012E241458010E63FFA300262406D218
-:1045C000A0D10F006C1006282102D62008084C65E7
-:1045D000809D2B200C12E0F70CB811A2882A82864D
-:1045E000B5497A930260009719E0F409B90A299253
-:1045F000A36890082A620009AA0C65A08228828517
-:104600001CE0FF6480799C80B887B14B9B819B1034
-:10461000655074C0A7D970280A01C0D0078D380D25
-:104620000D42CBDE1FE0E01EE0E12EF67ED830D357
-:104630000F6D4A0500808800908C2E3008C0A000C5
-:10464000EE322E740028600C19E0E30C8D11A2DD0F
-:10465000A988C0202CD2852284CFD2A00CBC0B2CE0
-:10466000D685D10FC0F0038F387FA0C063FFB400A0
-:10467000CC582A6C74DB30DC405807F6C020D10FD0
-:10468000DA605809C663FFE7DD402A6C74C0B0DC0D
-:104690007058086A2E30088B1000EE322E740028F5
-:1046A000600C19E0CC0C8D11A2DDA988C0202CD2A1
-:1046B000852284CFD2A00CBC0B2CD685D10F000054
-:1046C0006C1004292014282006B19929241468812B
-:1046D00024C0AF2C0A012B21022C24067BA004C08D
-:1046E000D02D2502022A02033B02044C02C0D058FE
-:1046F00000C0D2A0D10FC020D10F00006C1004293F
-:104700003101C2B429240A2A3011C28378A16C7BFA
-:10471000A1696450472C2006C0686FC562CA572D36
-:1047200020147CD722DA20DB30DC40DD505BFFA593
-:10473000292102090E4CC8E2C020D10FC0F10F9F01
-:10474000022F2502C020D10FDA20DB30C0C05BFF72
-:10475000DC28201406880228241463FFC7292015AA
-:104760001BE0972A200BC0C09C240BAA092BA11C7C
-:104770002C2415AB9929A51C63FF9900C020D10FEB
-:10478000DA20DB30DC40DD50C0E058087DD2A0D11B
-:104790000F0000006C1004CB5513E09225221F0D72
-:1047A000461106550CA32326221E25261F06440B60
-:1047B00024261E734B1DC852D240D10F280A80C038
-:1047C0004024261FA82828261E28261DD240D10FA7
-:1047D000C020D10F244DF824261E63FFD80000000E
-:1047E0006C1004D620282006C0706E85026000D4AC
-:1047F0001DE07919E07112E06F2A8CFC64A1302B66
-:104800006102B44C0B0B4C65B0A22B600C8A600C9F
-:10481000B8110288082E828609B90A7EC302600098
-:104820009A2992A368900509AA0C65A08E28828512
-:10483000648088B8891BE07594819B80655155C060
-:10484000B7B8382A0A01C0C009AC380C0C4264C0A1
-:10485000421FE0541EE0562EF67EB04AD30F6DAADA
-:104860000500808800908CC0A029600C0C9C11A2CF
-:10487000CC2BC285AD990B4B0B2BC6852860062728
-:1048800094CF6881222D6015D2A0C9D2C0E22E64D7
-:1048900006D10F00C0F008AF387FB0BD63FFB10094
-:1048A000276406D2A0D10F00D2A0D10F00CC57DAD6
-:1048B00060DB30DC405808A7C020D10FDA6058090F
-:1048C0003763FFE80028221E29221DD30F789901A3
-:1048D000C080C1D6C1C11BE043C122AB6B64804222
-:1048E00078913F2A80000CAE0C64E0BB02AF0C64F0
-:1048F000F0B52EACEC64E0AF0DAF0C64F0A92EACBB
-:10490000E864E0A32FACE764F09D2EACE664E0978A
-:104910002F800708F80BDA807B83022A8DF8D8A055
-:1049200065AFBC28612308D739D97060007B0000CF
-:104930002B600C0CB811A2882C82862A0A087CAB4A
-:104940007E09BA0A2AA2A368A0052C62007AC96F60
-:104950002A828564A0691FE029276504C0E3C0C4DA
-:104960002E64069CA11CE0549FA02E600A97A30C05
-:10497000EE029EA28F600CFF029FA42E60147AEFBD
-:104980004627A417ADBC2F828527C4CF2FFC202F2C
-:10499000868563FE692A6C74C0B1DC90DD405807DF
-:1049A000A71DE00C63FEC100D9A0DA60DB30C2D0E5
-:1049B000C1E0DC4009DE39DD505807F1D2A0D10F4B
-:1049C000DA605808F663FEE4290A0129A4170DBF2E
-:1049D000082E828527F4CF2EEC202E868564500B7E
-:1049E0002A6C74DB4058016AD2A0D10FC020D10FCD
-:1049F0006C10062B221E28221D93107B8901C0B04B
-:104A0000C0C9C03BC1F20406401DDFF6C0E2C0745D
-:104A10000747010E4E01AD2D9E11C0402E0A1464B1
-:104A2000B06E6D084428221D7B81652AB0007EA1EE
-:104A30003B7FA1477B51207CA14968A91768AA1434
-:104A400073A111C09F79A10CC18B78A107C1AE29B8
-:104A50000A1E29B4007CA12B2AB0070BAB0BDAB0DD
-:104A60007DB3022ABDF8DBA0CAA563FFB428B0104D
-:104A700089116987BB649FB863FFDC00647FB463FE
-:104A8000FFD50000646FD0C041C1AE2AB40063FFFF
-:104A9000C62B2102CEBE2A221D2B221E7AB12A8CC1
-:104AA000107CB1217AB901C0B0C9B913DFC1DA20D5
-:104AB00028B0002CB00703880A28824CC0D10B8094
-:104AC00000DBA065AFE7D240D10F8910659FD463AA
-:104AD000FFF300006C1008C0D0C8598C30292102A7
-:104AE0000C0C4760000C8E300E1E5065E19E292193
-:104AF00002C0C116DFB0090B4C65B0908A300A6E57
-:104B00005168E3026000852F629E1BDFA96EF85397
-:104B10002BB22668B0052E22007BE94727629DB79D
-:104B200048CB7F97102B200CB04E0CBF11A6FF294D
-:104B3000F2869E12798B4117DFA007B70A2772A36E
-:104B4000687004882077893029F285DF90D79065D6
-:104B500090652A210419DFD77A9B22DA205806A310
-:104B6000600029002C21041BDFD37CBB18DA20C095
-:104B7000B658069EC95860014CC09063FFCCDA203D
-:104B8000580886600006DA20C0B65808846551359A
-:104B9000DC40DB308D30DA200D6D515806F6C0D088
-:104BA000D3A064A120292102C05184A18CA00404B7
-:104BB000470C0C4763FF3E00C09C8831DBD008F8EF
-:104BC00050089B3828210498116E8823282066AC51
-:104BD0008C0C0C472C24667CBB159F139E148A10EA
-:104BE0008B115807068E148F13C0D02D24668A307F
-:104BF000C092C1C81BDF867FA6099BF099F12CF4F7
-:104C00000827FC106550A4B83ADF70C051C0800777
-:104C1000583808084264806718DF6319DF64298602
-:104C20007E6A420AD30F6DE90500A08800F08CC0AF
-:104C3000A08930B4E37F9628C0F207E90B2C9408D2
-:104C40009B909F912F200C12DF630CF811A68829EE
-:104C50008285A2FF2DF4CFD2A009330B238685D104
-:104C60000F22200C891218DF5B0C2B11A6BBA82287
-:104C70002D24CF2CB285D2A00C990B29B685D10F4B
-:104C8000C087C0900A593879809663FF8ADB30DA92
-:104C900020C0C1C0D05BFF56292102C0D02A9CFE93
-:104CA00065AE4D2D2502C09063FE45009E142A2C52
-:104CB00074C0B1DC70DD405806E18E14C0D01BDF3B
-:104CC00053C1C863FF6AC020D10F00006C100628D2
-:104CD000210216DF3808084C65821929629E6F98F8
-:104CE0000260022019DF332992266890078A200982
-:104CF000AA0C65A20F27629DC0CC6472072B210409
-:104D00008E31C0A0DDA00EFE500ECD386EB8102C36
-:104D10002066B1CC0C0C472C24667CDB026001EFD2
-:104D2000C0C12930081BDF2564909C2F0AFFC0D327
-:104D3000B09E64E1026892136450882A2C74044B7C
-:104D4000025800930AA20206000000002B200C2744
-:104D500021040CBC11A6CC29C286280A087983023A
-:104D60006001B919DF1509B90A2992A36890082EC4
-:104D7000220009EE0C65E1A42EC28564E19E262086
-:104D80000713DF1E6E7B0260019A17DF151FDF1EFF
-:104D900019DF4BC0D228200A93E09DE1A9690F8852
-:104DA0000298E22F90802A9480B1FF07FF029FE3D0
-:104DB0002EC2851FDF080EDE0BAFBF2AF4CF2EC632
-:104DC00085655F76C020D10F2830102930112E3034
-:104DD0001300993200ED326480EE2A30141FDF3860
-:104DE00000AA3278EF050F9E092DE47F1EDF36669C
-:104DF000A0050F98092A8480B4A718DF33C76F0075
-:104E00009104AE9EDDE000AF1A00C31A6EE1052DDD
-:104E1000B2000DED0C1EDF2D08D81C063303AE8842
-:104E20002A848B2EB02E27848C03EE010FEE022EE7
-:104E3000B42E58018F63FEFF29310829250428303C
-:104E4000142E3109B0886480A32E240AC0812E302C
-:104E5000162CB4232E240BB4EF2F240C8C378B3656
-:104E6000292504DEB0DFC00C8F390B8E390FEE021E
-:104E700064EEC4089F1101C4048D380CB81800C436
-:104E8000040CBE1800EE110EDD02C0E30EFF021E80
-:104E9000DF019F719E701EDF008F2098739D740547
-:104EA000FF110BCD53C18098750FDD020EDD029D01
-:104EB000721EDEBF2A24662F629D2AE4A22FFC18F0
-:104EC0002F669D63FE710000002F30121BDF010072
-:104ED000FA3278FF050B980B2A847F66D0050B9A6F
-:104EE0000B2DA4802A301100AA3263FF442F240A1C
-:104EF0009E2B63FF56CC57DA20DB30DC4058071579
-:104F0000C020D10F00DA20C0B65807A463FFE50027
-:104F1000DA7058063AC0A02A246663FE02DA2058E6
-:104F2000079F63FFCFB16928200A862009094799A6
-:104F30001129240798107F812693E027E50A9AE338
-:104F400088109DE119DEDD8D11096F029FE42DE4CB
-:104F500016098802C0D398E22A240763FE51000094
-:104F60001DDEA60868118F11892B93E008FF02C08F
-:104F70008F9FE50D990299E2047F11C0D49DE1084D
-:104F8000FF029FE463FFD0006C1004C020D10F002B
-:104F90006C100485210D381114DE848622A42408A7
-:104FA000660C962205330B9321743B13C862D230F2
-:104FB000D10FC030BC29992199209322D230D10F32
-:104FC000233DF8932163FFE36C100AD62094181751
-:104FD000DE79D930B83898199914655252C0E1D2A7
-:104FE000E02E61021DDE760E0E4C65E1628F308E82
-:104FF000190F6F512FFCFD65F1558EE129D0230E5D
-:105000008F5077E66B8F181EDEB3B0FF0FF4110FD1
-:105010001F146590CE18DEB08C60A8CCC0B119DE2C
-:105020006428600B09CC0B0D880929811C28811A82
-:105030002A0A0009880C08BA381BDEA60CA90A291E
-:1050400092947B9B0260008C2B600C94160CBD111B
-:10505000A7DD29D286B8487983026000D219DE56CE
-:1050600009B80A2882A398176880026000A360002C
-:10507000A51ADE9A84180AEE01CA981BDE4D8C1917
-:105080002BB0008CC06EB3131DDE4A0C1C520DCC2D
-:105090000B2DC295C0A17EDBAE6000380C0C5360B6
-:1050A000000900000018DE8C8C60A8CCC0B119DEAD
-:1050B0004028600B09CC0B0D880929811C28811A16
-:1050C0002A0A0009880C08BA380CA90A2992947E89
-:1050D000930263FF72DA60C0BA580730645073609D
-:1050E000026600001ADE338C192AA0008CC06EA361
-:1050F0001A18DE2F0C1C5208CC0B18DE762BC2952A
-:10510000C0A178B30263FF3F63FFC9000C0C536377
-:10511000FF09896078991829D285C9922B729E1D42
-:10512000DE246EB8232DD226991369D00B60000DB2
-:10513000DA6058071A6000170088607D890A9A1A99
-:1051400029729D9C129915CF95DA60C0B658071345
-:105150006551F58D148C18DBD08DD0066A020D6D6B
-:1051600051580584D3A09A1464A1DD82A085A1B80A
-:10517000AF9F190505470202479518C05163FE60AD
-:105180002B6104C08C8931C0A009F950098A386E9E
-:10519000B81F2C6066A2CC0C0C472C64667CAB114B
-:1051A0009F119E1B8A155805958E1B8F11C0A02A32
-:1051B00064669F1164F0E12912032812096DF91742
-:1051C0002F810300908DAEFE0080889F9200908C0E
-:1051D000008088B89900908C65514E8A10851A8B92
-:1051E000301FDE06881229600708580A2C82942D89
-:1051F00061040ECC0C2C86946FDB3C1CDE30AC9C26
-:1052000029C0800B5D50A29909094729C48065D047
-:10521000DA2E600CC0D01FDDEF0CE811AFEEA788CE
-:105220002282852DE4CF02420B228685D2A0D10FA7
-:105230008E300E0E4763FDA6A29C0C0C472C640713
-:105240007AB6CD8B602E600A280AFF08E80C6481CC
-:105250000E18DE1983168213B33902330B2C341661
-:105260002D350AC02392319F30C020923308B202FC
-:1052700008E80292349832C0802864072B600CD270
-:10528000A01CDDD40CBE11A7EE2DE285ACBB28B46A
-:10529000CF0D9D0B2DE685D10F8B1888138D30B85F
-:1052A0008C0D8F470D4950B4990499100D0D5F0472
-:1052B000DD1009FF029F800DBB029B8165508D852B
-:1052C0001AB83AC0F1C0800CF83808084264806B04
-:1052D0001BDDB519DDB629B67E8D18B0DD6DDA059A
-:1052E00000A08800C08CC0A063FEF30082138B1660
-:1052F0001DDDC628600AC0E02EC4800D880202B2FF
-:105300000B99239F20C0D298229D2122600CB2BB12
-:105310000C2D11A7DD28D28508BB0B18DDAE2BD6CE
-:1053200085A8222E24CFD2A0D10F9E1B851A2A6CCD
-:10533000748B185BFF178E1B63FEA300C087C090A1
-:105340000AF93879809263FF86C020D10F9E1B2A0C
-:105350006C74C0B18D185805398E1B851A63FE7E9A
-:10536000886B8213891608BE110ECE0202920B9E24
-:1053700025B4991EDDA19F200E88029822C0EF045B
-:10538000D8110E88029824C0E49E21C080D2A02BA0
-:10539000600C2864071CDD8F0CBE11A7EE2DE28582
-:1053A000ACBB28B4CF0D9D0B2DE685D10F000000BE
-:1053B0006C1004C020D10F006C10048633C071C083
-:1053C00030600001B13300310400741A04620174CA
-:1053D00060F1D10F6C1004022A02033B025BFFF65E
-:1053E0001CDD771BDDBFC79F88B009A903098A01AF
-:1053F0009AB079801EC0F00FE4311DDD6E0002000E
-:105400002BD2821EDDB82AC1020EBB022BD6820A25
-:10541000E431D10F28C102C19009880208084F2841
-:10542000C50208E431D10F006C1004C0C00CE43197
-:1054300012DD631ADD6000020029A28218DDAC1BB8
-:10544000DDAA2621020B990108660129A6822625DC
-:105450000206E43114DDA715DDA2236A902326128B
-:105460008550242611252613222C40D10F00000040
-:105470006C1008D6102B0A64291AB41ADD4D0D23BE
-:10548000111CDD4E0F2511B81898130E551118DD9B
-:1054900099AC55A838AA332C80FF2A80FEA933285E
-:1054A0008D0129800108AA112880000CAA02088811
-:1054B0001109880208AA1C288C08281604580862BA
-:1054C00014DD3F0AA7022441162A30802B1204075C
-:1054D000AA2858085DB1338B13B4559A6004AC28E0
-:1054E000B4662C56277B69E016DD769412C050C056
-:1054F000D017DD329D15D370D4102F60802E6082BE
-:105500009F169E17881672891A8D128C402A607F0A
-:105510000DCC282B3A200CAA2858084BC0B10ABE43
-:10552000372E35408F1772F91A8D128C402A608100
-:105530000DCC282B3A200CAA28580843C0B10ABE2B
-:10554000372E3542B233B444B1556952B6B466C051
-:10555000508F15B877D370B2FF9F156EF899D10FA1
-:105560006C1004C021D10F006C1004270A001CDD50
-:10557000111FDD221EDD251DDD0E1ADD501BDD5E37
-:10558000C02824B0006D2A75AA48288080C0916484
-:10559000806100410415DD09C03125502E00361A06
-:1055A0000655010595390C56110C66082962966E50
-:1055B000974D0D590A29922468900812DD42024243
-:1055C0000872993B23629512DD06CB349F3002822C
-:1055D000020E4402C092993194329233AD52246249
-:1055E00095C090244C1024669524B0002924A0AACC
-:1055F00042292480B177B14404044224B400D10F7D
-:10560000D10FD10F6C10041ADCEA2AA00058021C3A
-:105610005BFFD5022A02033B025BFFD11BDCE8C91A
-:10562000A12CB102C0D40DCC020C0C4F2CB5020C35
-:10563000E431D10FC0A00AE43118DCDE0002002FF3
-:10564000828219DCF12EB10209FF022F86820EE45C
-:1056500031D10F006C1004C02002E43114DCD816E4
-:10566000DCD5000200226282234102732F0603E48C
-:1056700031C020D10F19DD221ADD212841020A2A6A
-:10568000010988012A668228450208E43115DD18DF
-:1056900012DD1D25461DD10F6C1004292006289C03
-:1056A000F96480A02A9CFD65A0968A288D262F0A81
-:1056B000087AD9042B221FC8BD2C206464C0812E17
-:1056C00022090EAE0C66E0782B200C1EDCBA0CBC56
-:1056D00011AECC28C28619DCB878F3026000AD099F
-:1056E000B90A2992A36890082E220009EE0C65E001
-:1056F0009B29C2851FDCC26490929F90C0E41FDC8E
-:10570000CE9E9128200AC0E09E930F88029892882E
-:10571000200F880298942F20079A979D962F950A1C
-:105720002E240728200629206468833328C2851286
-:10573000DCA9288C20A2B22E24CF28C685C020D177
-:105740000FC020D10F2A206A0111020A2A4165AF39
-:1057500052DA20C0B05805E464AFE5C021D10F0093
-:10576000649FC81FDC962D20168FF209DD0C00F116
-:105770000400DD1AADAD9D2912DC9728C285A2B2C6
-:105780002E24CF288C2028C685C020D10FC021D13F
-:105790000F0000006C1004260A001BDCDB15DC8700
-:1057A00028206517DC84288CFE6480940C4D110D34
-:1057B000BD082CD2F52BD2F42ED2F77CB13DB4BB70
-:1057C0002BD6F47BE9052BD2F62BD6F47CB92C2A08
-:1057D000D2F62AD6F52AD6F406E431000200287261
-:1057E000822AFAFF004104290A012F510200991A66
-:1057F0000A99030988012876820FE4312624652B53
-:10580000D2F48E5A2CD2F5B0EE9E5A7BCB1629D20A
-:10581000F62FD2F70CB80C09FF0C08FF0C0F2F1451
-:10582000C8F96000320BCA0C0A2A14CEA92B510207
-:10583000C0C20CBB020B0B4F2B55020BE431D10F36
-:1058400000DB30DA205BFF941BDCB064AF5D0C4DF5
-:1058500011ADBD63FFA8000006E4310002002F7205
-:105860008218DC6E2E510208FF022F76820EE43180
-:10587000D10F00006C1004C03003E43116DC4E156B
-:10588000DC4F00020024628274472118DCA0875A92
-:10589000084801286682CD7319DC9E0C2A11AA994A
-:1058A0002292832992847291038220CC292B510267
-:1058B0000BE431C020D10F001FDC972E51020FEEF8
-:1058C000012E55020EE431B02DB17C9C5A12DC92AF
-:1058D00008DD112D5619D10F6C10061BDC351EDCAE
-:1058E0003722B0001ADC8E6F23721DDC75C0481899
-:1058F000DC8D1FDC8BDC10D5C083F000808600506F
-:105900008A6D4A4F0F35110D34092440800B560A19
-:10591000296294B1330E55092251400F44110C44B1
-:105920000A874009A80C02883622514107883608A8
-:10593000770CA8992966949740296295874109A810
-:105940000C02883607883608770CA899296695973F
-:1059500041030342B13808084298F0D10F1CDC72B1
-:1059600013DC7327B0002332B5647057C091C0D0E8
-:1059700016DC7115DC6FC0402AC00003884328C4C0
-:10598000006D793C004104B14400971A7780148E71
-:10599000502FB2952DB695AFEE2EED2006EE369E29
-:1059A0005060001877A00983509D5023B695600081
-:1059B0000223B295223D2006223622B695B455B870
-:1059C000BBD10F0003884328C400D10F6C1004C062
-:1059D0004004E43115DC59000200885013DC58CB38
-:1059E000815BFFBD1CDC570C2D11ADCC2BC2822A74
-:1059F000C28394507BAB142EC28429C2850ABD0C8D
-:105A00000E990C0D990C0929146000050BA90C09BD
-:105A10002914993015DBEA2A51020AE4312A2CFCB8
-:105A200058004B2B32000AA2022BBCFF9B30CCB695
-:105A3000C8A4D2A0D10F000004E4311EDBDE0002B6
-:105A4000002DE2822FBAFF2C51020FDD012DE682DC
-:105A50000CE431D10F0000006C1004D10F000000E5
-:105A60006C1004C020D10F006C100413DC36C0D1C0
-:105A700003230923318DC0A06F340260008D19DB30
-:105A8000CD1BDBCE17DC2F0C2811A87726728325BF
-:105A900072822CFAFF76514788502E7285255C045D
-:105AA00025768275E9052572842576827659292E18
-:105AB00072842E76822E76830AE4310002002392CD
-:105AC000820021042FB10200D61A0C6603063301AE
-:105AD0002396820FE43126728325728260000200D1
-:105AE000D8A07659220AE4310002002392820021D4
-:105AF0000400D21A2FB1020C220302320122968234
-:105B00000FE431D280D10F00D280D10FC020D10F4D
-:105B10006C1004DB30862015DBA6280A002825023D
-:105B2000DA2028B0002CB00705880A28824C2D0AFC
-:105B3000010B8000DBA065AFE61ADB9F0A4A0A2949
-:105B4000A2A3C7BF769101D10F2BA6A3D10F00004E
-:105B50006C1004C0D1C7CF1BDB9919DB9617DB94FF
-:105B60000C2811A87786758574C0A076516288507C
-:105B70008E77B455957475E90385769574765927B3
-:105B80008F769F759F740AE431000200239282B4DD
-:105B90002E2FB10200E10400D61A0C660306330171
-:105BA0002396820FE431867583747639280AE431AE
-:105BB0000002002E9282B42200210424B10200DFF0
-:105BC0001A0CFF030FEE012E968204E431D280D12D
-:105BD0000FD8A07651D6D280D10F00006C100429C6
-:105BE0000A801EDB9A1FDB9A1CDB730C2B11ACBBEB
-:105BF0002C2CFC2DB2850FCC029ED19CD0C051C064
-:105C00007013DB9614DB9518DB932AB285A8280461
-:105C1000240A234691A986B8AA2AB685A98827848A
-:105C20009F25649FD10F00006C100419DBC70C2A5C
-:105C300011A9A98990C484798B761BDBB5ABAC2AFA
-:105C4000C2832CC2847AC1688AA02BBC30D3A064E2
-:105C5000A05E0B2B0A2CB2A319DB7F68C0071DDBEB
-:105C6000BBD30F7DC94AA929299D0129901F68919D
-:105C70003270A603D3A0CA9E689210C7AF2AB6A3FB
-:105C80002A2CFC5BFFB3D230D10F000013DBB10331
-:105C9000A3018C311DDB510C8C140DCC012CB6A34F
-:105CA00063FFDC00C020D10FDA205BFFCCC020D125
-:105CB0000FC020D10F0000006C1004DB30C0D019E1
-:105CC000DB3CDA2028300022300708481209880A15
-:105CD00028824CDC200B80001BDB370C4A11ABAA5E
-:105CE00029A28409290B29A684D10F006C1004C0B5
-:105CF0004118DB3017DB320C2611A727277030A89C
-:105D000066256286007104A35500441A7541482235
-:105D1000628415DB5202320BC922882117DB2F085F
-:105D20008414074401754905C834C020D10FD10F30
-:105D30000809471DDB86C0B28E201FDB1D0E0E43F7
-:105D4000AFEC2BC4A00FEE0A2DE6242A6284C020FB
-:105D50000A990B296684D10FC020D10F6C1004DB87
-:105D600030C0D018DB13DA20253000223007085865
-:105D70000A28824CDC200B80008931709E121BDBCC
-:105D80000D0C4A11ABAA29A28409290B29A684D19A
-:105D90000F09C95268532600910418DB08C0A12FCF
-:105DA000811200AA1A0AFF022F85121EDB020C4D77
-:105DB00011AEDD2CD2840C2C0B2CD684D10FC081DB
-:105DC0001FDAFFB89A0A0A472EF11200A1040088D0
-:105DD0001A08EE022EF5121DDAF70C4C11ADCC2B81
-:105DE000C2840B2B0B2BC684D10F00006C1004DB7C
-:105DF00030C0D019DAEFDA202830002230070988C5
-:105E00000A28824CDC200B80001CDAEA0C4B11AC17
-:105E1000BB2AB2840A2A0B2AB684D10F6C1004C0A4
-:105E20004118DAE416DAE60C2711A626266030A817
-:105E300072252286006104A35500441A7541082288
-:105E4000228402320BD10F00C020D10F6C10041538
-:105E5000DB410249142956112452120208430F88CB
-:105E600011C07300810400361A008104C78F0077C7
-:105E70001A087703074401064402245612D10F0082
-:105E80006C10066E23026000AC6420A7C0A08510D1
-:105E900013DB1916DB30C040A6AA2BA2AE0B1941AA
-:105EA00064906668915D68925268933C2AA2AA2821
-:105EB0003C7F288C7F0A0A4D2980012880002AAC6B
-:105EC000F20888110988027589462B3D0129B00026
-:105ED0002BB0010899110B99027A9934B8332A2A08
-:105EE00000B1447249B160004A7FBF0715DB1B63F4
-:105EF000FFB90000253AE863FFB10000253AE863E6
-:105F0000FFA90000250A6463FFA1C05A63FF9C003B
-:105F100000705F082534FF058C142C34FE70AF0B25
-:105F20000A8D142E3D012AE4012DE400DA405BFDC8
-:105F30005063FFA7D10FD10F6C10041ADAA019DA41
-:105F40009D1CDB061BDB07C080C07160000D0000DC
-:105F50000022A430B1AA299C107B915F26928679F9
-:105F6000C2156E6262C0206D080AB12200210400D1
-:105F7000741A764BDB63FFEE2292850D63110325C5
-:105F800014645FCFD650032D436DD9039820B422FB
-:105F90000644146D49229820982198229823982429
-:105FA00098259826982798289829982A982B982CED
-:105FB000982D982E982F222C4063FF971EDA7E276B
-:105FC000E68027E681D10F00C02063FF8300000038
-:105FD0006C1004C062C04112DA791ADA7513DAE182
-:105FE0002AA00023322D19DADB2BACFE2992AE6EEB
-:105FF000A30260008E090E402D1AC2C2CD0EDC39FC
-:106000002C251664B0895BFF9E15DAD71ADAD12BDE
-:106010003AE80A3A0158058C2B21160ABB28D3A06E
-:106020009B505805A32B52000ABB082A0A005805AA
-:10603000A215DACE2D21022C3AE80C3C2804DD0210
-:106040002D25029C5058059A8B50AABBC0A158051B
-:106050009A1CDAC72D21020C3C2806DD0213DAC592
-:106060002D25029C305805928B30AABBC0A2580542
-:10607000922A2102C0B40BAA020A0A4F2A2502580A
-:1060800005A6D10F242423C3CC2C251663FF76004C
-:1060900018DABD1CDAB919DABA1BDAB817DA8B8547
-:1060A000202E0AFD1FDAB92D202E24F47A24F47E46
-:1060B00024F4820EDD0124F4862E0AF70755280603
-:1060C000DD02C0750EDD01050506AB5BA959C0E810
-:1060D000AC5C24C4AB0EDD0227C4AC2E0ADFA8558D
-:1060E00027B4EC0EDD0124B4EBC2E027942C0EDDC6
-:1060F0000224942B2E0A800D0D4627546C24546BD9
-:106100000EDD022D242E63FEFC0000006C10042A1C
-:106110000A302B0A035BFF4D12DA8FC39029261633
-:10612000C3A1C0B3C08A2826175BFF48C03CC3B1D7
-:106130002B26161ADA222AA02023261764A079C358
-:10614000A2C0B15BFF42C3A2C0B15BFF40C3C22C7F
-:106150002616C2AFC0B12326175BFF3CC28F28268C
-:1061600016C0FE2F2617C2E22E26162A0AA1C0B19B
-:10617000C0D82D26175BFF352A0AA12A2616C3A6EA
-:10618000C0B3C1922926175BFF31C3C62C2616C1A6
-:10619000B32A0AA22B2617C0B35BFF2C290AA22917
-:1061A0002616C185282617C2FB2F2616C0E72E26E5
-:1061B000171DDA762D2610D10FC3A2C0B35BFF23C3
-:1061C00063FF82006C10041CDA3F1BDA2C18DA70B3
-:1061D00017DA7116DA7115DA71C0E0C0D414DA3B3F
-:1061E0001FD9F7C0288FF06D2A36DAC0D9C07C5B82
-:1061F000020FC90C1CDA350C9C28A8C3A6C22A368B
-:10620000802A2584A4C2A7CC2D248C2B248A2B245D
-:10621000872E248BB1BB2E369F2C369E2C369DB1FB
-:10622000AC1CDA161BDA5FC0286D2A33DAC0D9C07D
-:106230007C5B020FC90C1CDA240C9C28A8C3A6C2E4
-:106240002A36802B2584A4C2B1BBA7CC2D248C2E4A
-:10625000248B2A248A2E369F2C369E2C369DB1AC58
-:10626000C07919DA141BDA5113DA4F1ADA4F18DA37
-:106270005014DA1516DA5004F42812DA4F04660CBA
-:10628000040506A252A858AA5AA3539B3029A50078
-:1062900027848AC091C0A52A848C29848B17DA4868
-:1062A00018DA47A75726361D26361E2E361F16DA51
-:1062B0004513DA45A65504330C2826C82E75002D43
-:1062C00054AC2E54AB2E54AA2326E62326E52E26C4
-:1062D000E7D10F006C100613DA2317DA1E24723D83
-:1062E0002232937F2F0B6D08052832937F8F026334
-:1062F000FFF3C0C4C0B01AD9B1C051D94004593954
-:1063000029A4206E44020BB502C3281ED9ACDDB00F
-:1063100025E422052D392DE421C0501EDA2C19DA8E
-:106320001C18DA1C16DA1E1DDA2A94102A72451778
-:10633000D9E76DA94BD450B3557A5B17DF50756B15
-:10634000071FD99E8FF00F5F0C12D9DF02F228AE23
-:106350002222D681D54013D9DC746B0715D99885D4
-:106360005005450C035328B145A73FA832A9332255
-:10637000369D22369E2436802B369F2BF48B2CF4B0
-:106380008C14D9F824424DC030041414C84C6D0844
-:1063900006B133041414C84263FFF20015D985C452
-:1063A000400031041AD986C0D193A200DD1AC13849
-:1063B000B0DD9DA318D9EC2B824D29824E29A51C56
-:1063C0002882537A871E2C54008E106FE45D12D9F8
-:1063D0007B2F211D23211C2F251B04330C23251C5F
-:1063E00023251AD10FC06218D9DB88807E87D9890E
-:1063F000102654006F94191BD9712AB11C0A1A1463
-:1064000004AA0C2AB51C2AB51D2AB51A2AB51BD117
-:106410000F1BD96A2AB11C0A1A1403AA0C2AB51C2C
-:106420002AB51D2AB51A2AB51BD10F001CD9642B19
-:10643000C11D2DC11C2BC51B03DD0C2DC51C2DC57D
-:106440001AD10F006C100619D95D14D9C212D9C522
-:1064500015D9E0C73FC0E02E56A82E56A92E56AA41
-:106460002E56AB23262918D985DB101CD9DAC0D4C7
-:106470002A42452D16019C1000B0890A880C2896E6
-:10648000005BFF942B22E318D94D0B5B149B842AED
-:1064900022E48B84B1AA0A5A140BAA0C9A852922E9
-:1064A000E509591499862F22CD0F5F149F875BFF52
-:1064B000455BFF1623463BC1B01DD9401CD99E2A1F
-:1064C000D1022C463A0BAA020A0A4F2AD5025804D6
-:1064D000925BFEBF5BFE98C050C0B016D93614D98F
-:1064E0003E17D9AEC0C0C73E93122C262DC03060D7
-:1064F00000440000007F9F0FB155091914659FF4F7
-:10650000C0500AA9027FA7EF18D92ADA5008580A02
-:1065100028822C2B0A000B8000005104D2A0C091CD
-:10652000C7AF00991A0A99039912CE3864206BD329
-:10653000202B20072516032C12022A62827CA863D6
-:1065400018D91C01110208580A28822CDA500B8035
-:1065500000D2A0643FD58A310A8A1404AA01C82A4D
-:106560002B22010B8B1404BB017BA945DDA07A7B98
-:10657000081DD9122DD2000DAD0CDB3019D90D1A22
-:10658000D95288130ADA28DC801DD99009880A2894
-:10659000823C0DAA080B8000652F93D320C0B06306
-:1065A000FF9400007FAF34B1550050040A0919630D
-:1065B000FF42DAB07B7B081AD9012AA2000ABA0C82
-:1065C0001BD9428C310BAB280C8A141CD980ACBB74
-:1065D0001CD98004AA012BC68163FF8F645F60C051
-:1065E00050C0B0C7CE9C1263FF5500006C1004274A
-:1065F000221EC08008E4311BD8EF0002002AB282BC
-:1066000019D8EF003104C06100661A2991020A6AA4
-:10661000022AB68209E43115D94A0C3811A8532848
-:1066200032822432842A8CFC7841102921022A36B5
-:106630008297A0096902292502D10F002B21022C83
-:1066400032850B6B022CCCFC2C368297C02B25029A
-:10665000D10F00006C1004C0E71DD8D21CD8D40D97
-:106660004911D7208B228A200B4B0BD2A007A80CF4
-:106670009B72288CF4C8346F8E026000A31FD8CAA6
-:10668000A298AF7B78B334C93DC081C0F0028F3887
-:106690000F0F42C9FA2CD67ED5206D4A05003088EE
-:1066A00000508C887008980878B16DD2A09870D18D
-:1066B0000FC0F0038F387FE0DE63FFD8027B0CAFA2
-:1066C000BB0B990C643047D830C0F1C05002F5388C
-:1066D0000505426450792CD67E0B36122F6C100FB4
-:1066E0004F366DFA0500808800208C06440CC0816E
-:1066F000C05003B208237C0C038538050542645062
-:106700005A2CD67ED30F6D4A0500208800308CD2DB
-:10671000A0A798BC889870D10FD2A0BC799970D1ED
-:106720000FD2302BAD08C0F1C0500BF53805054233
-:10673000CB542CD67E083F14260A100F660C064652
-:10674000366D6A0500208800B08C827063FF2D00D2
-:10675000C05003F53875E08063FF7A00C0600286A0
-:106760003876E09F63FF9900C05003F53875E0C4A8
-:1067700063FFBE006C1004D62068520F695324DA00
-:1067800020DB30DC405800F3D2A0D10FDA20DB3020
-:10679000DC405800F09A2424240EC02122640FC04B
-:1067A00020D10F00B83BB04C2A2C7489242D200E28
-:1067B0002E200FA4DDB1EE2E240FB0DD2D240E28E7
-:1067C00090072D9003A488B088B1DD2D9403289400
-:1067D000075BFFA069511DC0E082242A600F18D812
-:1067E000FE2A240329600E8F2029240708FF029F18
-:1067F000209E64D10FC020D10F0000006C100494C3
-:106800002319D8F6C0B3083A110BAA02992019D857
-:10681000699A2116D867C05028929D2564A2288CB9
-:106820001828969DD10F00006C1004282066C038EF
-:10683000232406B788282466D10F00006C100603B5
-:106840005A0C0D36110D5C11D8208B2282210CBB05
-:106850000C06550F9B8202320B928113D853D9201C
-:10686000A38F6450561CD84FC0D71BD850A256C017
-:10687000E1C09004E93809094276F34F044302CAA3
-:10688000912BC67ED30F6DAA0500208800308C891D
-:1068900081A95909FA0C64A07D99818A8264A00FAC
-:1068A000D290D10FC06002E63876D0D763FFD10016
-:1068B000C020BC89998199809282D10F7F230429BD
-:1068C0002DF8998165BFD863FFE50000028F0CA306
-:1068D000FF0F3312931003AA0CD3406490402BC6D1
-:1068E0007E8610D30F6D6A0500208800308CBC8234
-:1068F000C090A4F3C041034938090942CA9B2BC682
-:106900007E6DAA0500208800308C0F590CA989BC27
-:1069100099998163FF8400BC89998163FF7C00C0E1
-:106920006002E63876D0B963FFB300C07002473822
-:1069300077D0CD63FFC700006C100414D82AC15271
-:10694000A424CA3128221D73811C292102659016B6
-:106950002A300075A912022A02033B022C3007C01C
-:10696000D25801D0653FDCD10F2B300703BB0B0B96
-:10697000BA0274B3022ABDF8D3A063FFC4000000BA
-:106980006C1004292006C0706E9741292102C08F27
-:106990002A2014C0B62B240606AA022A24147980C1
-:1069A000022725022A221E2C221D7AC10EC8ABDA2C
-:1069B00020DB302C0A00033D025BF80D6450742D7F
-:1069C00021020D0D4CC9D3C020D10F00002E9CFB1D
-:1069D00064E0822F21020F0F4C65F0911AD7F61C4C
-:1069E000D7F429A29EC08A798B5D2BC22668B00499
-:1069F0008D207BD95229A29DC0F364904A97901DA7
-:106A0000D8062E21049D9608EE110FEE029E979E49
-:106A10009118D802C0E527C4A22E24062BA29D2FD0
-:106A200021022BBC3008FF022F25022BA69DC0207F
-:106A3000D10F00002F300068F939DA20DB30044C28
-:106A40000258004463FF7700022A022B0A0658000E
-:106A5000D3220A00D10F6550102830006889240223
-:106A60002A02033B02DC4058003BC020D10FD27009
-:106A7000D10F00002A2C74033B02044C025BFEF58C
-:106A800063FF3B00DB30DC402A2C745BFEF2C0204D
-:106A9000D10F00006C1004C83F89268829A399995A
-:106AA0002609880C080848282525CC52C020D10F7B
-:106AB000DB402A2C745BF936D2A0D10F6C1004D8BD
-:106AC00020D73082220D451105220C928264207459
-:106AD00007420B13D7B5D420A383732302242DF8C8
-:106AE000858074514CBC82C0906D081600408800AF
-:106AF000708C773903D720C0918680743901D420F7
-:106B000074610263FFE2CA98C097C0411BD835C0C8
-:106B1000A00B8B0C0B4A380A0A42C9AA1DD7A21C2B
-:106B2000D7A32CD67EC140D30F6D4A050020880024
-:106B3000308C9780D270D10FBC8FC0E00F4E387E62
-:106B400090E263FFD6BC8292819280C0209282D173
-:106B50000F0000006C1006C0D71CD7921BD7940DF5
-:106B60004911D7202E221F28221D0E4E0BD280073E
-:106B70008A0C2E761F2AAC80C8346FAE026000CB20
-:106B80002F0A801AD798A29EAA7A7EA33FC93FC037
-:106B9000E1C05002E538050542CA552BC67EDB2010
-:106BA000D30F6D4A0500308800B08C2E721DAE9E4A
-:106BB0000EA50C645086D2802E761DC091298403C8
-:106BC000D10FC05003E53875D0D363FFCD15D785FD
-:106BD000027E0CA5EE643051C0A1250A0002A53842
-:106BE000033A020505426450922BC67E0E3512957B
-:106BF00010255C10054536D30F6D5A0500A088009E
-:106C0000208CC0A1A3E2C05023FA8003730C03A51B
-:106C100038AF730505426450722BC67E851005455A
-:106C20000C6D5A0500208800308CD280C0A10E9BCC
-:106C30000CAB7BAFBB2B761D2A8403D10FD280C057
-:106C4000C1AF7D2D761D2C8403D10F00D2302E8D47
-:106C500008C0F1C0500EF538050542CB592BC67E51
-:106C60000A3F14C1600F660C064636D30F6D6A05E5
-:106C700000208800E08C22721D63FF03C061C050B9
-:106C800003653875D80263FF6263FF5CC05002A5DC
-:106C90003875D08763FF8100C06003F63876D0BFB7
-:106CA00063FFB9006C10042A201529201614D7435D
-:106CB0000A990CCB9D2E200B04ED092BD11C8F289B
-:106CC00009BC36ACAA0CBB0C2BD51C0A0A472A24DB
-:106CD00015CAAF8B438942B0A800910400881AA856
-:106CE000FF0FBB029B278F260FB80C783B1AC020E2
-:106CF000D10F0000292102C0A20A9902292502C051
-:106D000021D10F008B2763FFDC2BD11C0CAA0C0AAE
-:106D10000A472A2415ACBB2BD51CC9AE8B438C2843
-:106D20008F42B0AD00F10400DD1AADCC0CBB029B6C
-:106D300027DA20B7EB580019C021D10F9F2763FF36
-:106D4000EF0000006C100428203C643047053060E0
-:106D500000073E01053EB156076539054928C77F42
-:106D6000A933030641076603B166060641A6337ED2
-:106D7000871E222125291AFC732B1502380C098144
-:106D80006000063E01023EB12406423903220AD1C8
-:106D90000FD230D10FC05163FFC000006C10042728
-:106DA000221EC08008E4311DD7030002002CD282CD
-:106DB0001BD703003104C06100661A2BB1020C6CB2
-:106DC000022CD6820BE43119D7870C3A11AA9328EA
-:106DD00032829780253282243284B45525368275DA
-:106DE000410A292102096902292502D10F2A21021B
-:106DF0002B32830A6A022B36822A2502D10F000029
-:106E00006C100418D6EC0C2711087708267286251A
-:106E10003C04765B1315D6E805220A2222A36820DB
-:106E200002742904227285D10FC020D10F00000006
-:106E30006C100419D6EB27221EC08009770208E4E3
-:106E4000311DD6DC0002002CD2821BD6DC003104BE
-:106E5000C06100661A2BB1020C6C022CD6820BE4C6
-:106E60003119D7600C3A11AA9328328297802532C3
-:106E700082243284B45525368275410B2A21020AB8
-:106E80006A022A2502D10F002B21022C32830B6BC0
-:106E9000022C36822B2502D10F0000006C10041B3F
-:106EA000D6C50C2A11ABAA29A286B438798B221B2D
-:106EB000D6C219D6E80B2B0A2BB2A309290868B051
-:106EC0000274B90D299D0129901F6E920822A28596
-:106ED000D10FC020D10FC892C020D10FDA205BEEB5
-:106EE000B3C020D10F0000006C100414D6B22842A9
-:106EF0009E19D6AF6F88026000BA29922668900763
-:106F00008A2009AA0C65A0AC2A429DC0DC64A0A41A
-:106F10002B200C19D6A90CBC11A4CC2EC28609B901
-:106F20000A7ED30260009A2992A36890078D2009F7
-:106F3000DD0C65D08C25C2856450862D2104C030BF
-:106F40006ED80D2C2066B8CC0C0C472C246665C07E
-:106F50007B1CD72518D6AF1AD6A619D6B61DD6AB28
-:106F6000C0E49E519D508F209357935599539A5644
-:106F70009A5408FF021AD6C29F5288269F5A9E59D9
-:106F80009D58935E9C5D935C9A5B08084805881148
-:106F9000985FC0D81FD6900CB911A499289285AFDC
-:106FA000BF23F4CF288C402896858E262D24069E5C
-:106FB00029C020D10FCA33DA20C0B65BFF78C72FB3
-:106FC000D10FC93ADA205BFF75C72FD10FDBD05B39
-:106FD000FE0B2324662B200C63FF7500C72FD10FF7
-:106FE000C72FD10F6C1004C85B29200668941C6859
-:106FF0009607C020D10FC020D10FDA20DB30DC4053
-:10700000DD502E0A005BFE5ED2A0D10F2E200C18A0
-:10701000D6690CEF11A8FF29F286C088798B791AFE
-:10702000D6660AEA0A2AA2A368A0048B207AB96865
-:1070300023F2856430621BD670290A802C206828D0
-:1070400020672D21040B881104DD1108DD020DCC11
-:1070500002C0842D4A100DCC021DD66898319D3097
-:107060008A2B99379C340BAA02C0C09C359C369A57
-:10707000322A2C74DB4028F285C0D3288C2028F6D5
-:10708000852C25042D24061FD653DD40AFEE2CE4BD
-:10709000CF5BFDEAD2A0D10F00DA20DBE05BFF3F3F
-:1070A000C020D10F6C100AD6302A200624160128E1
-:1070B000ACF86583862B2122C0F22A2124CC572AE2
-:1070C000AC010A0A4F2A25247ABB0260037F2C21D7
-:1070D000020C0C4C65C3192E22158D32C0910EDDA9
-:1070E0000C65D39088381ED63364836B8C37C0B858
-:1070F000C0960CB9399914B49A9A120D9911991332
-:107100008F6718D62EC9FB2880217F83168B142CFD
-:1071100022002A200C5BFF61D4A064A3B38F6760B8
-:10712000002800002B200C89120CBA11AEAA2CA248
-:10713000861DD6217C9B3E0DBD0A2DD2A368D004AE
-:1071400088207D893024A28564436427212E07F797
-:107150003607F90C6F9D01D7F0DA20DB70C1C42D22
-:10716000211F5BFEF889268827DDA009880C7A8B11
-:10717000179A10600006C04063FFCC0000DA208B35
-:10718000105BFEC88D1065A267C0E09E488C649CB1
-:10719000498B658A669B4A9A4B97458F677F730236
-:1071A000600120CD529D10DA20DB302C12015BFEF5
-:1071B000698D10C051D6A08FA7C0C08A68974D9A1C
-:1071C0004C8869896A984E994F8E6A8A69AE7E7733
-:1071D000EB01B1AA9E6A9A698B60C0A00B8E1477EE
-:1071E000B701C0A1C091C08493159D179516C0D05A
-:1071F00025203CC030085801089338C0820833105D
-:10720000085B010535400B9D3807DD100BAB100EF8
-:1072100019402A211F07991003DD020DBB020553F7
-:10722000100933020A55112921250A2A14092914A3
-:107230000499110A99020933028A2B2921040BAA05
-:10724000021BD66A0899110955020855020BAA02B9
-:107250009A408920881408991109880219D5EA1DD5
-:10726000D66409880298418B2A9346954783150D69
-:10727000BB0285168D179B448A658966AACAA97CBC
-:1072800077CB01B1AA07FB0C9C669A6588268E29EC
-:10729000AD87972607EE0C0E0E482E25259B672BF3
-:1072A000200C87131ED5C40CB911AE99289285A75E
-:1072B0008828968517D5C8C090A7BB29B4CF871852
-:1072C00063FE3C008C60C0E0C091C0F0C034C0B828
-:1072D0002A210428203C08AA110B8B0103830103F7
-:1072E0009F380B9B39C03208FF10038801089E3875
-:1072F0000C881407EE100FEE020388010898390578
-:10730000BF1029211F0ABB1107881008FF020BAA12
-:107310000218D5BC09291403AA022B212583200BAE
-:107320002B1404BB110833110FBB020B99028B14F1
-:107330008F2A0B33020833028B2B64708688689780
-:107340004D984C8769886A93419946974E984FC0EB
-:107350007077C701C0719A4718D6260B7C100CECC9
-:107360000208F802984418D6230CBC0208CC029CF0
-:10737000402A200C295CFEC0801FD58E1CD5960C9F
-:10738000AE112B2124ACAAAFEEB0BB8F132CE2853B
-:1073900028A4CFAFCC2CE6852A22152B2524B1AA10
-:1073A0002A26156490DBC9D28F262E22090DFF08EC
-:1073B0002F26060FEE0C0E0E482E25256550E4C034
-:1073C00020D10F00C07093419F4499469A4777C7D8
-:1073D0000A1CD57A2CC022C0810C87381CD6070B1A
-:1073E000781008E80208B8020C8802984063FF8011
-:1073F00000CC57DA20DB608C115BFDD629210268B6
-:107400009806689403C020D10F2B221EC0A0292209
-:107410001D2A25027B9901C0B064BFE813D5652CF5
-:10742000B00728B000DA2003880A28824CC0D10BAC
-:107430008000DBA065AFE763FFCA000068A779DAC8
-:1074400020DB30DC40DD505BFEE7D2A0D10FC16D08
-:10745000C19D29252C60000429252CD6902624675F
-:107460002F2468DA20DB308C11DD502E0A805BFD82
-:1074700044D2A0D10FC168C1A82A252C63FFDD002A
-:107480000000C8DF8C268B29ADCC9C260CBB0C0BD6
-:107490000B482B25252A2C74DB602C12015BFD8701
-:1074A000D2A0D10F2A2C748B115BF6B9D2A0D10FC8
-:1074B000DA205BFE3A63FF3800DA20C0B15BFE8A57
-:1074C00064ABF1655F352D2124B1DD2D252463FFEB
-:1074D0001FDA202B200C5BFE5663FF1412D5C882E6
-:1074E00020028257C82163FFFC12D5C403E8300490
-:1074F000EE3005B13093209421952263FFFC00000B
-:1075000010D5C0910092019302940311D597821077
-:1075100001EA30A21101F031C04004E4160002007B
-:1075200011D5B98210234A00032202921011D5828C
-:10753000C021921004E4318403830282018100009F
-:10754000D23001230000000010D5B09100920193C9
-:1075500002940311D586821001EA30A21101F131A3
-:10756000C04004E41600020011D5A7821013D52BE9
-:10757000032202921004E431840383028201810019
-:1075800000D330013300000010D5A19100810165C6
-:10759000104981026510448103CF1F920193029428
-:1075A0000311D574821001EA30A21101F231C040FA
-:1075B00004E41600020011D593821013D5130322A0
-:1075C00002921004E431840383028201C01091030B
-:1075D00091029101810000D43001430012D542C0D4
-:1075E0003028374028374428374828374C233D0176
-:1075F0007233ED03020063FFFC00000010D585919B
-:107600000092019302940311D5838210921011D538
-:10761000348310032202921011D58012D5469210A5
-:10762000C04004E41600020011D577821013D52D56
-:10763000032202921004E431840383028201810058
-:1076400000D53001530000006C10026E322FD6209E
-:10765000056F04043F04745B2A05440C00410400D8
-:10766000331A220A006D490D73630403660CB122BC
-:107670000F2211031314736302222C01D10FC83B94
-:10768000D10F000073630CC021D10F000000000077
-:1076900044495630C020D10F6C10020040046B4C9E
-:1076A00007032318020219D10F020319C020D10FBA
-:1076B0006C100202EA30D10F6C1002CC2503F031BD
-:1076C00060000F006F220503F1316000056F230594
-:1076D00003F231000200D10F6C1002CC2502F03011
-:1076E000D10F00006F220402F130D10F6F2304028A
-:1076F000F230D10FC020D10F6C1002220A20230AD1
-:10770000006D280E28374028374428374828374C42
-:10771000233D01030200D10F6C100202E431D10FAE
-:107720000A004368656C73696F20465720444542E0
-:1077300055473D3020284275696C74204672692097
-:107740004D61792020382031363A30373A333620AF
-:107750005044542032303039206F6E20636C656F96
-:1077600070617472612E6173696364657369676EB9
-:107770006572732E636F6D3A2F686F6D652F666546
-:107780006C69782F772F66775F372E31292C20563A
-:10779000657273696F6E2054337878203030372EDD
-:1077A00030342E3030202D203130303730343030EE
-:0877B000100704000071489469
-:00000001FF
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 7f437ca..b840a49 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,6 +1,13 @@
 #define PHY_ID_BCM50610			0x0143bd60
 #define PHY_ID_BCM50610M		0x0143bd70
+#define PHY_ID_BCM5241			0x0143bc30
 #define PHY_ID_BCMAC131			0x0143bc70
+#define PHY_ID_BCM5481			0x0143bca0
+#define PHY_ID_BCM5482			0x0143bcb0
+#define PHY_ID_BCM5411			0x00206070
+#define PHY_ID_BCM5421			0x002060e0
+#define PHY_ID_BCM5464			0x002060b0
+#define PHY_ID_BCM5461			0x002060c0
 #define PHY_ID_BCM57780			0x03625d90
 
 #define PHY_BCM_OUI_MASK		0xfffffc00
diff --git a/include/linux/caif/caif_socket.h b/include/linux/caif/caif_socket.h
index 2a61eb1..d9cb19b 100644
--- a/include/linux/caif/caif_socket.h
+++ b/include/linux/caif/caif_socket.h
@@ -62,6 +62,7 @@
  * @CAIFPROTO_DATAGRAM_LOOP:	Datagram loopback channel, used for testing.
  * @CAIFPROTO_UTIL:		Utility (Psock) channel.
  * @CAIFPROTO_RFM:		Remote File Manager
+ * @CAIFPROTO_DEBUG:		Debug link
  *
  * This enum defines the CAIF Channel type to be used. This defines
  * the service to connect to on the modem.
@@ -72,6 +73,7 @@
 	CAIFPROTO_DATAGRAM_LOOP,
 	CAIFPROTO_UTIL,
 	CAIFPROTO_RFM,
+	CAIFPROTO_DEBUG,
 	_CAIFPROTO_MAX
 };
 #define	CAIFPROTO_MAX _CAIFPROTO_MAX
@@ -83,6 +85,28 @@
 enum caif_at_type {
 	CAIF_ATTYPE_PLAIN = 2
 };
+/**
+ * enum caif_debug_type - Content selection for debug connection
+ * @CAIF_DEBUG_TRACE_INTERACTIVE: Connection will contain
+ *				both trace and interactive debug.
+ * @CAIF_DEBUG_TRACE:		Connection contains trace only.
+ * @CAIF_DEBUG_INTERACTIVE:	Connection to interactive debug.
+ */
+enum caif_debug_type {
+	CAIF_DEBUG_TRACE_INTERACTIVE = 0,
+	CAIF_DEBUG_TRACE,
+	CAIF_DEBUG_INTERACTIVE,
+};
+
+/**
+ * enum caif_debug_service - Debug Service Endpoint
+ * @CAIF_RADIO_DEBUG_SERVICE:	Debug service on the Radio sub-system
+ * @CAIF_APP_DEBUG_SERVICE:	Debug for the applications sub-system
+ */
+enum caif_debug_service {
+	CAIF_RADIO_DEBUG_SERVICE = 1,
+	CAIF_APP_DEBUG_SERVICE
+};
 
 /**
  * struct sockaddr_caif - the sockaddr structure for CAIF sockets.
@@ -109,6 +133,12 @@
  *
  * @u.rfm.volume:            Volume to mount.
  *
+ * @u.dbg:		      Applies when family = CAIFPROTO_DEBUG.
+ *
+ * @u.dbg.type:			     Type of debug connection to set up
+ *			      (caif_debug_type).
+ *
+ * @u.dbg.service:	      Service sub-system to connect (caif_debug_service).
  * Description:
  * This structure holds the connect parameters used for setting up a
  * CAIF Channel. It defines the service to connect to on the modem.
@@ -130,6 +160,10 @@
 			__u32 connection_id;
 			char	  volume[16];
 		} rfm;				/* CAIFPROTO_RFM */
+		struct {
+			__u8  type;		/* type:enum caif_debug_type */
+			__u8  service;		/* service:caif_debug_service */
+		} dbg;				/* CAIFPROTO_DEBUG */
 	} u;
 };
 
diff --git a/include/linux/device.h b/include/linux/device.h
index 0713e10..6a8276f 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -638,43 +638,103 @@
 
 /* debugging and troubleshooting/diagnostic helpers. */
 extern const char *dev_driver_string(const struct device *dev);
-#define dev_printk(level, dev, format, arg...)	\
-	printk(level "%s %s: " format , dev_driver_string(dev) , \
-	       dev_name(dev) , ## arg)
 
-#define dev_emerg(dev, format, arg...)		\
-	dev_printk(KERN_EMERG , dev , format , ## arg)
-#define dev_alert(dev, format, arg...)		\
-	dev_printk(KERN_ALERT , dev , format , ## arg)
-#define dev_crit(dev, format, arg...)		\
-	dev_printk(KERN_CRIT , dev , format , ## arg)
-#define dev_err(dev, format, arg...)		\
-	dev_printk(KERN_ERR , dev , format , ## arg)
-#define dev_warn(dev, format, arg...)		\
-	dev_printk(KERN_WARNING , dev , format , ## arg)
-#define dev_notice(dev, format, arg...)		\
-	dev_printk(KERN_NOTICE , dev , format , ## arg)
-#define dev_info(dev, format, arg...)		\
-	dev_printk(KERN_INFO , dev , format , ## arg)
+
+#ifdef CONFIG_PRINTK
+
+extern int dev_printk(const char *level, const struct device *dev,
+		      const char *fmt, ...)
+	__attribute__ ((format (printf, 3, 4)));
+extern int dev_emerg(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int dev_alert(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int dev_crit(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int dev_err(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int dev_warn(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int dev_notice(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int _dev_info(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+
+#else
+
+static inline int dev_printk(const char *level, const struct device *dev,
+		      const char *fmt, ...)
+	__attribute__ ((format (printf, 3, 4)));
+static inline int dev_printk(const char *level, const struct device *dev,
+		      const char *fmt, ...)
+	 { return 0; }
+
+static inline int dev_emerg(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+static inline int dev_emerg(const struct device *dev, const char *fmt, ...)
+	{ return 0; }
+static inline int dev_crit(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+static inline int dev_crit(const struct device *dev, const char *fmt, ...)
+	{ return 0; }
+static inline int dev_alert(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+static inline int dev_alert(const struct device *dev, const char *fmt, ...)
+	{ return 0; }
+static inline int dev_err(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+static inline int dev_err(const struct device *dev, const char *fmt, ...)
+	{ return 0; }
+static inline int dev_warn(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+static inline int dev_warn(const struct device *dev, const char *fmt, ...)
+	{ return 0; }
+static inline int dev_notice(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+static inline int dev_notice(const struct device *dev, const char *fmt, ...)
+	{ return 0; }
+static inline int _dev_info(const struct device *dev, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+static inline int _dev_info(const struct device *dev, const char *fmt, ...)
+	{ return 0; }
+
+#endif
+
+/*
+ * Stupid hackaround for existing non-printk uses of the dev_info name
+ *
+ * Note that the definition of dev_info below is actually _dev_info
+ * and a macro is used to avoid redefining dev_info
+ */
+
+#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
 
 #if defined(DEBUG)
 #define dev_dbg(dev, format, arg...)		\
-	dev_printk(KERN_DEBUG , dev , format , ## arg)
+	dev_printk(KERN_DEBUG, dev, format, ##arg)
 #elif defined(CONFIG_DYNAMIC_DEBUG)
-#define dev_dbg(dev, format, ...) do { \
+#define dev_dbg(dev, format, ...)		     \
+do {						     \
 	dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
-	} while (0)
+} while (0)
 #else
-#define dev_dbg(dev, format, arg...)		\
-	({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
+#define dev_dbg(dev, format, arg...)				\
+({								\
+	if (0)							\
+		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
+	0;							\
+})
 #endif
 
 #ifdef VERBOSE_DEBUG
 #define dev_vdbg	dev_dbg
 #else
-
-#define dev_vdbg(dev, format, arg...)		\
-	({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
+#define dev_vdbg(dev, format, arg...)				\
+({								\
+	if (0)							\
+		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
+	0;							\
+})
 #endif
 
 /*
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index a55c873..c4627cb 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -30,6 +30,7 @@
 #define PCI_EEPROM_WIDTH_93C46	6
 #define PCI_EEPROM_WIDTH_93C56	8
 #define PCI_EEPROM_WIDTH_93C66	8
+#define PCI_EEPROM_WIDTH_93C86	8
 #define PCI_EEPROM_WIDTH_OPCODE	3
 #define PCI_EEPROM_WRITE_OPCODE	0x05
 #define PCI_EEPROM_READ_OPCODE	0x06
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index b4207ca..991269e 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -386,6 +386,15 @@
 	__u32				rule_locs[0];
 };
 
+struct ethtool_rxfh_indir {
+	__u32	cmd;
+	/* On entry, this is the array size of the user buffer.  On
+	 * return from ETHTOOL_GRXFHINDIR, this is the array size of
+	 * the hardware indirection table. */
+	__u32	size;
+	__u32	ring_index[0];	/* ring/queue index for each hash value */
+};
+
 struct ethtool_rx_ntuple_flow_spec {
 	__u32		 flow_type;
 	union {
@@ -459,7 +468,7 @@
 u32 ethtool_op_get_ufo(struct net_device *dev);
 int ethtool_op_set_ufo(struct net_device *dev, u32 data);
 u32 ethtool_op_get_flags(struct net_device *dev);
-int ethtool_op_set_flags(struct net_device *dev, u32 data);
+int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
 void ethtool_ntuple_flush(struct net_device *dev);
 
 /**
@@ -578,6 +587,10 @@
 	int	(*set_rx_ntuple)(struct net_device *,
 				 struct ethtool_rx_ntuple *);
 	int	(*get_rx_ntuple)(struct net_device *, u32 stringset, void *);
+	int	(*get_rxfh_indir)(struct net_device *,
+				  struct ethtool_rxfh_indir *);
+	int	(*set_rxfh_indir)(struct net_device *,
+				  const struct ethtool_rxfh_indir *);
 };
 #endif /* __KERNEL__ */
 
@@ -588,29 +601,29 @@
 #define ETHTOOL_GREGS		0x00000004 /* Get NIC registers. */
 #define ETHTOOL_GWOL		0x00000005 /* Get wake-on-lan options. */
 #define ETHTOOL_SWOL		0x00000006 /* Set wake-on-lan options. */
-#define ETHTOOL_GMSGLVL	0x00000007 /* Get driver message level */
-#define ETHTOOL_SMSGLVL	0x00000008 /* Set driver msg level. */
+#define ETHTOOL_GMSGLVL		0x00000007 /* Get driver message level */
+#define ETHTOOL_SMSGLVL		0x00000008 /* Set driver msg level. */
 #define ETHTOOL_NWAY_RST	0x00000009 /* Restart autonegotiation. */
 #define ETHTOOL_GLINK		0x0000000a /* Get link status (ethtool_value) */
-#define ETHTOOL_GEEPROM	0x0000000b /* Get EEPROM data */
-#define ETHTOOL_SEEPROM	0x0000000c /* Set EEPROM data. */
+#define ETHTOOL_GEEPROM		0x0000000b /* Get EEPROM data */
+#define ETHTOOL_SEEPROM		0x0000000c /* Set EEPROM data. */
 #define ETHTOOL_GCOALESCE	0x0000000e /* Get coalesce config */
 #define ETHTOOL_SCOALESCE	0x0000000f /* Set coalesce config. */
 #define ETHTOOL_GRINGPARAM	0x00000010 /* Get ring parameters */
 #define ETHTOOL_SRINGPARAM	0x00000011 /* Set ring parameters. */
 #define ETHTOOL_GPAUSEPARAM	0x00000012 /* Get pause parameters */
 #define ETHTOOL_SPAUSEPARAM	0x00000013 /* Set pause parameters. */
-#define ETHTOOL_GRXCSUM	0x00000014 /* Get RX hw csum enable (ethtool_value) */
-#define ETHTOOL_SRXCSUM	0x00000015 /* Set RX hw csum enable (ethtool_value) */
-#define ETHTOOL_GTXCSUM	0x00000016 /* Get TX hw csum enable (ethtool_value) */
-#define ETHTOOL_STXCSUM	0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#define ETHTOOL_GRXCSUM		0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#define ETHTOOL_SRXCSUM		0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#define ETHTOOL_GTXCSUM		0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#define ETHTOOL_STXCSUM		0x00000017 /* Set TX hw csum enable (ethtool_value) */
 #define ETHTOOL_GSG		0x00000018 /* Get scatter-gather enable
 					    * (ethtool_value) */
 #define ETHTOOL_SSG		0x00000019 /* Set scatter-gather enable
 					    * (ethtool_value). */
 #define ETHTOOL_TEST		0x0000001a /* execute NIC self-test. */
 #define ETHTOOL_GSTRINGS	0x0000001b /* get specified string set */
-#define ETHTOOL_PHYS_ID	0x0000001c /* identify the NIC */
+#define ETHTOOL_PHYS_ID		0x0000001c /* identify the NIC */
 #define ETHTOOL_GSTATS		0x0000001d /* get NIC-specific statistics */
 #define ETHTOOL_GTSO		0x0000001e /* Get TSO enable (ethtool_value) */
 #define ETHTOOL_STSO		0x0000001f /* Set TSO enable (ethtool_value) */
@@ -621,8 +634,8 @@
 #define ETHTOOL_SGSO		0x00000024 /* Set GSO enable (ethtool_value) */
 #define ETHTOOL_GFLAGS		0x00000025 /* Get flags bitmap(ethtool_value) */
 #define ETHTOOL_SFLAGS		0x00000026 /* Set flags bitmap(ethtool_value) */
-#define ETHTOOL_GPFLAGS	0x00000027 /* Get driver-private flags bitmap */
-#define ETHTOOL_SPFLAGS	0x00000028 /* Set driver-private flags bitmap */
+#define ETHTOOL_GPFLAGS		0x00000027 /* Get driver-private flags bitmap */
+#define ETHTOOL_SPFLAGS		0x00000028 /* Set driver-private flags bitmap */
 
 #define ETHTOOL_GRXFH		0x00000029 /* Get RX flow hash configuration */
 #define ETHTOOL_SRXFH		0x0000002a /* Set RX flow hash configuration */
@@ -639,6 +652,8 @@
 #define ETHTOOL_SRXNTUPLE	0x00000035 /* Add an n-tuple filter to device */
 #define ETHTOOL_GRXNTUPLE	0x00000036 /* Get n-tuple filters from device */
 #define ETHTOOL_GSSET_INFO	0x00000037 /* Get string set info */
+#define ETHTOOL_GRXFHINDIR	0x00000038 /* Get RX flow hash indir'n table */
+#define ETHTOOL_SRXFHINDIR	0x00000039 /* Set RX flow hash indir'n table */
 
 /* compatibility with older code */
 #define SPARC_ETH_GSET		ETHTOOL_GSET
@@ -647,18 +662,18 @@
 /* Indicates what features are supported by the interface. */
 #define SUPPORTED_10baseT_Half		(1 << 0)
 #define SUPPORTED_10baseT_Full		(1 << 1)
-#define SUPPORTED_100baseT_Half	(1 << 2)
-#define SUPPORTED_100baseT_Full	(1 << 3)
+#define SUPPORTED_100baseT_Half		(1 << 2)
+#define SUPPORTED_100baseT_Full		(1 << 3)
 #define SUPPORTED_1000baseT_Half	(1 << 4)
 #define SUPPORTED_1000baseT_Full	(1 << 5)
 #define SUPPORTED_Autoneg		(1 << 6)
 #define SUPPORTED_TP			(1 << 7)
 #define SUPPORTED_AUI			(1 << 8)
 #define SUPPORTED_MII			(1 << 9)
-#define SUPPORTED_FIBRE		(1 << 10)
+#define SUPPORTED_FIBRE			(1 << 10)
 #define SUPPORTED_BNC			(1 << 11)
 #define SUPPORTED_10000baseT_Full	(1 << 12)
-#define SUPPORTED_Pause		(1 << 13)
+#define SUPPORTED_Pause			(1 << 13)
 #define SUPPORTED_Asym_Pause		(1 << 14)
 #define SUPPORTED_2500baseX_Full	(1 << 15)
 #define SUPPORTED_Backplane		(1 << 16)
@@ -668,8 +683,8 @@
 #define SUPPORTED_10000baseR_FEC	(1 << 20)
 
 /* Indicates what features are advertised by the interface. */
-#define ADVERTISED_10baseT_Half	(1 << 0)
-#define ADVERTISED_10baseT_Full	(1 << 1)
+#define ADVERTISED_10baseT_Half		(1 << 0)
+#define ADVERTISED_10baseT_Full		(1 << 1)
 #define ADVERTISED_100baseT_Half	(1 << 2)
 #define ADVERTISED_100baseT_Full	(1 << 3)
 #define ADVERTISED_1000baseT_Half	(1 << 4)
@@ -708,12 +723,12 @@
 #define DUPLEX_FULL		0x01
 
 /* Which connector port. */
-#define PORT_TP		0x00
+#define PORT_TP			0x00
 #define PORT_AUI		0x01
 #define PORT_MII		0x02
 #define PORT_FIBRE		0x03
 #define PORT_BNC		0x04
-#define PORT_DA		0x05
+#define PORT_DA			0x05
 #define PORT_NONE		0xef
 #define PORT_OTHER		0xff
 
@@ -727,7 +742,7 @@
 /* Enable or disable autonegotiation.  If this is set to enable,
  * the forced link modes above are completely ignored.
  */
-#define AUTONEG_DISABLE	0x00
+#define AUTONEG_DISABLE		0x00
 #define AUTONEG_ENABLE		0x01
 
 /* Mode MDI or MDI-X */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 151f5d7..69b43db 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -91,6 +91,54 @@
 #define         BPF_TAX         0x00
 #define         BPF_TXA         0x80
 
+enum {
+	BPF_S_RET_K = 0,
+	BPF_S_RET_A,
+	BPF_S_ALU_ADD_K,
+	BPF_S_ALU_ADD_X,
+	BPF_S_ALU_SUB_K,
+	BPF_S_ALU_SUB_X,
+	BPF_S_ALU_MUL_K,
+	BPF_S_ALU_MUL_X,
+	BPF_S_ALU_DIV_X,
+	BPF_S_ALU_AND_K,
+	BPF_S_ALU_AND_X,
+	BPF_S_ALU_OR_K,
+	BPF_S_ALU_OR_X,
+	BPF_S_ALU_LSH_K,
+	BPF_S_ALU_LSH_X,
+	BPF_S_ALU_RSH_K,
+	BPF_S_ALU_RSH_X,
+	BPF_S_ALU_NEG,
+	BPF_S_LD_W_ABS,
+	BPF_S_LD_H_ABS,
+	BPF_S_LD_B_ABS,
+	BPF_S_LD_W_LEN,
+	BPF_S_LD_W_IND,
+	BPF_S_LD_H_IND,
+	BPF_S_LD_B_IND,
+	BPF_S_LD_IMM,
+	BPF_S_LDX_W_LEN,
+	BPF_S_LDX_B_MSH,
+	BPF_S_LDX_IMM,
+	BPF_S_MISC_TAX,
+	BPF_S_MISC_TXA,
+	BPF_S_ALU_DIV_K,
+	BPF_S_LD_MEM,
+	BPF_S_LDX_MEM,
+	BPF_S_ST,
+	BPF_S_STX,
+	BPF_S_JMP_JA,
+	BPF_S_JMP_JEQ_K,
+	BPF_S_JMP_JEQ_X,
+	BPF_S_JMP_JGE_K,
+	BPF_S_JMP_JGE_X,
+	BPF_S_JMP_JGT_K,
+	BPF_S_JMP_JGT_X,
+	BPF_S_JMP_JSET_K,
+	BPF_S_JMP_JSET_X,
+};
+
 #ifndef BPF_MAXINSNS
 #define BPF_MAXINSNS 4096
 #endif
diff --git a/include/linux/if.h b/include/linux/if.h
index be350e6..53558ec 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -73,6 +73,8 @@
 #define IFF_DONT_BRIDGE 0x800		/* disallow bridging this ether dev */
 #define IFF_IN_NETPOLL	0x1000		/* whether we are processing netpoll */
 #define IFF_DISABLE_NETPOLL	0x2000	/* disable netpoll at run-time */
+#define IFF_MACVLAN_PORT	0x4000	/* device used as macvlan port */
+#define IFF_BRIDGE_PORT	0x8000		/* device used as bridge port */
 
 #define IF_GET_IFACE	0x0001		/* for querying only */
 #define IF_GET_PROTO	0x0002
diff --git a/include/linux/if_bonding.h b/include/linux/if_bonding.h
index cd525fa..2c799437 100644
--- a/include/linux/if_bonding.h
+++ b/include/linux/if_bonding.h
@@ -83,6 +83,7 @@
 
 #define BOND_DEFAULT_MAX_BONDS  1   /* Default maximum number of devices to support */
 
+#define BOND_DEFAULT_TX_QUEUES 16   /* Default number of tx queues per device */
 /* hashing types */
 #define BOND_XMIT_POLICY_LAYER2		0 /* layer 2 (MAC only), default */
 #define BOND_XMIT_POLICY_LAYER34	1 /* layer 3+4 (IP ^ (TCP || UDP)) */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 938b7e8..0d241a5 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -102,8 +102,6 @@
 #include <linux/netdevice.h>
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
-extern struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
-					       struct sk_buff *skb);
 extern int (*br_should_route_hook)(struct sk_buff *skb);
 
 #endif
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index bed7a46..c831467 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -119,7 +119,7 @@
 	unsigned char	h_dest[ETH_ALEN];	/* destination eth addr	*/
 	unsigned char	h_source[ETH_ALEN];	/* source ether addr	*/
 	__be16		h_proto;		/* packet type ID field	*/
-} __attribute__((packed));
+} __packed;
 
 #ifdef __KERNEL__
 #include <linux/skbuff.h>
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h
index 5459c5c..9947c39 100644
--- a/include/linux/if_fddi.h
+++ b/include/linux/if_fddi.h
@@ -67,7 +67,7 @@
 	__u8	dsap;					/* destination service access point */
 	__u8	ssap;					/* source service access point */
 	__u8	ctrl;					/* control byte #1 */
-} __attribute__ ((packed));
+} __packed;
 
 /* Define 802.2 Type 2 header */
 struct fddi_8022_2_hdr {
@@ -75,7 +75,7 @@
 	__u8	ssap;					/* source service access point */
 	__u8	ctrl_1;					/* control byte #1 */
 	__u8	ctrl_2;					/* control byte #2 */
-} __attribute__ ((packed));
+} __packed;
 
 /* Define 802.2 SNAP header */
 #define FDDI_K_OUI_LEN	3
@@ -85,7 +85,7 @@
 	__u8	ctrl;					/* always 0x03 */
 	__u8	oui[FDDI_K_OUI_LEN];	/* organizational universal id */
 	__be16	ethertype;				/* packet type ID field */
-} __attribute__ ((packed));
+} __packed;
 
 /* Define FDDI LLC frame header */
 struct fddihdr {
@@ -98,7 +98,7 @@
 		struct fddi_8022_2_hdr		llc_8022_2;
 		struct fddi_snap_hdr		llc_snap;
 		} hdr;
-} __attribute__ ((packed));
+} __packed;
 
 #ifdef __KERNEL__
 #include <linux/netdevice.h>
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
index 80b3a10..191ee08 100644
--- a/include/linux/if_frad.h
+++ b/include/linux/if_frad.h
@@ -135,7 +135,7 @@
    __be16 PID;
 
 #define IP_NLPID pad 
-} __attribute__((packed));
+} __packed;
 
 /* see RFC 1490 for the definition of the following */
 #define FRAD_I_UI		0x03
diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h
index 8d038eb..5fe5f30 100644
--- a/include/linux/if_hippi.h
+++ b/include/linux/if_hippi.h
@@ -104,7 +104,7 @@
 	__be32		fixed;
 #endif
 	__be32		d2_size;
-} __attribute__ ((packed));
+} __packed;
 
 struct hippi_le_hdr {
 #if defined (__BIG_ENDIAN_BITFIELD)
@@ -129,7 +129,7 @@
 	__u8		daddr[HIPPI_ALEN];
 	__u16		locally_administered;
 	__u8		saddr[HIPPI_ALEN];
-} __attribute__ ((packed));
+} __packed;
 
 #define HIPPI_OUI_LEN	3
 /*
@@ -142,12 +142,12 @@
 	__u8	ctrl;			/* always 0x03 */
 	__u8	oui[HIPPI_OUI_LEN];	/* organizational universal id (zero)*/
 	__be16	ethertype;		/* packet type ID field */
-} __attribute__ ((packed));
+} __packed;
 
 struct hippi_hdr {
 	struct hippi_fp_hdr	fp;
 	struct hippi_le_hdr	le;
 	struct hippi_snap_hdr	snap;
-} __attribute__ ((packed));
+} __packed;
 
 #endif	/* _LINUX_IF_HIPPI_H */
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 85c812d..7fcad2e 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -4,7 +4,7 @@
 #include <linux/types.h>
 #include <linux/netlink.h>
 
-/* The struct should be in sync with struct net_device_stats */
+/* This struct should be in sync with struct rtnl_link_stats64 */
 struct rtnl_link_stats {
 	__u32	rx_packets;		/* total packets received	*/
 	__u32	tx_packets;		/* total packets transmitted	*/
@@ -37,6 +37,7 @@
 	__u32	tx_compressed;
 };
 
+/* The main device statistics structure */
 struct rtnl_link_stats64 {
 	__u64	rx_packets;		/* total packets received	*/
 	__u64	tx_packets;		/* total packets transmitted	*/
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 9ea047a..e24ce6e 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -6,6 +6,7 @@
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <net/netlink.h>
+#include <linux/u64_stats_sync.h>
 
 #if defined(CONFIG_MACVTAP) || defined(CONFIG_MACVTAP_MODULE)
 struct socket *macvtap_get_socket(struct file *);
@@ -27,14 +28,16 @@
  *	struct macvlan_rx_stats - MACVLAN percpu rx stats
  *	@rx_packets: number of received packets
  *	@rx_bytes: number of received bytes
- *	@multicast: number of received multicast packets
+ *	@rx_multicast: number of received multicast packets
+ *	@syncp: synchronization point for 64bit counters
  *	@rx_errors: number of errors
  */
 struct macvlan_rx_stats {
-	unsigned long rx_packets;
-	unsigned long rx_bytes;
-	unsigned long multicast;
-	unsigned long rx_errors;
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			rx_multicast;
+	struct u64_stats_sync	syncp;
+	unsigned long		rx_errors;
 };
 
 struct macvlan_dev {
@@ -56,12 +59,14 @@
 {
 	struct macvlan_rx_stats *rx_stats;
 
-	rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
+	rx_stats = this_cpu_ptr(vlan->rx_stats);
 	if (likely(success)) {
+		u64_stats_update_begin(&rx_stats->syncp);
 		rx_stats->rx_packets++;
 		rx_stats->rx_bytes += len;
 		if (multicast)
-			rx_stats->multicast++;
+			rx_stats->rx_multicast++;
+		u64_stats_update_end(&rx_stats->syncp);
 	} else {
 		rx_stats->rx_errors++;
 	}
@@ -84,8 +89,4 @@
 extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 				      struct net_device *dev);
 
-
-extern struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *,
-						    struct sk_buff *);
-
 #endif /* _LINUX_IF_MACVLAN_H */
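Note on the receive-counter change above: the writer side now wraps the u64 counters in a u64_stats_update_begin()/u64_stats_update_end() section. A reader folding these per-CPU counters would pair that with u64_stats_fetch_begin()/u64_stats_fetch_retry() from the u64_stats_sync.h header added later in this patch. The sketch below is illustrative only (it is not part of the patch) and assumes the per-cpu rx_stats pointer of struct macvlan_dev:

/* Illustrative reader, not part of the patch. */
static void macvlan_fold_rx_stats(const struct macvlan_dev *vlan,
				  struct rtnl_link_stats64 *sum)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macvlan_rx_stats *p;
		u64 packets, bytes, multicast;
		unsigned int start;

		p = per_cpu_ptr(vlan->rx_stats, cpu);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			packets   = p->rx_packets;
			bytes     = p->rx_bytes;
			multicast = p->rx_multicast;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		sum->rx_packets += packets;
		sum->rx_bytes   += bytes;
		sum->multicast  += multicast;
		/* rx_errors stays an unsigned long and is read directly */
		sum->rx_errors  += p->rx_errors;
	}
}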
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 6ac23ef..72bfa5a 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -48,6 +48,7 @@
 #define PACKET_LOSS			14
 #define PACKET_VNET_HDR			15
 #define PACKET_TX_TIMESTAMP		16
+#define PACKET_TIMESTAMP		17
 
 struct tpacket_stats {
 	unsigned int	tp_packets;
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index a6577af..1925e0c 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -59,7 +59,7 @@
        union{ 
                struct pppoe_addr       pppoe; 
        }sa_addr; 
-}__attribute__ ((packed)); 
+} __packed;
 
 /* The use of the above union isn't viable because the size of this
  * struct must stay fixed over time -- applications use sizeof(struct
@@ -70,7 +70,7 @@
 	sa_family_t     sa_family;      /* address family, AF_PPPOX */
 	unsigned int    sa_protocol;    /* protocol identifier */
 	struct pppol2tp_addr pppol2tp;
-}__attribute__ ((packed));
+} __packed;
 
 /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
  * bits. So we need a different sockaddr structure.
@@ -79,7 +79,7 @@
 	sa_family_t     sa_family;      /* address family, AF_PPPOX */
 	unsigned int    sa_protocol;    /* protocol identifier */
 	struct pppol2tpv3_addr pppol2tp;
-} __attribute__ ((packed));
+} __packed;
 
 /*********************************************************************
  *
@@ -129,7 +129,7 @@
 	__be16 sid;
 	__be16 length;
 	struct pppoe_tag tag[0];
-} __attribute__ ((packed));
+} __packed;
 
 /* Length of entire PPPoE + PPP header */
 #define PPPOE_SES_HLEN	8
diff --git a/include/linux/in.h b/include/linux/in.h
index 583c76f..41d88a4 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -85,6 +85,7 @@
 #define IP_RECVORIGDSTADDR   IP_ORIGDSTADDR
 
 #define IP_MINTTL       21
+#define IP_NODEFRAG     22
 
 /* IP_MTU_DISCOVER values */
 #define IP_PMTUDISC_DONT		0	/* Never send DF frames */
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index dfc1703..9708de2 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -19,6 +19,7 @@
  */
 #define IP_VS_SVC_F_PERSISTENT	0x0001		/* persistent port */
 #define IP_VS_SVC_F_HASHED	0x0002		/* hashed entry */
+#define IP_VS_SVC_F_ONEPACKET	0x0004		/* one-packet scheduling */
 
 /*
  *      Destination Server Flags
@@ -85,6 +86,7 @@
 #define IP_VS_CONN_F_SEQ_MASK	0x0600		/* in/out sequence mask */
 #define IP_VS_CONN_F_NO_CPORT	0x0800		/* no client port set yet */
 #define IP_VS_CONN_F_TEMPLATE	0x1000		/* template, not connection */
+#define IP_VS_CONN_F_ONE_PACKET	0x2000		/* forward only one packet */
 
 #define IP_VS_SCHEDNAME_MAXLEN	16
 #define IP_VS_IFNAME_MAXLEN	16
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 99e1ab7..940e215 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -58,7 +58,7 @@
 	/* 
 	 * TLV encoded option data follows.
 	 */
-} __attribute__ ((packed));	/* required for some archs */
+} __packed;	/* required for some archs */
 
 #define ipv6_destopt_hdr ipv6_opt_hdr
 #define ipv6_hopopt_hdr  ipv6_opt_hdr
@@ -99,7 +99,7 @@
 	__u8			type;
 	__u8			length;
 	struct in6_addr		addr;
-} __attribute__ ((__packed__));
+} __packed;
 
 /*
  *	IPv6 fixed header
diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h
index b9b5a68..b8c23f8 100644
--- a/include/linux/isdnif.h
+++ b/include/linux/isdnif.h
@@ -317,7 +317,7 @@
 	__u8 r_scantime;
 	__u8 r_id[FAXIDLEN];
 	__u8 r_code;
-} __attribute__((packed)) T30_s;
+} __packed T30_s;
 
 #define ISDN_TTY_FAX_CONN_IN	0
 #define ISDN_TTY_FAX_CONN_OUT	1
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 8317ec4..01dfc05 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -171,6 +171,11 @@
 }
 #endif
 
+struct va_format {
+	const char *fmt;
+	va_list *va;
+};
+
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(long time);
 NORET_TYPE void panic(const char * fmt, ...)
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index 78c3bed..b5e7f22 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -251,7 +251,7 @@
 struct mISDNhead {
 	unsigned int	prim;
 	unsigned int	id;
-}  __attribute__((packed));
+}  __packed;
 
 #define MISDN_HEADER_LEN	sizeof(struct mISDNhead)
 #define MAX_DATA_SIZE		2048
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index 155719d..bb58854 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -88,7 +88,7 @@
 	char handle[8];
 	__be64 from;
 	__be32 len;
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * This is the reply packet that nbd-server sends back to the client after
diff --git a/include/linux/ncp.h b/include/linux/ncp.h
index 99f0ade..3ace837 100644
--- a/include/linux/ncp.h
+++ b/include/linux/ncp.h
@@ -27,7 +27,7 @@
 	__u8 conn_high;
 	__u8 function;
 	__u8 data[0];
-} __attribute__((packed));
+} __packed;
 
 #define NCP_REPLY                (0x3333)
 #define NCP_WATCHDOG		 (0x3E3E)
@@ -42,7 +42,7 @@
 	__u8 completion_code;
 	__u8 connection_state;
 	__u8 data[0];
-} __attribute__((packed));
+} __packed;
 
 #define NCP_VOLNAME_LEN (16)
 #define NCP_NUMBER_OF_VOLUMES (256)
@@ -158,7 +158,7 @@
 #ifdef __KERNEL__
 	struct nw_nfs_info nfs;
 #endif
-} __attribute__((packed));
+} __packed;
 
 /* modify mask - use with MODIFY_DOS_INFO structure */
 #define DM_ATTRIBUTES		  (cpu_to_le32(0x02))
@@ -190,12 +190,12 @@
 	__u16 inheritanceGrantMask;
 	__u16 inheritanceRevokeMask;
 	__u32 maximumSpace;
-} __attribute__((packed));
+} __packed;
 
 struct nw_search_sequence {
 	__u8 volNumber;
 	__u32 dirBase;
 	__u32 sequence;
-} __attribute__((packed));
+} __packed;
 
 #endif				/* _LINUX_NCP_H */
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
index 5ec9ca6..8da05bc 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/include/linux/ncp_fs_sb.h
@@ -104,13 +104,13 @@
 
 		unsigned int state;		/* STREAM only: receiver state */
 		struct {
-			__u32 magic __attribute__((packed));
-			__u32 len __attribute__((packed));
-			__u16 type __attribute__((packed));
-			__u16 p1 __attribute__((packed));
-			__u16 p2 __attribute__((packed));
-			__u16 p3 __attribute__((packed));
-			__u16 type2 __attribute__((packed));
+			__u32 magic __packed;
+			__u32 len __packed;
+			__u16 type __packed;
+			__u16 p1 __packed;
+			__u16 p2 __packed;
+			__u16 p3 __packed;
+			__u16 type2 __packed;
 		} buf;				/* STREAM only: temporary buffer */
 		unsigned char* ptr;		/* STREAM only: pointer to data */
 		size_t len;			/* STREAM only: length of data to receive */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b21e405..8018f6b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -159,45 +159,49 @@
 #define MAX_HEADER (LL_MAX_HEADER + 48)
 #endif
 
-#endif  /*  __KERNEL__  */
-
 /*
- *	Network device statistics. Akin to the 2.0 ether stats but
- *	with byte counters.
+ *	Old network device statistics. Fields are native words
+ *	(unsigned long) so they can be read and written atomically.
+ *	Each field is padded to 64 bits for compatibility with
+ *	rtnl_link_stats64.
  */
 
+#if BITS_PER_LONG == 64
+#define NET_DEVICE_STATS_DEFINE(name)	unsigned long name
+#elif defined(__LITTLE_ENDIAN)
+#define NET_DEVICE_STATS_DEFINE(name)	unsigned long name, pad_ ## name
+#else
+#define NET_DEVICE_STATS_DEFINE(name)	unsigned long pad_ ## name, name
+#endif
+
 struct net_device_stats {
-	unsigned long	rx_packets;		/* total packets received	*/
-	unsigned long	tx_packets;		/* total packets transmitted	*/
-	unsigned long	rx_bytes;		/* total bytes received 	*/
-	unsigned long	tx_bytes;		/* total bytes transmitted	*/
-	unsigned long	rx_errors;		/* bad packets received		*/
-	unsigned long	tx_errors;		/* packet transmit problems	*/
-	unsigned long	rx_dropped;		/* no space in linux buffers	*/
-	unsigned long	tx_dropped;		/* no space available in linux	*/
-	unsigned long	multicast;		/* multicast packets received	*/
-	unsigned long	collisions;
-
-	/* detailed rx_errors: */
-	unsigned long	rx_length_errors;
-	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
-	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
-	unsigned long	rx_frame_errors;	/* recv'd frame alignment error */
-	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
-	unsigned long	rx_missed_errors;	/* receiver missed packet	*/
-
-	/* detailed tx_errors */
-	unsigned long	tx_aborted_errors;
-	unsigned long	tx_carrier_errors;
-	unsigned long	tx_fifo_errors;
-	unsigned long	tx_heartbeat_errors;
-	unsigned long	tx_window_errors;
-	
-	/* for cslip etc */
-	unsigned long	rx_compressed;
-	unsigned long	tx_compressed;
+	NET_DEVICE_STATS_DEFINE(rx_packets);
+	NET_DEVICE_STATS_DEFINE(tx_packets);
+	NET_DEVICE_STATS_DEFINE(rx_bytes);
+	NET_DEVICE_STATS_DEFINE(tx_bytes);
+	NET_DEVICE_STATS_DEFINE(rx_errors);
+	NET_DEVICE_STATS_DEFINE(tx_errors);
+	NET_DEVICE_STATS_DEFINE(rx_dropped);
+	NET_DEVICE_STATS_DEFINE(tx_dropped);
+	NET_DEVICE_STATS_DEFINE(multicast);
+	NET_DEVICE_STATS_DEFINE(collisions);
+	NET_DEVICE_STATS_DEFINE(rx_length_errors);
+	NET_DEVICE_STATS_DEFINE(rx_over_errors);
+	NET_DEVICE_STATS_DEFINE(rx_crc_errors);
+	NET_DEVICE_STATS_DEFINE(rx_frame_errors);
+	NET_DEVICE_STATS_DEFINE(rx_fifo_errors);
+	NET_DEVICE_STATS_DEFINE(rx_missed_errors);
+	NET_DEVICE_STATS_DEFINE(tx_aborted_errors);
+	NET_DEVICE_STATS_DEFINE(tx_carrier_errors);
+	NET_DEVICE_STATS_DEFINE(tx_fifo_errors);
+	NET_DEVICE_STATS_DEFINE(tx_heartbeat_errors);
+	NET_DEVICE_STATS_DEFINE(tx_window_errors);
+	NET_DEVICE_STATS_DEFINE(rx_compressed);
+	NET_DEVICE_STATS_DEFINE(tx_compressed);
 };
 
+#endif  /*  __KERNEL__  */
+
 
 /* Media selection options. */
 enum {
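As an illustration of the padding scheme above (not part of the patch): on a 32-bit little-endian build the macro expands each counter into a value word followed by a padding word, so the structure keeps the same 64-bit slot layout as struct rtnl_link_stats64.

/* Hypothetical expansion of the first members when BITS_PER_LONG == 32
 * and __LITTLE_ENDIAN is defined:
 */
struct net_device_stats_expanded {
	unsigned long rx_packets, pad_rx_packets;	/* value word, padding word */
	unsigned long tx_packets, pad_tx_packets;
	/* ... the remaining counters expand the same way ... */
};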
@@ -381,6 +385,8 @@
 };
 typedef enum gro_result gro_result_t;
 
+typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb);
+
 extern void __napi_schedule(struct napi_struct *n);
 
 static inline int napi_disable_pending(struct napi_struct *n)
@@ -660,10 +666,20 @@
 *	Callback used when the transmitter has not made any progress
  *	for dev->watchdog ticks.
  *
+ * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
+ *                      struct rtnl_link_stats64 *storage);
  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  *	Called when a user wants to get the network device usage
- *	statistics. If not defined, the counters in dev->stats will
- *	be used.
+ *	statistics. Drivers must do one of the following:
+ *	1. Define @ndo_get_stats64 to update an rtnl_link_stats64 structure
+ *	   (which should normally be dev->stats64) and return a pointer to
+ *	   it. The structure must not be changed asynchronously.
+ *	2. Define @ndo_get_stats to update a net_device_stats structure
+ *	   (which should normally be dev->stats) and return a pointer to
+ *	   it. The structure may be changed asynchronously only if each
+ *	   field is written atomically.
+ *	3. Update dev->stats asynchronously and atomically, and define
+ *	   neither operation.
  *
  * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
 *	If the device supports VLAN receive acceleration
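A minimal sketch of option 1 above for a hypothetical driver (the foo_* names and hardware counters are illustrative, not from the patch); this variant fills the caller-supplied structure and returns it, though a driver may equally update dev->stats64 as the comment suggests:

static struct rtnl_link_stats64 *foo_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *storage)
{
	struct foo_priv *priv = netdev_priv(dev);	/* assumed private data */

	storage->rx_packets = priv->hw_rx_packets;	/* assumed HW counters */
	storage->tx_packets = priv->hw_tx_packets;
	storage->rx_bytes   = priv->hw_rx_bytes;
	storage->tx_bytes   = priv->hw_tx_bytes;
	return storage;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_stats64	= foo_get_stats64,
	/* ... other callbacks ... */
};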
@@ -718,6 +734,8 @@
 						   struct neigh_parms *);
 	void			(*ndo_tx_timeout) (struct net_device *dev);
 
+	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
+						     struct rtnl_link_stats64 *storage);
 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 
 	void			(*ndo_vlan_rx_register)(struct net_device *dev,
@@ -728,6 +746,8 @@
 						        unsigned short vid);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	void                    (*ndo_poll_controller)(struct net_device *dev);
+	int			(*ndo_netpoll_setup)(struct net_device *dev,
+						     struct netpoll_info *info);
 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 #endif
 	int			(*ndo_set_vf_mac)(struct net_device *dev,
@@ -847,7 +867,8 @@
 #define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
 
 	/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
+				 NETIF_F_TSO6 | NETIF_F_UFO)
 
 
 #define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
@@ -867,7 +888,10 @@
 	int			ifindex;
 	int			iflink;
 
-	struct net_device_stats	stats;
+	union {
+		struct rtnl_link_stats64 stats64;
+		struct net_device_stats stats;
+	};
 
 #ifdef CONFIG_WIRELESS_EXT
 	/* List of functions to handle Wireless Extensions (instead of ioctl).
@@ -957,6 +981,8 @@
 #endif
 
 	struct netdev_queue	rx_queue;
+	rx_handler_func_t	*rx_handler;
+	void			*rx_handler_data;
 
 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
 
@@ -1024,10 +1050,6 @@
 	/* mid-layer private */
 	void			*ml_priv;
 
-	/* bridge stuff */
-	struct net_bridge_port	*br_port;
-	/* macvlan */
-	struct macvlan_port	*macvlan_port;
 	/* GARP */
 	struct garp_port	*garp_port;
 
@@ -1087,11 +1109,7 @@
 static inline
 struct net *dev_net(const struct net_device *dev)
 {
-#ifdef CONFIG_NET_NS
-	return dev->nd_net;
-#else
-	return &init_net;
-#endif
+	return read_pnet(&dev->nd_net);
 }
 
 static inline
@@ -1272,8 +1290,8 @@
 extern void		dev_remove_pack(struct packet_type *pt);
 extern void		__dev_remove_pack(struct packet_type *pt);
 
-extern struct net_device	*dev_get_by_flags(struct net *net, unsigned short flags,
-						  unsigned short mask);
+extern struct net_device	*dev_get_by_flags_rcu(struct net *net, unsigned short flags,
+						      unsigned short mask);
 extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
 extern struct net_device	*dev_get_by_name_rcu(struct net *net, const char *name);
 extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
@@ -1696,6 +1714,11 @@
 	napi->skb = NULL;
 }
 
+extern int netdev_rx_handler_register(struct net_device *dev,
+				      rx_handler_func_t *rx_handler,
+				      void *rx_handler_data);
+extern void netdev_rx_handler_unregister(struct net_device *dev);
+
 extern void		netif_nit_deliver(struct sk_buff *skb);
 extern int		dev_valid_name(const char *name);
 extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
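The rx_handler hooks declared above replace the hard-coded bridge and macvlan frame hooks removed elsewhere in this patch. A hedged sketch of a user of this API (foo_* names are illustrative only): registration happens under rtnl_lock(), and the handler returns NULL once it has consumed the skb, or the skb to let normal processing continue.

/* Illustrative private state passed via rx_handler_data. */
struct foo_port {
	struct net_device	*dev;
	unsigned long		frames_seen;
};

static struct sk_buff *foo_handle_frame(struct sk_buff *skb)
{
	struct foo_port *port = rcu_dereference(skb->dev->rx_handler_data);

	port->frames_seen++;		/* inspect or redirect the frame here */
	return skb;			/* NULL would mean "consumed" */
}

static int foo_attach(struct net_device *slave_dev, struct foo_port *port)
{
	/* caller holds rtnl_lock() */
	return netdev_rx_handler_register(slave_dev, foo_handle_frame, port);
}

static void foo_detach(struct net_device *slave_dev)
{
	netdev_rx_handler_unregister(slave_dev);	/* also under rtnl_lock() */
}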
@@ -1775,6 +1798,8 @@
 
 extern void netif_carrier_off(struct net_device *dev);
 
+extern void netif_notify_peers(struct net_device *dev);
+
 /**
  *	netif_dormant_on - mark device as dormant.
  *	@dev: network device
@@ -2119,8 +2144,10 @@
 /* Load a device via the kmod */
 extern void		dev_load(struct net *net, const char *name);
 extern void		dev_mcast_init(void);
-extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
-extern void		dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
+extern const struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+						     struct rtnl_link_stats64 *storage);
+extern void		dev_txq_stats_fold(const struct net_device *dev,
+					   struct net_device_stats *stats);
 
 extern int		netdev_max_backlog;
 extern int		netdev_tstamp_prequeue;
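Since dev_get_stats() now takes caller-provided storage and returns 64-bit counters, a converted caller looks roughly like the sketch below (illustrative only; the returned pointer may reference either the supplied storage or an internal structure):

static void foo_print_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	pr_info("%s: rx_packets=%llu tx_packets=%llu\n",
		dev->name, stats->rx_packets, stats->tx_packets);
}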
@@ -2230,25 +2257,23 @@
 	return dev->name;
 }
 
-#define netdev_printk(level, netdev, format, args...)		\
-	dev_printk(level, (netdev)->dev.parent,			\
-		   "%s: " format,				\
-		   netdev_name(netdev), ##args)
-
-#define netdev_emerg(dev, format, args...)			\
-	netdev_printk(KERN_EMERG, dev, format, ##args)
-#define netdev_alert(dev, format, args...)			\
-	netdev_printk(KERN_ALERT, dev, format, ##args)
-#define netdev_crit(dev, format, args...)			\
-	netdev_printk(KERN_CRIT, dev, format, ##args)
-#define netdev_err(dev, format, args...)			\
-	netdev_printk(KERN_ERR, dev, format, ##args)
-#define netdev_warn(dev, format, args...)			\
-	netdev_printk(KERN_WARNING, dev, format, ##args)
-#define netdev_notice(dev, format, args...)			\
-	netdev_printk(KERN_NOTICE, dev, format, ##args)
-#define netdev_info(dev, format, args...)			\
-	netdev_printk(KERN_INFO, dev, format, ##args)
+extern int netdev_printk(const char *level, const struct net_device *dev,
+			 const char *format, ...)
+	__attribute__ ((format (printf, 3, 4)));
+extern int netdev_emerg(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_alert(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_crit(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_err(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_warn(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_notice(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int netdev_info(const struct net_device *dev, const char *format, ...)
+	__attribute__ ((format (printf, 2, 3)));
 
 #if defined(DEBUG)
 #define netdev_dbg(__dev, format, args...)			\
@@ -2296,20 +2321,26 @@
 		netdev_printk(level, (dev), fmt, ##args);	\
 } while (0)
 
+#define netif_level(level, priv, type, dev, fmt, args...)	\
+do {								\
+	if (netif_msg_##type(priv))				\
+		netdev_##level(dev, fmt, ##args);		\
+} while (0)
+
 #define netif_emerg(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_EMERG, dev, fmt, ##args)
+	netif_level(emerg, priv, type, dev, fmt, ##args)
 #define netif_alert(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_ALERT, dev, fmt, ##args)
+	netif_level(alert, priv, type, dev, fmt, ##args)
 #define netif_crit(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_CRIT, dev, fmt, ##args)
+	netif_level(crit, priv, type, dev, fmt, ##args)
 #define netif_err(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_ERR, dev, fmt, ##args)
+	netif_level(err, priv, type, dev, fmt, ##args)
 #define netif_warn(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_WARNING, dev, fmt, ##args)
+	netif_level(warn, priv, type, dev, fmt, ##args)
 #define netif_notice(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_NOTICE, dev, fmt, ##args)
+	netif_level(notice, priv, type, dev, fmt, ##args)
 #define netif_info(priv, type, dev, fmt, args...)		\
-	netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args)
+	netif_level(info, priv, type, dev, fmt, ##args)
 
 #if defined(DEBUG)
 #define netif_dbg(priv, type, dev, format, args...)		\
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 48767cd..bb103f4 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -8,6 +8,7 @@
 header-y += xt_CONNSECMARK.h
 header-y += xt_CT.h
 header-y += xt_DSCP.h
+header-y += xt_IDLETIMER.h
 header-y += xt_LED.h
 header-y += xt_MARK.h
 header-y += xt_NFLOG.h
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 14e6d32..1afd18c 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -76,6 +76,10 @@
 	/* Conntrack is a template */
 	IPS_TEMPLATE_BIT = 11,
 	IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT),
+
+	/* Conntrack is a fake untracked entry */
+	IPS_UNTRACKED_BIT = 12,
+	IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
 };
 
 /* Connection tracking event types */
diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h
index d3bab7a..1d0b84a 100644
--- a/include/linux/netfilter/nfnetlink_log.h
+++ b/include/linux/netfilter/nfnetlink_log.h
@@ -89,6 +89,7 @@
 #define NFULNL_COPY_NONE	0x00
 #define NFULNL_COPY_META	0x01
 #define NFULNL_COPY_PACKET	0x02
+#define NFULNL_COPY_DISABLED	0x03
 
 #define NFULNL_CFG_F_SEQ	0x0001
 #define NFULNL_CFG_F_SEQ_GLOBAL	0x0002
diff --git a/include/linux/netfilter/xt_IDLETIMER.h b/include/linux/netfilter/xt_IDLETIMER.h
new file mode 100644
index 0000000..3e1aa1b
--- /dev/null
+++ b/include/linux/netfilter/xt_IDLETIMER.h
@@ -0,0 +1,45 @@
+/*
+ * linux/include/linux/netfilter/xt_IDLETIMER.h
+ *
+ * Header file for Xtables timer target module.
+ *
+ * Copyright (C) 2004, 2010 Nokia Corporation
+ * Written by Timo Teras <ext-timo.teras@nokia.com>
+ *
+ * Converted to x_tables and forward-ported to 2.6.34
+ * by Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef _XT_IDLETIMER_H
+#define _XT_IDLETIMER_H
+
+#include <linux/types.h>
+
+#define MAX_IDLETIMER_LABEL_SIZE 28
+
+struct idletimer_tg_info {
+	__u32 timeout;
+
+	char label[MAX_IDLETIMER_LABEL_SIZE];
+
+	/* for kernel module internal use only */
+	struct idletimer_tg *timer __attribute((aligned(8)));
+};
+
+#endif
diff --git a/include/linux/netfilter_ipv4/ipt_LOG.h b/include/linux/netfilter_ipv4/ipt_LOG.h
index 90fa652..dcdbadf 100644
--- a/include/linux/netfilter_ipv4/ipt_LOG.h
+++ b/include/linux/netfilter_ipv4/ipt_LOG.h
@@ -7,7 +7,8 @@
 #define IPT_LOG_IPOPT		0x04	/* Log IP options */
 #define IPT_LOG_UID		0x08	/* Log UID owning local socket */
 #define IPT_LOG_NFLOG		0x10	/* Unsupported, don't reuse */
-#define IPT_LOG_MASK		0x1f
+#define IPT_LOG_MACDECODE	0x20	/* Decode MAC header */
+#define IPT_LOG_MASK		0x2f
 
 struct ipt_log_info {
 	unsigned char level;
diff --git a/include/linux/netfilter_ipv6/ip6t_LOG.h b/include/linux/netfilter_ipv6/ip6t_LOG.h
index 0d0119b..9dd5579 100644
--- a/include/linux/netfilter_ipv6/ip6t_LOG.h
+++ b/include/linux/netfilter_ipv6/ip6t_LOG.h
@@ -7,7 +7,8 @@
 #define IP6T_LOG_IPOPT		0x04	/* Log IP options */
 #define IP6T_LOG_UID		0x08	/* Log UID owning local socket */
 #define IP6T_LOG_NFLOG		0x10	/* Unsupported, don't use */
-#define IP6T_LOG_MASK		0x1f
+#define IP6T_LOG_MACDECODE	0x20	/* Decode MAC header */
+#define IP6T_LOG_MASK		0x2f
 
 struct ip6t_log_info {
 	unsigned char level;
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e9e2312..413742c 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -46,9 +46,11 @@
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
 int netpoll_parse_options(struct netpoll *np, char *opt);
+int __netpoll_setup(struct netpoll *np);
 int netpoll_setup(struct netpoll *np);
 int netpoll_trap(void);
 void netpoll_set_trap(int trap);
+void __netpoll_cleanup(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb);
 void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
@@ -57,12 +59,15 @@
 #ifdef CONFIG_NETPOLL
 static inline bool netpoll_rx(struct sk_buff *skb)
 {
-	struct netpoll_info *npinfo = skb->dev->npinfo;
+	struct netpoll_info *npinfo;
 	unsigned long flags;
 	bool ret = false;
 
+	rcu_read_lock_bh();
+	npinfo = rcu_dereference_bh(skb->dev->npinfo);
+
 	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
-		return false;
+		goto out;
 
 	spin_lock_irqsave(&npinfo->rx_lock, flags);
 	/* check rx_flags again with the lock held */
@@ -70,12 +75,14 @@
 		ret = true;
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
+out:
+	rcu_read_unlock_bh();
 	return ret;
 }
 
 static inline int netpoll_rx_on(struct sk_buff *skb)
 {
-	struct netpoll_info *npinfo = skb->dev->npinfo;
+	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
 
 	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
 }
@@ -91,7 +98,6 @@
 {
 	struct net_device *dev = napi->dev;
 
-	rcu_read_lock(); /* deal with race on ->npinfo */
 	if (dev && dev->npinfo) {
 		spin_lock(&napi->poll_lock);
 		napi->poll_owner = smp_processor_id();
@@ -108,7 +114,11 @@
 		napi->poll_owner = -1;
 		spin_unlock(&napi->poll_lock);
 	}
-	rcu_read_unlock();
+}
+
+static inline int netpoll_tx_running(struct net_device *dev)
+{
+	return irqs_disabled();
 }
 
 #else
@@ -134,6 +144,10 @@
 static inline void netpoll_netdev_init(struct net_device *dev)
 {
 }
+static inline int netpoll_tx_running(struct net_device *dev)
+{
+	return 0;
+}
 #endif
 
 #endif
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index b7c77f9..2c87016 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -132,7 +132,7 @@
  * 	%NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and
  * 	%NL80211_ATTR_REG_RULE_POWER_MAX_EIRP.
  * @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain
- * 	to the the specified ISO/IEC 3166-1 alpha2 country code. The core will
+ * 	to the specified ISO/IEC 3166-1 alpha2 country code. The core will
  * 	store this as a valid request and then query userspace for it.
  *
  * @NL80211_CMD_GET_MESH_PARAMS: Get mesh networking properties for the
@@ -725,6 +725,12 @@
  * @NL80211_ATTR_AP_ISOLATE: (AP mode) Do not forward traffic between stations
  *	connected to this BSS.
  *
+ * @NL80211_ATTR_WIPHY_TX_POWER_SETTING: Transmit power setting type. See
+ *      &enum nl80211_tx_power_setting for possible values.
+ * @NL80211_ATTR_WIPHY_TX_POWER_LEVEL: Transmit power level in signed mBm units.
+ *      This is used in association with @NL80211_ATTR_WIPHY_TX_POWER_SETTING
+ *      for non-automatic settings.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -882,6 +888,9 @@
 
 	NL80211_ATTR_AP_ISOLATE,
 
+	NL80211_ATTR_WIPHY_TX_POWER_SETTING,
+	NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -1659,4 +1668,17 @@
 	NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
 };
 
+
+/**
+ * enum nl80211_tx_power_setting - TX power adjustment
+ * @NL80211_TX_POWER_AUTOMATIC: automatically determine transmit power
+ * @NL80211_TX_POWER_LIMITED: limit TX power by the mBm parameter
+ * @NL80211_TX_POWER_FIXED: fix TX power to the mBm parameter
+ */
+enum nl80211_tx_power_setting {
+	NL80211_TX_POWER_AUTOMATIC,
+	NL80211_TX_POWER_LIMITED,
+	NL80211_TX_POWER_FIXED,
+};
+
 #endif /* __LINUX_NL80211_H */
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 540703b..b2f1a4d 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -210,6 +210,7 @@
 #define NETDEV_POST_INIT	0x0010
 #define NETDEV_UNREGISTER_BATCH 0x0011
 #define NETDEV_BONDING_DESLAVE  0x0012
+#define NETDEV_NOTIFY_PEERS	0x0013
 
 #define SYS_DOWN	0x0001	/* Notify of system down */
 #define SYS_RESTART	SYS_DOWN
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index e5126cf..24426c3 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -56,7 +56,7 @@
 	__be16	pn_length;
 	__u8	pn_robj;
 	__u8	pn_sobj;
-} __attribute__((packed));
+} __packed;
 
 /* Common Phonet payload header */
 struct phonetmsg {
@@ -98,7 +98,7 @@
 	__u8 spn_dev;
 	__u8 spn_resource;
 	__u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
-} __attribute__ ((packed));
+} __packed;
 
 /* Well known address */
 #define PN_DEV_PC	0x10
diff --git a/include/linux/rds.h b/include/linux/rds.h
index cab4994..24bce3d 100644
--- a/include/linux/rds.h
+++ b/include/linux/rds.h
@@ -100,7 +100,7 @@
 struct rds_info_counter {
 	u_int8_t	name[32];
 	u_int64_t	value;
-} __attribute__((packed));
+} __packed;
 
 #define RDS_INFO_CONNECTION_FLAG_SENDING	0x01
 #define RDS_INFO_CONNECTION_FLAG_CONNECTING	0x02
@@ -115,7 +115,7 @@
 	__be32		faddr;
 	u_int8_t	transport[TRANSNAMSIZ];		/* null term ascii */
 	u_int8_t	flags;
-} __attribute__((packed));
+} __packed;
 
 struct rds_info_flow {
 	__be32		laddr;
@@ -123,7 +123,7 @@
 	u_int32_t	bytes;
 	__be16		lport;
 	__be16		fport;
-} __attribute__((packed));
+} __packed;
 
 #define RDS_INFO_MESSAGE_FLAG_ACK               0x01
 #define RDS_INFO_MESSAGE_FLAG_FAST_ACK          0x02
@@ -136,7 +136,7 @@
 	__be16		lport;
 	__be16		fport;
 	u_int8_t	flags;
-} __attribute__((packed));
+} __packed;
 
 struct rds_info_socket {
 	u_int32_t	sndbuf;
@@ -146,7 +146,7 @@
 	__be16		connected_port;
 	u_int32_t	rcvbuf;
 	u_int64_t	inum;
-} __attribute__((packed));
+} __packed;
 
 struct rds_info_tcp_socket {
 	__be32          local_addr;
@@ -158,7 +158,7 @@
 	u_int32_t       last_sent_nxt;
 	u_int32_t       last_expected_una;
 	u_int32_t       last_seen_una;
-} __attribute__((packed));
+} __packed;
 
 #define RDS_IB_GID_LEN	16
 struct rds_info_rdma_connection {
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index c20d3ce..c11a287 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -61,7 +61,7 @@
 	__be16 dest;
 	__be32 vtag;
 	__le32 checksum;
-} __attribute__((packed)) sctp_sctphdr_t;
+} __packed sctp_sctphdr_t;
 
 #ifdef __KERNEL__
 #include <linux/skbuff.h>
@@ -77,7 +77,7 @@
 	__u8 type;
 	__u8 flags;
 	__be16 length;
-} __attribute__((packed)) sctp_chunkhdr_t;
+} __packed sctp_chunkhdr_t;
 
 
 /* Section 3.2.  Chunk Type Values.
@@ -167,7 +167,7 @@
 typedef struct sctp_paramhdr {
 	__be16 type;
 	__be16 length;
-} __attribute__((packed)) sctp_paramhdr_t;
+} __packed sctp_paramhdr_t;
 
 typedef enum {
 
@@ -228,12 +228,12 @@
 	__be16 ssn;
 	__be32 ppid;
 	__u8  payload[0];
-} __attribute__((packed)) sctp_datahdr_t;
+} __packed sctp_datahdr_t;
 
 typedef struct sctp_data_chunk {
         sctp_chunkhdr_t chunk_hdr;
         sctp_datahdr_t  data_hdr;
-} __attribute__((packed)) sctp_data_chunk_t;
+} __packed sctp_data_chunk_t;
 
 /* DATA Chuck Specific Flags */
 enum {
@@ -259,78 +259,78 @@
 	__be16 num_inbound_streams;
 	__be32 initial_tsn;
 	__u8  params[0];
-} __attribute__((packed)) sctp_inithdr_t;
+} __packed sctp_inithdr_t;
 
 typedef struct sctp_init_chunk {
 	sctp_chunkhdr_t chunk_hdr;
 	sctp_inithdr_t init_hdr;
-} __attribute__((packed)) sctp_init_chunk_t;
+} __packed sctp_init_chunk_t;
 
 
 /* Section 3.3.2.1. IPv4 Address Parameter (5) */
 typedef struct sctp_ipv4addr_param {
 	sctp_paramhdr_t param_hdr;
 	struct in_addr  addr;
-} __attribute__((packed)) sctp_ipv4addr_param_t;
+} __packed sctp_ipv4addr_param_t;
 
 /* Section 3.3.2.1. IPv6 Address Parameter (6) */
 typedef struct sctp_ipv6addr_param {
 	sctp_paramhdr_t param_hdr;
 	struct in6_addr addr;
-} __attribute__((packed)) sctp_ipv6addr_param_t;
+} __packed sctp_ipv6addr_param_t;
 
 /* Section 3.3.2.1 Cookie Preservative (9) */
 typedef struct sctp_cookie_preserve_param {
 	sctp_paramhdr_t param_hdr;
 	__be32          lifespan_increment;
-} __attribute__((packed)) sctp_cookie_preserve_param_t;
+} __packed sctp_cookie_preserve_param_t;
 
 /* Section 3.3.2.1 Host Name Address (11) */
 typedef struct sctp_hostname_param {
 	sctp_paramhdr_t param_hdr;
 	uint8_t hostname[0];
-} __attribute__((packed)) sctp_hostname_param_t;
+} __packed sctp_hostname_param_t;
 
 /* Section 3.3.2.1 Supported Address Types (12) */
 typedef struct sctp_supported_addrs_param {
 	sctp_paramhdr_t param_hdr;
 	__be16 types[0];
-} __attribute__((packed)) sctp_supported_addrs_param_t;
+} __packed sctp_supported_addrs_param_t;
 
 /* Appendix A. ECN Capable (32768) */
 typedef struct sctp_ecn_capable_param {
 	sctp_paramhdr_t param_hdr;
-} __attribute__((packed)) sctp_ecn_capable_param_t;
+} __packed sctp_ecn_capable_param_t;
 
 /* ADDIP Section 3.2.6 Adaptation Layer Indication */
 typedef struct sctp_adaptation_ind_param {
 	struct sctp_paramhdr param_hdr;
 	__be32 adaptation_ind;
-} __attribute__((packed)) sctp_adaptation_ind_param_t;
+} __packed sctp_adaptation_ind_param_t;
 
 /* ADDIP Section 4.2.7 Supported Extensions Parameter */
 typedef struct sctp_supported_ext_param {
 	struct sctp_paramhdr param_hdr;
 	__u8 chunks[0];
-} __attribute__((packed)) sctp_supported_ext_param_t;
+} __packed sctp_supported_ext_param_t;
 
 /* AUTH Section 3.1 Random */
 typedef struct sctp_random_param {
 	sctp_paramhdr_t param_hdr;
 	__u8 random_val[0];
-} __attribute__((packed)) sctp_random_param_t;
+} __packed sctp_random_param_t;
 
 /* AUTH Section 3.2 Chunk List */
 typedef struct sctp_chunks_param {
 	sctp_paramhdr_t param_hdr;
 	__u8 chunks[0];
-} __attribute__((packed)) sctp_chunks_param_t;
+} __packed sctp_chunks_param_t;
 
 /* AUTH Section 3.3 HMAC Algorithm */
 typedef struct sctp_hmac_algo_param {
 	sctp_paramhdr_t param_hdr;
 	__be16 hmac_ids[0];
-} __attribute__((packed)) sctp_hmac_algo_param_t;
+} __packed sctp_hmac_algo_param_t;
 
 /* RFC 2960.  Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
  *   The INIT ACK chunk is used to acknowledge the initiation of an SCTP
@@ -342,13 +342,13 @@
 typedef struct sctp_cookie_param {
 	sctp_paramhdr_t p;
 	__u8 body[0];
-} __attribute__((packed)) sctp_cookie_param_t;
+} __packed sctp_cookie_param_t;
 
 /* Section 3.3.3.1 Unrecognized Parameters (8) */
 typedef struct sctp_unrecognized_param {
 	sctp_paramhdr_t param_hdr;
 	sctp_paramhdr_t unrecognized;
-} __attribute__((packed)) sctp_unrecognized_param_t;
+} __packed sctp_unrecognized_param_t;
 
 
 
@@ -363,7 +363,7 @@
 typedef struct sctp_gap_ack_block {
 	__be16 start;
 	__be16 end;
-} __attribute__((packed)) sctp_gap_ack_block_t;
+} __packed sctp_gap_ack_block_t;
 
 typedef __be32 sctp_dup_tsn_t;
 
@@ -378,12 +378,12 @@
 	__be16 num_gap_ack_blocks;
 	__be16 num_dup_tsns;
 	sctp_sack_variable_t variable[0];
-} __attribute__((packed)) sctp_sackhdr_t;
+} __packed sctp_sackhdr_t;
 
 typedef struct sctp_sack_chunk {
 	sctp_chunkhdr_t chunk_hdr;
 	sctp_sackhdr_t sack_hdr;
-} __attribute__((packed)) sctp_sack_chunk_t;
+} __packed sctp_sack_chunk_t;
 
 
 /* RFC 2960.  Section 3.3.5 Heartbeat Request (HEARTBEAT) (4):
@@ -395,12 +395,12 @@
 
 typedef struct sctp_heartbeathdr {
 	sctp_paramhdr_t info;
-} __attribute__((packed)) sctp_heartbeathdr_t;
+} __packed sctp_heartbeathdr_t;
 
 typedef struct sctp_heartbeat_chunk {
 	sctp_chunkhdr_t chunk_hdr;
 	sctp_heartbeathdr_t hb_hdr;
-} __attribute__((packed)) sctp_heartbeat_chunk_t;
+} __packed sctp_heartbeat_chunk_t;
 
 
 /* For the abort and shutdown ACK we must carry the init tag in the
@@ -409,7 +409,7 @@
  */
 typedef struct sctp_abort_chunk {
         sctp_chunkhdr_t uh;
-} __attribute__((packed)) sctp_abort_chunk_t;
+} __packed sctp_abort_chunk_t;
 
 
 /* For the graceful shutdown we must carry the tag (in common header)
@@ -417,12 +417,12 @@
  */
 typedef struct sctp_shutdownhdr {
 	__be32 cum_tsn_ack;
-} __attribute__((packed)) sctp_shutdownhdr_t;
+} __packed sctp_shutdownhdr_t;
 
 struct sctp_shutdown_chunk_t {
         sctp_chunkhdr_t    chunk_hdr;
         sctp_shutdownhdr_t shutdown_hdr;
-} __attribute__ ((packed));
+} __packed;
 
 /* RFC 2960.  Section 3.3.10 Operation Error (ERROR) (9) */
 
@@ -430,12 +430,12 @@
 	__be16 cause;
 	__be16 length;
 	__u8  variable[0];
-} __attribute__((packed)) sctp_errhdr_t;
+} __packed sctp_errhdr_t;
 
 typedef struct sctp_operr_chunk {
         sctp_chunkhdr_t chunk_hdr;
 	sctp_errhdr_t   err_hdr;
-} __attribute__((packed)) sctp_operr_chunk_t;
+} __packed sctp_operr_chunk_t;
 
 /* RFC 2960 3.3.10 - Operation Error
  *
@@ -525,7 +525,7 @@
 typedef struct sctp_ecne_chunk {
 	sctp_chunkhdr_t chunk_hdr;
 	sctp_ecnehdr_t ence_hdr;
-} __attribute__((packed)) sctp_ecne_chunk_t;
+} __packed sctp_ecne_chunk_t;
 
 /* RFC 2960.  Appendix A.  Explicit Congestion Notification.
  *   Congestion Window Reduced (CWR) (13)
@@ -537,7 +537,7 @@
 typedef struct sctp_cwr_chunk {
 	sctp_chunkhdr_t chunk_hdr;
 	sctp_cwrhdr_t cwr_hdr;
-} __attribute__((packed)) sctp_cwr_chunk_t;
+} __packed sctp_cwr_chunk_t;
 
 /* PR-SCTP
  * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
@@ -588,17 +588,17 @@
 struct sctp_fwdtsn_skip {
 	__be16 stream;
 	__be16 ssn;
-} __attribute__((packed));
+} __packed;
 
 struct sctp_fwdtsn_hdr {
 	__be32 new_cum_tsn;
 	struct sctp_fwdtsn_skip skip[0];
-} __attribute((packed));
+} __packed;
 
 struct sctp_fwdtsn_chunk {
 	struct sctp_chunkhdr chunk_hdr;
 	struct sctp_fwdtsn_hdr fwdtsn_hdr;
-} __attribute((packed));
+} __packed;
 
 
 /* ADDIP
@@ -636,17 +636,17 @@
 typedef struct sctp_addip_param {
 	sctp_paramhdr_t	param_hdr;
 	__be32		crr_id;
-} __attribute__((packed)) sctp_addip_param_t;
+} __packed sctp_addip_param_t;
 
 typedef struct sctp_addiphdr {
 	__be32	serial;
 	__u8	params[0];
-} __attribute__((packed)) sctp_addiphdr_t;
+} __packed sctp_addiphdr_t;
 
 typedef struct sctp_addip_chunk {
 	sctp_chunkhdr_t chunk_hdr;
 	sctp_addiphdr_t addip_hdr;
-} __attribute__((packed)) sctp_addip_chunk_t;
+} __packed sctp_addip_chunk_t;
 
 /* AUTH
  * Section 4.1  Authentication Chunk (AUTH)
@@ -701,11 +701,11 @@
 	__be16 shkey_id;
 	__be16 hmac_id;
 	__u8   hmac[0];
-} __attribute__((packed)) sctp_authhdr_t;
+} __packed sctp_authhdr_t;
 
 typedef struct sctp_auth_chunk {
 	sctp_chunkhdr_t chunk_hdr;
 	sctp_authhdr_t auth_hdr;
-} __attribute__((packed)) sctp_auth_chunk_t;
+} __packed sctp_auth_chunk_t;
 
 #endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f89e7fd..ac74ee0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1414,12 +1414,14 @@
  *
  * Various parts of the networking layer expect at least 32 bytes of
  * headroom, you should not reduce this.
- * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
- * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
+ *
+ * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
+ * to reduce the average number of cache lines per packet.
+ * get_rps_cpus(), for example, only accesses one 64-byte-aligned block:
  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
 #ifndef NET_SKB_PAD
-#define NET_SKB_PAD	64
+#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
 #endif
 
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
@@ -2132,7 +2134,8 @@
 	/* LRO sets gso_size but not gso_type, whereas if GSO is really
 	 * wanted then gso_type will be set. */
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
-	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
+	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
+	    unlikely(shinfo->gso_type == 0)) {
 		__skb_warn_lro_forwarding(skb);
 		return true;
 	}
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 5279771..ebb0c80 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -229,6 +229,7 @@
 	LINUX_MIB_TCPBACKLOGDROP,
 	LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */
 	LINUX_MIB_TCPDEFERACCEPTDROP,
+	LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
 	__LINUX_MIB_MAX
 };
 
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 032a19e..a2fada9 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -24,6 +24,9 @@
 #include <linux/types.h>		/* pid_t			*/
 #include <linux/compiler.h>		/* __user			*/
 
+struct pid;
+struct cred;
+
 #define __sockaddr_check_size(size)	\
 	BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
 
@@ -309,6 +312,8 @@
 #define IPX_TYPE	1
 
 #ifdef __KERNEL__
+extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
+
 extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
 extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
 			       int offset, int len);
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index a2608bf..623b704 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -167,7 +167,7 @@
 	 * is an optimization. */
 	const struct ssb_bus_ops *ops;
 
-	struct device *dev;
+	struct device *dev, *dma_dev;
 
 	struct ssb_bus *bus;
 	struct ssb_device_id id;
@@ -470,14 +470,6 @@
 #define SSB_DMA_TRANSLATION_MASK	0xC0000000
 #define SSB_DMA_TRANSLATION_SHIFT	30
 
-extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask);
-
-extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t gfp_flags);
-extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
-				    void *vaddr, dma_addr_t dma_handle,
-				    gfp_t gfp_flags);
-
 static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
 {
 #ifdef CONFIG_SSB_DEBUG
@@ -486,155 +478,6 @@
 #endif /* DEBUG */
 }
 
-static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
-{
-	switch (dev->bus->bustype) {
-	case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-		return pci_dma_mapping_error(dev->bus->host_pci, addr);
-#endif
-		break;
-	case SSB_BUSTYPE_SSB:
-		return dma_mapping_error(dev->dev, addr);
-	default:
-		break;
-	}
-	__ssb_dma_not_implemented(dev);
-	return -ENOSYS;
-}
-
-static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
-					    size_t size, enum dma_data_direction dir)
-{
-	switch (dev->bus->bustype) {
-	case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-		return pci_map_single(dev->bus->host_pci, p, size, dir);
-#endif
-		break;
-	case SSB_BUSTYPE_SSB:
-		return dma_map_single(dev->dev, p, size, dir);
-	default:
-		break;
-	}
-	__ssb_dma_not_implemented(dev);
-	return 0;
-}
-
-static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr,
-					size_t size, enum dma_data_direction dir)
-{
-	switch (dev->bus->bustype) {
-	case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-		pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
-		return;
-#endif
-		break;
-	case SSB_BUSTYPE_SSB:
-		dma_unmap_single(dev->dev, dma_addr, size, dir);
-		return;
-	default:
-		break;
-	}
-	__ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
-					       dma_addr_t dma_addr,
-					       size_t size,
-					       enum dma_data_direction dir)
-{
-	switch (dev->bus->bustype) {
-	case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-		pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
-					    size, dir);
-		return;
-#endif
-		break;
-	case SSB_BUSTYPE_SSB:
-		dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
-		return;
-	default:
-		break;
-	}
-	__ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
-						  dma_addr_t dma_addr,
-						  size_t size,
-						  enum dma_data_direction dir)
-{
-	switch (dev->bus->bustype) {
-	case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-		pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
-					       size, dir);
-		return;
-#endif
-		break;
-	case SSB_BUSTYPE_SSB:
-		dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
-		return;
-	default:
-		break;
-	}
-	__ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
-						     dma_addr_t dma_addr,
-						     unsigned long offset,
-						     size_t size,
-						     enum dma_data_direction dir)
-{
-	switch (dev->bus->bustype) {
-	case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-		/* Just sync everything. That's all the PCI API can do. */
-		pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
-					    offset + size, dir);
-		return;
-#endif
-		break;
-	case SSB_BUSTYPE_SSB:
-		dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset,
-					      size, dir);
-		return;
-	default:
-		break;
-	}
-	__ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
-							dma_addr_t dma_addr,
-							unsigned long offset,
-							size_t size,
-							enum dma_data_direction dir)
-{
-	switch (dev->bus->bustype) {
-	case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-		/* Just sync everything. That's all the PCI API can do. */
-		pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
-					       offset + size, dir);
-		return;
-#endif
-		break;
-	case SSB_BUSTYPE_SSB:
-		dma_sync_single_range_for_device(dev->dev, dma_addr, offset,
-						 size, dir);
-		return;
-	default:
-		break;
-	}
-	__ssb_dma_not_implemented(dev);
-}
-
-
 #ifdef CONFIG_SSB_PCIHOST
 /* PCI-host wrapper driver */
 extern int ssb_pcihost_register(struct pci_driver *driver);
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
new file mode 100644
index 0000000..fa261a0
--- /dev/null
+++ b/include/linux/u64_stats_sync.h
@@ -0,0 +1,140 @@
+#ifndef _LINUX_U64_STATS_SYNC_H
+#define _LINUX_U64_STATS_SYNC_H
+
+/*
+ * To properly implement 64-bit network statistics on 32-bit and 64-bit hosts,
+ * we provide a synchronization point that is a no-op on 64-bit or UP kernels.
+ *
+ * Key points :
+ * 1) Use a seqcount on SMP 32bits, with low overhead.
+ * 2) Whole thing is a noop on 64bit arches or UP kernels.
+ * 3) Write side must ensure mutual exclusion or one seqcount update could
+ *    be lost, thus blocking readers forever.
+ *    If this synchronization point is not a mutex, but a spinlock,
+ *    spin_lock_bh() or local_bh_disable():
+ * 3.1) Write side should not sleep.
+ * 3.2) Write side should not allow preemption.
+ * 3.3) If applicable, interrupts should be disabled.
+ *
+ * 4) If a reader fetches several counters, there is no guarantee that the set of
+ *    values is consistent (remember point 1: this is a no-op on 64-bit arches anyway).
+ *
+ * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
+ *    pure reads. But if they have to fetch many values, it is better to not allow
+ *    preemption/interruption, to avoid many retries.
+ *
+ * 6) If a counter might be written from an interrupt, readers should block interrupts.
+ *    (On UP there is no seqcount_t protection, so a reader allowing interrupts could
+ *     read partial values.)
+ *
+ * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
+ *    u64_stats_fetch_retry_bh() helpers
+ *
+ * Usage :
+ *
+ * A stats producer (writer) should use the following template, provided it already
+ * has exclusive access to the counters (a lock is already taken, or per cpu
+ * data is used in a non-preemptible context):
+ *
+ *   spin_lock_bh(...) or other synchronization to get exclusive access
+ *   ...
+ *   u64_stats_update_begin(&stats->syncp);
+ *   stats->bytes64 += len; // non atomic operation
+ *   stats->packets64++;    // non atomic operation
+ *   u64_stats_update_end(&stats->syncp);
+ *
+ * A consumer (reader) should use the following template to get a consistent
+ * snapshot of each variable (with no consistency guarantee across several of them):
+ *
+ * u64 tbytes, tpackets;
+ * unsigned int start;
+ *
+ * do {
+ *         start = u64_stats_fetch_begin(&stats->syncp);
+ *         tbytes = stats->bytes64; // non atomic operation
+ *         tpackets = stats->packets64; // non atomic operation
+ * } while (u64_stats_fetch_retry(&stats->syncp, start));
+ *
+ *
+ * Example of use in drivers/net/loopback.c, using per_cpu containers,
+ * in BH disabled context.
+ */
+#include <linux/seqlock.h>
+
+struct u64_stats_sync {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	seqcount_t	seq;
+#endif
+};
+
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	write_seqcount_begin(&syncp->seq);
+#endif
+}
+
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	write_seqcount_end(&syncp->seq);
+#endif
+}
+
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+	preempt_disable();
+#endif
+	return 0;
+#endif
+}
+
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+					 unsigned int start)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+	preempt_enable();
+#endif
+	return false;
+#endif
+}
+
+/*
+ * If softirq handlers can update the u64 counters, readers can use the following helpers:
+ * - SMP 32bit arches use seqcount protection, irq safe.
+ * - UP 32bit must disable BH.
+ * - 64bit arches have no problem atomically reading u64 values, irq safe.
+ */
+static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+	local_bh_disable();
+#endif
+	return 0;
+#endif
+}
+
+static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+					 unsigned int start)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+	local_bh_enable();
+#endif
+	return false;
+#endif
+}
+
+#endif /* _LINUX_U64_STATS_SYNC_H */
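To make the templates in the header comment concrete, here is a self-contained sketch (hypothetical foo_stats type, not part of the patch) showing one writer with exclusive access and one reader retrying on a concurrent update:

struct foo_stats {
	u64			bytes64;
	u64			packets64;
	struct u64_stats_sync	syncp;
};

/* Writer: caller already has exclusive access (lock held or per-cpu data in BH). */
static void foo_account(struct foo_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->bytes64 += len;
	stats->packets64++;
	u64_stats_update_end(&stats->syncp);
}

/* Reader: loops until it observes a consistent value of bytes64. */
static u64 foo_read_bytes(const struct foo_stats *stats)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes64;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	return bytes;
}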
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index cc4f453..8178156 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -36,6 +36,9 @@
 		kref_put(&ns->kref, free_user_ns);
 }
 
+uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid);
+gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid);
+
 #else
 
 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -52,6 +55,17 @@
 {
 }
 
+static inline uid_t user_ns_map_uid(struct user_namespace *to,
+	const struct cred *cred, uid_t uid)
+{
+	return uid;
+}
+static inline gid_t user_ns_map_gid(struct user_namespace *to,
+	const struct cred *cred, gid_t gid)
+{
+	return gid;
+}
+
 #endif
 
 #endif /* _LINUX_USER_H */
diff --git a/include/linux/wlp.h b/include/linux/wlp.h
index ac95ce6..c76fe23 100644
--- a/include/linux/wlp.h
+++ b/include/linux/wlp.h
@@ -300,7 +300,7 @@
 	__le16 cycle_param;
 	__le16 acw_anchor_addr;
 	u8 wssid_hash_list[];
-} __attribute__((packed));
+} __packed;
 
 static inline int wlp_ie_hash_length(struct wlp_ie *ie)
 {
@@ -324,7 +324,7 @@
  */
 struct wlp_nonce {
 	u8 data[16];
-} __attribute__((packed));
+} __packed;
 
 /**
  * WLP UUID
@@ -336,7 +336,7 @@
  */
 struct wlp_uuid {
 	u8 data[16];
-} __attribute__((packed));
+} __packed;
 
 
 /**
@@ -348,7 +348,7 @@
 	u8 OUI[3];
 	u8 OUIsubdiv;
 	__le16 subID;
-} __attribute__((packed));
+} __packed;
 
 /**
  * WLP frame header
@@ -357,7 +357,7 @@
 struct wlp_frame_hdr {
 	__le16 mux_hdr;			/* WLP_PROTOCOL_ID */
 	enum wlp_frame_type type:8;
-} __attribute__((packed));
+} __packed;
 
 /**
  * WLP attribute field header
@@ -368,7 +368,7 @@
 struct wlp_attr_hdr {
 	__le16 type;
 	__le16 length;
-} __attribute__((packed));
+} __packed;
 
 /**
  * Device information commonly used together
@@ -401,13 +401,13 @@
 struct wlp_attr_##name {						\
 	struct wlp_attr_hdr hdr;					\
 	type name;							\
-} __attribute__((packed));
+} __packed;
 
 #define wlp_attr_array(type, name)					\
 struct wlp_attr_##name {						\
 	struct wlp_attr_hdr hdr;					\
 	type name[];							\
-} __attribute__((packed));
+} __packed;
 
 /**
  * WLP association attribute fields
@@ -483,7 +483,7 @@
 	struct wlp_attr_accept_enrl accept;
 	struct wlp_attr_wss_sec_status sec_stat;
 	struct wlp_attr_wss_bcast bcast;
-} __attribute__((packed));
+} __packed;
 
 /* WLP WSS Information */
 wlp_attr_array(struct wlp_wss_info, wss_info)
@@ -520,7 +520,7 @@
 struct wlp_frame_std_abbrv_hdr {
 	struct wlp_frame_hdr hdr;
 	u8 tag;
-} __attribute__((packed));
+} __packed;
 
 /**
  * WLP association frames
@@ -533,7 +533,7 @@
 	struct wlp_attr_version version;
 	struct wlp_attr_msg_type msg_type;
 	u8 attr[];
-} __attribute__((packed));
+} __packed;
 
 /* Ethernet to dev address mapping */
 struct wlp_eda {
diff --git a/include/net/act_api.h b/include/net/act_api.h
index c05fd71..bab385f 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -20,6 +20,7 @@
 	struct gnet_stats_queue		tcfc_qstats;
 	struct gnet_stats_rate_est	tcfc_rate_est;
 	spinlock_t			tcfc_lock;
+	struct rcu_head			tcfc_rcu;
 };
 #define tcf_next	common.tcfc_next
 #define tcf_index	common.tcfc_index
@@ -32,6 +33,7 @@
 #define tcf_qstats	common.tcfc_qstats
 #define tcf_rate_est	common.tcfc_rate_est
 #define tcf_lock	common.tcfc_lock
+#define tcf_rcu		common.tcfc_rcu
 
 struct tcf_police {
 	struct tcf_common	common;
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 20725e2..90c9e28 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -23,7 +23,8 @@
 };
 
 struct unix_skb_parms {
-	struct ucred		creds;		/* Skb credentials	*/
+	struct pid		*pid;		/* Skb credentials	*/
+	const struct cred	*cred;
 	struct scm_fp_list	*fp;		/* Passed files		*/
 #ifdef CONFIG_SECURITY_NETWORK
 	u32			secid;		/* Security ID		*/
@@ -31,7 +32,6 @@
 };
 
 #define UNIXCB(skb) 	(*(struct unix_skb_parms *)&((skb)->cb))
-#define UNIXCREDS(skb)	(&UNIXCB((skb)).creds)
 #define UNIXSID(skb)	(&UNIXCB((skb)).secid)
 
 #define unix_state_lock(s)	spin_lock(&unix_sk(s)->lock)
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 318ab94..6da573c 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -50,6 +50,9 @@
  * @client_layer:	User implementation of client layer. This layer
  *			MUST have receive and control callback functions
  *			implemented.
+ * @ifindex:		Link layer interface index used for this connection.
+ * @headroom:		Head room needed by CAIF protocol.
+ * @tailroom:		Tail room needed by CAIF protocol.
  *
  * This function connects a CAIF channel. The Client must implement
  * the struct cflayer. This layer represents the Client layer and holds
@@ -59,8 +62,9 @@
  * E.g. CAIF Socket will call this function for each socket it connects
  * and have one client_layer instance for each socket.
  */
-int caif_connect_client(struct caif_connect_request *config,
-			   struct cflayer *client_layer);
+int caif_connect_client(struct caif_connect_request *conn_req,
+			struct cflayer *client_layer, int *ifindex,
+			int *headroom, int *tailroom);
 
 /**
  * caif_disconnect_client - Disconnects a client from the CAIF stack.
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 25c472f..c8b07a9 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -15,14 +15,8 @@
 struct caif_payload_info;
 struct caif_packet_funcs;
 
-#define CAIF_MAX_FRAMESIZE 4096
-#define CAIF_MAX_PAYLOAD_SIZE (4096 - 64)
-#define CAIF_NEEDED_HEADROOM (10)
-#define CAIF_NEEDED_TAILROOM (2)
 
 #define CAIF_LAYER_NAME_SZ 16
-#define CAIF_SUCCESS	1
-#define CAIF_FAILURE	0
 
 /**
  * caif_assert() - Assert function for CAIF.
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
new file mode 100644
index 0000000..ce4570d
--- /dev/null
+++ b/include/net/caif/caif_spi.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author:	Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_SPI_H_
+#define CAIF_SPI_H_
+
+#include <net/caif/caif_device.h>
+
+#define SPI_CMD_WR			0x00
+#define SPI_CMD_RD			0x01
+#define SPI_CMD_EOT			0x02
+#define SPI_CMD_IND			0x04
+
+#define SPI_DMA_BUF_LEN			8192
+
+#define WL_SZ				2	/* 16 bits. */
+#define SPI_CMD_SZ			4	/* 32 bits. */
+#define SPI_IND_SZ			4	/* 32 bits. */
+
+#define SPI_XFER			0
+#define SPI_SS_ON			1
+#define SPI_SS_OFF			2
+#define SPI_TERMINATE			3
+
+/* Minimum time between different levels is 50 microseconds. */
+#define MIN_TRANSITION_TIME_USEC	50
+
+/* Defines for calculating duration of SPI transfers for a particular
+ * number of bytes.
+ */
+#define SPI_MASTER_CLK_MHZ		13
+#define SPI_XFER_TIME_USEC(bytes, clk) (((bytes) * 8) / clk)
+
+/* Normally this should be aligned on the modem in order to benefit from full
+ * duplex transfers. However, a size of 8188 provokes errors when running with
+ * the modem. These errors occur when packet sizes approach 4 kB of data.
+ */
+#define CAIF_MAX_SPI_FRAME 4092
+
+/* Maximum number of uplink CAIF frames that can reside in the same SPI frame.
+ * This number should correspond with the modem setting. The application side
+ * CAIF accepts any number of embedded downlink CAIF frames.
+ */
+#define CAIF_MAX_SPI_PKTS 9
+
+/* Decides if SPI buffers should be prefilled with 0xFF pattern for easier
+ * debugging. Both TX and RX buffers will be filled before the transfer.
+ */
+#define CFSPI_DBG_PREFILL		0
+
+/* Structure describing a SPI transfer. */
+struct cfspi_xfer {
+	u16 tx_dma_len;
+	u16 rx_dma_len;
+	void *va_tx;
+	dma_addr_t pa_tx;
+	void *va_rx;
+	dma_addr_t pa_rx;
+};
+
+/* Structure implemented by the SPI interface. */
+struct cfspi_ifc {
+	void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
+	void (*xfer_done_cb) (struct cfspi_ifc *ifc);
+	void *priv;
+};
+
+/* Structure implemented by SPI clients. */
+struct cfspi_dev {
+	int (*init_xfer) (struct cfspi_xfer *xfer, struct cfspi_dev *dev);
+	void (*sig_xfer) (bool xfer, struct cfspi_dev *dev);
+	struct cfspi_ifc *ifc;
+	char *name;
+	u32 clk_mhz;
+	void *priv;
+};
+
+/* Enumeration describing the CAIF SPI state. */
+enum cfspi_state {
+	CFSPI_STATE_WAITING = 0,
+	CFSPI_STATE_AWAKE,
+	CFSPI_STATE_FETCH_PKT,
+	CFSPI_STATE_GET_NEXT,
+	CFSPI_STATE_INIT_XFER,
+	CFSPI_STATE_WAIT_ACTIVE,
+	CFSPI_STATE_SIG_ACTIVE,
+	CFSPI_STATE_WAIT_XFER_DONE,
+	CFSPI_STATE_XFER_DONE,
+	CFSPI_STATE_WAIT_INACTIVE,
+	CFSPI_STATE_SIG_INACTIVE,
+	CFSPI_STATE_DELIVER_PKT,
+	CFSPI_STATE_MAX,
+};
+
+/* Structure implemented by SPI physical interfaces. */
+struct cfspi {
+	struct caif_dev_common cfdev;
+	struct net_device *ndev;
+	struct platform_device *pdev;
+	struct sk_buff_head qhead;
+	struct sk_buff_head chead;
+	u16 cmd;
+	u16 tx_cpck_len;
+	u16 tx_npck_len;
+	u16 rx_cpck_len;
+	u16 rx_npck_len;
+	struct cfspi_ifc ifc;
+	struct cfspi_xfer xfer;
+	struct cfspi_dev *dev;
+	unsigned long state;
+	struct work_struct work;
+	struct workqueue_struct *wq;
+	struct list_head list;
+	int    flow_off_sent;
+	u32 qd_low_mark;
+	u32 qd_high_mark;
+	struct completion comp;
+	wait_queue_head_t wait;
+	spinlock_t lock;
+	bool flow_stop;
+#ifdef CONFIG_DEBUG_FS
+	enum cfspi_state dbg_state;
+	u16 pcmd;
+	u16 tx_ppck_len;
+	u16 rx_ppck_len;
+	struct dentry *dbgfs_dir;
+	struct dentry *dbgfs_state;
+	struct dentry *dbgfs_frame;
+#endif				/* CONFIG_DEBUG_FS */
+};
+
+extern int spi_frm_align;
+extern int spi_up_head_align;
+extern int spi_up_tail_align;
+extern int spi_down_head_align;
+extern int spi_down_tail_align;
+extern struct platform_driver cfspi_spi_driver;
+
+void cfspi_dbg_state(struct cfspi *cfspi, int state);
+int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
+int cfspi_xmitlen(struct cfspi *cfspi);
+int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
+int cfspi_spi_remove(struct platform_device *pdev);
+int cfspi_spi_probe(struct platform_device *pdev);
+int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
+int cfspi_xmitlen(struct cfspi *cfspi);
+int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
+void cfspi_xfer(struct work_struct *work);
+
+#endif				/* CAIF_SPI_H_ */
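
The split between struct cfspi_dev (filled in by the low-level SPI driver) and
struct cfspi_ifc (owned by the CAIF SPI layer) is easiest to see in code. The
sketch below is illustrative only: the my_spi_* helpers are hypothetical, and
only the callback wiring reflects this header.

    #include <net/caif/caif_spi.h>

    /* Sketch of a low-level SPI driver plugging into this interface.
     * my_spi_queue_dma() and my_spi_set_mrdy() are hypothetical helpers.
     */
    static int my_init_xfer(struct cfspi_xfer *xfer, struct cfspi_dev *dev)
    {
    	/* Program DMA with the buffers prepared by the CAIF SPI layer. */
    	return my_spi_queue_dma(dev->priv, xfer->va_tx, xfer->pa_tx,
    				xfer->tx_dma_len, xfer->va_rx, xfer->pa_rx,
    				xfer->rx_dma_len);
    }

    static void my_sig_xfer(bool xfer, struct cfspi_dev *dev)
    {
    	/* Raise or drop the "ready for transfer" signal towards the modem. */
    	my_spi_set_mrdy(dev->priv, xfer);
    }

    static struct cfspi_dev my_cfspi_dev = {
    	.init_xfer = my_init_xfer,
    	.sig_xfer  = my_sig_xfer,
    	.name      = "my-cfspi",
    	.clk_mhz   = SPI_MASTER_CLK_MHZ,
    };

    /* The CAIF SPI layer sets my_cfspi_dev.ifc; the low-level driver calls
     * ifc->ss_cb() when the slave-select line toggles and ifc->xfer_done_cb()
     * when the DMA transfer completes.
     */
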
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 9fc2fc2..bd646fa 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -7,6 +7,7 @@
 #ifndef CFCNFG_H_
 #define CFCNFG_H_
 #include <linux/spinlock.h>
+#include <linux/netdevice.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfctrl.h>
 
@@ -73,8 +74,8 @@
 
 void
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
-		     void *dev, struct cflayer *phy_layer, u16 *phyid,
-		     enum cfcnfg_phy_preference pref,
+		     struct net_device *dev, struct cflayer *phy_layer,
+		     u16 *phyid, enum cfcnfg_phy_preference pref,
 		     bool fcs, bool stx);
 
 /**
@@ -114,11 +115,18 @@
  * @param:		Link setup parameters.
  * @adap_layer:		Specify the adaptation layer; the receive and
  *			flow-control functions MUST be set in the structure.
- *
+ * @ifindex:		Link layer interface index used for this connection.
+ * @proto_head:		Protocol head-space needed by CAIF protocol,
+ *			excluding link layer.
+ * @proto_tail:		Protocol tail-space needed by CAIF protocol,
+ *			excluding link layer.
  */
 int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
 			    struct cfctrl_link_param *param,
-			    struct cflayer *adap_layer);
+			    struct cflayer *adap_layer,
+			    int *ifindex,
+			    int *proto_head,
+			    int *proto_tail);
 
 /**
  * cfcnfg_get_phyid() - Get physical ID, given type.
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
index 2dc9eb1..b1fa87e 100644
--- a/include/net/caif/cfsrvl.h
+++ b/include/net/caif/cfsrvl.h
@@ -16,6 +16,8 @@
 	bool open;
 	bool phy_flow_on;
 	bool modem_flow_on;
+	bool supports_flowctrl;
+	void (*release)(struct kref *);
 	struct dev_info dev_info;
 	struct kref ref;
 };
@@ -25,13 +27,15 @@
 struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info);
 struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info);
 struct cflayer *cfvidl_create(u8 linkid, struct dev_info *dev_info);
-struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info,
+				int mtu_size);
 struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info);
 bool cfsrvl_phyid_match(struct cflayer *layer, int phyid);
 void cfservl_destroy(struct cflayer *layer);
 void cfsrvl_init(struct cfsrvl *service,
-		 u8 channel_id,
-		 struct dev_info *dev_info);
+			u8 channel_id,
+			struct dev_info *dev_info,
+			bool supports_flowctrl);
 bool cfsrvl_ready(struct cfsrvl *service, int *err);
 u8 cfsrvl_getphyid(struct cflayer *layer);
 
@@ -50,7 +54,10 @@
 	if (layr == NULL)
 		return;
 	s = container_of(layr, struct cfsrvl, layer);
-	kref_put(&s->ref, cfsrvl_release);
+
+	WARN_ON(!s->release);
+	if (s->release)
+		kref_put(&s->ref, s->release);
 }
 
 #endif				/* CFSRVL_H_ */
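
With the release callback moved into struct cfsrvl, each service layer supplies
its own destructor and cfsrvl_put() simply forwards to it via kref_put(). A
minimal sketch of a service constructor under that scheme; my_service and
my_release are illustrative, and the embedded struct cflayer member is assumed
to be named layer as in the existing service layers.

    #include <linux/slab.h>
    #include <net/caif/cfsrvl.h>

    struct my_service {
    	struct cfsrvl serv;	/* must be first so container_of() works */
    	/* ... private state ... */
    };

    static void my_release(struct kref *kref)
    {
    	struct cfsrvl *s = container_of(kref, struct cfsrvl, ref);

    	kfree(container_of(s, struct my_service, serv));
    }

    static struct cflayer *my_service_create(u8 channel_id,
    					 struct dev_info *dev_info)
    {
    	struct my_service *svc = kzalloc(sizeof(*svc), GFP_ATOMIC);

    	if (!svc)
    		return NULL;
    	/* Last argument: this service supports flow control. */
    	cfsrvl_init(&svc->serv, channel_id, dev_info, true);
    	svc->serv.release = my_release;	/* consumed by cfsrvl_put() */
    	return &svc->serv.layer;
    }
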
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index b44a2e5..168fe53 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -37,6 +37,7 @@
  *
  * @IEEE80211_BAND_2GHZ: 2.4GHz ISM band
  * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7)
+ * @IEEE80211_NUM_BANDS: number of defined bands
  */
 enum ieee80211_band {
 	IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
@@ -89,7 +90,7 @@
  * @max_power: maximum transmission power (in dBm)
  * @beacon_found: helper to regulatory code to indicate when a beacon
  *	has been found on this channel. Use regulatory_hint_found_beacon()
- *	to enable this, this is is useful only on 5 GHz band.
+ *	to enable this, this is useful only on 5 GHz band.
  * @orig_mag: internal use
  * @orig_mpwr: internal use
  */
@@ -188,6 +189,7 @@
  *	in this band. Must be sorted to give a valid "supported
  *	rates" IE, i.e. CCK rates first, then OFDM.
  * @n_bitrates: Number of bitrates in @bitrates
+ * @ht_cap: HT capabilities in this band
  */
 struct ieee80211_supported_band {
 	struct ieee80211_channel *channels;
@@ -225,6 +227,7 @@
  * @seq: sequence counter (IV/PN) for TKIP and CCMP keys, only used
  *	with the get_key() callback, must be in little endian,
  *	length given by @seq_len.
+ * @seq_len: length of @seq.
  */
 struct key_params {
 	u8 *key;
@@ -237,6 +240,8 @@
 /**
  * enum survey_info_flags - survey information flags
  *
+ * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
+ *
  * Used by the driver to indicate which info in &struct survey_info
  * it has filled in during the get_survey().
  */
@@ -247,13 +252,13 @@
 /**
  * struct survey_info - channel survey response
  *
- * Used by dump_survey() to report back per-channel survey information.
- *
  * @channel: the channel this survey record reports, mandatory
  * @filled: bitflag of flags from &enum survey_info_flags
  * @noise: channel noise in dBm. This and all following fields are
  *     optional
  *
+ * Used by dump_survey() to report back per-channel survey information.
+ *
  * This structure can later be expanded with things like
  * channel duty cycle etc.
  */
@@ -288,7 +293,7 @@
  *
  * @PLINK_ACTION_INVALID: action 0 is reserved
  * @PLINK_ACTION_OPEN: start mesh peer link establishment
- * @PLINK_ACTION_BLOCL: block traffic from this mesh peer
+ * @PLINK_ACTION_BLOCK: block traffic from this mesh peer
  */
 enum plink_actions {
 	PLINK_ACTION_INVALID,
@@ -311,6 +316,8 @@
  *	(bitmask of BIT(NL80211_STA_FLAG_...))
  * @listen_interval: listen interval or -1 for no change
  * @aid: AID or zero for no change
+ * @plink_action: plink action to take
+ * @ht_capa: HT capabilities of station
  */
 struct station_parameters {
 	u8 *supported_rates;
@@ -448,13 +455,13 @@
  * Used by the driver to indicate which info in &struct mpath_info it has filled
  * in during get_station() or dump_station().
  *
- * MPATH_INFO_FRAME_QLEN: @frame_qlen filled
- * MPATH_INFO_SN: @sn filled
- * MPATH_INFO_METRIC: @metric filled
- * MPATH_INFO_EXPTIME: @exptime filled
- * MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled
- * MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled
- * MPATH_INFO_FLAGS: @flags filled
+ * @MPATH_INFO_FRAME_QLEN: @frame_qlen filled
+ * @MPATH_INFO_SN: @sn filled
+ * @MPATH_INFO_METRIC: @metric filled
+ * @MPATH_INFO_EXPTIME: @exptime filled
+ * @MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled
+ * @MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled
+ * @MPATH_INFO_FLAGS: @flags filled
  */
 enum mpath_info_flags {
 	MPATH_INFO_FRAME_QLEN		= BIT(0),
@@ -587,6 +594,7 @@
  * @ie_len: length of ie in octets
  * @wiphy: the wiphy this was for
  * @dev: the interface
+ * @aborted: (internal) scan request was notified as aborted
  */
 struct cfg80211_scan_request {
 	struct cfg80211_ssid *ssids;
@@ -623,6 +631,7 @@
  * This structure describes a BSS (which may also be a mesh network)
  * for use in scan results and similar.
  *
+ * @channel: channel this BSS is on
  * @bssid: BSSID of the BSS
  * @tsf: timestamp of last received update
  * @beacon_interval: the beacon interval as from the frame
@@ -801,6 +810,7 @@
  * @beacon_interval: beacon interval to use
  * @privacy: this is a protected network, keys will be configured
  *	after joining
+ * @basic_rates: bitmap of basic rates to use when creating the IBSS
  */
 struct cfg80211_ibss_params {
 	u8 *ssid;
@@ -809,6 +819,7 @@
 	u8 *ie;
 	u8 ssid_len, ie_len;
 	u16 beacon_interval;
+	u32 basic_rates;
 	bool channel_fixed;
 	bool privacy;
 };
@@ -826,8 +837,8 @@
  * @ssid: SSID
  * @ssid_len: Length of ssid in octets
  * @auth_type: Authentication type (algorithm)
- * @assoc_ie: IEs for association request
- * @assoc_ie_len: Length of assoc_ie in octets
+ * @ie: IEs for association request
+ * @ie_len: Length of assoc_ie in octets
  * @privacy: indicates whether privacy-enabled APs should be used
  * @crypto: crypto settings
  * @key_len: length of WEP key for shared key authentication
@@ -850,10 +861,11 @@
 
 /**
  * enum wiphy_params_flags - set_wiphy_params bitfield values
- * WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed
- * WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed
- * WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed
- * WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed
+ * @WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed
+ * @WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed
+ * @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed
+ * @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed
+ * @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed
  */
 enum wiphy_params_flags {
 	WIPHY_PARAM_RETRY_SHORT		= 1 << 0,
@@ -863,19 +875,6 @@
 	WIPHY_PARAM_COVERAGE_CLASS	= 1 << 4,
 };
 
-/**
- * enum tx_power_setting - TX power adjustment
- *
- * @TX_POWER_AUTOMATIC: the dbm parameter is ignored
- * @TX_POWER_LIMITED: limit TX power by the dbm parameter
- * @TX_POWER_FIXED: fix TX power to the dbm parameter
- */
-enum tx_power_setting {
-	TX_POWER_AUTOMATIC,
-	TX_POWER_LIMITED,
-	TX_POWER_FIXED,
-};
-
 /*
  * cfg80211_bitrate_mask - masks for bitrate control
  */
@@ -949,10 +948,16 @@
  * @del_beacon: Remove beacon configuration and stop sending the beacon.
  *
  * @add_station: Add a new station.
- *
  * @del_station: Remove a station; @mac may be NULL to remove all stations.
- *
  * @change_station: Modify a given station.
+ * @get_station: get station information for the station identified by @mac
+ * @dump_station: dump station callback -- resume dump at index @idx
+ *
+ * @add_mpath: add a fixed mesh path
+ * @del_mpath: delete a given mesh path
+ * @change_mpath: change a given mesh path
+ * @get_mpath: get a mesh path for the given parameters
+ * @dump_mpath: dump mesh path callback -- resume dump at index @idx
  *
  * @get_mesh_params: Put the current mesh parameters into *params
  *
@@ -960,8 +965,6 @@
  *	The mask is a bitfield which tells us which parameters to
  *	set, and which to leave alone.
  *
- * @set_mesh_cfg: set mesh parameters (by now, just mesh id)
- *
  * @change_bss: Modify parameters for a given BSS.
  *
  * @set_txq_params: Set TX queue parameters
@@ -1002,6 +1005,8 @@
  * @get_tx_power: store the current TX power into the dbm variable;
  *	return 0 if successful
  *
+ * @set_wds_peer: set the WDS peer for a WDS interface
+ *
  * @rfkill_poll: polls the hw rfkill line, use cfg80211 reporting
  *	functions to adjust rfkill hw state
  *
@@ -1019,6 +1024,8 @@
  *
  * @testmode_cmd: run a test mode command
  *
+ * @set_bitrate_mask: set the bitrate mask configuration
+ *
  * @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac
  *	devices running firmwares capable of generating the (re) association
  *	RSN IE. It allows for faster roaming between WPA2 BSSIDs.
@@ -1129,7 +1136,7 @@
 	int	(*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
 
 	int	(*set_tx_power)(struct wiphy *wiphy,
-				enum tx_power_setting type, int dbm);
+				enum nl80211_tx_power_setting type, int mbm);
 	int	(*get_tx_power)(struct wiphy *wiphy, int *dbm);
 
 	int	(*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev,
@@ -1168,6 +1175,7 @@
 	int	(*action)(struct wiphy *wiphy, struct net_device *dev,
 			  struct ieee80211_channel *chan,
 			  enum nl80211_channel_type channel_type,
+			  bool channel_type_valid,
 			  const u8 *buf, size_t len, u64 *cookie);
 
 	int	(*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev,
@@ -1230,8 +1238,6 @@
 
 /**
  * struct wiphy - wireless hardware description
- * @idx: the wiphy index assigned to this item
- * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name>
  * @reg_notifier: the driver's regulatory notification callback
  * @regd: the driver's regulatory domain, if one was requested via
  * 	the regulatory_hint() API. This can be used by the driver
@@ -1245,7 +1251,7 @@
  * @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold);
  *	-1 = fragmentation disabled, only odd values >= 256 used
  * @rts_threshold: RTS threshold (dot11RTSThreshold); -1 = RTS/CTS disabled
- * @net: the network namespace this wiphy currently lives in
+ * @_net: the network namespace this wiphy currently lives in
  * @perm_addr: permanent MAC address of this device
  * @addr_mask: If the device supports multiple MAC addresses by masking,
  *	set this to a mask with variable bits set to 1, e.g. if the last
@@ -1258,6 +1264,28 @@
  *	by default for perm_addr. In this case, the mask should be set to
  *	all-zeroes. In this case it is assumed that the device can handle
  *	the same number of arbitrary MAC addresses.
+ * @debugfsdir: debugfs directory used for this wiphy, will be renamed
+ *	automatically on wiphy renames
+ * @dev: (virtual) struct device for this wiphy
+ * @wext: wireless extension handlers
+ * @priv: driver private data (sized according to wiphy_new() parameter)
+ * @interface_modes: bitmask of interfaces types valid for this wiphy,
+ *	must be set by driver
+ * @flags: wiphy flags, see &enum wiphy_flags
+ * @bss_priv_size: each BSS struct has private data allocated with it,
+ *	this variable determines its size
+ * @max_scan_ssids: maximum number of SSIDs the device can scan for in
+ *	any given scan
+ * @max_scan_ie_len: maximum length of user-controlled IEs device can
+ *	add to probe request frames transmitted during a scan, must not
+ *	include fixed IEs like supported rates
+ * @coverage_class: current coverage class
+ * @fw_version: firmware version for ethtool reporting
+ * @hw_version: hardware version for ethtool reporting
+ * @max_num_pmkids: maximum number of PMKIDs supported by device
+ * @privid: a pointer that drivers can use to identify if an arbitrary
+ *	wiphy is theirs, e.g. in global notifiers
+ * @bands: information about bands/channels supported by this device
  */
 struct wiphy {
 	/* assign these fields before you register the wiphy */
@@ -1330,26 +1358,15 @@
 	char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
 };
 
-#ifdef CONFIG_NET_NS
 static inline struct net *wiphy_net(struct wiphy *wiphy)
 {
-	return wiphy->_net;
+	return read_pnet(&wiphy->_net);
 }
 
 static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net)
 {
-	wiphy->_net = net;
+	write_pnet(&wiphy->_net, net);
 }
-#else
-static inline struct net *wiphy_net(struct wiphy *wiphy)
-{
-	return &init_net;
-}
-
-static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net)
-{
-}
-#endif
 
 /**
  * wiphy_priv - return priv from wiphy
@@ -1471,13 +1488,14 @@
  * @ssid: (private) Used by the internal configuration code
  * @ssid_len: (private) Used by the internal configuration code
  * @wext: (private) Used by the internal wireless extensions compat code
- * @wext_bssid: (private) Used by the internal wireless extensions compat code
  * @use_4addr: indicates 4addr mode is used on this interface, must be
  *	set by driver (if supported) on add_interface BEFORE registering the
  *	netdev and may otherwise be used by driver read-only, will be update
  *	by cfg80211 on change_interface
  * @action_registrations: list of registrations for action frames
  * @action_registrations_lock: lock for the list
+ * @mtx: mutex used to lock data in this struct
+ * @cleanup_work: work struct used for cleanup that can't be done directly
  */
 struct wireless_dev {
 	struct wiphy *wiphy;
@@ -1551,11 +1569,13 @@
 
 /**
  * ieee80211_channel_to_frequency - convert channel number to frequency
+ * @chan: channel number
  */
 extern int ieee80211_channel_to_frequency(int chan);
 
 /**
  * ieee80211_frequency_to_channel - convert frequency to channel number
+ * @freq: center frequency
  */
 extern int ieee80211_frequency_to_channel(int freq);
 
@@ -1570,6 +1590,8 @@
 							 int freq);
 /**
  * ieee80211_get_channel - get channel struct from wiphy for specified frequency
+ * @wiphy: the struct wiphy to get the channel for
+ * @freq: the center frequency of the channel
  */
 static inline struct ieee80211_channel *
 ieee80211_get_channel(struct wiphy *wiphy, int freq)
@@ -1630,9 +1652,6 @@
  * @is_radiotap_ns: indicates whether the current namespace is the default
  *	radiotap namespace or not
  *
- * @overrides: override standard radiotap fields
- * @n_overrides: number of overrides
- *
  * @_rtheader: pointer to the radiotap header we are walking through
  * @_max_length: length of radiotap header in cpu byte ordering
  * @_arg_index: next argument index
@@ -1948,10 +1967,12 @@
 void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted);
 
 /**
- * cfg80211_inform_bss - inform cfg80211 of a new BSS
+ * cfg80211_inform_bss_frame - inform cfg80211 of a received BSS frame
  *
  * @wiphy: the wiphy reporting the BSS
- * @bss: the found BSS
+ * @channel: The channel the frame was received on
+ * @mgmt: the management frame (probe response or beacon)
+ * @len: length of the management frame
  * @signal: the signal strength, type depends on the wiphy's signal_type
  * @gfp: context flags
  *
@@ -1964,6 +1985,23 @@
 			  struct ieee80211_mgmt *mgmt, size_t len,
 			  s32 signal, gfp_t gfp);
 
+/**
+ * cfg80211_inform_bss - inform cfg80211 of a new BSS
+ *
+ * @wiphy: the wiphy reporting the BSS
+ * @channel: The channel the frame was received on
+ * @bssid: the BSSID of the BSS
+ * @timestamp: the TSF timestamp sent by the peer
+ * @capability: the capability field sent by the peer
+ * @beacon_interval: the beacon interval announced by the peer
+ * @ie: additional IEs sent by the peer
+ * @ielen: length of the additional IEs
+ * @signal: the signal strength, type depends on the wiphy's signal_type
+ * @gfp: context flags
+ *
+ * This informs cfg80211 that BSS information was found and
+ * the BSS should be updated/added.
+ */
 struct cfg80211_bss*
 cfg80211_inform_bss(struct wiphy *wiphy,
 		    struct ieee80211_channel *channel,
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index 511a459..0916bbf 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -101,7 +101,7 @@
 	__le16 dstnode;
 	__le16 srcnode;
 	__u8   forward;
-} __attribute__((packed));
+} __packed;
 
 struct dn_long_packet {
 	__u8   msgflg;
@@ -115,7 +115,7 @@
 	__u8   visit_ct;
 	__u8   s_class;
 	__u8   pt;
-} __attribute__((packed));
+} __packed;
 
 /*------------------------- DRP - Routing messages ---------------------*/
 
@@ -132,7 +132,7 @@
 	__u8   mpd;
 	__u8   datalen;
 	__u8   data[2];
-} __attribute__((packed));
+} __packed;
 
 struct rtnode_hello_message {
 	__u8   msgflg;
@@ -144,7 +144,7 @@
 	__u8   area;
 	__le16  timer;
 	__u8   mpd;
-} __attribute__((packed));
+} __packed;
 
 
 extern void dn_dev_init(void);
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index 17d43d2..e43a289 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -74,18 +74,18 @@
 	__u8   msgflg;
 	__le16 dstaddr;
 	__le16 srcaddr;
-} __attribute__((packed));
+} __packed;
 
 struct nsp_data_opt_msg {
 	__le16 acknum;
 	__le16 segnum;
 	__le16 lsflgs;
-} __attribute__((packed));
+} __packed;
 
 struct nsp_data_opt_msg1 {
 	__le16 acknum;
 	__le16 segnum;
-} __attribute__((packed));
+} __packed;
 
 
 /* Acknowledgment Message (data/other data)                             */
@@ -94,13 +94,13 @@
 	__le16 dstaddr;
 	__le16 srcaddr;
 	__le16 acknum;
-} __attribute__((packed));
+} __packed;
 
 /* Connect Acknowledgment Message */
 struct  nsp_conn_ack_msg {
 	__u8 msgflg;
 	__le16 dstaddr;
-} __attribute__((packed));
+} __packed;
 
 
 /* Connect Initiate/Retransmit Initiate/Connect Confirm */
@@ -117,7 +117,7 @@
 #define NSP_FC_MASK   0x0c            /* FC type mask         */
 	__u8   info;
 	__le16 segsize;
-} __attribute__((packed));
+} __packed;
 
 /* Disconnect Initiate/Disconnect Confirm */
 struct  nsp_disconn_init_msg {
@@ -125,7 +125,7 @@
 	__le16 dstaddr;
 	__le16 srcaddr;
 	__le16 reason;
-} __attribute__((packed));
+} __packed;
 
 
 
@@ -135,7 +135,7 @@
 	__le16 grpcode;
 	__le16 usrcode;
 	__u8   dlen;
-} __attribute__((packed));
+} __packed;
 
 /*
  * A collection of functions for manipulating the sequence
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 60c9f22..ccadab3 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -65,9 +65,7 @@
  * packets to the originating host.
  */
 struct dn_route {
-	union {
-		struct dst_entry dst;
-	} u;
+	struct dst_entry dst;
 
 	struct flowi fl;
 
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index eb551ba..f7dcd2c 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -68,26 +68,15 @@
 #endif
 };
 
-#ifdef CONFIG_NET_NS
 static inline struct net *genl_info_net(struct genl_info *info)
 {
-	return info->_net;
+	return read_pnet(&info->_net);
 }
 
 static inline void genl_info_net_set(struct genl_info *info, struct net *net)
 {
-	info->_net = net;
+	write_pnet(&info->_net, net);
 }
-#else
-static inline struct net *genl_info_net(struct genl_info *info)
-{
-	return &init_net;
-}
-
-static inline void genl_info_net_set(struct genl_info *info, struct net *net)
-{
-}
-#endif
 
 /**
  * struct genl_ops - generic netlink operations
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 39f2dc9..16ff29a 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -20,6 +20,7 @@
 	atomic_t		refcnt;
 	struct timer_list	timer;      /* when will this queue expire? */
 	struct sk_buff		*fragments; /* list of received fragments */
+	struct sk_buff		*fragments_tail;
 	ktime_t			stamp;
 	int			len;        /* total length of orig datagram */
 	int			meat;
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 1653de5..1989cfd 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -137,7 +137,8 @@
 				hdrincl:1,
 				mc_loop:1,
 				transparent:1,
-				mc_all:1;
+				mc_all:1,
+				nodefrag:1;
 	int			mc_index;
 	__be32			mc_addr;
 	struct ip_mc_socklist	*mc_list;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 87b1df0..417d0c8 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -22,10 +22,21 @@
 	__u32			dtime;		/* the time of last use of not
 						 * referenced entries */
 	atomic_t		refcnt;
-	atomic_t		rid;		/* Frag reception counter */
-	atomic_t		ip_id_count;	/* IP ID for the next packet */
-	__u32			tcp_ts;
-	__u32			tcp_ts_stamp;
+	/*
+	 * Once inet_peer is queued for deletion (refcnt == -1), the following
+	 * fields are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp.
+	 * We can share memory with rcu_head to keep inet_peer small
+	 * (less than 64 bytes).
+	 */
+	union {
+		struct {
+			atomic_t	rid;		/* Frag reception counter */
+			atomic_t	ip_id_count;	/* IP ID for the next packet */
+			__u32		tcp_ts;
+			__u32		tcp_ts_stamp;
+		};
+		struct rcu_head         rcu;
+	};
 };
 
 void			inet_initpeers(void) __init;
@@ -36,10 +47,21 @@
 /* can be called from BH context or outside */
 extern void inet_putpeer(struct inet_peer *p);
 
+/*
+ * temporary check to make sure we don't access rid, ip_id_count, tcp_ts,
+ * tcp_ts_stamp if no refcount is taken on inet_peer
+ */
+static inline void inet_peer_refcheck(const struct inet_peer *p)
+{
+	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
+}
+
+
 /* can be called with or without local BH being disabled */
 static inline __u16	inet_getid(struct inet_peer *p, int more)
 {
 	more++;
+	inet_peer_refcheck(p);
 	return atomic_add_return(more, &p->ip_id_count) - more;
 }
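
inet_peer_refcheck() only warns; the rule it documents is that the union'ed
fields may be touched only while a reference on the peer is held. A minimal
sketch of the intended calling pattern, assuming the usual
inet_getpeer(daddr, create) lookup declared elsewhere in this header.

    #include <linux/ip.h>
    #include <net/inetpeer.h>

    /* Sketch: pick the next IP ID from a referenced inet_peer. */
    static void example_select_ident(struct iphdr *iph, __be32 daddr, int segs)
    {
    	struct inet_peer *peer = inet_getpeer(daddr, 1);

    	if (peer) {
    		/* refcnt > 0 here, so rid/ip_id_count are still valid. */
    		iph->id = htons(inet_getid(peer, segs - 1));
    		inet_putpeer(peer);
    	} else {
    		iph->id = 0;	/* no peer entry available */
    	}
    }
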
 
diff --git a/include/net/ip.h b/include/net/ip.h
index 452f229..890f972 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -61,11 +61,14 @@
 struct ip_ra_chain {
 	struct ip_ra_chain	*next;
 	struct sock		*sk;
-	void			(*destructor)(struct sock *);
+	union {
+		void			(*destructor)(struct sock *);
+		struct sock		*saved_sk;
+	};
+	struct rcu_head		rcu;
 };
 
 extern struct ip_ra_chain *ip_ra_chain;
-extern rwlock_t ip_ra_lock;
 
 /* IP flags. */
 #define IP_CE		0x8000		/* Flag: "Congestion"		*/
@@ -162,12 +165,12 @@
 };
 
 extern struct ipv4_config ipv4_config;
-#define IP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.ip_statistics, field)
-#define IP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.ip_statistics, field)
-#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.ip_statistics, field, val)
-#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS_BH((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS_BH((net)->mib.ip_statistics, field, val)
+#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
+#define IP_INC_STATS_BH(net, field)	SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
+#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
+#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
+#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
 #define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
 #define NET_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
 #define NET_INC_STATS_USER(net, field) 	SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
@@ -175,7 +178,15 @@
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
 extern unsigned long snmp_fold_field(void __percpu *mib[], int offt);
-extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize);
+#if BITS_PER_LONG==32
+extern u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+#else
+static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
+{
+	return snmp_fold_field(mib, offt);
+}
+#endif
+extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
 extern void snmp_mib_free(void __percpu *ptr[2]);
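
Because the IP MIB counters become u64 on 32-bit kernels, readers must pass the
offset of the u64_stats_sync member when folding the per-cpu copies; on 64-bit
builds the helper falls back to snmp_fold_field(). A minimal sketch of a reader
(the mib argument is whatever per-cpu ipstats_mib array the caller owns).

    #include <linux/stddef.h>
    #include <net/snmp.h>

    static u64 example_read_ip_mib(void __percpu *mib[], int field)
    {
    	return snmp_fold_field64(mib, field,
    				 offsetof(struct ipstats_mib, syncp));
    }
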
 
 extern struct local_ports {
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 4b1dc11..062a823 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -84,13 +84,11 @@
 struct fib6_table;
 
 struct rt6_info {
-	union {
-		struct dst_entry	dst;
-	} u;
+	struct dst_entry		dst;
 
-#define rt6i_dev			u.dst.dev
-#define rt6i_nexthop			u.dst.neighbour
-#define rt6i_expires			u.dst.expires
+#define rt6i_dev			dst.dev
+#define rt6i_nexthop			dst.neighbour
+#define rt6i_expires			dst.expires
 
 	/*
 	 * Tail elements of dst_entry (__refcnt etc.)
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index fbf9d1c..fc94ec5 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -27,6 +27,6 @@
 	__u8 type;		/* type-code for option         */
 	__u8 length;		/* option length                */
 	__u8 encap_limit;	/* tunnel encapsulation limit   */
-} __attribute__ ((packed));
+} __packed;
 
 #endif
diff --git a/include/net/ipip.h b/include/net/ipip.h
index 11e8513..65caea8 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -50,7 +50,7 @@
 	int pkt_len = skb->len - skb_transport_offset(skb);		\
 									\
 	skb->ip_summed = CHECKSUM_NONE;					\
-	ip_select_ident(iph, &rt->u.dst, NULL);				\
+	ip_select_ident(iph, &rt->dst, NULL);				\
 									\
 	err = ip_local_out(skb);					\
 	if (likely(net_xmit_eval(err) == 0)) {				\
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 2600b69..1f84124 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -136,17 +136,17 @@
 /* MIBs */
 
 #define IP6_INC_STATS(net, idev,field)		\
-		_DEVINC(net, ipv6, , idev, field)
+		_DEVINC(net, ipv6, 64, idev, field)
 #define IP6_INC_STATS_BH(net, idev,field)	\
-		_DEVINC(net, ipv6, _BH, idev, field)
+		_DEVINC(net, ipv6, 64_BH, idev, field)
 #define IP6_ADD_STATS(net, idev,field,val)	\
-		_DEVADD(net, ipv6, , idev, field, val)
+		_DEVADD(net, ipv6, 64, idev, field, val)
 #define IP6_ADD_STATS_BH(net, idev,field,val)	\
-		_DEVADD(net, ipv6, _BH, idev, field, val)
+		_DEVADD(net, ipv6, 64_BH, idev, field, val)
 #define IP6_UPD_PO_STATS(net, idev,field,val)   \
-		_DEVUPD(net, ipv6, , idev, field, val)
+		_DEVUPD(net, ipv6, 64, idev, field, val)
 #define IP6_UPD_PO_STATS_BH(net, idev,field,val)   \
-		_DEVUPD(net, ipv6, _BH, idev, field, val)
+		_DEVUPD(net, ipv6, 64_BH, idev, field, val)
 #define ICMP6_INC_STATS(net, idev, field)	\
 		_DEVINC(net, icmpv6, , idev, field)
 #define ICMP6_INC_STATS_BH(net, idev, field)	\
@@ -551,6 +551,10 @@
 
 extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
 
+extern struct in6_addr *fl6_update_dst(struct flowi *fl,
+				       const struct ipv6_txoptions *opt,
+				       struct in6_addr *orig);
+
 /*
  *	socket options (ipv6_sockglue.c)
  */
diff --git a/include/net/ipx.h b/include/net/ipx.h
index ef51a66..05d7e4a 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -27,9 +27,9 @@
 #define IPX_MAX_PPROP_HOPS 8
 
 struct ipxhdr {
-	__be16			ipx_checksum __attribute__ ((packed));
+	__be16			ipx_checksum __packed;
 #define IPX_NO_CHECKSUM	cpu_to_be16(0xFFFF)
-	__be16			ipx_pktsize __attribute__ ((packed));
+	__be16			ipx_pktsize __packed;
 	__u8			ipx_tctrl;
 	__u8			ipx_type;
 #define IPX_TYPE_UNKNOWN	0x00
@@ -38,8 +38,8 @@
 #define IPX_TYPE_SPX		0x05	/* SPX protocol */
 #define IPX_TYPE_NCP		0x11	/* $lots for docs on this (SPIT) */
 #define IPX_TYPE_PPROP		0x14	/* complicated flood fill brdcast */
-	struct ipx_address	ipx_dest __attribute__ ((packed));
-	struct ipx_address	ipx_source __attribute__ ((packed));
+	struct ipx_address	ipx_dest __packed;
+	struct ipx_address	ipx_source __packed;
 };
 
 static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index de22cbf..7f256e2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -146,6 +146,7 @@
  *	enabled/disabled (beaconing modes)
  * @BSS_CHANGED_CQM: Connection quality monitor config changed
  * @BSS_CHANGED_IBSS: IBSS join status changed
+ * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed.
  */
 enum ieee80211_bss_change {
 	BSS_CHANGED_ASSOC		= 1<<0,
@@ -160,10 +161,18 @@
 	BSS_CHANGED_BEACON_ENABLED	= 1<<9,
 	BSS_CHANGED_CQM			= 1<<10,
 	BSS_CHANGED_IBSS		= 1<<11,
+	BSS_CHANGED_ARP_FILTER		= 1<<12,
 
 	/* when adding here, make sure to change ieee80211_reconfig */
 };
 
+/*
+ * The maximum number of IPv4 addresses listed for ARP filtering. If the number
+ * of addresses for an interface increases beyond this value, hardware ARP
+ * filtering will be disabled.
+ */
+#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4
+
 /**
  * struct ieee80211_bss_conf - holds the BSS's changing parameters
  *
@@ -199,6 +208,15 @@
  * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
  *	implies disabled
  * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
+ * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
+ *	hardware may filter ARP queries targeted for addresses not listed here.
+ *	The driver must allow ARP queries targeted for all addresses listed here
+ *	to pass through. An empty list implies no ARP queries need to pass.
+ * @arp_addr_cnt: Number of addresses currently on the list.
+ * @arp_filter_enabled: Enable ARP filtering - if enabled, the hardware may
+ *	filter ARP queries based on the @arp_addr_list; if disabled, the
+ *	hardware must not perform any ARP filtering. Note that the filter will
+ *	be enabled even in promiscuous mode.
  */
 struct ieee80211_bss_conf {
 	const u8 *bssid;
@@ -219,6 +237,9 @@
 	s32 cqm_rssi_thold;
 	u32 cqm_rssi_hyst;
 	enum nl80211_channel_type channel_type;
+	__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+	u8 arp_addr_cnt;
+	bool arp_filter_enabled;
 };
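
A driver advertising hardware ARP filtering reacts to the new
BSS_CHANGED_ARP_FILTER bit from its bss_info_changed() callback. A minimal
sketch, where my_hw_set_arp_filter() stands in for the device-specific command.

    #include <net/mac80211.h>

    static void my_bss_info_changed(struct ieee80211_hw *hw,
    				struct ieee80211_vif *vif,
    				struct ieee80211_bss_conf *info,
    				u32 changed)
    {
    	if (!(changed & BSS_CHANGED_ARP_FILTER))
    		return;

    	if (info->arp_filter_enabled)
    		/* Program up to IEEE80211_BSS_ARP_ADDR_LIST_LEN addresses. */
    		my_hw_set_arp_filter(hw->priv, info->arp_addr_list,
    				     info->arp_addr_cnt);
    	else
    		my_hw_set_arp_filter(hw->priv, NULL, 0);
    }
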
 
 /**
@@ -312,9 +333,10 @@
 	IEEE80211_TX_INTFL_NL80211_FRAME_TX	= BIT(21),
 	IEEE80211_TX_CTL_LDPC			= BIT(22),
 	IEEE80211_TX_CTL_STBC			= BIT(23) | BIT(24),
-#define IEEE80211_TX_CTL_STBC_SHIFT		23
 };
 
+#define IEEE80211_TX_CTL_STBC_SHIFT		23
+
 /**
  * enum mac80211_rate_control_flags - per-rate flags set by the
  *	Rate Control algorithm.
@@ -412,8 +434,6 @@
  * @driver_data: array of driver_data pointers
  * @ampdu_ack_len: number of acked aggregated frames.
  * 	relevant only if IEEE80211_TX_STAT_AMPDU was set.
- * @ampdu_ack_map: block ack bit map for the aggregation.
- * 	relevant only if IEEE80211_TX_STAT_AMPDU was set.
  * @ampdu_len: number of aggregated frames.
  * 	relevant only if IEEE80211_TX_STAT_AMPDU was set.
  * @ack_signal: signal strength of the ACK frame
@@ -448,10 +468,9 @@
 		struct {
 			struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
 			u8 ampdu_ack_len;
-			u64 ampdu_ack_map;
 			int ack_signal;
 			u8 ampdu_len;
-			/* 7 bytes free */
+			/* 15 bytes free */
 		} status;
 		struct {
 			struct ieee80211_tx_rate driver_rates[
@@ -676,9 +695,6 @@
  * @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the
  *	powersave documentation below. This variable is valid only when
  *	the CONF_PS flag is set.
- * @dynamic_ps_forced_timeout: The dynamic powersave timeout (in ms) configured
- *	by cfg80211 (essentially, wext) If set, this value overrules the value
- *	chosen by mac80211 based on ps qos network latency.
  *
  * @power_level: requested transmit power (in dBm)
  *
@@ -698,7 +714,7 @@
  */
 struct ieee80211_conf {
 	u32 flags;
-	int power_level, dynamic_ps_timeout, dynamic_ps_forced_timeout;
+	int power_level, dynamic_ps_timeout;
 	int max_sleep_period;
 
 	u16 listen_interval;
@@ -815,7 +831,6 @@
  *	encrypted in hardware.
  * @alg: The key algorithm.
  * @flags: key flags, see &enum ieee80211_key_flags.
- * @ap_addr: AP's MAC address
  * @keyidx: the key index (0-3)
  * @keylen: key material length
  * @key: key material. For ALG_TKIP the key is encoded as a 256-bit (32 byte)
@@ -881,16 +896,12 @@
  * enum sta_notify_cmd - sta notify command
  *
  * Used with the sta_notify() callback in &struct ieee80211_ops, this
- * indicates addition and removal of a station to station table,
- * or if a associated station made a power state transition.
+ * indicates if an associated station made a power state transition.
  *
- * @STA_NOTIFY_ADD: (DEPRECATED) a station was added to the station table
- * @STA_NOTIFY_REMOVE: (DEPRECATED) a station being removed from the station table
  * @STA_NOTIFY_SLEEP: a station is now sleeping
  * @STA_NOTIFY_AWAKE: a sleeping station woke up
  */
 enum sta_notify_cmd {
-	STA_NOTIFY_ADD, STA_NOTIFY_REMOVE,
 	STA_NOTIFY_SLEEP, STA_NOTIFY_AWAKE,
 };
 
@@ -1260,6 +1271,15 @@
  * dynamic PS feature in stack and will just keep %IEEE80211_CONF_PS
  * enabled whenever user has enabled powersave.
  *
+ * Some hardware need to toggle a single shared antenna between WLAN and
+ * Bluetooth to facilitate co-existence. These types of hardware set
+ * limitations on the use of host controlled dynamic powersave whenever there
+ * is simultaneous WLAN and Bluetooth traffic. For these types of hardware, the
+ * driver may request temporarily going into full power save, in order to
+ * enable toggling the antenna between BT and WLAN. If the driver requests
+ * disabling dynamic powersave, the @dynamic_ps_timeout value will be
+ * temporarily set to zero until the driver re-enables dynamic powersave.
+ *
  * Driver informs U-APSD client support by enabling
  * %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
  * uapsd paramater in conf_tx() operation. Hardware needs to send the QoS
@@ -1451,7 +1471,7 @@
  *
  * Note that drivers MUST be able to deal with a TX aggregation
  * session being stopped even before they OK'ed starting it by
- * calling ieee80211_start_tx_ba_cb(_irqsafe), because the peer
+ * calling ieee80211_start_tx_ba_cb_irqsafe, because the peer
  * might receive the addBA frame and send a delBA right away!
  *
  * @IEEE80211_AMPDU_RX_START: start Rx aggregation
@@ -1636,7 +1656,7 @@
  * 	is the first frame we expect to perform the action on. Notice
  * 	that TX/RX_STOP can pass NULL for this parameter.
  *	Returns a negative error code on failure.
- *	The callback must be atomic.
+ *	The callback can sleep.
  *
  * @get_survey: Return per-channel survey information
  *
@@ -2307,25 +2327,14 @@
 int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
 
 /**
- * ieee80211_start_tx_ba_cb - low level driver ready to aggregate.
- * @vif: &struct ieee80211_vif pointer from the add_interface callback
- * @ra: receiver address of the BA session recipient.
- * @tid: the TID to BA on.
- *
- * This function must be called by low level driver once it has
- * finished with preparations for the BA session.
- */
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-
-/**
  * ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate.
  * @vif: &struct ieee80211_vif pointer from the add_interface callback
  * @ra: receiver address of the BA session recipient.
  * @tid: the TID to BA on.
  *
  * This function must be called by low level driver once it has
- * finished with preparations for the BA session.
- * This version of the function is IRQ-safe.
+ * finished with preparations for the BA session. It can be called
+ * from any context.
  */
 void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
 				      u16 tid);
@@ -2334,27 +2343,14 @@
  * ieee80211_stop_tx_ba_session - Stop a Block Ack session.
  * @sta: the station whose BA session to stop
  * @tid: the TID to stop BA.
- * @initiator: if indicates initiator DELBA frame will be sent.
  *
- * Return: error if no sta with matching da found, success otherwise
+ * Return: negative error if the TID is invalid, or no aggregation active
  *
  * Although mac80211/low level driver/user space application can estimate
  * the need to stop aggregation on a certain RA/TID, the session level
  * will be managed by the mac80211.
  */
-int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid,
-				 enum ieee80211_back_parties initiator);
-
-/**
- * ieee80211_stop_tx_ba_cb - low level driver ready to stop aggregate.
- * @vif: &struct ieee80211_vif pointer from the add_interface callback
- * @ra: receiver address of the BA session recipient.
- * @tid: the desired TID to BA on.
- *
- * This function must be called by low level driver once it has
- * finished with preparations for the BA session tear down.
- */
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
 
 /**
  * ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate.
@@ -2363,8 +2359,8 @@
  * @tid: the desired TID to BA on.
  *
  * This function must be called by low level driver once it has
- * finished with preparations for the BA session tear down.
- * This version of the function is IRQ-safe.
+ * finished with preparations for the BA session tear down. It
+ * can be called from any context.
  */
 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
 				     u16 tid);
@@ -2460,6 +2456,36 @@
 void ieee80211_connection_loss(struct ieee80211_vif *vif);
 
 /**
+ * ieee80211_disable_dyn_ps - force mac80211 to temporarily disable dynamic psm
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Some hardware require full power save to manage simultaneous BT traffic
+ * on the WLAN frequency. Full PSM is required periodically, whenever there are
+ * bursts of BT traffic. The hardware gets information about BT traffic via
+ * hardware co-existence lines, and consequently requests mac80211 to
+ * (temporarily) enter full psm.
+ * This function will only temporarily disable dynamic PS, not enable PSM if
+ * it was not already enabled.
+ * The driver must make sure to re-enable dynamic PS using
+ * ieee80211_enable_dyn_ps() if the driver has disabled it.
+ *
+ */
+void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_enable_dyn_ps - restore dynamic psm after being disabled
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * This function restores dynamic PS after being temporarily disabled via
+ * ieee80211_disable_dyn_ps(). Each ieee80211_disable_dyn_ps() call must
+ * be coupled with an eventual call to this function.
+ *
+ */
+void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif);
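
The two helpers are meant to be strictly paired. A minimal sketch of how a
BT-coexistence driver might bracket a burst of Bluetooth traffic (the my_bt_*
hooks are hypothetical event handlers).

    #include <net/mac80211.h>

    static void my_bt_traffic_start(struct ieee80211_vif *vif)
    {
    	/* Force full PSM so the shared antenna can be handed to BT. */
    	ieee80211_disable_dyn_ps(vif);
    }

    static void my_bt_traffic_stop(struct ieee80211_vif *vif)
    {
    	/* Every disable call must eventually be matched by an enable. */
    	ieee80211_enable_dyn_ps(vif);
    }
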
+
+/**
  * ieee80211_cqm_rssi_notify - inform a configured connection quality monitoring
  *	rssi threshold triggered
  *
diff --git a/include/net/mip6.h b/include/net/mip6.h
index a83ad19..26ba99b 100644
--- a/include/net/mip6.h
+++ b/include/net/mip6.h
@@ -39,7 +39,7 @@
 	__u16	ip6mh_cksum;
 	/* Followed by type specific messages */
 	__u8	data[0];
-} __attribute__ ((__packed__));
+} __packed;
 
 #define IP6_MH_TYPE_BRR		0   /* Binding Refresh Request */
 #define IP6_MH_TYPE_HOTI	1   /* HOTI Message   */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index f76f22d..895997b 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -82,7 +82,7 @@
 struct nd_opt_hdr {
 	__u8		nd_opt_type;
 	__u8		nd_opt_len;
-} __attribute__((__packed__));
+} __packed;
 
 
 extern int			ndisc_init(void);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index eb21340..242879b 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -151,7 +151,7 @@
 	void			(*proxy_redo)(struct sk_buff *skb);
 	char			*id;
 	struct neigh_parms	parms;
-	/* HACK. gc_* shoul follow parms without a gap! */
+	/* HACK. gc_* should follow parms without a gap! */
 	int			gc_interval;
 	int			gc_thresh1;
 	int			gc_thresh2;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index bde095f..e624dae 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -152,11 +152,7 @@
 
 static inline struct net *nf_ct_net(const struct nf_conn *ct)
 {
-#ifdef CONFIG_NET_NS
-	return ct->ct_net;
-#else
-	return &init_net;
-#endif
+	return read_pnet(&ct->ct_net);
 }
 
 /* Alter reply tuple (maybe alter helper). */
@@ -261,7 +257,12 @@
 			       u32 seq);
 
 /* Fake conntrack entry for untracked connections */
-extern struct nf_conn nf_conntrack_untracked;
+DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+static inline struct nf_conn *nf_ct_untracked_get(void)
+{
+	return &__raw_get_cpu_var(nf_conntrack_untracked);
+}
+extern void nf_ct_untracked_status_or(unsigned long bits);
 
 /* Iterate over all conntracks: if iter returns true, it's deleted. */
 extern void
@@ -289,9 +290,9 @@
 	return test_bit(IPS_DYING_BIT, &ct->status);
 }
 
-static inline int nf_ct_is_untracked(const struct sk_buff *skb)
+static inline int nf_ct_is_untracked(const struct nf_conn *ct)
 {
-	return (skb->nfct == &nf_conntrack_untracked.ct_general);
+	return test_bit(IPS_UNTRACKED_BIT, &ct->status);
 }
 
 extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
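
Call sites that used to compare a conntrack against the global
&nf_conntrack_untracked now either test the status bit or fetch the per-cpu
entry. A minimal sketch of both patterns, modelled loosely on a NOTRACK-style
target rather than taken verbatim from any file in this patch.

    #include <linux/skbuff.h>
    #include <net/netfilter/nf_conntrack.h>

    /* Attach the per-cpu untracked entry to an skb. */
    static void example_mark_untracked(struct sk_buff *skb)
    {
    	skb->nfct = &nf_ct_untracked_get()->ct_general;
    	skb->nfctinfo = IP_CT_NEW;
    	nf_conntrack_get(skb->nfct);
    }

    /* Test whether the conntrack attached to an skb is the untracked one. */
    static bool example_is_untracked(const struct sk_buff *skb)
    {
    	enum ip_conntrack_info ctinfo;
    	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

    	return ct && nf_ct_is_untracked(ct);
    }
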
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 03e218f..4e9c63a 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -45,6 +45,18 @@
 extern unsigned int
 seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir);
 
+/* Check if connection tracking accounting is enabled */
+static inline bool nf_ct_acct_enabled(struct net *net)
+{
+	return net->ct.sysctl_acct != 0;
+}
+
+/* Enable/disable connection tracking accounting */
+static inline void nf_ct_set_acct(struct net *net, bool enable)
+{
+	net->ct.sysctl_acct = enable;
+}
+
 extern int nf_conntrack_acct_init(struct net *net);
 extern void nf_conntrack_acct_fini(struct net *net);
 
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 3d7524f..aced085 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -60,7 +60,7 @@
 	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
 	int ret = NF_ACCEPT;
 
-	if (ct && ct != &nf_conntrack_untracked) {
+	if (ct && !nf_ct_is_untracked(ct)) {
 		if (!nf_ct_is_confirmed(ct))
 			ret = __nf_conntrack_confirm(skb);
 		if (likely(ret == NF_ACCEPT))
diff --git a/include/net/netfilter/nf_nat_rule.h b/include/net/netfilter/nf_nat_rule.h
index e4a18ae..2890bdc 100644
--- a/include/net/netfilter/nf_nat_rule.h
+++ b/include/net/netfilter/nf_nat_rule.h
@@ -12,6 +12,4 @@
 			    const struct net_device *out,
 			    struct nf_conn *ct);
 
-extern unsigned int
-alloc_null_binding(struct nf_conn *ct, unsigned int hooknum);
 #endif /* _NF_NAT_RULE_H */
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index ddbf37e..5a2978d 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -2,13 +2,18 @@
 #define _XT_RATEEST_H
 
 struct xt_rateest {
+	/* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
+	struct gnet_stats_basic_packed	bstats;
+	spinlock_t			lock;
+	/* keep rstats and lock on same cache line to speedup xt_rateest_mt() */
+	struct gnet_stats_rate_est	rstats;
+
+	/* following fields not accessed in hot path */
 	struct hlist_node		list;
 	char				name[IFNAMSIZ];
 	unsigned int			refcnt;
-	spinlock_t			lock;
 	struct gnet_estimator		params;
-	struct gnet_stats_rate_est	rstats;
-	struct gnet_stats_basic_packed	bstats;
+	struct rcu_head			rcu;
 };
 
 extern struct xt_rateest *xt_rateest_lookup(const char *name);
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 4fc05b5..f3b201d 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -35,7 +35,7 @@
  *   nlmsg_new()			create a new netlink message
  *   nlmsg_put()			add a netlink message to an skb
  *   nlmsg_put_answer()			callback based nlmsg_put()
- *   nlmsg_end()			finanlize netlink message
+ *   nlmsg_end()			finalize netlink message
  *   nlmsg_get_pos()			return current position in message
  *   nlmsg_trim()			trim part of message
  *   nlmsg_cancel()			cancel message construction
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index d7b989c..2d16783 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -34,6 +34,7 @@
 	struct list_head list;
 	struct net_device *netdev;
 	DECLARE_BITMAP(addrs, 64);
+	struct rcu_head	rcu;
 };
 
 int phonet_device_init(void);
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 9d4d87c..d9549af 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -95,7 +95,7 @@
 
 static inline void qdisc_run(struct Qdisc *q)
 {
-	if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
+	if (qdisc_run_begin(q))
 		__qdisc_run(q);
 }
 
diff --git a/include/net/route.h b/include/net/route.h
index af6cf4b..bd732d6 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -50,9 +50,7 @@
 struct fib_nh;
 struct inet_peer;
 struct rtable {
-	union {
-		struct dst_entry	dst;
-	} u;
+	struct dst_entry	dst;
 
 	/* Cache lookup keys */
 	struct flowi		fl;
@@ -144,7 +142,7 @@
 static inline void ip_rt_put(struct rtable * rt)
 {
 	if (rt)
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 }
 
 #define IPTOS_RT_MASK	(IPTOS_TOS_MASK & ~3)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 433604b..3c8728a 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -23,11 +23,17 @@
 };
 
 enum qdisc_state_t {
-	__QDISC_STATE_RUNNING,
 	__QDISC_STATE_SCHED,
 	__QDISC_STATE_DEACTIVATED,
 };
 
+/*
+ * following bits are only changed while qdisc lock is held
+ */
+enum qdisc___state_t {
+	__QDISC___STATE_RUNNING,
+};
+
 struct qdisc_size_table {
 	struct list_head	list;
 	struct tc_sizespec	szopts;
@@ -72,10 +78,27 @@
 	unsigned long		state;
 	struct sk_buff_head	q;
 	struct gnet_stats_basic_packed bstats;
+	unsigned long		__state;
 	struct gnet_stats_queue	qstats;
-	struct rcu_head     rcu_head;
+	struct rcu_head		rcu_head;
+	spinlock_t		busylock;
 };
 
+static inline bool qdisc_is_running(struct Qdisc *qdisc)
+{
+	return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+}
+
+static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+{
+	return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+}
+
+static inline void qdisc_run_end(struct Qdisc *qdisc)
+{
+	__clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+}
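
Moving the RUNNING bit into a separate __state word is what allows the cheaper
non-atomic __test_and_set_bit()/__clear_bit(): __state is only ever changed
while the qdisc lock is held. The owner/runner pattern these helpers implement
looks roughly like this (a sketch, not a copy of __qdisc_run()).

    static void example_run(struct Qdisc *q)
    {
    	/* Caller holds the qdisc root lock, which is what makes the
    	 * non-atomic bit operations in qdisc_run_begin/_end safe.
    	 */
    	if (qdisc_run_begin(q)) {
    		/* ... dequeue and transmit packets ... */
    		qdisc_run_end(q);
    	}
    	/* else: another CPU already owns the qdisc and will drain it. */
    }
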
+
 struct Qdisc_class_ops {
 	/* Child qdisc manipulation */
 	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
@@ -583,9 +606,16 @@
 }
 
 #ifdef CONFIG_NET_CLS_ACT
-static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
+static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
+					    int action)
 {
-	struct sk_buff *n = skb_clone(skb, gfp_mask);
+	struct sk_buff *n;
+
+	if ((action == TC_ACT_STOLEN || action == TC_ACT_QUEUED) &&
+	    !skb_shared(skb))
+		n = skb_get(skb);
+	else
+		n = skb_clone(skb, gfp_mask);
 
 	if (n) {
 		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
diff --git a/include/net/scm.h b/include/net/scm.h
index 8360e47..3165650 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -19,8 +19,10 @@
 };
 
 struct scm_cookie {
-	struct ucred		creds;		/* Skb credentials	*/
+	struct pid		*pid;		/* Skb credentials */
+	const struct cred	*cred;
 	struct scm_fp_list	*fp;		/* Passed files		*/
+	struct ucred		creds;		/* Skb credentials	*/
 #ifdef CONFIG_SECURITY_NETWORK
 	u32			secid;		/* Passed security ID 	*/
 #endif
@@ -42,8 +44,27 @@
 { }
 #endif /* CONFIG_SECURITY_NETWORK */
 
+static __inline__ void scm_set_cred(struct scm_cookie *scm,
+				    struct pid *pid, const struct cred *cred)
+{
+	scm->pid  = get_pid(pid);
+	scm->cred = get_cred(cred);
+	cred_to_ucred(pid, cred, &scm->creds);
+}
+
+static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
+{
+	put_pid(scm->pid);
+	scm->pid  = NULL;
+
+	if (scm->cred)
+		put_cred(scm->cred);
+	scm->cred = NULL;
+}
+
 static __inline__ void scm_destroy(struct scm_cookie *scm)
 {
+	scm_destroy_cred(scm);
 	if (scm && scm->fp)
 		__scm_destroy(scm);
 }
@@ -51,10 +72,7 @@
 static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
 			       struct scm_cookie *scm)
 {
-	struct task_struct *p = current;
-	scm->creds.uid = current_uid();
-	scm->creds.gid = current_gid();
-	scm->creds.pid = task_tgid_vnr(p);
+	scm_set_cred(scm, task_tgid(current), current_cred());
 	scm->fp = NULL;
 	unix_get_peersec_dgram(sock, scm);
 	if (msg->msg_controllen <= 0)
@@ -96,6 +114,8 @@
 	if (test_bit(SOCK_PASSCRED, &sock->flags))
 		put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds);
 
+	scm_destroy_cred(scm);
+
 	scm_passec(sock, msg, scm);
 
 	if (!scm->fp)
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 4b86011..f9e7473 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -443,7 +443,7 @@
 	__u8 signature[SCTP_SECRET_SIZE];
 	__u32 __pad;		/* force sctp_cookie alignment to 64 bits */
 	struct sctp_cookie c;
-} __attribute__((packed));
+} __packed;
 
 /* This is another convenience type to allocate memory for address
  * params for the maximum size and pass such structures around
@@ -488,7 +488,7 @@
 	union sctp_addr daddr;
 	unsigned long sent_at;
 	__u64 hb_nonce;
-} __attribute__((packed)) sctp_sender_hb_info_t;
+} __packed sctp_sender_hb_info_t;
 
 /*
  *  RFC 2960 1.3.2 Sequenced Delivery within Streams
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 899003d..a0e6180 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -47,15 +47,16 @@
 }
 
 /*
- * We use all unsigned longs. Linux will soon be so reliable that even 
- * these will rapidly get too small 8-). Seriously consider the IpInReceives 
- * count on the 20Gb/s + networks people expect in a few years time!
+ * We use unsigned longs for most mibs but u64 for ipstats.
  */
+#include <linux/u64_stats_sync.h>
 
 /* IPstats */
 #define IPSTATS_MIB_MAX	__IPSTATS_MIB_MAX
 struct ipstats_mib {
-	unsigned long	mibs[IPSTATS_MIB_MAX];
+	/* mibs[] must be first field of struct ipstats_mib */
+	u64		mibs[IPSTATS_MIB_MAX];
+	struct u64_stats_sync syncp;
 };
 
 /* ICMP */
@@ -155,4 +156,70 @@
 		ptr->mibs[basefield##PKTS]++; \
 		ptr->mibs[basefield##OCTETS] += addend;\
 	} while (0)
+
+
+#if BITS_PER_LONG==32
+
+#define SNMP_ADD_STATS64_BH(mib, field, addend) 			\
+	do {								\
+		__typeof__(*mib[0]) *ptr = __this_cpu_ptr((mib)[0]);	\
+		u64_stats_update_begin(&ptr->syncp);			\
+		ptr->mibs[field] += addend;				\
+		u64_stats_update_end(&ptr->syncp);			\
+	} while (0)
+#define SNMP_ADD_STATS64_USER(mib, field, addend) 			\
+	do {								\
+		__typeof__(*mib[0]) *ptr;				\
+		preempt_disable();					\
+		ptr = __this_cpu_ptr((mib)[1]);				\
+		u64_stats_update_begin(&ptr->syncp);			\
+		ptr->mibs[field] += addend;				\
+		u64_stats_update_end(&ptr->syncp);			\
+		preempt_enable();					\
+	} while (0)
+#define SNMP_ADD_STATS64(mib, field, addend)				\
+	do {								\
+		__typeof__(*mib[0]) *ptr;				\
+		preempt_disable();					\
+		ptr = __this_cpu_ptr((mib)[!in_softirq()]);		\
+		u64_stats_update_begin(&ptr->syncp);			\
+		ptr->mibs[field] += addend;				\
+		u64_stats_update_end(&ptr->syncp);			\
+		preempt_enable();					\
+	} while (0)
+#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
+#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
+#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
+#define SNMP_UPD_PO_STATS64(mib, basefield, addend)			\
+	do {								\
+		__typeof__(*mib[0]) *ptr;				\
+		preempt_disable();					\
+		ptr = __this_cpu_ptr((mib)[!in_softirq()]);		\
+		u64_stats_update_begin(&ptr->syncp);			\
+		ptr->mibs[basefield##PKTS]++;				\
+		ptr->mibs[basefield##OCTETS] += addend;			\
+		u64_stats_update_end(&ptr->syncp);			\
+		preempt_enable();					\
+	} while (0)
+#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend)			\
+	do {								\
+		__typeof__(*mib[0]) *ptr;				\
+		ptr = __this_cpu_ptr((mib)[!in_softirq()]);		\
+		u64_stats_update_begin(&ptr->syncp);			\
+		ptr->mibs[basefield##PKTS]++;				\
+		ptr->mibs[basefield##OCTETS] += addend;			\
+		u64_stats_update_end(&ptr->syncp);			\
+	} while (0)
+#else
+#define SNMP_INC_STATS64_BH(mib, field)		SNMP_INC_STATS_BH(mib, field)
+#define SNMP_INC_STATS64_USER(mib, field)	SNMP_INC_STATS_USER(mib, field)
+#define SNMP_INC_STATS64(mib, field)		SNMP_INC_STATS(mib, field)
+#define SNMP_DEC_STATS64(mib, field)		SNMP_DEC_STATS(mib, field)
+#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend)
+#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend)
+#define SNMP_ADD_STATS64(mib, field, addend)	SNMP_ADD_STATS(mib, field, addend)
+#define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend)
+#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend)
+#endif
+
 #endif
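
On 32-bit kernels the SNMP_*_STATS64 macros above guard each u64 mib update
with u64_stats_update_begin()/u64_stats_update_end(); a reader folds the
per-cpu values with the matching fetch/retry pair. A minimal sketch of such a
reader, assuming a percpu struct ipstats_mib pointer (the function name is
hypothetical):

	#include <linux/percpu.h>
	#include <linux/u64_stats_sync.h>
	#include <net/snmp.h>

	static u64 example_fold_field(struct ipstats_mib __percpu *mib, int field)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu) {
			struct ipstats_mib *p = per_cpu_ptr(mib, cpu);
			unsigned int start;
			u64 val;

			do {
				/* Retries if a 32-bit writer was mid-update */
				start = u64_stats_fetch_begin(&p->syncp);
				val = p->mibs[field];
			} while (u64_stats_fetch_retry(&p->syncp, start));
			sum += val;
		}
		return sum;
	}
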
diff --git a/include/net/sock.h b/include/net/sock.h
index 731150d5..4f26f2f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -295,7 +295,8 @@
 	unsigned short		sk_ack_backlog;
 	unsigned short		sk_max_ack_backlog;
 	__u32			sk_priority;
-	struct ucred		sk_peercred;
+	struct pid		*sk_peer_pid;
+	const struct cred	*sk_peer_cred;
 	long			sk_rcvtimeo;
 	long			sk_sndtimeo;
 	struct sk_filter      	*sk_filter;
@@ -1711,19 +1712,13 @@
 static inline
 struct net *sock_net(const struct sock *sk)
 {
-#ifdef CONFIG_NET_NS
-	return sk->sk_net;
-#else
-	return &init_net;
-#endif
+	return read_pnet(&sk->sk_net);
 }
 
 static inline
 void sock_net_set(struct sock *sk, struct net *net)
 {
-#ifdef CONFIG_NET_NS
-	sk->sk_net = net;
-#endif
+	write_pnet(&sk->sk_net, net);
 }
 
 /*
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a144914..c2f96c2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -464,7 +464,7 @@
 				     __u16 *mss);
 
 extern __u32 cookie_init_timestamp(struct request_sock *req);
-extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);
+extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
 
 /* From net/ipv6/syncookies.c */
 extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
@@ -602,6 +602,17 @@
  */
 #define tcp_time_stamp		((__u32)(jiffies))
 
+#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
+
+#define TCPHDR_FIN 0x01
+#define TCPHDR_SYN 0x02
+#define TCPHDR_RST 0x04
+#define TCPHDR_PSH 0x08
+#define TCPHDR_ACK 0x10
+#define TCPHDR_URG 0x20
+#define TCPHDR_ECE 0x40
+#define TCPHDR_CWR 0x80
+
 /* This is what the send packet queuing engine uses to pass
  * TCP per-packet control information to the transmission
  * code.  We also store the host-order sequence numbers in
@@ -620,19 +631,6 @@
 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 	__u32		when;		/* used to compute rtt's	*/
 	__u8		flags;		/* TCP header flags.		*/
-
-	/* NOTE: These must match up to the flags byte in a
-	 *       real TCP header.
-	 */
-#define TCPCB_FLAG_FIN		0x01
-#define TCPCB_FLAG_SYN		0x02
-#define TCPCB_FLAG_RST		0x04
-#define TCPCB_FLAG_PSH		0x08
-#define TCPCB_FLAG_ACK		0x10
-#define TCPCB_FLAG_URG		0x20
-#define TCPCB_FLAG_ECE		0x40
-#define TCPCB_FLAG_CWR		0x80
-
 	__u8		sacked;		/* State flags for SACK/FACK.	*/
 #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
 #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
@@ -1413,7 +1411,8 @@
 	sa_family_t		family;
 	enum tcp_seq_states	state;
 	struct sock		*syn_wait_sk;
-	int			bucket, sbucket, num, uid;
+	int			bucket, offset, sbucket, num, uid;
+	loff_t			last_pos;
 };
 
 extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
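
The tcp.h hunk above moves the per-packet header flag values out of struct
tcp_skb_cb and gives them protocol-level TCPHDR_* names, with tcp_flag_byte()
reading byte 13 of the header directly. Illustrative only (the helper names
are hypothetical):

	#include <net/tcp.h>

	/* True for a pure SYN, i.e. no other flag bits set in the header. */
	static bool example_is_pure_syn(struct tcphdr *th)
	{
		return tcp_flag_byte(th) == TCPHDR_SYN;
	}

	/* Testing an individual bit works the same way. */
	static bool example_has_fin(struct tcphdr *th)
	{
		return tcp_flag_byte(th) & TCPHDR_FIN;
	}
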
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index b69e6e1..9b2c308 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -65,7 +65,7 @@
 	};
 	__be16		serviceId;	/* service ID */
 
-} __attribute__((packed));
+} __packed;
 
 #define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X)
 
@@ -120,7 +120,7 @@
 #define RXRPC_ACK_TYPE_NACK		0
 #define RXRPC_ACK_TYPE_ACK		1
 
-} __attribute__((packed));
+} __packed;
 
 /*
  * ACK packets can have a further piece of information tagged on the end
@@ -141,7 +141,7 @@
 	__be32		nonce;		/* encrypted random number */
 	__be32		min_level;	/* minimum security level */
 	__be32		__padding;	/* padding to 8-byte boundary */
-} __attribute__((packed));
+} __packed;
 
 /*****************************************************************************/
 /*
@@ -164,7 +164,7 @@
 
 	__be32		kvno;		/* Kerberos key version number */
 	__be32		ticket_len;	/* Kerberos ticket length  */
-} __attribute__((packed));
+} __packed;
 
 /*****************************************************************************/
 /*
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index b2d70d3..2591583 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -9,6 +9,7 @@
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
 #include <linux/user_namespace.h>
+#include <linux/highuid.h>
 #include <linux/cred.h>
 
 /*
@@ -82,3 +83,46 @@
 	schedule_work(&ns->destroyer);
 }
 EXPORT_SYMBOL(free_user_ns);
+
+uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid)
+{
+	struct user_namespace *tmp;
+
+	if (likely(to == cred->user->user_ns))
+		return uid;
+
+
+	/* Is cred->user the creator of the target user_ns
+	 * or the creator of one of its parents?
+	 */
+	for (tmp = to; tmp != &init_user_ns;
+	     tmp = tmp->creator->user_ns) {
+		if (cred->user == tmp->creator) {
+			return (uid_t)0;
+		}
+	}
+
+	/* No useful relationship so no mapping */
+	return overflowuid;
+}
+
+gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid)
+{
+	struct user_namespace *tmp;
+
+	if (likely(to == cred->user->user_ns))
+		return gid;
+
+	/* Is cred->user the creator of the target user_ns
+	 * or the creator of one of its parents?
+	 */
+	for (tmp = to; tmp != &init_user_ns;
+	     tmp = tmp->creator->user_ns) {
+		if (cred->user == tmp->creator) {
+			return (gid_t)0;
+		}
+	}
+
+	/* No useful relationship so no mapping */
+	return overflowgid;
+}
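
user_ns_map_uid()/user_ns_map_gid() added above translate an id for a target
user namespace: the id is returned unchanged inside the owning namespace,
collapses to 0 when the viewer's user created the target namespace (or one of
its ancestors), and otherwise falls back to overflowuid/overflowgid. A hedged
sketch of a caller, assuming the matching declarations are visible (the
function name is hypothetical):

	#include <linux/cred.h>
	#include <linux/user_namespace.h>

	static uid_t example_uid_for_receiver(struct user_namespace *receiver_ns,
					      const struct cred *sender)
	{
		/* Unrelated namespaces see overflowuid instead of the real id */
		return user_ns_map_uid(receiver_ns, sender, sender->euid);
	}
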
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b8a2f54..4ee19d0 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -980,6 +980,11 @@
  *             [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
  *           little endian output byte order is:
  *             [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
+ * - 'V' For a struct va_format which contains a format string * and va_list *,
+ *       call vsnprintf(->format, *->va_list).
+ *       Implements a "recursive vsnprintf".
+ *       Do not use this feature without some mechanism to verify the
+ *       correctness of the format string and va_list arguments.
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -1025,6 +1030,10 @@
 		break;
 	case 'U':
 		return uuid_string(buf, end, ptr, spec, fmt);
+	case 'V':
+		return buf + vsnprintf(buf, end - buf,
+				       ((struct va_format *)ptr)->fmt,
+				       *(((struct va_format *)ptr)->va));
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
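
The new %pV case above lets a single printk() re-emit a caller's format string
and argument list via struct va_format. A minimal sketch of a wrapper built on
it, assuming the usual varargs pattern (the wrapper name and prefix are
hypothetical; callers are expected to supply the trailing newline):

	#include <linux/kernel.h>

	static void example_warn(const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* The whole line goes out as one printk record */
		printk(KERN_WARNING "example: %pV", &vaf);
		va_end(args);
	}
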
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 6abdcac..8d9503a 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -2,6 +2,7 @@
 #define __BEN_VLAN_802_1Q_INC__
 
 #include <linux/if_vlan.h>
+#include <linux/u64_stats_sync.h>
 
 
 /**
@@ -21,14 +22,16 @@
  *	struct vlan_rx_stats - VLAN percpu rx stats
  *	@rx_packets: number of received packets
  *	@rx_bytes: number of received bytes
- *	@multicast: number of received multicast packets
+ *	@rx_multicast: number of received multicast packets
+ *	@syncp: synchronization point for 64bit counters
  *	@rx_errors: number of errors
  */
 struct vlan_rx_stats {
-	unsigned long rx_packets;
-	unsigned long rx_bytes;
-	unsigned long multicast;
-	unsigned long rx_errors;
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			rx_multicast;
+	struct u64_stats_sync	syncp;
+	unsigned long		rx_errors;
 };
 
 /**
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 50f58f5..1b9406a 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -41,9 +41,9 @@
 	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
 
-	rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
-			       smp_processor_id());
+	rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);
 
+	u64_stats_update_begin(&rx_stats->syncp);
 	rx_stats->rx_packets++;
 	rx_stats->rx_bytes += skb->len;
 
@@ -51,7 +51,7 @@
 	case PACKET_BROADCAST:
 		break;
 	case PACKET_MULTICAST:
-		rx_stats->multicast++;
+		rx_stats->rx_multicast++;
 		break;
 	case PACKET_OTHERHOST:
 		/* Our lower layer thinks this is not local, let's make sure.
@@ -62,6 +62,7 @@
 			skb->pkt_type = PACKET_HOST;
 		break;
 	}
+	u64_stats_update_end(&rx_stats->syncp);
 	return 0;
 }
 
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 5298426..7865a4c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -166,6 +166,7 @@
 
 	rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
 			       smp_processor_id());
+	u64_stats_update_begin(&rx_stats->syncp);
 	rx_stats->rx_packets++;
 	rx_stats->rx_bytes += skb->len;
 
@@ -182,7 +183,7 @@
 		break;
 
 	case PACKET_MULTICAST:
-		rx_stats->multicast++;
+		rx_stats->rx_multicast++;
 		break;
 
 	case PACKET_OTHERHOST:
@@ -197,6 +198,7 @@
 	default:
 		break;
 	}
+	u64_stats_update_end(&rx_stats->syncp);
 
 	vlan_set_encap_proto(skb, vhdr);
 
@@ -801,27 +803,35 @@
 	return dev_ethtool_get_flags(vlan->real_dev);
 }
 
-static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
-	struct net_device_stats *stats = &dev->stats;
-
-	dev_txq_stats_fold(dev, stats);
+	dev_txq_stats_fold(dev, (struct net_device_stats *)stats);
 
 	if (vlan_dev_info(dev)->vlan_rx_stats) {
-		struct vlan_rx_stats *p, rx = {0};
+		struct vlan_rx_stats *p, accum = {0};
 		int i;
 
 		for_each_possible_cpu(i) {
+			u64 rxpackets, rxbytes, rxmulticast;
+			unsigned int start;
+
 			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
-			rx.rx_packets += p->rx_packets;
-			rx.rx_bytes   += p->rx_bytes;
-			rx.rx_errors  += p->rx_errors;
-			rx.multicast  += p->multicast;
+			do {
+				start = u64_stats_fetch_begin_bh(&p->syncp);
+				rxpackets	= p->rx_packets;
+				rxbytes		= p->rx_bytes;
+				rxmulticast	= p->rx_multicast;
+			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+			accum.rx_packets += rxpackets;
+			accum.rx_bytes   += rxbytes;
+			accum.rx_multicast += rxmulticast;
+			/* rx_errors is an unsigned long, not protected by syncp */
+			accum.rx_errors  += p->rx_errors;
 		}
-		stats->rx_packets = rx.rx_packets;
-		stats->rx_bytes   = rx.rx_bytes;
-		stats->rx_errors  = rx.rx_errors;
-		stats->multicast  = rx.multicast;
+		stats->rx_packets = accum.rx_packets;
+		stats->rx_bytes   = accum.rx_bytes;
+		stats->rx_errors  = accum.rx_errors;
+		stats->multicast  = accum.rx_multicast;
 	}
 	return stats;
 }
@@ -848,7 +858,7 @@
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
-	.ndo_get_stats		= vlan_dev_get_stats,
+	.ndo_get_stats64	= vlan_dev_get_stats64,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
@@ -872,7 +882,7 @@
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
-	.ndo_get_stats		= vlan_dev_get_stats,
+	.ndo_get_stats64	= vlan_dev_get_stats64,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
@@ -897,7 +907,7 @@
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
-	.ndo_get_stats		= vlan_dev_get_stats,
+	.ndo_get_stats64	= vlan_dev_get_stats64,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
@@ -922,7 +932,7 @@
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
-	.ndo_get_stats		= vlan_dev_get_stats,
+	.ndo_get_stats64	= vlan_dev_get_stats64,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index afead35..80e280f 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -278,25 +278,27 @@
 {
 	struct net_device *vlandev = (struct net_device *) seq->private;
 	const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
-	const struct net_device_stats *stats;
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *stats;
 	static const char fmt[] = "%30s %12lu\n";
+	static const char fmt64[] = "%30s %12llu\n";
 	int i;
 
 	if (!is_vlan_dev(vlandev))
 		return 0;
 
-	stats = dev_get_stats(vlandev);
+	stats = dev_get_stats(vlandev, &temp);
 	seq_printf(seq,
 		   "%s  VID: %d	 REORDER_HDR: %i  dev->priv_flags: %hx\n",
 		   vlandev->name, dev_info->vlan_id,
 		   (int)(dev_info->flags & 1), vlandev->priv_flags);
 
-	seq_printf(seq, fmt, "total frames received", stats->rx_packets);
-	seq_printf(seq, fmt, "total bytes received", stats->rx_bytes);
-	seq_printf(seq, fmt, "Broadcast/Multicast Rcvd", stats->multicast);
+	seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
+	seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
+	seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast);
 	seq_puts(seq, "\n");
-	seq_printf(seq, fmt, "total frames transmitted", stats->tx_packets);
-	seq_printf(seq, fmt, "total bytes transmitted", stats->tx_bytes);
+	seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
+	seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
 	seq_printf(seq, fmt, "total headroom inc",
 		   dev_info->cnt_inc_headroom_on_tx);
 	seq_printf(seq, fmt, "total encap on xmit",
diff --git a/net/Makefile b/net/Makefile
index cb7bdc1..41d4200 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -50,7 +50,7 @@
 obj-$(CONFIG_IP_DCCP)		+= dccp/
 obj-$(CONFIG_IP_SCTP)		+= sctp/
 obj-$(CONFIG_RDS)		+= rds/
-obj-y				+= wireless/
+obj-$(CONFIG_WIRELESS)		+= wireless/
 obj-$(CONFIG_MAC80211)		+= mac80211/
 obj-$(CONFIG_TIPC)		+= tipc/
 obj-$(CONFIG_NETLABEL)		+= netlabel/
@@ -61,7 +61,7 @@
 ifneq ($(CONFIG_DCB),)
 obj-y				+= dcb/
 endif
-obj-y				+= ieee802154/
+obj-$(CONFIG_IEEE802154)	+= ieee802154/
 
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL)		+= sysctl_net.o
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 313aba1..95fdd11 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -522,7 +522,7 @@
 	error = ip_route_output_key(&init_net, &rt, &fl);
 	if (error)
 		return error;
-	neigh = __neigh_lookup(&clip_tbl, &ip, rt->u.dst.dev, 1);
+	neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1);
 	ip_rt_put(rt);
 	if (!neigh)
 		return -ENOMEM;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 0d9e506..70672544 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -86,26 +86,26 @@
 	__u8  ctrl;
 	__u8  uuid_size;
 	__u8  service[0];
-} __attribute__((packed));
+} __packed;
 
 struct bnep_set_filter_req {
 	__u8  type;
 	__u8  ctrl;
 	__be16 len;
 	__u8  list[0];
-} __attribute__((packed));
+} __packed;
 
 struct bnep_control_rsp {
 	__u8  type;
 	__u8  ctrl;
 	__be16 resp;
-} __attribute__((packed));
+} __packed;
 
 struct bnep_ext_hdr {
 	__u8  type;
 	__u8  len;
 	__u8  data[0];
-} __attribute__((packed));
+} __packed;
 
 /* BNEP ioctl defines */
 #define BNEPCONNADD	_IOW('B', 200, int)
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 76357b5..c8436fa 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -63,7 +63,6 @@
 		goto err_out4;
 
 	brioctl_set(br_ioctl_deviceless_stub);
-	br_handle_frame_hook = br_handle_frame;
 
 #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
 	br_fdb_test_addr_hook = br_fdb_test_addr;
@@ -100,7 +99,6 @@
 	br_fdb_test_addr_hook = NULL;
 #endif
 
-	br_handle_frame_hook = NULL;
 	br_fdb_fini();
 }
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index eedf2c9..075c435 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -38,8 +38,10 @@
 	}
 #endif
 
+	u64_stats_update_begin(&brstats->syncp);
 	brstats->tx_packets++;
 	brstats->tx_bytes += skb->len;
+	u64_stats_update_end(&brstats->syncp);
 
 	BR_INPUT_SKB_CB(skb)->brdev = dev;
 
@@ -47,6 +49,10 @@
 	skb_pull(skb, ETH_HLEN);
 
 	if (is_multicast_ether_addr(dest)) {
+		if (unlikely(netpoll_tx_running(dev))) {
+			br_flood_deliver(br, skb);
+			goto out;
+		}
 		if (br_multicast_rcv(br, NULL, skb))
 			goto out;
 
@@ -92,21 +98,25 @@
 	return 0;
 }
 
-static struct net_device_stats *br_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
+						struct rtnl_link_stats64 *stats)
 {
 	struct net_bridge *br = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
-	struct br_cpu_netstats sum = { 0 };
+	struct br_cpu_netstats tmp, sum = { 0 };
 	unsigned int cpu;
 
 	for_each_possible_cpu(cpu) {
+		unsigned int start;
 		const struct br_cpu_netstats *bstats
 			= per_cpu_ptr(br->stats, cpu);
-
-		sum.tx_bytes   += bstats->tx_bytes;
-		sum.tx_packets += bstats->tx_packets;
-		sum.rx_bytes   += bstats->rx_bytes;
-		sum.rx_packets += bstats->rx_packets;
+		do {
+			start = u64_stats_fetch_begin(&bstats->syncp);
+			memcpy(&tmp, bstats, sizeof(tmp));
+		} while (u64_stats_fetch_retry(&bstats->syncp, start));
+		sum.tx_bytes   += tmp.tx_bytes;
+		sum.tx_packets += tmp.tx_packets;
+		sum.rx_bytes   += tmp.rx_bytes;
+		sum.rx_packets += tmp.rx_packets;
 	}
 
 	stats->tx_bytes   = sum.tx_bytes;
@@ -127,7 +137,7 @@
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 	/* remember the MTU in the rtable for PMTU */
-	br->fake_rtable.u.dst.metrics[RTAX_MTU - 1] = new_mtu;
+	br->fake_rtable.dst.metrics[RTAX_MTU - 1] = new_mtu;
 #endif
 
 	return 0;
@@ -199,73 +209,81 @@
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static bool br_devices_support_netpoll(struct net_bridge *br)
-{
-	struct net_bridge_port *p;
-	bool ret = true;
-	int count = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&br->lock, flags);
-	list_for_each_entry(p, &br->port_list, list) {
-		count++;
-		if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
-		    !p->dev->netdev_ops->ndo_poll_controller)
-			ret = false;
-	}
-	spin_unlock_irqrestore(&br->lock, flags);
-	return count != 0 && ret;
-}
-
 static void br_poll_controller(struct net_device *br_dev)
 {
-	struct netpoll *np = br_dev->npinfo->netpoll;
-
-	if (np->real_dev != br_dev)
-		netpoll_poll_dev(np->real_dev);
 }
 
-void br_netpoll_cleanup(struct net_device *dev)
+static void br_netpoll_cleanup(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
 	struct net_bridge_port *p, *n;
-	const struct net_device_ops *ops;
 
-	br->dev->npinfo = NULL;
 	list_for_each_entry_safe(p, n, &br->port_list, list) {
-		if (p->dev) {
-			ops = p->dev->netdev_ops;
-			if (ops->ndo_netpoll_cleanup)
-				ops->ndo_netpoll_cleanup(p->dev);
-			else
-				p->dev->npinfo = NULL;
-		}
+		br_netpoll_disable(p);
 	}
 }
 
-void br_netpoll_disable(struct net_bridge *br,
-			struct net_device *dev)
+static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
 {
-	if (br_devices_support_netpoll(br))
-		br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-	if (dev->netdev_ops->ndo_netpoll_cleanup)
-		dev->netdev_ops->ndo_netpoll_cleanup(dev);
-	else
-		dev->npinfo = NULL;
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_port *p, *n;
+	int err = 0;
+
+	list_for_each_entry_safe(p, n, &br->port_list, list) {
+		if (!p->dev)
+			continue;
+
+		err = br_netpoll_enable(p);
+		if (err)
+			goto fail;
+	}
+
+out:
+	return err;
+
+fail:
+	br_netpoll_cleanup(dev);
+	goto out;
 }
 
-void br_netpoll_enable(struct net_bridge *br,
-		       struct net_device *dev)
+int br_netpoll_enable(struct net_bridge_port *p)
 {
-	if (br_devices_support_netpoll(br)) {
-		br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-		if (br->dev->npinfo)
-			dev->npinfo = br->dev->npinfo;
-	} else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) {
-		br->dev->priv_flags |= IFF_DISABLE_NETPOLL;
-		br_info(br,"new device %s does not support netpoll (disabling)",
-			dev->name);
+	struct netpoll *np;
+	int err = 0;
+
+	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
+	err = -ENOMEM;
+	if (!np)
+		goto out;
+
+	np->dev = p->dev;
+
+	err = __netpoll_setup(np);
+	if (err) {
+		kfree(np);
+		goto out;
 	}
+
+	p->np = np;
+
+out:
+	return err;
+}
+
+void br_netpoll_disable(struct net_bridge_port *p)
+{
+	struct netpoll *np = p->np;
+
+	if (!np)
+		return;
+
+	p->np = NULL;
+
+	/* Wait for transmitting packets to finish before freeing. */
+	synchronize_rcu_bh();
+
+	__netpoll_cleanup(np);
+	kfree(np);
 }
 
 #endif
@@ -288,12 +306,13 @@
 	.ndo_open		 = br_dev_open,
 	.ndo_stop		 = br_dev_stop,
 	.ndo_start_xmit		 = br_dev_xmit,
-	.ndo_get_stats		 = br_get_stats,
+	.ndo_get_stats64	 = br_get_stats64,
 	.ndo_set_mac_address	 = br_set_mac_address,
 	.ndo_set_multicast_list	 = br_dev_set_multicast_list,
 	.ndo_change_mtu		 = br_change_mtu,
 	.ndo_do_ioctl		 = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_netpoll_setup	 = br_netpoll_setup,
 	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
 	.ndo_poll_controller	 = br_poll_controller,
 #endif
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index b01dde3..a744296 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -240,11 +240,11 @@
 	struct net_bridge_fdb_entry *fdb;
 	int ret;
 
-	if (!dev->br_port)
+	if (!br_port_exists(dev))
 		return 0;
 
 	rcu_read_lock();
-	fdb = __br_fdb_get(dev->br_port->br, addr);
+	fdb = __br_fdb_get(br_port_get_rcu(dev)->br, addr);
 	ret = fdb && fdb->dst->dev != dev &&
 		fdb->dst->state == BR_STATE_FORWARDING;
 	rcu_read_unlock();
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a4e72a8..cbfe87f 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -50,14 +50,7 @@
 			kfree_skb(skb);
 		else {
 			skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-			if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
-				netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
-				skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
-			} else
-#endif
-				dev_queue_xmit(skb);
+			dev_queue_xmit(skb);
 		}
 	}
 
@@ -73,23 +66,20 @@
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	struct net_bridge *br = to->br;
-	if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
-		struct netpoll *np;
-		to->dev->npinfo = skb->dev->npinfo;
-		np = skb->dev->npinfo->netpoll;
-		np->real_dev = np->dev = to->dev;
-		to->dev->priv_flags |= IFF_IN_NETPOLL;
-	}
-#endif
 	skb->dev = to->dev;
+
+	if (unlikely(netpoll_tx_running(to->dev))) {
+		if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
+			kfree_skb(skb);
+		else {
+			skb_push(skb, ETH_HLEN);
+			br_netpoll_send_skb(to, skb);
+		}
+		return;
+	}
+
 	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 		br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	if (skb->dev->npinfo)
-		skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
 }
 
 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 18b245e..c03d2c3 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -147,14 +147,17 @@
 
 	list_del_rcu(&p->list);
 
-	rcu_assign_pointer(dev->br_port, NULL);
+	dev->priv_flags &= ~IFF_BRIDGE_PORT;
+
+	netdev_rx_handler_unregister(dev);
 
 	br_multicast_del_port(p);
 
 	kobject_uevent(&p->kobj, KOBJ_REMOVE);
 	kobject_del(&p->kobj);
 
-	br_netpoll_disable(br, dev);
+	br_netpoll_disable(p);
+
 	call_rcu(&p->rcu, destroy_nbp_rcu);
 }
 
@@ -167,8 +170,6 @@
 		del_nbp(p);
 	}
 
-	br_netpoll_cleanup(br->dev);
-
 	del_timer_sync(&br->gc_timer);
 
 	br_sysfs_delbr(br->dev);
@@ -400,7 +401,7 @@
 		return -ELOOP;
 
 	/* Device is already being bridged */
-	if (dev->br_port != NULL)
+	if (br_port_exists(dev))
 		return -EBUSY;
 
 	/* No bridging devices that dislike that (e.g. wireless) */
@@ -428,7 +429,15 @@
 	if (err)
 		goto err2;
 
-	rcu_assign_pointer(dev->br_port, p);
+	if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
+		goto err3;
+
+	err = netdev_rx_handler_register(dev, br_handle_frame, p);
+	if (err)
+		goto err3;
+
+	dev->priv_flags |= IFF_BRIDGE_PORT;
+
 	dev_disable_lro(dev);
 
 	list_add_rcu(&p->list, &br->port_list);
@@ -448,9 +457,9 @@
 
 	kobject_uevent(&p->kobj, KOBJ_ADD);
 
-	br_netpoll_enable(br, dev);
-
 	return 0;
+err3:
+	sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
 	br_fdb_delete_by_port(br, p, 1);
 err1:
@@ -467,9 +476,13 @@
 /* called with RTNL */
 int br_del_if(struct net_bridge *br, struct net_device *dev)
 {
-	struct net_bridge_port *p = dev->br_port;
+	struct net_bridge_port *p;
 
-	if (!p || p->br != br)
+	if (!br_port_exists(dev))
+		return -EINVAL;
+
+	p = br_port_get(dev);
+	if (p->br != br)
 		return -EINVAL;
 
 	del_nbp(p);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index d36e700..5fc1c5b 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -27,8 +27,10 @@
 	struct net_bridge *br = netdev_priv(brdev);
 	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
 
+	u64_stats_update_begin(&brstats->syncp);
 	brstats->rx_packets++;
 	brstats->rx_bytes += skb->len;
+	u64_stats_update_end(&brstats->syncp);
 
 	indev = skb->dev;
 	skb->dev = brdev;
@@ -41,7 +43,7 @@
 int br_handle_frame_finish(struct sk_buff *skb)
 {
 	const unsigned char *dest = eth_hdr(skb)->h_dest;
-	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
 	struct net_bridge *br;
 	struct net_bridge_fdb_entry *dst;
 	struct net_bridge_mdb_entry *mdst;
@@ -111,10 +113,9 @@
 /* note: already called with rcu_read_lock (preempt_disabled) */
 static int br_handle_local_finish(struct sk_buff *skb)
 {
-	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
 
-	if (p)
-		br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+	br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
 	return 0;	 /* process further */
 }
 
@@ -131,15 +132,19 @@
 }
 
 /*
- * Called via br_handle_frame_hook.
  * Return NULL if skb is handled
- * note: already called with rcu_read_lock (preempt_disabled)
+ * note: already called with rcu_read_lock (preempt_disabled) from
+ * netif_receive_skb
  */
-struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
+struct sk_buff *br_handle_frame(struct sk_buff *skb)
 {
+	struct net_bridge_port *p;
 	const unsigned char *dest = eth_hdr(skb)->h_dest;
 	int (*rhook)(struct sk_buff *skb);
 
+	if (skb->pkt_type == PACKET_LOOPBACK)
+		return skb;
+
 	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
 		goto drop;
 
@@ -147,6 +152,8 @@
 	if (!skb)
 		return NULL;
 
+	p = br_port_get_rcu(skb->dev);
+
 	if (unlikely(is_link_local(dest))) {
 		/* Pause frames shouldn't be passed up by driver anyway */
 		if (skb->protocol == htons(ETH_P_PAUSE))
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 8fb75f8..2c911c0 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -55,6 +55,9 @@
 static int brnf_filter_vlan_tagged __read_mostly = 0;
 static int brnf_filter_pppoe_tagged __read_mostly = 0;
 #else
+#define brnf_call_iptables 1
+#define brnf_call_ip6tables 1
+#define brnf_call_arptables 1
 #define brnf_filter_vlan_tagged 0
 #define brnf_filter_pppoe_tagged 0
 #endif
@@ -117,26 +120,27 @@
 {
 	struct rtable *rt = &br->fake_rtable;
 
-	atomic_set(&rt->u.dst.__refcnt, 1);
-	rt->u.dst.dev = br->dev;
-	rt->u.dst.path = &rt->u.dst;
-	rt->u.dst.metrics[RTAX_MTU - 1] = 1500;
-	rt->u.dst.flags	= DST_NOXFRM;
-	rt->u.dst.ops = &fake_dst_ops;
+	atomic_set(&rt->dst.__refcnt, 1);
+	rt->dst.dev = br->dev;
+	rt->dst.path = &rt->dst;
+	rt->dst.metrics[RTAX_MTU - 1] = 1500;
+	rt->dst.flags	= DST_NOXFRM;
+	rt->dst.ops = &fake_dst_ops;
 }
 
 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
 {
-	struct net_bridge_port *port = rcu_dereference(dev->br_port);
-
-	return port ? &port->br->fake_rtable : NULL;
+	if (!br_port_exists(dev))
+		return NULL;
+	return &br_port_get_rcu(dev)->br->fake_rtable;
 }
 
 static inline struct net_device *bridge_parent(const struct net_device *dev)
 {
-	struct net_bridge_port *port = rcu_dereference(dev->br_port);
+	if (!br_port_exists(dev))
+		return NULL;
 
-	return port ? port->br->dev : NULL;
+	return br_port_get_rcu(dev)->br->dev;
 }
 
 static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
@@ -244,8 +248,7 @@
 		kfree_skb(skb);
 		return 0;
 	}
-	dst_hold(&rt->u.dst);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set_noref(skb, &rt->dst);
 
 	skb->dev = nf_bridge->physindev;
 	nf_bridge_update_protocol(skb);
@@ -396,8 +399,7 @@
 			kfree_skb(skb);
 			return 0;
 		}
-		dst_hold(&rt->u.dst);
-		skb_dst_set(skb, &rt->u.dst);
+		skb_dst_set_noref(skb, &rt->dst);
 	}
 
 	skb->dev = nf_bridge->physindev;
@@ -545,25 +547,30 @@
 				      const struct net_device *out,
 				      int (*okfn)(struct sk_buff *))
 {
+	struct net_bridge_port *p;
+	struct net_bridge *br;
 	struct iphdr *iph;
 	__u32 len = nf_bridge_encap_header_len(skb);
 
 	if (unlikely(!pskb_may_pull(skb, len)))
 		goto out;
 
+	p = br_port_get_rcu(in);
+	if (p == NULL)
+		goto out;
+	br = p->br;
+
 	if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
 	    IS_PPPOE_IPV6(skb)) {
-#ifdef CONFIG_SYSCTL
-		if (!brnf_call_ip6tables)
+		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
 			return NF_ACCEPT;
-#endif
+
 		nf_bridge_pull_encap_header_rcsum(skb);
 		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
 	}
-#ifdef CONFIG_SYSCTL
-	if (!brnf_call_iptables)
+
+	if (!brnf_call_iptables && !br->nf_call_iptables)
 		return NF_ACCEPT;
-#endif
 
 	if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
 	    !IS_PPPOE_IP(skb))
@@ -719,12 +726,17 @@
 				      const struct net_device *out,
 				      int (*okfn)(struct sk_buff *))
 {
+	struct net_bridge_port *p;
+	struct net_bridge *br;
 	struct net_device **d = (struct net_device **)(skb->cb);
 
-#ifdef CONFIG_SYSCTL
-	if (!brnf_call_arptables)
+	p = br_port_get_rcu(out);
+	if (p == NULL)
 		return NF_ACCEPT;
-#endif
+	br = p->br;
+
+	if (!brnf_call_arptables && !br->nf_call_arptables)
+		return NF_ACCEPT;
 
 	if (skb->protocol != htons(ETH_P_ARP)) {
 		if (!IS_VLAN_ARP(skb))
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index fe0a790..4a6a378 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -120,10 +120,11 @@
 	idx = 0;
 	for_each_netdev(net, dev) {
 		/* not a bridge port */
-		if (dev->br_port == NULL || idx < cb->args[0])
+		if (!br_port_exists(dev) || idx < cb->args[0])
 			goto skip;
 
-		if (br_fill_ifinfo(skb, dev->br_port, NETLINK_CB(cb->skb).pid,
+		if (br_fill_ifinfo(skb, br_port_get(dev),
+				   NETLINK_CB(cb->skb).pid,
 				   cb->nlh->nlmsg_seq, RTM_NEWLINK,
 				   NLM_F_MULTI) < 0)
 			break;
@@ -168,9 +169,9 @@
 	if (!dev)
 		return -ENODEV;
 
-	p = dev->br_port;
-	if (!p)
+	if (!br_port_exists(dev))
 		return -EINVAL;
+	p = br_port_get(dev);
 
 	/* if kernel STP is running, don't allow changes */
 	if (p->br->stp_enabled == BR_KERNEL_STP)
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 717e1fd..404d4e1 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -32,14 +32,15 @@
 static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
 {
 	struct net_device *dev = ptr;
-	struct net_bridge_port *p = dev->br_port;
+	struct net_bridge_port *p = br_port_get(dev);
 	struct net_bridge *br;
 	int err;
 
 	/* not a port of a bridge */
-	if (p == NULL)
+	if (!br_port_exists(dev))
 		return NOTIFY_DONE;
 
+	p = br_port_get(dev);
 	br = p->br;
 
 	switch (event) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 0f4a74b..75c90ed 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -15,6 +15,8 @@
 
 #include <linux/netdevice.h>
 #include <linux/if_bridge.h>
+#include <linux/netpoll.h>
+#include <linux/u64_stats_sync.h>
 #include <net/route.h>
 
 #define BR_HASH_BITS 8
@@ -143,13 +145,23 @@
 #ifdef CONFIG_SYSFS
 	char				sysfs_name[IFNAMSIZ];
 #endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	struct netpoll			*np;
+#endif
 };
 
+#define br_port_get_rcu(dev) \
+	((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data))
+#define br_port_get(dev) ((struct net_bridge_port *) dev->rx_handler_data)
+#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
+
 struct br_cpu_netstats {
-	unsigned long	rx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_packets;
-	unsigned long	tx_bytes;
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			tx_packets;
+	u64			tx_bytes;
+	struct u64_stats_sync	syncp;
 };
 
 struct net_bridge
@@ -164,6 +176,9 @@
 	unsigned long			feature_mask;
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct rtable 			fake_rtable;
+	bool				nf_call_iptables;
+	bool				nf_call_ip6tables;
+	bool				nf_call_arptables;
 #endif
 	unsigned long			flags;
 #define BR_SET_MAC_ADDR		0x00000001
@@ -273,16 +288,41 @@
 extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
 			       struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
-extern void br_netpoll_cleanup(struct net_device *dev);
-extern void br_netpoll_enable(struct net_bridge *br,
-			      struct net_device *dev);
-extern void br_netpoll_disable(struct net_bridge *br,
-			       struct net_device *dev);
-#else
-#define br_netpoll_cleanup(br)
-#define br_netpoll_enable(br, dev)
-#define br_netpoll_disable(br, dev)
+static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
+{
+	return br->dev->npinfo;
+}
 
+static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
+				       struct sk_buff *skb)
+{
+	struct netpoll *np = p->np;
+
+	if (np)
+		netpoll_send_skb(np, skb);
+}
+
+extern int br_netpoll_enable(struct net_bridge_port *p);
+extern void br_netpoll_disable(struct net_bridge_port *p);
+#else
+static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
+{
+	return NULL;
+}
+
+static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
+				       struct sk_buff *skb)
+{
+}
+
+static inline int br_netpoll_enable(struct net_bridge_port *p)
+{
+	return 0;
+}
+
+static inline void br_netpoll_disable(struct net_bridge_port *p)
+{
+}
 #endif
 
 /* br_fdb.c */
@@ -331,8 +371,7 @@
 
 /* br_input.c */
 extern int br_handle_frame_finish(struct sk_buff *skb);
-extern struct sk_buff *br_handle_frame(struct net_bridge_port *p,
-				       struct sk_buff *skb);
+extern struct sk_buff *br_handle_frame(struct sk_buff *skb);
 
 /* br_ioctl.c */
 extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
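
br_private.h now exposes the port through the device's rx_handler_data plus
the IFF_BRIDGE_PORT flag instead of dev->br_port. Callers are expected to
check br_port_exists() and dereference with br_port_get_rcu() under RCU, as
the ebt_ulog and ebtables hunks later in this diff do. A hedged sketch of that
pattern, assuming access to the bridge-internal net/bridge/br_private.h (the
function is hypothetical):

	static bool example_same_bridge(struct net_device *a, struct net_device *b)
	{
		bool ret = false;

		rcu_read_lock();
		if (br_port_exists(a) && br_port_exists(b))
			ret = br_port_get_rcu(a)->br == br_port_get_rcu(b)->br;
		rcu_read_unlock();
		return ret;
	}
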
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 217bd22..70aecb4 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -137,12 +137,13 @@
 		struct net_device *dev)
 {
 	const unsigned char *dest = eth_hdr(skb)->h_dest;
-	struct net_bridge_port *p = rcu_dereference(dev->br_port);
+	struct net_bridge_port *p;
 	struct net_bridge *br;
 	const unsigned char *buf;
 
-	if (!p)
+	if (!br_port_exists(dev))
 		goto err;
+	p = br_port_get_rcu(dev);
 
 	if (!pskb_may_pull(skb, 4))
 		goto err;
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 486b8f3..5c1e555 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -611,6 +611,73 @@
 		   show_multicast_startup_query_interval,
 		   store_multicast_startup_query_interval);
 #endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+static ssize_t show_nf_call_iptables(
+	struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->nf_call_iptables);
+}
+
+static int set_nf_call_iptables(struct net_bridge *br, unsigned long val)
+{
+	br->nf_call_iptables = val ? true : false;
+	return 0;
+}
+
+static ssize_t store_nf_call_iptables(
+	struct device *d, struct device_attribute *attr, const char *buf,
+	size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_nf_call_iptables);
+}
+static DEVICE_ATTR(nf_call_iptables, S_IRUGO | S_IWUSR,
+		   show_nf_call_iptables, store_nf_call_iptables);
+
+static ssize_t show_nf_call_ip6tables(
+	struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->nf_call_ip6tables);
+}
+
+static int set_nf_call_ip6tables(struct net_bridge *br, unsigned long val)
+{
+	br->nf_call_ip6tables = val ? true : false;
+	return 0;
+}
+
+static ssize_t store_nf_call_ip6tables(
+	struct device *d, struct device_attribute *attr, const char *buf,
+	size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_nf_call_ip6tables);
+}
+static DEVICE_ATTR(nf_call_ip6tables, S_IRUGO | S_IWUSR,
+		   show_nf_call_ip6tables, store_nf_call_ip6tables);
+
+static ssize_t show_nf_call_arptables(
+	struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->nf_call_arptables);
+}
+
+static int set_nf_call_arptables(struct net_bridge *br, unsigned long val)
+{
+	br->nf_call_arptables = val ? true : false;
+	return 0;
+}
+
+static ssize_t store_nf_call_arptables(
+	struct device *d, struct device_attribute *attr, const char *buf,
+	size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_nf_call_arptables);
+}
+static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR,
+		   show_nf_call_arptables, store_nf_call_arptables);
+#endif
 
 static struct attribute *bridge_attrs[] = {
 	&dev_attr_forward_delay.attr,
@@ -645,6 +712,11 @@
 	&dev_attr_multicast_query_response_interval.attr,
 	&dev_attr_multicast_startup_query_interval.attr,
 #endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+	&dev_attr_nf_call_iptables.attr,
+	&dev_attr_nf_call_ip6tables.attr,
+	&dev_attr_nf_call_arptables.attr,
+#endif
 	NULL
 };
 
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index 9e19166..46624bb 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -24,8 +24,9 @@
 		return EBT_DROP;
 
 	if (par->hooknum != NF_BR_BROUTING)
+		/* rcu_read_lock()ed by nf_hook_slow */
 		memcpy(eth_hdr(skb)->h_dest,
-		       par->in->br_port->br->dev->dev_addr, ETH_ALEN);
+		       br_port_get_rcu(par->in)->br->dev->dev_addr, ETH_ALEN);
 	else
 		memcpy(eth_hdr(skb)->h_dest, par->in->dev_addr, ETH_ALEN);
 	skb->pkt_type = PACKET_HOST;
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ae3c7ce..26377e9 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -177,8 +177,9 @@
 	if (in) {
 		strcpy(pm->physindev, in->name);
 		/* If in isn't a bridge, then physindev==indev */
-		if (in->br_port)
-			strcpy(pm->indev, in->br_port->br->dev->name);
+		if (br_port_exists(in))
+			/* rcu_read_lock()ed by nf_hook_slow */
+			strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
 		else
 			strcpy(pm->indev, in->name);
 	} else
@@ -187,7 +188,8 @@
 	if (out) {
 		/* If out exists, then out is a bridge port */
 		strcpy(pm->physoutdev, out->name);
-		strcpy(pm->outdev, out->br_port->br->dev->name);
+		/* rcu_read_lock()ed by nf_hook_slow */
+		strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
 	} else
 		pm->outdev[0] = pm->physoutdev[0] = '\0';
 
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 59ca00e..bcc102e 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -140,11 +140,14 @@
 		return 1;
 	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
 		return 1;
-	if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
-	   e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
+	/* rcu_read_lock()ed by nf_hook_slow */
+	if (in && br_port_exists(in) &&
+	    FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev),
+		   EBT_ILOGICALIN))
 		return 1;
-	if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
-	   e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
+	if (out && br_port_exists(out) &&
+	    FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev),
+		   EBT_ILOGICALOUT))
 		return 1;
 
 	if (e->bitmask & EBT_SOURCEMAC) {
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index ed65178..529750d 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -21,19 +21,18 @@
 	See Documentation/networking/caif for a further explanation on how to
 	use and configure CAIF.
 
-if CAIF
-
 config  CAIF_DEBUG
 	bool "Enable Debug"
+	depends on CAIF
 	default n
 	--- help ---
 	Enable the inclusion of debug code in the CAIF stack.
 	Be aware that doing this will impact performance.
 	If unsure say N.
 
-
 config CAIF_NETDEV
 	tristate "CAIF GPRS Network device"
+	depends on CAIF
 	default CAIF
 	---help---
 	Say Y if you will be using a CAIF based GPRS network device.
@@ -41,5 +40,3 @@
 	If you select to build it as a built-in then the main CAIF device must
 	also be a built-in.
 	If unsure say Y.
-
-endif
diff --git a/net/caif/Makefile b/net/caif/Makefile
index 34852af..f87481f 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -1,23 +1,13 @@
-ifeq ($(CONFIG_CAIF_DEBUG),1)
-CAIF_DBG_FLAGS := -DDEBUG
+ifeq ($(CONFIG_CAIF_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
 endif
 
-ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
-
 caif-objs := caif_dev.o \
 	cfcnfg.o cfmuxl.o cfctrl.o  \
 	cffrml.o cfveil.o cfdbgl.o\
 	cfserl.o cfdgml.o  \
 	cfrfml.o cfvidl.o cfutill.o \
 	cfsrvl.o cfpkt_skbuff.o caif_config_util.o
-clean-dirs:= .tmp_versions
-
-clean-files:= \
-	Module.symvers \
-	modules.order \
-	*.cmd \
-	*.o \
-	*~
 
 obj-$(CONFIG_CAIF) += caif.o
 obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index 6f36580..76ae683 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -80,6 +80,11 @@
 		       l->u.utility.paramlen);
 
 		break;
+	case CAIFPROTO_DEBUG:
+		l->linktype = CFCTRL_SRV_DBG;
+		l->endpoint = s->sockaddr.u.dbg.service;
+		l->chtype = s->sockaddr.u.dbg.type;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index e2b86f1..0b586e9 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -255,7 +255,7 @@
 			pref = CFPHYPREF_HIGH_BW;
 			break;
 		}
-
+		dev_hold(dev);
 		cfcnfg_add_phy_layer(get_caif_conf(),
 				     phy_type,
 				     dev,
@@ -285,6 +285,7 @@
 		caifd->layer.up->ctrlcmd(caifd->layer.up,
 					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
 					 caifd->layer.id);
+		might_sleep();
 		res = wait_event_interruptible_timeout(caifd->event,
 					atomic_read(&caifd->in_use) == 0,
 					TIMEOUT);
@@ -300,6 +301,7 @@
 				   "Unregistering an active CAIF device: %s\n",
 				   __func__, dev->name);
 		cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
+		dev_put(dev);
 		atomic_set(&caifd->state, what);
 		break;
 
@@ -326,7 +328,8 @@
 EXPORT_SYMBOL(get_caif_conf);
 
 int caif_connect_client(struct caif_connect_request *conn_req,
-			   struct cflayer *client_layer)
+			struct cflayer *client_layer, int *ifindex,
+			int *headroom, int *tailroom)
 {
 	struct cfctrl_link_param param;
 	int ret;
@@ -334,8 +337,9 @@
 	if (ret)
 		return ret;
 	/* Hook up the adaptation layer. */
-	return cfcnfg_add_adaptation_layer(get_caif_conf(),
-						&param, client_layer);
+	return cfcnfg_add_adaptation_layer(get_caif_conf(), &param,
+					client_layer, ifindex,
+					headroom, tailroom);
 }
 EXPORT_SYMBOL(caif_connect_client);
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 3d0e095..8ce9047 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -28,8 +28,8 @@
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NETPROTO(AF_CAIF);
 
-#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10)
-#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100)
+#define CAIF_DEF_SNDBUF (4096*10)
+#define CAIF_DEF_RCVBUF (4096*100)
 
 /*
  * CAIF state is re-using the TCP socket states.
@@ -76,6 +76,7 @@
 	struct caif_connect_request conn_req;
 	struct mutex readlock;
 	struct dentry *debugfs_socket_dir;
+	int headroom, tailroom, maxframe;
 };
 
 static int rx_flow_is_on(struct caifsock *cf_sk)
@@ -594,27 +595,32 @@
 		goto err;
 	noblock = msg->msg_flags & MSG_DONTWAIT;
 
-	buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;
-
-	ret = -EMSGSIZE;
-	if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
-		goto err;
-
 	timeo = sock_sndtimeo(sk, noblock);
 	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
 				1, timeo, &ret);
 
+	if (ret)
+		goto err;
 	ret = -EPIPE;
 	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
 		sock_flag(sk, SOCK_DEAD) ||
 		(sk->sk_shutdown & RCV_SHUTDOWN))
 		goto err;
 
+	/* Error if trying to write more than maximum frame size. */
+	ret = -EMSGSIZE;
+	if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
+		goto err;
+
+	buffer_size = len + cf_sk->headroom + cf_sk->tailroom;
+
 	ret = -ENOMEM;
 	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
-	if (!skb)
+
+	if (!skb || skb_tailroom(skb) < buffer_size)
 		goto err;
-	skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+
+	skb_reserve(skb, cf_sk->headroom);
 
 	ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
 
@@ -645,7 +651,6 @@
 	long timeo;
 
 	err = -EOPNOTSUPP;
-
 	if (unlikely(msg->msg_flags&MSG_OOB))
 		goto out_err;
 
@@ -662,8 +667,8 @@
 
 		size = len-sent;
 
-		if (size > CAIF_MAX_PAYLOAD_SIZE)
-			size = CAIF_MAX_PAYLOAD_SIZE;
+		if (size > cf_sk->maxframe)
+			size = cf_sk->maxframe;
 
 		/* If size is more than half of sndbuf, chop up message */
 		if (size > ((sk->sk_sndbuf >> 1) - 64))
@@ -673,14 +678,14 @@
 			size = SKB_MAX_ALLOC;
 
 		skb = sock_alloc_send_skb(sk,
-					size + CAIF_NEEDED_HEADROOM
-					+ CAIF_NEEDED_TAILROOM,
+					size + cf_sk->headroom +
+					cf_sk->tailroom,
 					msg->msg_flags&MSG_DONTWAIT,
 					&err);
 		if (skb == NULL)
 			goto out_err;
 
-		skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+		skb_reserve(skb, cf_sk->headroom);
 		/*
 		 *	If you pass two values to the sock_alloc_send_skb
 		 *	it tries to grab the large buffer with GFP_NOFS
@@ -821,17 +826,15 @@
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
 	long timeo;
 	int err;
+	int ifindex, headroom, tailroom;
+	struct net_device *dev;
+
 	lock_sock(sk);
 
 	err = -EAFNOSUPPORT;
 	if (uaddr->sa_family != AF_CAIF)
 		goto out;
 
-	err = -ESOCKTNOSUPPORT;
-	if (unlikely(!(sk->sk_type == SOCK_STREAM &&
-		       cf_sk->sk.sk_protocol == CAIFPROTO_AT) &&
-		       sk->sk_type != SOCK_SEQPACKET))
-		goto out;
 	switch (sock->state) {
 	case SS_UNCONNECTED:
 		/* Normal case, a fresh connect */
@@ -874,8 +877,7 @@
 	sk_stream_kill_queues(&cf_sk->sk);
 
 	err = -EINVAL;
-	if (addr_len != sizeof(struct sockaddr_caif) ||
-		!uaddr)
+	if (addr_len != sizeof(struct sockaddr_caif))
 		goto out;
 
 	memcpy(&cf_sk->conn_req.sockaddr, uaddr,
@@ -888,12 +890,23 @@
 	dbfs_atomic_inc(&cnt.num_connect_req);
 	cf_sk->layer.receive = caif_sktrecv_cb;
 	err = caif_connect_client(&cf_sk->conn_req,
-				&cf_sk->layer);
+				&cf_sk->layer, &ifindex, &headroom, &tailroom);
 	if (err < 0) {
 		cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
 		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
 		goto out;
 	}
+	dev = dev_get_by_index(sock_net(sk), ifindex);
+	cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
+	cf_sk->tailroom = tailroom;
+	cf_sk->maxframe = dev->mtu - (headroom + tailroom);
+	dev_put(dev);
+	if (cf_sk->maxframe < 1) {
+		pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n",
+			__func__, dev->mtu);
+		err = -ENODEV;
+		goto out;
+	}
 
 	err = -EINPROGRESS;
 wait_connect:
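
With this change the per-socket limits come from the interface rather than the
old CAIF_MAX_PAYLOAD_SIZE constants: the protocol headroom/tailroom are
returned through caif_connect_client(), and maxframe is computed as
dev->mtu - (headroom + tailroom). As a worked example using the protohead
table added in cfcnfg.c below, a VEI channel with the start-of-frame extension
enabled gets headroom 4 + 1 = 5 and tailroom 2, so a 1500-byte MTU leaves
maxframe = 1500 - 7 = 1493 bytes per frame.
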
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index df43f26..1c29189 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/slab.h>
+#include <linux/netdevice.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
@@ -22,6 +23,7 @@
 #define PHY_NAME_LEN 20
 
 #define container_obj(layr) container_of(layr, struct cfcnfg, layer)
+#define RFM_FRAGMENT_SIZE 4030
 
 /* Information about CAIF physical interfaces held by Config Module in order
  * to manage physical interfaces
@@ -41,6 +43,15 @@
 
 	/* Information about the physical device */
 	struct dev_info dev_info;
+
+	/* Interface index */
+	int ifindex;
+
+	/* Use Start of frame extension */
+	bool use_stx;
+
+	/* Use Start of frame checksum */
+	bool use_fcs;
 };
 
 struct cfcnfg {
@@ -248,9 +259,20 @@
 {
 }
 
+int protohead[CFCTRL_SRV_MASK] = {
+	[CFCTRL_SRV_VEI] = 4,
+	[CFCTRL_SRV_DATAGRAM] = 7,
+	[CFCTRL_SRV_UTIL] = 4,
+	[CFCTRL_SRV_RFM] = 3,
+	[CFCTRL_SRV_DBG] = 3,
+};
+
 int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
 				struct cfctrl_link_param *param,
-				struct cflayer *adap_layer)
+				struct cflayer *adap_layer,
+				int *ifindex,
+				int *proto_head,
+				int *proto_tail)
 {
 	struct cflayer *frml;
 	if (adap_layer == NULL) {
@@ -276,6 +298,14 @@
 		     param->phyid);
 	caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
 		     param->phyid);
+
+	*ifindex = cnfg->phy_layers[param->phyid].ifindex;
+	*proto_head =
+		protohead[param->linktype]+
+		(cnfg->phy_layers[param->phyid].use_stx ? 1 : 0);
+
+	*proto_tail = 2;
+
 	/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
 	cfctrl_enum_req(cnfg->ctrl, param->phyid);
 	return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
@@ -297,6 +327,8 @@
 	struct cfcnfg *cnfg = container_obj(layer);
 	struct cflayer *servicel = NULL;
 	struct cfcnfg_phyinfo *phyinfo;
+	struct net_device *netdev;
+
 	if (adapt_layer == NULL) {
 		pr_debug("CAIF: %s(): link setup response "
 				"but no client exist, send linkdown back\n",
@@ -308,19 +340,15 @@
 	caif_assert(cnfg != NULL);
 	caif_assert(phyid != 0);
 	phyinfo = &cnfg->phy_layers[phyid];
-	caif_assert(phyinfo != NULL);
 	caif_assert(phyinfo->id == phyid);
 	caif_assert(phyinfo->phy_layer != NULL);
 	caif_assert(phyinfo->phy_layer->id == phyid);
 
-	if (phyinfo != NULL &&
-	    phyinfo->phy_ref_count++ == 0 &&
-	    phyinfo->phy_layer != NULL &&
+	phyinfo->phy_ref_count++;
+	if (phyinfo->phy_ref_count == 1 &&
 	    phyinfo->phy_layer->modemcmd != NULL) {
-		caif_assert(phyinfo->phy_layer->id == phyid);
 		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
 					     _CAIF_MODEMCMD_PHYIF_USEFULL);
-
 	}
 	adapt_layer->id = channel_id;
 
@@ -332,7 +360,9 @@
 		servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
 		break;
 	case CFCTRL_SRV_RFM:
-		servicel = cfrfml_create(channel_id, &phyinfo->dev_info);
+		netdev = phyinfo->dev_info.dev;
+		servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
+						netdev->mtu);
 		break;
 	case CFCTRL_SRV_UTIL:
 		servicel = cfutill_create(channel_id, &phyinfo->dev_info);
@@ -363,8 +393,8 @@
 
 void
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
-		     void *dev, struct cflayer *phy_layer, u16 *phyid,
-		     enum cfcnfg_phy_preference pref,
+		     struct net_device *dev, struct cflayer *phy_layer,
+		     u16 *phyid, enum cfcnfg_phy_preference pref,
 		     bool fcs, bool stx)
 {
 	struct cflayer *frml;
@@ -418,6 +448,10 @@
 	cnfg->phy_layers[*phyid].dev_info.dev = dev;
 	cnfg->phy_layers[*phyid].phy_layer = phy_layer;
 	cnfg->phy_layers[*phyid].phy_ref_count = 0;
+	cnfg->phy_layers[*phyid].ifindex = dev->ifindex;
+	cnfg->phy_layers[*phyid].use_stx = stx;
+	cnfg->phy_layers[*phyid].use_fcs = fcs;
+
 	phy_layer->type = phy_type;
 	frml = cffrml_create(*phyid, fcs);
 	if (!frml) {
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index fcfda98..563145f 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -19,7 +19,7 @@
 #ifdef CAIF_NO_LOOP
 static int handle_loop(struct cfctrl *ctrl,
 			      int cmd, struct cfpkt *pkt){
-	return CAIF_FAILURE;
+	return -1;
 }
 #else
 static int handle_loop(struct cfctrl *ctrl,
@@ -43,7 +43,7 @@
 	memset(&dev_info, 0, sizeof(dev_info));
 	dev_info.id = 0xff;
 	memset(this, 0, sizeof(*this));
-	cfsrvl_init(&this->serv, 0, &dev_info);
+	cfsrvl_init(&this->serv, 0, &dev_info, false);
 	atomic_set(&this->req_seq_no, 1);
 	atomic_set(&this->rsp_seq_no, 1);
 	this->serv.layer.receive = cfctrl_recv;
@@ -395,7 +395,7 @@
 	cmd = cmdrsp & CFCTRL_CMD_MASK;
 	if (cmd != CFCTRL_CMD_LINK_ERR
 	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
-		if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE)
+		if (handle_loop(cfctrl, cmd, pkt) != 0)
 			cmdrsp |= CFCTRL_ERR_BIT;
 	}
 
@@ -647,6 +647,6 @@
 	default:
 		break;
 	}
-	return CAIF_SUCCESS;
+	return 0;
 }
 #endif
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index ab6b6dc..676648c 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -22,7 +22,7 @@
 	}
 	caif_assert(offsetof(struct cfsrvl, layer) == 0);
 	memset(dbg, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(dbg, channel_id, dev_info);
+	cfsrvl_init(dbg, channel_id, dev_info, false);
 	dbg->layer.receive = cfdbgl_receive;
 	dbg->layer.transmit = cfdbgl_transmit;
 	snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 5319484..ed9d53a 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -17,6 +17,7 @@
 #define DGM_FLOW_OFF 0x81
 #define DGM_FLOW_ON  0x80
 #define DGM_CTRL_PKT_SIZE 1
+#define DGM_MTU 1500
 
 static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
@@ -30,7 +31,7 @@
 	}
 	caif_assert(offsetof(struct cfsrvl, layer) == 0);
 	memset(dgm, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(dgm, channel_id, dev_info);
+	cfsrvl_init(dgm, channel_id, dev_info, true);
 	dgm->layer.receive = cfdgml_receive;
 	dgm->layer.transmit = cfdgml_transmit;
 	snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id);
@@ -89,6 +90,10 @@
 	if (!cfsrvl_ready(service, &ret))
 		return ret;
 
+	/* The STE modem cannot handle datagrams larger than 1500 bytes */
+	if (cfpkt_getlen(pkt) > DGM_MTU)
+		return -EMSGSIZE;
+
 	cfpkt_add_head(pkt, &zero, 4);
 
 	/* Add info for MUX-layer to route the packet out. */
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index a6fdf89..01f238f 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -9,8 +9,8 @@
 #include <linux/hardirq.h>
 #include <net/caif/cfpkt.h>
 
-#define PKT_PREFIX CAIF_NEEDED_HEADROOM
-#define PKT_POSTFIX CAIF_NEEDED_TAILROOM
+#define PKT_PREFIX  16
+#define PKT_POSTFIX 2
 #define PKT_LEN_WHEN_EXTENDING 128
 #define PKT_ERROR(pkt, errmsg) do {	   \
     cfpkt_priv(pkt)->erronous = true;	   \
@@ -338,7 +338,6 @@
 	u16 dstlen;
 	u16 createlen;
 	if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
-		cfpkt_destroy(addpkt);
 		return dstpkt;
 	}
 	if (expectlen > addlen)
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index fd27b17..4b04d25 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -7,102 +7,304 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/unaligned/le_byteshift.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
 
-#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
-
+#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
 #define RFM_SEGMENTATION_BIT 0x01
-#define RFM_PAYLOAD  0x00
-#define RFM_CMD_BIT  0x80
-#define RFM_FLOW_OFF 0x81
-#define RFM_FLOW_ON  0x80
-#define RFM_SET_PIN  0x82
-#define RFM_CTRL_PKT_SIZE 1
+#define RFM_HEAD_SIZE 7
 
 static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
-static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl);
 
-struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info)
+struct cfrfml {
+	struct cfsrvl serv;
+	struct cfpkt *incomplete_frm;
+	int fragment_size;
+	u8  seghead[6];
+	u16 pdu_size;
+	/* Protects serialized processing of packets */
+	spinlock_t sync;
+};
+
+static void cfrfml_release(struct kref *kref)
 {
-	struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
-	if (!rfm) {
+	struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref);
+	struct cfrfml *rfml = container_obj(&srvl->layer);
+
+	if (rfml->incomplete_frm)
+		cfpkt_destroy(rfml->incomplete_frm);
+
+	kfree(srvl);
+}
+
+struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
+					int mtu_size)
+{
+	int tmp;
+	struct cfrfml *this =
+		kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
+
+	if (!this) {
 		pr_warning("CAIF: %s(): Out of memory\n", __func__);
 		return NULL;
 	}
-	caif_assert(offsetof(struct cfsrvl, layer) == 0);
-	memset(rfm, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(rfm, channel_id, dev_info);
-	rfm->layer.modemcmd = cfservl_modemcmd;
-	rfm->layer.receive = cfrfml_receive;
-	rfm->layer.transmit = cfrfml_transmit;
-	snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id);
-	return &rfm->layer;
+
+	cfsrvl_init(&this->serv, channel_id, dev_info, false);
+	this->serv.release = cfrfml_release;
+	this->serv.layer.receive = cfrfml_receive;
+	this->serv.layer.transmit = cfrfml_transmit;
+
+	/* Round down to closest multiple of 16 */
+	tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
+	tmp *= 16;
+
+	this->fragment_size = tmp;
+	spin_lock_init(&this->sync);
+	snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
+		"rfm%d", channel_id);
+
+	return &this->serv.layer;
 }
 
-static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
+			struct cfpkt *pkt, int *err)
 {
-       return -EPROTO;
+	struct cfpkt *tmppkt;
+	*err = -EPROTO;
+	/* n-th but not last segment */
+
+	if (cfpkt_extr_head(pkt, seghead, 6) < 0)
+		return NULL;
+
+	/* Verify correct header */
+	if (memcmp(seghead, rfml->seghead, 6) != 0)
+		return NULL;
+
+	tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
+			rfml->pdu_size + RFM_HEAD_SIZE);
+
+	/* If cfpkt_append fails, the input pkts are not freed */
+	*err = -ENOMEM;
+	if (tmppkt == NULL)
+		return NULL;
+
+	*err = 0;
+	return tmppkt;
 }
 
 static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
 {
 	u8 tmp;
 	bool segmented;
-	int ret;
+	int err;
+	u8 seghead[6];
+	struct cfrfml *rfml;
+	struct cfpkt *tmppkt = NULL;
+
 	caif_assert(layr->up != NULL);
 	caif_assert(layr->receive != NULL);
+	rfml = container_obj(layr);
+	spin_lock(&rfml->sync);
 
-	/*
-	 * RFM is taking care of segmentation and stripping of
-	 * segmentation bit.
-	 */
-	if (cfpkt_extr_head(pkt, &tmp, 1) < 0) {
-		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
-		cfpkt_destroy(pkt);
-		return -EPROTO;
-	}
+	err = -EPROTO;
+	if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
+		goto out;
 	segmented = tmp & RFM_SEGMENTATION_BIT;
-	caif_assert(!segmented);
 
-	ret = layr->up->receive(layr->up, pkt);
-	return ret;
+	if (segmented) {
+		if (rfml->incomplete_frm == NULL) {
+			/* Initial Segment */
+			if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
+				goto out;
+
+			rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
+
+			if (cfpkt_erroneous(pkt))
+				goto out;
+			rfml->incomplete_frm = pkt;
+			pkt = NULL;
+		} else {
+
+			tmppkt = rfm_append(rfml, seghead, pkt, &err);
+			if (tmppkt == NULL)
+				goto out;
+
+			if (cfpkt_erroneous(tmppkt))
+				goto out;
+
+			rfml->incomplete_frm = tmppkt;
+
+		}
+		err = 0;
+		goto out;
+	}
+
+	if (rfml->incomplete_frm) {
+
+		/* Last Segment */
+		tmppkt = rfm_append(rfml, seghead, pkt, &err);
+		if (tmppkt == NULL)
+			goto out;
+
+		if (cfpkt_erroneous(tmppkt))
+			goto out;
+
+		rfml->incomplete_frm = NULL;
+		pkt = tmppkt;
+		tmppkt = NULL;
+
+		/* Verify that length is correct */
+		err = -EPROTO;
+		if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
+			goto out;
+	}
+
+	err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);
+
+out:
+
+	if (err != 0) {
+		if (tmppkt)
+			cfpkt_destroy(tmppkt);
+		if (pkt)
+			cfpkt_destroy(pkt);
+		if (rfml->incomplete_frm)
+			cfpkt_destroy(rfml->incomplete_frm);
+		rfml->incomplete_frm = NULL;
+
+		pr_info("CAIF: %s(): "
+				"Connection error %d triggered on RFM link\n",
+				__func__, err);
+
+		/* Trigger connection error upon failure.*/
+		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+					rfml->serv.dev_info.id);
+	}
+	spin_unlock(&rfml->sync);
+	return err;
 }
 
-static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
+
+static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-	u8 tmp = 0;
-	int ret;
-	struct cfsrvl *service = container_obj(layr);
-
-	caif_assert(layr->dn != NULL);
-	caif_assert(layr->dn->transmit != NULL);
-
-	if (!cfsrvl_ready(service, &ret))
-		return ret;
-
-	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
-		pr_err("CAIF: %s():Packet too large - size=%d\n",
-			__func__, cfpkt_getlen(pkt));
-		return -EOVERFLOW;
-	}
-	if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
-		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
-		return -EPROTO;
-	}
+	caif_assert(cfpkt_getlen(pkt) <= rfml->fragment_size + RFM_HEAD_SIZE + 1);
 
 	/* Add info for MUX-layer to route the packet out. */
-	cfpkt_info(pkt)->channel_id = service->layer.id;
+	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
+
 	/*
 	 * To optimize alignment, we add up the size of CAIF header before
 	 * payload.
 	 */
-	cfpkt_info(pkt)->hdr_len = 1;
-	cfpkt_info(pkt)->dev_info = &service->dev_info;
-	ret = layr->dn->transmit(layr->dn, pkt);
-	if (ret < 0)
-		cfpkt_extr_head(pkt, &tmp, 1);
-	return ret;
+	cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
+	cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;
+
+	return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
+}
+
+static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+	int err;
+	u8 seg;
+	u8 head[6];
+	struct cfpkt *rearpkt = NULL;
+	struct cfpkt *frontpkt = pkt;
+	struct cfrfml *rfml = container_obj(layr);
+
+	caif_assert(layr->dn != NULL);
+	caif_assert(layr->dn->transmit != NULL);
+
+	if (!cfsrvl_ready(&rfml->serv, &err))
+		return err;
+
+	err = -EPROTO;
+	if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
+		goto out;
+
+	err = 0;
+	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
+		err = cfpkt_peek_head(pkt, head, 6);
+
+	if (err < 0)
+		goto out;
+
+	while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
+
+		seg = 1;
+		err = -EPROTO;
+
+		if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
+			goto out;
+		/*
+		 * On OOM error cfpkt_split returns NULL.
+		 *
+		 * NOTE: Segmented pdu is not correctly aligned.
+		 * This has negative performance impact.
+		 */
+
+		rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
+		if (rearpkt == NULL)
+			goto out;
+
+		err = cfrfml_transmit_segment(rfml, frontpkt);
+
+		if (err != 0)
+			goto out;
+		frontpkt = rearpkt;
+		rearpkt = NULL;
+
+		err = -ENOMEM;
+		if (frontpkt == NULL)
+			goto out;
+		err = -EPROTO;
+		if (cfpkt_add_head(frontpkt, head, 6) < 0)
+			goto out;
+
+	}
+
+	seg = 0;
+	err = -EPROTO;
+
+	if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
+		goto out;
+
+	err = cfrfml_transmit_segment(rfml, frontpkt);
+
+	frontpkt = NULL;
+out:
+
+	if (err != 0) {
+		pr_info("CAIF: %s(): "
+				"Connection error %d triggered on RFM link\n",
+				__func__, err);
+		/* Trigger connection error upon failure.*/
+
+		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+					rfml->serv.dev_info.id);
+
+		if (rearpkt)
+			cfpkt_destroy(rearpkt);
+
+		if (frontpkt && frontpkt != pkt) {
+
+			cfpkt_destroy(frontpkt);
+			/*
+			 * The socket layer will free the original packet,
+			 * but this packet may already have been sent and
+			 * freed. So we have to return 0 in this case
+			 * to prevent the socket layer from re-freeing it.
+			 * The shutdown indication above will cause the
+			 * connection to be invalidated anyhow.
+			 */
+			err = 0;
+		}
+	}
+
+	return err;
 }
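
An aside on the fragment size chosen in cfrfml_create() above: the usable payload per RFM segment is the channel MTU minus the 7-byte RFM header and the 6-byte segmentation head, rounded down to a multiple of 16. A minimal user-space sketch of that arithmetic follows; the standalone program and its sample MTU values are illustrative only, while RFM_HEAD_SIZE is taken from the hunk above.

#include <stdio.h>

#define RFM_HEAD_SIZE 7		/* as defined in net/caif/cfrfml.c above */

/* Mirrors the rounding done in cfrfml_create() */
static int rfm_fragment_size(int mtu_size)
{
	int tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
	return tmp * 16;
}

int main(void)
{
	/* (4096 - 13) / 16 * 16 = 4080,  (1500 - 13) / 16 * 16 = 1472 */
	printf("mtu 4096 -> fragment_size %d\n", rfm_fragment_size(4096));
	printf("mtu 1500 -> fragment_size %d\n", rfm_fragment_size(1500));
	return 0;
}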
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 965c5ba..a11fbd6 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -14,7 +14,8 @@
 #define container_obj(layr) ((struct cfserl *) layr)
 
 #define CFSERL_STX 0x02
-#define CAIF_MINIUM_PACKET_SIZE 4
+#define SERIAL_MINIMUM_PACKET_SIZE 4
+#define SERIAL_MAX_FRAMESIZE 4096
 struct cfserl {
 	struct cflayer layer;
 	struct cfpkt *incomplete_frm;
@@ -119,8 +120,8 @@
 		/*
 		 * Frame error handling
 		 */
-		if (expectlen < CAIF_MINIUM_PACKET_SIZE
-		    || expectlen > CAIF_MAX_FRAMESIZE) {
+		if (expectlen < SERIAL_MINIMUM_PACKET_SIZE
+		    || expectlen > SERIAL_MAX_FRAMESIZE) {
 			if (!layr->usestx) {
 				if (pkt != NULL)
 					cfpkt_destroy(pkt);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 6e5b7079..f40939a 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -24,8 +24,10 @@
 				int phyid)
 {
 	struct cfsrvl *service = container_obj(layr);
+
 	caif_assert(layr->up != NULL);
 	caif_assert(layr->up->ctrlcmd != NULL);
+
 	switch (ctrl) {
 	case CAIF_CTRLCMD_INIT_RSP:
 		service->open = true;
@@ -89,9 +91,14 @@
 static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
 {
 	struct cfsrvl *service = container_obj(layr);
+
 	caif_assert(layr != NULL);
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
+
+	if (!service->supports_flowctrl)
+		return 0;
+
 	switch (ctrl) {
 	case CAIF_MODEMCMD_FLOW_ON_REQ:
 		{
@@ -152,9 +159,17 @@
 	kfree(layer);
 }
 
+void cfsrvl_release(struct kref *kref)
+{
+	struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
+	kfree(service);
+}
+
 void cfsrvl_init(struct cfsrvl *service,
-		 u8 channel_id,
-		 struct dev_info *dev_info)
+			u8 channel_id,
+			struct dev_info *dev_info,
+			bool supports_flowctrl
+			)
 {
 	caif_assert(offsetof(struct cfsrvl, layer) == 0);
 	service->open = false;
@@ -164,14 +179,11 @@
 	service->layer.ctrlcmd = cfservl_ctrlcmd;
 	service->layer.modemcmd = cfservl_modemcmd;
 	service->dev_info = *dev_info;
+	service->supports_flowctrl = supports_flowctrl;
+	service->release = cfsrvl_release;
 	kref_init(&service->ref);
 }
 
-void cfsrvl_release(struct kref *kref)
-{
-	struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
-	kfree(service);
-}
 
 bool cfsrvl_ready(struct cfsrvl *service, int *err)
 {
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 5fd2c9e..02795af 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -31,7 +31,7 @@
 	}
 	caif_assert(offsetof(struct cfsrvl, layer) == 0);
 	memset(util, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(util, channel_id, dev_info);
+	cfsrvl_init(util, channel_id, dev_info, true);
 	util->layer.receive = cfutill_receive;
 	util->layer.transmit = cfutill_transmit;
 	snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1");
@@ -90,12 +90,6 @@
 	if (!cfsrvl_ready(service, &ret))
 		return ret;
 
-	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
-		pr_err("CAIF: %s(): packet too large size=%d\n",
-			__func__, cfpkt_getlen(pkt));
-		return -EOVERFLOW;
-	}
-
 	cfpkt_add_head(pkt, &zero, 1);
 	/* Add info for MUX-layer to route the packet out. */
 	info = cfpkt_info(pkt);
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index e04f7d9..77cc09f 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -30,7 +30,7 @@
 	}
 	caif_assert(offsetof(struct cfsrvl, layer) == 0);
 	memset(vei, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(vei, channel_id, dev_info);
+	cfsrvl_init(vei, channel_id, dev_info, true);
 	vei->layer.receive = cfvei_receive;
 	vei->layer.transmit = cfvei_transmit;
 	snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
@@ -84,11 +84,6 @@
 		return ret;
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
-	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
-		pr_warning("CAIF: %s(): Packet too large - size=%d\n",
-			   __func__, cfpkt_getlen(pkt));
-		return -EOVERFLOW;
-	}
 
 	if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
 		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index 89ad4ea..ada6ee2 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -27,7 +27,7 @@
 	caif_assert(offsetof(struct cfsrvl, layer) == 0);
 
 	memset(vid, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(vid, channel_id, dev_info);
+	cfsrvl_init(vid, channel_id, dev_info, false);
 	vid->layer.receive = cfvidl_receive;
 	vid->layer.transmit = cfvidl_transmit;
 	snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1");
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 610966a..4293e19 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -23,7 +23,7 @@
 #include <net/caif/caif_dev.h>
 
 /* GPRS PDP connection has MTU to 1500 */
-#define SIZE_MTU 1500
+#define GPRS_PDP_MTU 1500
 /* 5 sec. connect timeout */
 #define CONNECT_TIMEOUT (5 * HZ)
 #define CAIF_NET_DEFAULT_QUEUE_LEN 500
@@ -232,6 +232,8 @@
 {
 	struct chnl_net *priv = NULL;
 	int result = -1;
+	int llifindex, headroom, tailroom, mtu;
+	struct net_device *lldev;
 	ASSERT_RTNL();
 	priv = netdev_priv(dev);
 	if (!priv) {
@@ -241,41 +243,88 @@
 
 	if (priv->state != CAIF_CONNECTING) {
 		priv->state = CAIF_CONNECTING;
-		result = caif_connect_client(&priv->conn_req, &priv->chnl);
+		result = caif_connect_client(&priv->conn_req, &priv->chnl,
+					&llifindex, &headroom, &tailroom);
 		if (result != 0) {
-				priv->state = CAIF_DISCONNECTED;
 				pr_debug("CAIF: %s(): err: "
 					"Unable to register and open device,"
 					" Err:%d\n",
 					__func__,
 					result);
-				return result;
+				goto error;
+		}
+
+		lldev = dev_get_by_index(dev_net(dev), llifindex);
+
+		if (lldev == NULL) {
+			pr_debug("CAIF: %s(): no interface?\n", __func__);
+			result = -ENODEV;
+			goto error;
+		}
+
+		dev->needed_tailroom = tailroom + lldev->needed_tailroom;
+		dev->hard_header_len = headroom + lldev->hard_header_len +
+			lldev->needed_tailroom;
+
+		/*
+		 * MTU, head-room etc. are not known before we have a
+		 * CAIF link layer device available. The MTU calculation
+		 * may override the initial RTNL configuration.
+		 * MTU is the minimum of the current MTU, the link layer
+		 * MTU minus CAIF head and tail room, and the GPRS PDP
+		 * context's max MTU.
+		 */
+		mtu = min_t(int, dev->mtu, lldev->mtu - (headroom + tailroom));
+		mtu = min_t(int, GPRS_PDP_MTU, mtu);
+		dev_set_mtu(dev, mtu);
+		dev_put(lldev);
+
+		if (mtu < 100) {
+			pr_warning("CAIF: %s(): "
+				"CAIF Interface MTU too small (%d)\n",
+				__func__, mtu);
+			result = -ENODEV;
+			goto error;
 		}
 	}
 
+	rtnl_unlock();  /* Release RTNL lock during connect wait */
+
 	result = wait_event_interruptible_timeout(priv->netmgmt_wq,
 						priv->state != CAIF_CONNECTING,
 						CONNECT_TIMEOUT);
 
+	rtnl_lock();
+
 	if (result == -ERESTARTSYS) {
 		pr_debug("CAIF: %s(): wait_event_interruptible"
 			 " woken by a signal\n", __func__);
-		return -ERESTARTSYS;
+		result = -ERESTARTSYS;
+		goto error;
 	}
+
 	if (result == 0) {
 		pr_debug("CAIF: %s(): connect timeout\n", __func__);
 		caif_disconnect_client(&priv->chnl);
 		priv->state = CAIF_DISCONNECTED;
 		pr_debug("CAIF: %s(): state disconnected\n", __func__);
-		return -ETIMEDOUT;
+		result = -ETIMEDOUT;
+		goto error;
 	}
 
 	if (priv->state != CAIF_CONNECTED) {
 		pr_debug("CAIF: %s(): connect failed\n", __func__);
-		return -ECONNREFUSED;
+		result = -ECONNREFUSED;
+		goto error;
 	}
 	pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__);
 	return 0;
+
+error:
+	caif_disconnect_client(&priv->chnl);
+	priv->state = CAIF_DISCONNECTED;
+	pr_debug("CAIF: %s(): state disconnected\n", __func__);
+	return result;
+
 }
 
 static int chnl_net_stop(struct net_device *dev)
@@ -321,9 +370,7 @@
 	dev->destructor = free_netdev;
 	dev->flags |= IFF_NOARP;
 	dev->flags |= IFF_POINTOPOINT;
-	dev->needed_headroom = CAIF_NEEDED_HEADROOM;
-	dev->needed_tailroom = CAIF_NEEDED_TAILROOM;
-	dev->mtu = SIZE_MTU;
+	dev->mtu = GPRS_PDP_MTU;
 	dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN;
 
 	priv = netdev_priv(dev);
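
For the MTU clamping in chnl_net_open() above, a small stand-alone sketch may help; the link-layer numbers (a 4096-byte MTU and 32+2 bytes of CAIF head/tail room) are hypothetical, and only the min() chain mirrors the kernel code.

#include <stdio.h>

#define GPRS_PDP_MTU 1500	/* as defined in net/caif/chnl_net.c above */

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int lldev_mtu = 4096, headroom = 32, tailroom = 2;	/* hypothetical */
	int mtu = 16384;					/* RTNL-configured */

	mtu = min_int(mtu, lldev_mtu - (headroom + tailroom));	/* 4062 */
	mtu = min_int(GPRS_PDP_MTU, mtu);			/* 1500 */
	printf("CAIF netdev MTU: %d\n", mtu);
	return 0;
}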
diff --git a/net/can/raw.c b/net/can/raw.c
index da99cf1..ccfe633 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -436,14 +436,9 @@
 
 		if (count > 1) {
 			/* filter does not fit into dfilter => alloc space */
-			filter = kmalloc(optlen, GFP_KERNEL);
-			if (!filter)
-				return -ENOMEM;
-
-			if (copy_from_user(filter, optval, optlen)) {
-				kfree(filter);
-				return -EFAULT;
-			}
+			filter = memdup_user(optval, optlen);
+			if (IS_ERR(filter))
+				return PTR_ERR(filter);
 		} else if (count == 1) {
 			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
 				return -EFAULT;
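
The raw_setsockopt() hunk above replaces an open-coded kmalloc()/copy_from_user() pair with memdup_user(). The same idiom applies to any setsockopt-style copy of a variable-length user buffer; a hedged kernel-style sketch follows (the helper name is hypothetical, while memdup_user(), IS_ERR() and PTR_ERR() are real kernel APIs).

#include <linux/err.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/can.h>

/* Hypothetical helper: duplicate a user-supplied CAN filter array. */
static int dup_user_filters(void __user *optval, unsigned int optlen,
			    struct can_filter **out)
{
	struct can_filter *filter;

	filter = memdup_user(optval, optlen);	/* kmalloc + copy_from_user */
	if (IS_ERR(filter))
		return PTR_ERR(filter);		/* -ENOMEM or -EFAULT */

	*out = filter;				/* caller kfree()s it later */
	return 0;
}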
diff --git a/net/compat.c b/net/compat.c
index ec24d9e..63d260e 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -81,7 +81,7 @@
 	int tot_len;
 
 	if (kern_msg->msg_namelen) {
-		if (mode==VERIFY_READ) {
+		if (mode == VERIFY_READ) {
 			int err = move_addr_to_kernel(kern_msg->msg_name,
 						      kern_msg->msg_namelen,
 						      kern_address);
@@ -354,7 +354,7 @@
 static int do_set_sock_timeout(struct socket *sock, int level,
 		int optname, char __user *optval, unsigned int optlen)
 {
-	struct compat_timeval __user *up = (struct compat_timeval __user *) optval;
+	struct compat_timeval __user *up = (struct compat_timeval __user *)optval;
 	struct timeval ktime;
 	mm_segment_t old_fs;
 	int err;
@@ -367,7 +367,7 @@
 		return -EFAULT;
 	old_fs = get_fs();
 	set_fs(KERNEL_DS);
-	err = sock_setsockopt(sock, level, optname, (char *) &ktime, sizeof(ktime));
+	err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
 	set_fs(old_fs);
 
 	return err;
@@ -389,11 +389,10 @@
 				char __user *optval, unsigned int optlen)
 {
 	int err;
-	struct socket *sock;
+	struct socket *sock = sockfd_lookup(fd, &err);
 
-	if ((sock = sockfd_lookup(fd, &err))!=NULL)
-	{
-		err = security_socket_setsockopt(sock,level,optname);
+	if (sock) {
+		err = security_socket_setsockopt(sock, level, optname);
 		if (err) {
 			sockfd_put(sock);
 			return err;
@@ -453,7 +452,7 @@
 int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
 {
 	struct compat_timeval __user *ctv =
-			(struct compat_timeval __user*) userstamp;
+			(struct compat_timeval __user *) userstamp;
 	int err = -ENOENT;
 	struct timeval tv;
 
@@ -477,7 +476,7 @@
 int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
 {
 	struct compat_timespec __user *ctv =
-			(struct compat_timespec __user*) userstamp;
+			(struct compat_timespec __user *) userstamp;
 	int err = -ENOENT;
 	struct timespec ts;
 
@@ -502,12 +501,10 @@
 				char __user *optval, int __user *optlen)
 {
 	int err;
-	struct socket *sock;
+	struct socket *sock = sockfd_lookup(fd, &err);
 
-	if ((sock = sockfd_lookup(fd, &err))!=NULL)
-	{
-		err = security_socket_getsockopt(sock, level,
-							   optname);
+	if (sock) {
+		err = security_socket_getsockopt(sock, level, optname);
 		if (err) {
 			sockfd_put(sock);
 			return err;
@@ -531,7 +528,7 @@
 	__u32				 gr_interface;
 	struct __kernel_sockaddr_storage gr_group
 		__attribute__ ((aligned(4)));
-} __attribute__ ((packed));
+} __packed;
 
 struct compat_group_source_req {
 	__u32				 gsr_interface;
@@ -539,7 +536,7 @@
 		__attribute__ ((aligned(4)));
 	struct __kernel_sockaddr_storage gsr_source
 		__attribute__ ((aligned(4)));
-} __attribute__ ((packed));
+} __packed;
 
 struct compat_group_filter {
 	__u32				 gf_interface;
@@ -549,7 +546,7 @@
 	__u32				 gf_numsrc;
 	struct __kernel_sockaddr_storage gf_slist[1]
 		__attribute__ ((aligned(4)));
-} __attribute__ ((packed));
+} __packed;
 
 #define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \
 			sizeof(struct __kernel_sockaddr_storage))
@@ -557,7 +554,7 @@
 
 int compat_mc_setsockopt(struct sock *sock, int level, int optname,
 	char __user *optval, unsigned int optlen,
-	int (*setsockopt)(struct sock *,int,int,char __user *,unsigned int))
+	int (*setsockopt)(struct sock *, int, int, char __user *, unsigned int))
 {
 	char __user	*koptval = optval;
 	int		koptlen = optlen;
@@ -640,12 +637,11 @@
 	}
 	return setsockopt(sock, level, optname, koptval, koptlen);
 }
-
 EXPORT_SYMBOL(compat_mc_setsockopt);
 
 int compat_mc_getsockopt(struct sock *sock, int level, int optname,
 	char __user *optval, int __user *optlen,
-	int (*getsockopt)(struct sock *,int,int,char __user *,int __user *))
+	int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
 {
 	struct compat_group_filter __user *gf32 = (void *)optval;
 	struct group_filter __user *kgf;
@@ -681,7 +677,7 @@
 	    __put_user(interface, &kgf->gf_interface) ||
 	    __put_user(fmode, &kgf->gf_fmode) ||
 	    __put_user(numsrc, &kgf->gf_numsrc) ||
-	    copy_in_user(&kgf->gf_group,&gf32->gf_group,sizeof(kgf->gf_group)))
+	    copy_in_user(&kgf->gf_group, &gf32->gf_group, sizeof(kgf->gf_group)))
 		return -EFAULT;
 
 	err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen);
@@ -714,21 +710,22 @@
 		copylen = numsrc * sizeof(gf32->gf_slist[0]);
 		if (copylen > klen)
 			copylen = klen;
-	        if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen))
+		if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen))
 			return -EFAULT;
 	}
 	return err;
 }
-
 EXPORT_SYMBOL(compat_mc_getsockopt);
 
 
 /* Argument list sizes for compat_sys_socketcall */
 #define AL(x) ((x) * sizeof(u32))
-static unsigned char nas[20]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
-				AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
-				AL(6),AL(2),AL(5),AL(5),AL(3),AL(3),
-				AL(4),AL(5)};
+static unsigned char nas[20] = {
+	AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
+	AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
+	AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
+	AL(4), AL(5)
+};
 #undef AL
 
 asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
@@ -827,7 +824,7 @@
 					  compat_ptr(a[4]), compat_ptr(a[5]));
 		break;
 	case SYS_SHUTDOWN:
-		ret = sys_shutdown(a0,a1);
+		ret = sys_shutdown(a0, a1);
 		break;
 	case SYS_SETSOCKOPT:
 		ret = compat_sys_setsockopt(a0, a1, a[2],
diff --git a/net/core/dev.c b/net/core/dev.c
index 723a347..eb4201c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -803,35 +803,31 @@
 EXPORT_SYMBOL(dev_getfirstbyhwtype);
 
 /**
- *	dev_get_by_flags - find any device with given flags
+ *	dev_get_by_flags_rcu - find any device with given flags
  *	@net: the applicable net namespace
  *	@if_flags: IFF_* values
  *	@mask: bitmask of bits in if_flags to check
  *
  *	Search for any interface with the given flags. Returns NULL if a device
- *	is not found or a pointer to the device. The device returned has
- *	had a reference added and the pointer is safe until the user calls
- *	dev_put to indicate they have finished with it.
+ *	is not found or a pointer to the device. Must be called inside
+ *	rcu_read_lock(), and result refcount is unchanged.
  */
 
-struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
+struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
 				    unsigned short mask)
 {
 	struct net_device *dev, *ret;
 
 	ret = NULL;
-	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
 		if (((dev->flags ^ if_flags) & mask) == 0) {
-			dev_hold(dev);
 			ret = dev;
 			break;
 		}
 	}
-	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL(dev_get_by_flags);
+EXPORT_SYMBOL(dev_get_by_flags_rcu);
 
 /**
  *	dev_valid_name - check if name is okay for network device
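
Since dev_get_by_flags_rcu() above no longer takes a reference, callers must stay inside rcu_read_lock() while using the result, or take their own reference before unlocking. A usage sketch (the caller function is made up for illustration):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* Hypothetical caller: grab a reference to any interface that is UP. */
static struct net_device *find_any_up_dev(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_flags_rcu(net, IFF_UP, IFF_UP);
	if (dev)
		dev_hold(dev);	/* keep it valid after rcu_read_unlock() */
	rcu_read_unlock();

	return dev;		/* caller must dev_put() when finished */
}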
@@ -1541,7 +1537,8 @@
 				if (net_ratelimit())
 					printk(KERN_CRIT "protocol %04x is "
 					       "buggy, dev %s\n",
-					       skb2->protocol, dev->name);
+					       ntohs(skb2->protocol),
+					       dev->name);
 				skb_reset_network_header(skb2);
 			}
 
@@ -1595,7 +1592,9 @@
 
 void dev_kfree_skb_irq(struct sk_buff *skb)
 {
-	if (atomic_dec_and_test(&skb->users)) {
+	if (!skb->destructor)
+		dev_kfree_skb(skb);
+	else if (atomic_dec_and_test(&skb->users)) {
 		struct softnet_data *sd;
 		unsigned long flags;
 
@@ -1915,6 +1914,22 @@
 		skb_orphan(skb);
 }
 
+/*
+ * Returns true if either:
+ *	1. skb has frag_list and the device doesn't support FRAGLIST, or
+ *	2. skb is fragmented and the device does not support SG, or if
+ *	   at least one of fragments is in highmem and device does not
+ *	   support DMA from it.
+ */
+static inline int skb_needs_linearize(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	return skb_is_nonlinear(skb) &&
+	       ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+	        (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
+					      illegal_highdma(dev, skb))));
+}
+
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
 {
@@ -1939,6 +1954,22 @@
 				goto out_kfree_skb;
 			if (skb->next)
 				goto gso;
+		} else {
+			if (skb_needs_linearize(skb, dev) &&
+			    __skb_linearize(skb))
+				goto out_kfree_skb;
+
+			/* If packet is not checksummed and device does not
+			 * support checksumming for this protocol, complete
+			 * checksumming here.
+			 */
+			if (skb->ip_summed == CHECKSUM_PARTIAL) {
+				skb_set_transport_header(skb, skb->csum_start -
+					      skb_headroom(skb));
+				if (!dev_can_checksum(dev, skb) &&
+				     skb_checksum_help(skb))
+					goto out_kfree_skb;
+			}
 		}
 
 		rc = ops->ndo_start_xmit(skb, dev);
@@ -2056,14 +2087,24 @@
 				 struct netdev_queue *txq)
 {
 	spinlock_t *root_lock = qdisc_lock(q);
+	bool contended = qdisc_is_running(q);
 	int rc;
 
+	/*
+	 * Heuristic to force contended enqueues to serialize on a
+	 * separate lock before trying to get qdisc main lock.
+	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
+	 * and dequeue packets faster.
+	 */
+	if (unlikely(contended))
+		spin_lock(&q->busylock);
+
 	spin_lock(root_lock);
 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
 		kfree_skb(skb);
 		rc = NET_XMIT_DROP;
 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
-		   !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
+		   qdisc_run_begin(q)) {
 		/*
 		 * This is a work-conserving queue; there are no old skbs
 		 * waiting to be sent out; and the qdisc is not running -
@@ -2072,37 +2113,33 @@
 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 			skb_dst_force(skb);
 		__qdisc_update_bstats(q, skb->len);
-		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
+		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+			if (unlikely(contended)) {
+				spin_unlock(&q->busylock);
+				contended = false;
+			}
 			__qdisc_run(q);
-		else
-			clear_bit(__QDISC_STATE_RUNNING, &q->state);
+		} else
+			qdisc_run_end(q);
 
 		rc = NET_XMIT_SUCCESS;
 	} else {
 		skb_dst_force(skb);
 		rc = qdisc_enqueue_root(skb, q);
-		qdisc_run(q);
+		if (qdisc_run_begin(q)) {
+			if (unlikely(contended)) {
+				spin_unlock(&q->busylock);
+				contended = false;
+			}
+			__qdisc_run(q);
+		}
 	}
 	spin_unlock(root_lock);
-
+	if (unlikely(contended))
+		spin_unlock(&q->busylock);
 	return rc;
 }
 
-/*
- * Returns true if either:
- *	1. skb has frag_list and the device doesn't support FRAGLIST, or
- *	2. skb is fragmented and the device does not support SG, or if
- *	   at least one of fragments is in highmem and device does not
- *	   support DMA from it.
- */
-static inline int skb_needs_linearize(struct sk_buff *skb,
-				      struct net_device *dev)
-{
-	return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
-	       (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
-					      illegal_highdma(dev, skb)));
-}
-
 /**
  *	dev_queue_xmit - transmit a buffer
  *	@skb: buffer to transmit
@@ -2135,25 +2172,6 @@
 	struct Qdisc *q;
 	int rc = -ENOMEM;
 
-	/* GSO will handle the following emulations directly. */
-	if (netif_needs_gso(dev, skb))
-		goto gso;
-
-	/* Convert a paged skb to linear, if required */
-	if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
-		goto out_kfree_skb;
-
-	/* If packet is not checksummed and device does not support
-	 * checksumming for this protocol, complete checksumming here.
-	 */
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		skb_set_transport_header(skb, skb->csum_start -
-					      skb_headroom(skb));
-		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
-			goto out_kfree_skb;
-	}
-
-gso:
 	/* Disable soft irqs for various locks below. Also
 	 * stops preemption for RCU.
 	 */
@@ -2212,7 +2230,6 @@
 	rc = -ENETDOWN;
 	rcu_read_unlock_bh();
 
-out_kfree_skb:
 	kfree_skb(skb);
 	return rc;
 out:
@@ -2597,70 +2614,14 @@
 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
-#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
+    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
 /* This hook is defined here for ATM LANE */
 int (*br_fdb_test_addr_hook)(struct net_device *dev,
 			     unsigned char *addr) __read_mostly;
 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
 #endif
 
-/*
- * If bridge module is loaded call bridging hook.
- *  returns NULL if packet was consumed.
- */
-struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
-					struct sk_buff *skb) __read_mostly;
-EXPORT_SYMBOL_GPL(br_handle_frame_hook);
-
-static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
-					    struct packet_type **pt_prev, int *ret,
-					    struct net_device *orig_dev)
-{
-	struct net_bridge_port *port;
-
-	if (skb->pkt_type == PACKET_LOOPBACK ||
-	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
-		return skb;
-
-	if (*pt_prev) {
-		*ret = deliver_skb(skb, *pt_prev, orig_dev);
-		*pt_prev = NULL;
-	}
-
-	return br_handle_frame_hook(port, skb);
-}
-#else
-#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
-#endif
-
-#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
-struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p,
-					     struct sk_buff *skb) __read_mostly;
-EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
-
-static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
-					     struct packet_type **pt_prev,
-					     int *ret,
-					     struct net_device *orig_dev)
-{
-	struct macvlan_port *port;
-
-	port = rcu_dereference(skb->dev->macvlan_port);
-	if (!port)
-		return skb;
-
-	if (*pt_prev) {
-		*ret = deliver_skb(skb, *pt_prev, orig_dev);
-		*pt_prev = NULL;
-	}
-	return macvlan_handle_frame_hook(port, skb);
-}
-#else
-#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
-#endif
-
 #ifdef CONFIG_NET_CLS_ACT
 /* TODO: Maybe we should just force sch_ingress to be compiled in
  * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
@@ -2711,9 +2672,6 @@
 	if (*pt_prev) {
 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
 		*pt_prev = NULL;
-	} else {
-		/* Huh? Why does turning on AF_PACKET affect this? */
-		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
 	}
 
 	switch (ing_filter(skb)) {
@@ -2756,6 +2714,51 @@
 	rcu_read_unlock();
 }
 
+/**
+ *	netdev_rx_handler_register - register receive handler
+ *	@dev: device to register a handler for
+ *	@rx_handler: receive handler to register
+ *	@rx_handler_data: data pointer that is used by rx handler
+ *
+ *	Register a receive handler for a device. This handler will then be
+ *	called from __netif_receive_skb. A negative errno code is returned
+ *	on a failure.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int netdev_rx_handler_register(struct net_device *dev,
+			       rx_handler_func_t *rx_handler,
+			       void *rx_handler_data)
+{
+	ASSERT_RTNL();
+
+	if (dev->rx_handler)
+		return -EBUSY;
+
+	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
+	rcu_assign_pointer(dev->rx_handler, rx_handler);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
+
+/**
+ *	netdev_rx_handler_unregister - unregister receive handler
+ *	@dev: device to unregister a handler from
+ *
+ *	Unregister a receive handler from a device.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+void netdev_rx_handler_unregister(struct net_device *dev)
+{
+
+	ASSERT_RTNL();
+	rcu_assign_pointer(dev->rx_handler, NULL);
+	rcu_assign_pointer(dev->rx_handler_data, NULL);
+}
+EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+
 static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
 					      struct net_device *master)
 {
@@ -2777,7 +2780,8 @@
 	if (master->priv_flags & IFF_MASTER_ARPMON)
 		dev->last_rx = jiffies;
 
-	if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
+	if ((master->priv_flags & IFF_MASTER_ALB) &&
+	    (master->priv_flags & IFF_BRIDGE_PORT)) {
 		/* Do address unmangle. The local destination address
 		 * will be always the one master has. Provides the right
 		 * functionality in a bridge.
@@ -2808,6 +2812,7 @@
 static int __netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
+	rx_handler_func_t *rx_handler;
 	struct net_device *orig_dev;
 	struct net_device *master;
 	struct net_device *null_or_orig;
@@ -2849,8 +2854,7 @@
 			skb->dev = master;
 	}
 
-	__get_cpu_var(softnet_data).processed++;
-
+	__this_cpu_inc(softnet_data.processed);
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
 	skb->mac_len = skb->network_header - skb->mac_header;
@@ -2882,12 +2886,17 @@
 ncls:
 #endif
 
-	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
-	if (!skb)
-		goto out;
-	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
-	if (!skb)
-		goto out;
+	/* Handle special case of bridge or macvlan */
+	rx_handler = rcu_dereference(skb->dev->rx_handler);
+	if (rx_handler) {
+		if (pt_prev) {
+			ret = deliver_skb(skb, pt_prev, orig_dev);
+			pt_prev = NULL;
+		}
+		skb = rx_handler(skb);
+		if (!skb)
+			goto out;
+	}
 
 	/*
 	 * Make sure frames received on VLAN interfaces stacked on
@@ -3712,10 +3721,11 @@
 
 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 {
-	const struct net_device_stats *stats = dev_get_stats(dev);
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 
-	seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
-		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
 		   dev->name, stats->rx_bytes, stats->rx_packets,
 		   stats->rx_errors,
 		   stats->rx_dropped + stats->rx_missed_errors,
@@ -5290,20 +5300,29 @@
 /**
  *	dev_get_stats	- get network device statistics
  *	@dev: device to get statistics from
+ *	@storage: place to store stats
  *
  *	Get network statistics from device. The device driver may provide
- *	its own method by setting dev->netdev_ops->get_stats; otherwise
- *	the internal statistics structure is used.
+ *	its own method by setting dev->netdev_ops->get_stats64 or
+ *	dev->netdev_ops->get_stats; otherwise the internal statistics
+ *	structure is used.
  */
-const struct net_device_stats *dev_get_stats(struct net_device *dev)
+const struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+					      struct rtnl_link_stats64 *storage)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 
-	if (ops->ndo_get_stats)
-		return ops->ndo_get_stats(dev);
-
-	dev_txq_stats_fold(dev, &dev->stats);
-	return &dev->stats;
+	if (ops->ndo_get_stats64) {
+		memset(storage, 0, sizeof(*storage));
+		return ops->ndo_get_stats64(dev, storage);
+	}
+	if (ops->ndo_get_stats) {
+		memcpy(storage, ops->ndo_get_stats(dev), sizeof(*storage));
+		return storage;
+	}
+	memcpy(storage, &dev->stats, sizeof(*storage));
+	dev_txq_stats_fold(dev, (struct net_device_stats *)storage);
+	return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
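
With the new dev_get_stats() signature above, callers supply scratch storage and must not assume the returned pointer outlives it; dev_seq_printf_stats() earlier in this patch shows the pattern. A hedged sketch of a caller (the helper name is made up):

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Sketch: log a device's RX/TX byte counters. */
static void log_dev_bytes(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	pr_info("%s: rx %llu bytes, tx %llu bytes\n", dev->name,
		(unsigned long long)stats->rx_bytes,
		(unsigned long long)stats->tx_bytes);
}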
 
@@ -5808,6 +5827,68 @@
 	return buffer;
 }
 
+static int __netdev_printk(const char *level, const struct net_device *dev,
+			   struct va_format *vaf)
+{
+	int r;
+
+	if (dev && dev->dev.parent)
+		r = dev_printk(level, dev->dev.parent, "%s: %pV",
+			       netdev_name(dev), vaf);
+	else if (dev)
+		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
+	else
+		r = printk("%s(NULL net_device): %pV", level, vaf);
+
+	return r;
+}
+
+int netdev_printk(const char *level, const struct net_device *dev,
+		  const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int r;
+
+	va_start(args, format);
+
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	r = __netdev_printk(level, dev, &vaf);
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(netdev_printk);
+
+#define define_netdev_printk_level(func, level)			\
+int func(const struct net_device *dev, const char *fmt, ...)	\
+{								\
+	int r;							\
+	struct va_format vaf;					\
+	va_list args;						\
+								\
+	va_start(args, fmt);					\
+								\
+	vaf.fmt = fmt;						\
+	vaf.va = &args;						\
+								\
+	r = __netdev_printk(level, dev, &vaf);			\
+	va_end(args);						\
+								\
+	return r;						\
+}								\
+EXPORT_SYMBOL(func);
+
+define_netdev_printk_level(netdev_emerg, KERN_EMERG);
+define_netdev_printk_level(netdev_alert, KERN_ALERT);
+define_netdev_printk_level(netdev_crit, KERN_CRIT);
+define_netdev_printk_level(netdev_err, KERN_ERR);
+define_netdev_printk_level(netdev_warn, KERN_WARNING);
+define_netdev_printk_level(netdev_notice, KERN_NOTICE);
+define_netdev_printk_level(netdev_info, KERN_INFO);
+
 static void __net_exit netdev_exit(struct net *net)
 {
 	kfree(net->dev_name_head);
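
The netdev_printk() helpers defined above give drivers dev_printk()-style output prefixed with the interface name; each netdev_<level>() wrapper is expanded from define_netdev_printk_level(). A short usage sketch (the driver helper itself is hypothetical):

#include <linux/netdevice.h>

/* Hypothetical driver helper using the new netdev_* printing wrappers. */
static void report_link(struct net_device *dev, bool up, int err)
{
	if (err)
		netdev_err(dev, "link change failed: %d\n", err);
	else if (up)
		netdev_info(dev, "link is up\n");
	else
		netdev_warn(dev, "link is down\n");
}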
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 75e4ffe..7a85367 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -144,31 +144,13 @@
 }
 EXPORT_SYMBOL(ethtool_op_get_flags);
 
-int ethtool_op_set_flags(struct net_device *dev, u32 data)
+int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
 {
-	const struct ethtool_ops *ops = dev->ethtool_ops;
-	unsigned long features = dev->features;
+	if (data & ~supported)
+		return -EINVAL;
 
-	if (data & ETH_FLAG_LRO)
-		features |= NETIF_F_LRO;
-	else
-		features &= ~NETIF_F_LRO;
-
-	if (data & ETH_FLAG_NTUPLE) {
-		if (!ops->set_rx_ntuple)
-			return -EOPNOTSUPP;
-		features |= NETIF_F_NTUPLE;
-	} else {
-		/* safe to clear regardless */
-		features &= ~NETIF_F_NTUPLE;
-	}
-
-	if (data & ETH_FLAG_RXHASH)
-		features |= NETIF_F_RXHASH;
-	else
-		features &= ~NETIF_F_RXHASH;
-
-	dev->features = features;
+	dev->features = ((dev->features & ~flags_dup_features) |
+			 (data & flags_dup_features));
 	return 0;
 }
 EXPORT_SYMBOL(ethtool_op_set_flags);
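
With the supported-mask form of ethtool_op_set_flags() above, a driver's .set_flags callback typically reduces to a one-line wrapper naming the ETH_FLAG_* bits it implements. A sketch, where the callback name and the particular flag set are assumptions:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical .set_flags callback for a driver offering LRO and RX hashing. */
static int my_set_flags(struct net_device *dev, u32 data)
{
	return ethtool_op_set_flags(dev, data,
				    ETH_FLAG_LRO | ETH_FLAG_RXHASH);
}

Such a callback would be wired up through the driver's struct ethtool_ops, alongside its other ethtool operations.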
@@ -395,6 +377,80 @@
 	return ret;
 }
 
+static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
+						     void __user *useraddr)
+{
+	struct ethtool_rxfh_indir *indir;
+	u32 table_size;
+	size_t full_size;
+	int ret;
+
+	if (!dev->ethtool_ops->get_rxfh_indir)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&table_size,
+			   useraddr + offsetof(struct ethtool_rxfh_indir, size),
+			   sizeof(table_size)))
+		return -EFAULT;
+
+	if (table_size >
+	    (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
+		return -ENOMEM;
+	full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
+	indir = kmalloc(full_size, GFP_USER);
+	if (!indir)
+		return -ENOMEM;
+
+	indir->cmd = ETHTOOL_GRXFHINDIR;
+	indir->size = table_size;
+	ret = dev->ethtool_ops->get_rxfh_indir(dev, indir);
+	if (ret)
+		goto out;
+
+	if (copy_to_user(useraddr, indir, full_size))
+		ret = -EFAULT;
+
+out:
+	kfree(indir);
+	return ret;
+}
+
+static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
+						     void __user *useraddr)
+{
+	struct ethtool_rxfh_indir *indir;
+	u32 table_size;
+	size_t full_size;
+	int ret;
+
+	if (!dev->ethtool_ops->set_rxfh_indir)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&table_size,
+			   useraddr + offsetof(struct ethtool_rxfh_indir, size),
+			   sizeof(table_size)))
+		return -EFAULT;
+
+	if (table_size >
+	    (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
+		return -ENOMEM;
+	full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
+	indir = kmalloc(full_size, GFP_USER);
+	if (!indir)
+		return -ENOMEM;
+
+	if (copy_from_user(indir, useraddr, full_size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = dev->ethtool_ops->set_rxfh_indir(dev, indir);
+
+out:
+	kfree(indir);
+	return ret;
+}
+
 static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
 			struct ethtool_rx_ntuple_flow_spec *spec,
 			struct ethtool_rx_ntuple_flow_spec_container *fsc)
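
The ETHTOOL_GRXFHINDIR/ETHTOOL_SRXFHINDIR plumbing above only validates sizes and copies struct ethtool_rxfh_indir to and from the driver; the driver callback fills (or applies) indir->ring_index[]. A hedged driver-side sketch — the private struct, table size and size-check policy are all assumptions, only the callback signature comes from the code above:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define MY_RSS_TABLE_SIZE 128			/* hypothetical */

struct my_priv {
	u32 rss_table[MY_RSS_TABLE_SIZE];	/* hypothetical RSS table */
};

/* Hypothetical .get_rxfh_indir callback reporting the indirection table. */
static int my_get_rxfh_indir(struct net_device *dev,
			     struct ethtool_rxfh_indir *indir)
{
	struct my_priv *priv = netdev_priv(dev);
	unsigned int i;

	if (indir->size != MY_RSS_TABLE_SIZE)	/* assumed policy */
		return -EINVAL;

	for (i = 0; i < MY_RSS_TABLE_SIZE; i++)
		indir->ring_index[i] = priv->rss_table[i];
	return 0;
}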
@@ -1563,6 +1619,12 @@
 	case ETHTOOL_GSSET_INFO:
 		rc = ethtool_get_sset_info(dev, useraddr);
 		break;
+	case ETHTOOL_GRXFHINDIR:
+		rc = ethtool_get_rxfh_indir(dev, useraddr);
+		break;
+	case ETHTOOL_SRXFHINDIR:
+		rc = ethtool_set_rxfh_indir(dev, useraddr);
+		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
diff --git a/net/core/filter.c b/net/core/filter.c
index da69fb7..52b051f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -128,87 +128,87 @@
 		fentry = &filter[pc];
 
 		switch (fentry->code) {
-		case BPF_ALU|BPF_ADD|BPF_X:
+		case BPF_S_ALU_ADD_X:
 			A += X;
 			continue;
-		case BPF_ALU|BPF_ADD|BPF_K:
+		case BPF_S_ALU_ADD_K:
 			A += fentry->k;
 			continue;
-		case BPF_ALU|BPF_SUB|BPF_X:
+		case BPF_S_ALU_SUB_X:
 			A -= X;
 			continue;
-		case BPF_ALU|BPF_SUB|BPF_K:
+		case BPF_S_ALU_SUB_K:
 			A -= fentry->k;
 			continue;
-		case BPF_ALU|BPF_MUL|BPF_X:
+		case BPF_S_ALU_MUL_X:
 			A *= X;
 			continue;
-		case BPF_ALU|BPF_MUL|BPF_K:
+		case BPF_S_ALU_MUL_K:
 			A *= fentry->k;
 			continue;
-		case BPF_ALU|BPF_DIV|BPF_X:
+		case BPF_S_ALU_DIV_X:
 			if (X == 0)
 				return 0;
 			A /= X;
 			continue;
-		case BPF_ALU|BPF_DIV|BPF_K:
+		case BPF_S_ALU_DIV_K:
 			A /= fentry->k;
 			continue;
-		case BPF_ALU|BPF_AND|BPF_X:
+		case BPF_S_ALU_AND_X:
 			A &= X;
 			continue;
-		case BPF_ALU|BPF_AND|BPF_K:
+		case BPF_S_ALU_AND_K:
 			A &= fentry->k;
 			continue;
-		case BPF_ALU|BPF_OR|BPF_X:
+		case BPF_S_ALU_OR_X:
 			A |= X;
 			continue;
-		case BPF_ALU|BPF_OR|BPF_K:
+		case BPF_S_ALU_OR_K:
 			A |= fentry->k;
 			continue;
-		case BPF_ALU|BPF_LSH|BPF_X:
+		case BPF_S_ALU_LSH_X:
 			A <<= X;
 			continue;
-		case BPF_ALU|BPF_LSH|BPF_K:
+		case BPF_S_ALU_LSH_K:
 			A <<= fentry->k;
 			continue;
-		case BPF_ALU|BPF_RSH|BPF_X:
+		case BPF_S_ALU_RSH_X:
 			A >>= X;
 			continue;
-		case BPF_ALU|BPF_RSH|BPF_K:
+		case BPF_S_ALU_RSH_K:
 			A >>= fentry->k;
 			continue;
-		case BPF_ALU|BPF_NEG:
+		case BPF_S_ALU_NEG:
 			A = -A;
 			continue;
-		case BPF_JMP|BPF_JA:
+		case BPF_S_JMP_JA:
 			pc += fentry->k;
 			continue;
-		case BPF_JMP|BPF_JGT|BPF_K:
+		case BPF_S_JMP_JGT_K:
 			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
 			continue;
-		case BPF_JMP|BPF_JGE|BPF_K:
+		case BPF_S_JMP_JGE_K:
 			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
 			continue;
-		case BPF_JMP|BPF_JEQ|BPF_K:
+		case BPF_S_JMP_JEQ_K:
 			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
 			continue;
-		case BPF_JMP|BPF_JSET|BPF_K:
+		case BPF_S_JMP_JSET_K:
 			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
 			continue;
-		case BPF_JMP|BPF_JGT|BPF_X:
+		case BPF_S_JMP_JGT_X:
 			pc += (A > X) ? fentry->jt : fentry->jf;
 			continue;
-		case BPF_JMP|BPF_JGE|BPF_X:
+		case BPF_S_JMP_JGE_X:
 			pc += (A >= X) ? fentry->jt : fentry->jf;
 			continue;
-		case BPF_JMP|BPF_JEQ|BPF_X:
+		case BPF_S_JMP_JEQ_X:
 			pc += (A == X) ? fentry->jt : fentry->jf;
 			continue;
-		case BPF_JMP|BPF_JSET|BPF_X:
+		case BPF_S_JMP_JSET_X:
 			pc += (A & X) ? fentry->jt : fentry->jf;
 			continue;
-		case BPF_LD|BPF_W|BPF_ABS:
+		case BPF_S_LD_W_ABS:
 			k = fentry->k;
 load_w:
 			ptr = load_pointer(skb, k, 4, &tmp);
@@ -217,7 +217,7 @@
 				continue;
 			}
 			break;
-		case BPF_LD|BPF_H|BPF_ABS:
+		case BPF_S_LD_H_ABS:
 			k = fentry->k;
 load_h:
 			ptr = load_pointer(skb, k, 2, &tmp);
@@ -226,7 +226,7 @@
 				continue;
 			}
 			break;
-		case BPF_LD|BPF_B|BPF_ABS:
+		case BPF_S_LD_B_ABS:
 			k = fentry->k;
 load_b:
 			ptr = load_pointer(skb, k, 1, &tmp);
@@ -235,54 +235,54 @@
 				continue;
 			}
 			break;
-		case BPF_LD|BPF_W|BPF_LEN:
+		case BPF_S_LD_W_LEN:
 			A = skb->len;
 			continue;
-		case BPF_LDX|BPF_W|BPF_LEN:
+		case BPF_S_LDX_W_LEN:
 			X = skb->len;
 			continue;
-		case BPF_LD|BPF_W|BPF_IND:
+		case BPF_S_LD_W_IND:
 			k = X + fentry->k;
 			goto load_w;
-		case BPF_LD|BPF_H|BPF_IND:
+		case BPF_S_LD_H_IND:
 			k = X + fentry->k;
 			goto load_h;
-		case BPF_LD|BPF_B|BPF_IND:
+		case BPF_S_LD_B_IND:
 			k = X + fentry->k;
 			goto load_b;
-		case BPF_LDX|BPF_B|BPF_MSH:
+		case BPF_S_LDX_B_MSH:
 			ptr = load_pointer(skb, fentry->k, 1, &tmp);
 			if (ptr != NULL) {
 				X = (*(u8 *)ptr & 0xf) << 2;
 				continue;
 			}
 			return 0;
-		case BPF_LD|BPF_IMM:
+		case BPF_S_LD_IMM:
 			A = fentry->k;
 			continue;
-		case BPF_LDX|BPF_IMM:
+		case BPF_S_LDX_IMM:
 			X = fentry->k;
 			continue;
-		case BPF_LD|BPF_MEM:
+		case BPF_S_LD_MEM:
 			A = mem[fentry->k];
 			continue;
-		case BPF_LDX|BPF_MEM:
+		case BPF_S_LDX_MEM:
 			X = mem[fentry->k];
 			continue;
-		case BPF_MISC|BPF_TAX:
+		case BPF_S_MISC_TAX:
 			X = A;
 			continue;
-		case BPF_MISC|BPF_TXA:
+		case BPF_S_MISC_TXA:
 			A = X;
 			continue;
-		case BPF_RET|BPF_K:
+		case BPF_S_RET_K:
 			return fentry->k;
-		case BPF_RET|BPF_A:
+		case BPF_S_RET_A:
 			return A;
-		case BPF_ST:
+		case BPF_S_ST:
 			mem[fentry->k] = A;
 			continue;
-		case BPF_STX:
+		case BPF_S_STX:
 			mem[fentry->k] = X;
 			continue;
 		default:
@@ -390,53 +390,128 @@
 		/* Only allow valid instructions */
 		switch (ftest->code) {
 		case BPF_ALU|BPF_ADD|BPF_K:
+			ftest->code = BPF_S_ALU_ADD_K;
+			break;
 		case BPF_ALU|BPF_ADD|BPF_X:
+			ftest->code = BPF_S_ALU_ADD_X;
+			break;
 		case BPF_ALU|BPF_SUB|BPF_K:
+			ftest->code = BPF_S_ALU_SUB_K;
+			break;
 		case BPF_ALU|BPF_SUB|BPF_X:
+			ftest->code = BPF_S_ALU_SUB_X;
+			break;
 		case BPF_ALU|BPF_MUL|BPF_K:
+			ftest->code = BPF_S_ALU_MUL_K;
+			break;
 		case BPF_ALU|BPF_MUL|BPF_X:
+			ftest->code = BPF_S_ALU_MUL_X;
+			break;
 		case BPF_ALU|BPF_DIV|BPF_X:
+			ftest->code = BPF_S_ALU_DIV_X;
+			break;
 		case BPF_ALU|BPF_AND|BPF_K:
+			ftest->code = BPF_S_ALU_AND_K;
+			break;
 		case BPF_ALU|BPF_AND|BPF_X:
+			ftest->code = BPF_S_ALU_AND_X;
+			break;
 		case BPF_ALU|BPF_OR|BPF_K:
+			ftest->code = BPF_S_ALU_OR_K;
+			break;
 		case BPF_ALU|BPF_OR|BPF_X:
+			ftest->code = BPF_S_ALU_OR_X;
+			break;
 		case BPF_ALU|BPF_LSH|BPF_K:
+			ftest->code = BPF_S_ALU_LSH_K;
+			break;
 		case BPF_ALU|BPF_LSH|BPF_X:
+			ftest->code = BPF_S_ALU_LSH_X;
+			break;
 		case BPF_ALU|BPF_RSH|BPF_K:
+			ftest->code = BPF_S_ALU_RSH_K;
+			break;
 		case BPF_ALU|BPF_RSH|BPF_X:
+			ftest->code = BPF_S_ALU_RSH_X;
+			break;
 		case BPF_ALU|BPF_NEG:
+			ftest->code = BPF_S_ALU_NEG;
+			break;
 		case BPF_LD|BPF_W|BPF_ABS:
+			ftest->code = BPF_S_LD_W_ABS;
+			break;
 		case BPF_LD|BPF_H|BPF_ABS:
+			ftest->code = BPF_S_LD_H_ABS;
+			break;
 		case BPF_LD|BPF_B|BPF_ABS:
+			ftest->code = BPF_S_LD_B_ABS;
+			break;
 		case BPF_LD|BPF_W|BPF_LEN:
+			ftest->code = BPF_S_LD_W_LEN;
+			break;
 		case BPF_LD|BPF_W|BPF_IND:
+			ftest->code = BPF_S_LD_W_IND;
+			break;
 		case BPF_LD|BPF_H|BPF_IND:
+			ftest->code = BPF_S_LD_H_IND;
+			break;
 		case BPF_LD|BPF_B|BPF_IND:
+			ftest->code = BPF_S_LD_B_IND;
+			break;
 		case BPF_LD|BPF_IMM:
+			ftest->code = BPF_S_LD_IMM;
+			break;
 		case BPF_LDX|BPF_W|BPF_LEN:
+			ftest->code = BPF_S_LDX_W_LEN;
+			break;
 		case BPF_LDX|BPF_B|BPF_MSH:
+			ftest->code = BPF_S_LDX_B_MSH;
+			break;
 		case BPF_LDX|BPF_IMM:
+			ftest->code = BPF_S_LDX_IMM;
+			break;
 		case BPF_MISC|BPF_TAX:
+			ftest->code = BPF_S_MISC_TAX;
+			break;
 		case BPF_MISC|BPF_TXA:
+			ftest->code = BPF_S_MISC_TXA;
+			break;
 		case BPF_RET|BPF_K:
+			ftest->code = BPF_S_RET_K;
+			break;
 		case BPF_RET|BPF_A:
+			ftest->code = BPF_S_RET_A;
 			break;
 
 		/* Some instructions need special checks */
 
-		case BPF_ALU|BPF_DIV|BPF_K:
 			/* check for division by zero */
+		case BPF_ALU|BPF_DIV|BPF_K:
 			if (ftest->k == 0)
 				return -EINVAL;
+			ftest->code = BPF_S_ALU_DIV_K;
 			break;
 
+		/* check for invalid memory addresses */
 		case BPF_LD|BPF_MEM:
-		case BPF_LDX|BPF_MEM:
-		case BPF_ST:
-		case BPF_STX:
-			/* check for invalid memory addresses */
 			if (ftest->k >= BPF_MEMWORDS)
 				return -EINVAL;
+			ftest->code = BPF_S_LD_MEM;
+			break;
+		case BPF_LDX|BPF_MEM:
+			if (ftest->k >= BPF_MEMWORDS)
+				return -EINVAL;
+			ftest->code = BPF_S_LDX_MEM;
+			break;
+		case BPF_ST:
+			if (ftest->k >= BPF_MEMWORDS)
+				return -EINVAL;
+			ftest->code = BPF_S_ST;
+			break;
+		case BPF_STX:
+			if (ftest->k >= BPF_MEMWORDS)
+				return -EINVAL;
+			ftest->code = BPF_S_STX;
 			break;
 
 		case BPF_JMP|BPF_JA:
@@ -447,28 +522,63 @@
 			 */
 			if (ftest->k >= (unsigned)(flen-pc-1))
 				return -EINVAL;
+			ftest->code = BPF_S_JMP_JA;
 			break;
 
 		case BPF_JMP|BPF_JEQ|BPF_K:
+			ftest->code = BPF_S_JMP_JEQ_K;
+			break;
 		case BPF_JMP|BPF_JEQ|BPF_X:
+			ftest->code = BPF_S_JMP_JEQ_X;
+			break;
 		case BPF_JMP|BPF_JGE|BPF_K:
+			ftest->code = BPF_S_JMP_JGE_K;
+			break;
 		case BPF_JMP|BPF_JGE|BPF_X:
+			ftest->code = BPF_S_JMP_JGE_X;
+			break;
 		case BPF_JMP|BPF_JGT|BPF_K:
+			ftest->code = BPF_S_JMP_JGT_K;
+			break;
 		case BPF_JMP|BPF_JGT|BPF_X:
+			ftest->code = BPF_S_JMP_JGT_X;
+			break;
 		case BPF_JMP|BPF_JSET|BPF_K:
+			ftest->code = BPF_S_JMP_JSET_K;
+			break;
 		case BPF_JMP|BPF_JSET|BPF_X:
-			/* for conditionals both must be safe */
-			if (pc + ftest->jt + 1 >= flen ||
-			    pc + ftest->jf + 1 >= flen)
-				return -EINVAL;
+			ftest->code = BPF_S_JMP_JSET_X;
 			break;
 
 		default:
 			return -EINVAL;
 		}
+
+		/* for conditionals both must be safe */
+		switch (ftest->code) {
+		case BPF_S_JMP_JEQ_K:
+		case BPF_S_JMP_JEQ_X:
+		case BPF_S_JMP_JGE_K:
+		case BPF_S_JMP_JGE_X:
+		case BPF_S_JMP_JGT_K:
+		case BPF_S_JMP_JGT_X:
+		case BPF_S_JMP_JSET_X:
+		case BPF_S_JMP_JSET_K:
+			if (pc + ftest->jt + 1 >= flen ||
+			    pc + ftest->jf + 1 >= flen)
+				return -EINVAL;
+		}
 	}
 
-	return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
+	/* last instruction must be a RET code */
+	switch (filter[flen - 1].code) {
+	case BPF_S_RET_K:
+	case BPF_S_RET_A:
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 EXPORT_SYMBOL(sk_chk_filter);
 
diff --git a/net/core/flow.c b/net/core/flow.c
index 1619006..8c7c91a 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -222,7 +222,7 @@
 	unsigned int hash;
 
 	local_bh_disable();
-	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
+	fcp = this_cpu_ptr(fc->percpu);
 
 	fle = NULL;
 	flo = NULL;
@@ -302,7 +302,7 @@
 	LIST_HEAD(gc_list);
 	int i, deleted = 0;
 
-	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
+	fcp = this_cpu_ptr(fc->percpu);
 	for (i = 0; i < flow_cache_hash_size(fc); i++) {
 		hlist_for_each_entry_safe(fle, entry, tmp,
 					  &fcp->hash_table[i], u.hlist) {
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 785e527..9fbe7f7 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -263,6 +263,7 @@
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
+ * Note : Caller should respect an RCU grace period before freeing stats_lock
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 99e7052..914f42b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -29,6 +29,7 @@
 static const char fmt_long_hex[] = "%#lx\n";
 static const char fmt_dec[] = "%d\n";
 static const char fmt_ulong[] = "%lu\n";
+static const char fmt_u64[] = "%llu\n";
 
 static inline int dev_isalive(const struct net_device *dev)
 {
@@ -324,14 +325,15 @@
 	struct net_device *dev = to_net_dev(d);
 	ssize_t ret = -EINVAL;
 
-	WARN_ON(offset > sizeof(struct net_device_stats) ||
-			offset % sizeof(unsigned long) != 0);
+	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
+			offset % sizeof(u64) != 0);
 
 	read_lock(&dev_base_lock);
 	if (dev_isalive(dev)) {
-		const struct net_device_stats *stats = dev_get_stats(dev);
-		ret = sprintf(buf, fmt_ulong,
-			      *(unsigned long *)(((u8 *) stats) + offset));
+		struct rtnl_link_stats64 temp;
+		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
+
+		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
 	}
 	read_unlock(&dev_base_lock);
 	return ret;
@@ -343,7 +345,7 @@
 			   struct device_attribute *attr, char *buf) 	\
 {									\
 	return netstat_show(d, attr, buf,				\
-			    offsetof(struct net_device_stats, name));	\
+			    offsetof(struct rtnl_link_stats64, name));	\
 }									\
 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94825b1..ca6dc31 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -49,7 +49,6 @@
 		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
 				sizeof(struct iphdr) + sizeof(struct ethhdr))
 
-static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
 static unsigned int carrier_timeout = 4;
@@ -197,7 +196,6 @@
 
 	service_arp_queue(dev->npinfo);
 
-	zap_completion_queue();
 }
 
 void netpoll_poll(struct netpoll *np)
@@ -221,40 +219,11 @@
 	spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
-static void zap_completion_queue(void)
-{
-	unsigned long flags;
-	struct softnet_data *sd = &get_cpu_var(softnet_data);
-
-	if (sd->completion_queue) {
-		struct sk_buff *clist;
-
-		local_irq_save(flags);
-		clist = sd->completion_queue;
-		sd->completion_queue = NULL;
-		local_irq_restore(flags);
-
-		while (clist != NULL) {
-			struct sk_buff *skb = clist;
-			clist = clist->next;
-			if (skb->destructor) {
-				atomic_inc(&skb->users);
-				dev_kfree_skb_any(skb); /* put this one back */
-			} else {
-				__kfree_skb(skb);
-			}
-		}
-	}
-
-	put_cpu_var(softnet_data);
-}
-
 static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
 	int count = 0;
 	struct sk_buff *skb;
 
-	zap_completion_queue();
 	refill_skbs();
 repeat:
 
@@ -292,6 +261,7 @@
 	unsigned long tries;
 	struct net_device *dev = np->dev;
 	const struct net_device_ops *ops = dev->netdev_ops;
+	/* It is up to the caller to keep npinfo alive. */
 	struct netpoll_info *npinfo = np->dev->npinfo;
 
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -723,29 +693,27 @@
 	return -1;
 }
 
-int netpoll_setup(struct netpoll *np)
+int __netpoll_setup(struct netpoll *np)
 {
-	struct net_device *ndev = NULL;
-	struct in_device *in_dev;
+	struct net_device *ndev = np->dev;
 	struct netpoll_info *npinfo;
-	struct netpoll *npe, *tmp;
+	const struct net_device_ops *ops;
 	unsigned long flags;
 	int err;
 
-	if (np->dev_name)
-		ndev = dev_get_by_name(&init_net, np->dev_name);
-	if (!ndev) {
-		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
+	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+	    !ndev->netdev_ops->ndo_poll_controller) {
+		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
 		       np->name, np->dev_name);
-		return -ENODEV;
+		err = -ENOTSUPP;
+		goto out;
 	}
 
-	np->dev = ndev;
 	if (!ndev->npinfo) {
 		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
 		if (!npinfo) {
 			err = -ENOMEM;
-			goto put;
+			goto out;
 		}
 
 		npinfo->rx_flags = 0;
@@ -757,6 +725,13 @@
 		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
 		atomic_set(&npinfo->refcnt, 1);
+
+		ops = np->dev->netdev_ops;
+		if (ops->ndo_netpoll_setup) {
+			err = ops->ndo_netpoll_setup(ndev, npinfo);
+			if (err)
+				goto free_npinfo;
+		}
 	} else {
 		npinfo = ndev->npinfo;
 		atomic_inc(&npinfo->refcnt);
@@ -764,12 +739,37 @@
 
 	npinfo->netpoll = np;
 
-	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
-	    !ndev->netdev_ops->ndo_poll_controller) {
-		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
+	if (np->rx_hook) {
+		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
+		list_add_tail(&np->rx, &npinfo->rx_np);
+		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	}
+
+	/* last thing to do is link it to the net device structure */
+	rcu_assign_pointer(ndev->npinfo, npinfo);
+
+	return 0;
+
+free_npinfo:
+	kfree(npinfo);
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(__netpoll_setup);
+
+int netpoll_setup(struct netpoll *np)
+{
+	struct net_device *ndev = NULL;
+	struct in_device *in_dev;
+	int err;
+
+	if (np->dev_name)
+		ndev = dev_get_by_name(&init_net, np->dev_name);
+	if (!ndev) {
+		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
 		       np->name, np->dev_name);
-		err = -ENOTSUPP;
-		goto release;
+		return -ENODEV;
 	}
 
 	if (!netif_running(ndev)) {
@@ -785,7 +785,7 @@
 		if (err) {
 			printk(KERN_ERR "%s: failed to open %s\n",
 			       np->name, ndev->name);
-			goto release;
+			goto put;
 		}
 
 		atleast = jiffies + HZ/10;
@@ -822,7 +822,7 @@
 			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
 			       np->name, np->dev_name);
 			err = -EDESTADDRREQ;
-			goto release;
+			goto put;
 		}
 
 		np->local_ip = in_dev->ifa_list->ifa_local;
@@ -830,34 +830,20 @@
 		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
 	}
 
-	if (np->rx_hook) {
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
-		list_add_tail(&np->rx, &npinfo->rx_np);
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-	}
+	np->dev = ndev;
 
 	/* fill up the skb queue */
 	refill_skbs();
 
-	/* last thing to do is link it to the net device structure */
-	ndev->npinfo = npinfo;
+	rtnl_lock();
+	err = __netpoll_setup(np);
+	rtnl_unlock();
 
-	/* avoid racing with NAPI reading npinfo */
-	synchronize_rcu();
+	if (err)
+		goto put;
 
 	return 0;
 
- release:
-	if (!ndev->npinfo) {
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
-			npe->dev = NULL;
-		}
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
-		kfree(npinfo);
-	}
 put:
 	dev_put(ndev);
 	return err;
@@ -870,42 +856,56 @@
 }
 core_initcall(netpoll_init);
 
-void netpoll_cleanup(struct netpoll *np)
+void __netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
 	unsigned long flags;
 
-	if (np->dev) {
-		npinfo = np->dev->npinfo;
-		if (npinfo) {
-			if (!list_empty(&npinfo->rx_np)) {
-				spin_lock_irqsave(&npinfo->rx_lock, flags);
-				list_del(&np->rx);
-				if (list_empty(&npinfo->rx_np))
-					npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
-				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-			}
+	npinfo = np->dev->npinfo;
+	if (!npinfo)
+		return;
 
-			if (atomic_dec_and_test(&npinfo->refcnt)) {
-				const struct net_device_ops *ops;
-				skb_queue_purge(&npinfo->arp_tx);
-				skb_queue_purge(&npinfo->txq);
-				cancel_rearming_delayed_work(&npinfo->tx_work);
-
-				/* clean after last, unfinished work */
-				__skb_queue_purge(&npinfo->txq);
-				kfree(npinfo);
-				ops = np->dev->netdev_ops;
-				if (ops->ndo_netpoll_cleanup)
-					ops->ndo_netpoll_cleanup(np->dev);
-				else
-					np->dev->npinfo = NULL;
-			}
-		}
-
-		dev_put(np->dev);
+	if (!list_empty(&npinfo->rx_np)) {
+		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		list_del(&np->rx);
+		if (list_empty(&npinfo->rx_np))
+			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
 
+	if (atomic_dec_and_test(&npinfo->refcnt)) {
+		const struct net_device_ops *ops;
+
+		ops = np->dev->netdev_ops;
+		if (ops->ndo_netpoll_cleanup)
+			ops->ndo_netpoll_cleanup(np->dev);
+
+		rcu_assign_pointer(np->dev->npinfo, NULL);
+
+		/* avoid racing with NAPI reading npinfo */
+		synchronize_rcu_bh();
+
+		skb_queue_purge(&npinfo->arp_tx);
+		skb_queue_purge(&npinfo->txq);
+		cancel_rearming_delayed_work(&npinfo->tx_work);
+
+		/* clean after last, unfinished work */
+		__skb_queue_purge(&npinfo->txq);
+		kfree(npinfo);
+	}
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+
+void netpoll_cleanup(struct netpoll *np)
+{
+	if (!np->dev)
+		return;
+
+	rtnl_lock();
+	__netpoll_cleanup(np);
+	rtnl_unlock();
+
+	dev_put(np->dev);
 	np->dev = NULL;
 }
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1dacd7b..1ee2ebd 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -115,6 +115,9 @@
  * command by Adit Ranadive <adit.262@gmail.com>
  *
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/sys.h>
 #include <linux/types.h>
 #include <linux/module.h>
@@ -169,11 +172,13 @@
 #include <asm/dma.h>
 #include <asm/div64.h>		/* do_div */
 
-#define VERSION 	"2.73"
+#define VERSION	"2.74"
 #define IP_NAME_SZ 32
 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
 #define MPLS_STACK_BOTTOM htonl(0x00000100)
 
+#define func_enter() pr_debug("entering %s\n", __func__)
+
 /* Device flag bits */
 #define F_IPSRC_RND   (1<<0)	/* IP-Src Random  */
 #define F_IPDST_RND   (1<<1)	/* IP-Dst Random  */
@@ -424,7 +429,8 @@
 }
 
 static const char version[] =
-	"pktgen " VERSION ": Packet Generator for packet performance testing.\n";
+	"Packet Generator for packet performance testing. "
+	"Version: " VERSION "\n";
 
 static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
 static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
@@ -495,7 +501,7 @@
 		pktgen_reset_all_threads();
 
 	else
-		printk(KERN_WARNING "pktgen: Unknown command: %s\n", data);
+		pr_warning("Unknown command: %s\n", data);
 
 	err = count;
 
@@ -852,14 +858,14 @@
 	pg_result = &(pkt_dev->result[0]);
 
 	if (count < 1) {
-		printk(KERN_WARNING "pktgen: wrong command format\n");
+		pr_warning("wrong command format\n");
 		return -EINVAL;
 	}
 
 	max = count - i;
 	tmp = count_trail_chars(&user_buffer[i], max);
 	if (tmp < 0) {
-		printk(KERN_WARNING "pktgen: illegal format\n");
+		pr_warning("illegal format\n");
 		return tmp;
 	}
 	i += tmp;
@@ -980,6 +986,36 @@
 			(unsigned long long) pkt_dev->delay);
 		return count;
 	}
+	if (!strcmp(name, "rate")) {
+		len = num_arg(&user_buffer[i], 10, &value);
+		if (len < 0)
+			return len;
+
+		i += len;
+		if (!value)
+			return len;
+		pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value;
+		if (debug)
+			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);
+
+		sprintf(pg_result, "OK: rate=%lu", value);
+		return count;
+	}
+	if (!strcmp(name, "ratep")) {
+		len = num_arg(&user_buffer[i], 10, &value);
+		if (len < 0)
+			return len;
+
+		i += len;
+		if (!value)
+			return len;
+		pkt_dev->delay = NSEC_PER_SEC/value;
+		if (debug)
+			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);
+
+		sprintf(pg_result, "OK: ratep=%lu", value);
+		return count;
+	}
 	if (!strcmp(name, "udp_src_min")) {
 		len = num_arg(&user_buffer[i], 10, &value);
 		if (len < 0)
@@ -1781,7 +1817,7 @@
 		       name, (unsigned long)count);
 
 	if (!t) {
-		printk(KERN_ERR "pktgen: ERROR: No thread\n");
+		pr_err("ERROR: No thread\n");
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1874,7 +1910,7 @@
 	int i = 0;
 
 	mutex_lock(&pktgen_thread_lock);
-	pr_debug("pktgen: pktgen_mark_device marking %s for removal\n", ifname);
+	pr_debug("%s: marking %s for removal\n", __func__, ifname);
 
 	while (1) {
 
@@ -1883,15 +1919,14 @@
 			break;	/* success */
 
 		mutex_unlock(&pktgen_thread_lock);
-		pr_debug("pktgen: pktgen_mark_device waiting for %s "
-				"to disappear....\n", ifname);
+		pr_debug("%s: waiting for %s to disappear....\n",
+			 __func__, ifname);
 		schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
 		mutex_lock(&pktgen_thread_lock);
 
 		if (++i >= max_tries) {
-			printk(KERN_ERR "pktgen_mark_device: timed out after "
-			       "waiting %d msec for device %s to be removed\n",
-			       msec_per_try * i, ifname);
+			pr_err("%s: timed out after waiting %d msec for device %s to be removed\n",
+			       __func__, msec_per_try * i, ifname);
 			break;
 		}
 
@@ -1918,8 +1953,8 @@
 							  &pktgen_if_fops,
 							  pkt_dev);
 			if (!pkt_dev->entry)
-				printk(KERN_ERR "pktgen: can't move proc "
-				       " entry for '%s'\n", dev->name);
+				pr_err("can't move proc entry for '%s'\n",
+				       dev->name);
 			break;
 		}
 	}
@@ -1983,15 +2018,15 @@
 
 	odev = pktgen_dev_get_by_name(pkt_dev, ifname);
 	if (!odev) {
-		printk(KERN_ERR "pktgen: no such netdevice: \"%s\"\n", ifname);
+		pr_err("no such netdevice: \"%s\"\n", ifname);
 		return -ENODEV;
 	}
 
 	if (odev->type != ARPHRD_ETHER) {
-		printk(KERN_ERR "pktgen: not an ethernet device: \"%s\"\n", ifname);
+		pr_err("not an ethernet device: \"%s\"\n", ifname);
 		err = -EINVAL;
 	} else if (!netif_running(odev)) {
-		printk(KERN_ERR "pktgen: device is down: \"%s\"\n", ifname);
+		pr_err("device is down: \"%s\"\n", ifname);
 		err = -ENETDOWN;
 	} else {
 		pkt_dev->odev = odev;
@@ -2010,8 +2045,7 @@
 	int ntxq;
 
 	if (!pkt_dev->odev) {
-		printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
-		       "setup_inject.\n");
+		pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n");
 		sprintf(pkt_dev->result,
 			"ERROR: pkt_dev->odev == NULL in setup_inject.\n");
 		return;
@@ -2021,19 +2055,15 @@
 	ntxq = pkt_dev->odev->real_num_tx_queues;
 
 	if (ntxq <= pkt_dev->queue_map_min) {
-		printk(KERN_WARNING "pktgen: WARNING: Requested "
-		       "queue_map_min (zero-based) (%d) exceeds valid range "
-		       "[0 - %d] for (%d) queues on %s, resetting\n",
-		       pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
-		       pkt_dev->odevname);
+		pr_warning("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
+			   pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
+			   pkt_dev->odevname);
 		pkt_dev->queue_map_min = ntxq - 1;
 	}
 	if (pkt_dev->queue_map_max >= ntxq) {
-		printk(KERN_WARNING "pktgen: WARNING: Requested "
-		       "queue_map_max (zero-based) (%d) exceeds valid range "
-		       "[0 - %d] for (%d) queues on %s, resetting\n",
-		       pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
-		       pkt_dev->odevname);
+		pr_warning("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
+			   pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
+			   pkt_dev->odevname);
 		pkt_dev->queue_map_max = ntxq - 1;
 	}
 
@@ -2093,8 +2123,7 @@
 			}
 			rcu_read_unlock();
 			if (err)
-				printk(KERN_ERR "pktgen: ERROR: IPv6 link "
-				       "address not availble.\n");
+				pr_err("ERROR: IPv6 link address not available\n");
 		}
 #endif
 	} else {
@@ -2142,15 +2171,15 @@
 	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	hrtimer_set_expires(&t.timer, spin_until);
 
-	remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer));
+	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
 	if (remaining <= 0) {
 		pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 		return;
 	}
 
 	start_time = ktime_now();
-	if (remaining < 100)
-		udelay(remaining); 	/* really small just spin */
+	if (remaining < 100000)
+		ndelay(remaining);	/* really small just spin */
 	else {
 		/* see do_nanosleep */
 		hrtimer_init_sleeper(&t, current);
@@ -2528,8 +2557,8 @@
 			if (nhead > 0) {
 				ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
 				if (ret < 0) {
-					printk(KERN_ERR "Error expanding "
-					       "ipsec packet %d\n", ret);
+					pr_err("Error expanding ipsec packet %d\n",
+					       ret);
 					goto err;
 				}
 			}
@@ -2538,8 +2567,7 @@
 			skb_pull(skb, ETH_HLEN);
 			ret = pktgen_output_ipsec(skb, pkt_dev);
 			if (ret) {
-				printk(KERN_ERR "Error creating ipsec "
-				       "packet %d\n", ret);
+				pr_err("Error creating ipsec packet %d\n", ret);
 				goto err;
 			}
 			/* restore ll */
@@ -3015,8 +3043,7 @@
 	if (datalen < sizeof(struct pktgen_hdr)) {
 		datalen = sizeof(struct pktgen_hdr);
 		if (net_ratelimit())
-			printk(KERN_INFO "pktgen: increased datalen to %d\n",
-			       datalen);
+			pr_info("increased datalen to %d\n", datalen);
 	}
 
 	udph->source = htons(pkt_dev->cur_udp_src);
@@ -3143,7 +3170,7 @@
 	struct pktgen_dev *pkt_dev;
 	int started = 0;
 
-	pr_debug("pktgen: entering pktgen_run. %p\n", t);
+	func_enter();
 
 	if_lock(t);
 	list_for_each_entry(pkt_dev, &t->if_list, list) {
@@ -3176,7 +3203,7 @@
 {
 	struct pktgen_thread *t;
 
-	pr_debug("pktgen: entering pktgen_stop_all_threads_ifs.\n");
+	func_enter();
 
 	mutex_lock(&pktgen_thread_lock);
 
@@ -3241,7 +3268,7 @@
 {
 	struct pktgen_thread *t;
 
-	pr_debug("pktgen: entering pktgen_run_all_threads.\n");
+	func_enter();
 
 	mutex_lock(&pktgen_thread_lock);
 
@@ -3260,7 +3287,7 @@
 {
 	struct pktgen_thread *t;
 
-	pr_debug("pktgen: entering pktgen_reset_all_threads.\n");
+	func_enter();
 
 	mutex_lock(&pktgen_thread_lock);
 
@@ -3310,8 +3337,8 @@
 	int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;
 
 	if (!pkt_dev->running) {
-		printk(KERN_WARNING "pktgen: interface: %s is already "
-		       "stopped\n", pkt_dev->odevname);
+		pr_warning("interface: %s is already stopped\n",
+			   pkt_dev->odevname);
 		return -EINVAL;
 	}
 
@@ -3347,7 +3374,7 @@
 {
 	struct pktgen_dev *pkt_dev;
 
-	pr_debug("pktgen: entering pktgen_stop\n");
+	func_enter();
 
 	if_lock(t);
 
@@ -3367,7 +3394,7 @@
 	struct list_head *q, *n;
 	struct pktgen_dev *cur;
 
-	pr_debug("pktgen: entering pktgen_rem_one_if\n");
+	func_enter();
 
 	if_lock(t);
 
@@ -3393,9 +3420,10 @@
 	struct list_head *q, *n;
 	struct pktgen_dev *cur;
 
+	func_enter();
+
 	/* Remove all devices, free mem */
 
-	pr_debug("pktgen: entering pktgen_rem_all_ifs\n");
 	if_lock(t);
 
 	list_for_each_safe(q, n, &t->if_list) {
@@ -3477,8 +3505,7 @@
 
 		pkt_dev->skb = fill_packet(odev, pkt_dev);
 		if (pkt_dev->skb == NULL) {
-			printk(KERN_ERR "pktgen: ERROR: couldn't "
-			       "allocate skb in fill_packet.\n");
+			pr_err("ERROR: couldn't allocate skb in fill_packet\n");
 			schedule();
 			pkt_dev->clone_count--;	/* back out increment, OOM */
 			return;
@@ -3558,8 +3585,7 @@
 	init_waitqueue_head(&t->queue);
 	complete(&t->start_done);
 
-	pr_debug("pktgen: starting pktgen/%d:  pid=%d\n",
-		 cpu, task_pid_nr(current));
+	pr_debug("starting pktgen/%d:  pid=%d\n", cpu, task_pid_nr(current));
 
 	set_current_state(TASK_INTERRUPTIBLE);
 
@@ -3612,13 +3638,13 @@
 		set_current_state(TASK_INTERRUPTIBLE);
 	}
 
-	pr_debug("pktgen: %s stopping all device\n", t->tsk->comm);
+	pr_debug("%s stopping all devices\n", t->tsk->comm);
 	pktgen_stop(t);
 
-	pr_debug("pktgen: %s removing all device\n", t->tsk->comm);
+	pr_debug("%s removing all devices\n", t->tsk->comm);
 	pktgen_rem_all_ifs(t);
 
-	pr_debug("pktgen: %s removing thread.\n", t->tsk->comm);
+	pr_debug("%s removing thread\n", t->tsk->comm);
 	pktgen_rem_thread(t);
 
 	return 0;
@@ -3642,7 +3668,7 @@
 		}
 
 	if_unlock(t);
-	pr_debug("pktgen: find_dev(%s) returning %p\n", ifname, pkt_dev);
+	pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev);
 	return pkt_dev;
 }
 
@@ -3658,8 +3684,7 @@
 	if_lock(t);
 
 	if (pkt_dev->pg_thread) {
-		printk(KERN_ERR "pktgen: ERROR: already assigned "
-		       "to a thread.\n");
+		pr_err("ERROR: already assigned to a thread\n");
 		rv = -EBUSY;
 		goto out;
 	}
@@ -3685,7 +3710,7 @@
 
 	pkt_dev = __pktgen_NN_threads(ifname, FIND);
 	if (pkt_dev) {
-		printk(KERN_ERR "pktgen: ERROR: interface already used.\n");
+		pr_err("ERROR: interface already used\n");
 		return -EBUSY;
 	}
 
@@ -3730,7 +3755,7 @@
 	pkt_dev->entry = proc_create_data(ifname, 0600, pg_proc_dir,
 					  &pktgen_if_fops, pkt_dev);
 	if (!pkt_dev->entry) {
-		printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
+		pr_err("cannot create %s/%s procfs entry\n",
 		       PG_PROC_DIR, ifname);
 		err = -EINVAL;
 		goto out2;
@@ -3761,8 +3786,7 @@
 	t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL,
 			 cpu_to_node(cpu));
 	if (!t) {
-		printk(KERN_ERR "pktgen: ERROR: out of memory, can't "
-		       "create new thread.\n");
+		pr_err("ERROR: out of memory, can't create new thread\n");
 		return -ENOMEM;
 	}
 
@@ -3776,8 +3800,7 @@
 
 	p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu);
 	if (IS_ERR(p)) {
-		printk(KERN_ERR "pktgen: kernel_thread() failed "
-		       "for cpu %d\n", t->cpu);
+		pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
 		list_del(&t->th_list);
 		kfree(t);
 		return PTR_ERR(p);
@@ -3788,7 +3811,7 @@
 	pe = proc_create_data(t->tsk->comm, 0600, pg_proc_dir,
 			      &pktgen_thread_fops, t);
 	if (!pe) {
-		printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
+		pr_err("cannot create %s/%s procfs entry\n",
 		       PG_PROC_DIR, t->tsk->comm);
 		kthread_stop(p);
 		list_del(&t->th_list);
@@ -3822,11 +3845,10 @@
 				struct pktgen_dev *pkt_dev)
 {
 
-	pr_debug("pktgen: remove_device pkt_dev=%p\n", pkt_dev);
+	pr_debug("remove_device pkt_dev=%p\n", pkt_dev);
 
 	if (pkt_dev->running) {
-		printk(KERN_WARNING "pktgen: WARNING: trying to remove a "
-		       "running interface, stopping it now.\n");
+		pr_warning("WARNING: trying to remove a running interface, stopping it now\n");
 		pktgen_stop_device(pkt_dev);
 	}
 
@@ -3857,7 +3879,7 @@
 	int cpu;
 	struct proc_dir_entry *pe;
 
-	printk(KERN_INFO "%s", version);
+	pr_info("%s", version);
 
 	pg_proc_dir = proc_mkdir(PG_PROC_DIR, init_net.proc_net);
 	if (!pg_proc_dir)
@@ -3865,8 +3887,7 @@
 
 	pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
 	if (pe == NULL) {
-		printk(KERN_ERR "pktgen: ERROR: cannot create %s "
-		       "procfs entry.\n", PGCTRL);
+		pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL);
 		proc_net_remove(&init_net, PG_PROC_DIR);
 		return -EINVAL;
 	}
@@ -3879,13 +3900,12 @@
 
 		err = pktgen_create_thread(cpu);
 		if (err)
-			printk(KERN_WARNING "pktgen: WARNING: Cannot create "
-			       "thread for cpu %d (%d)\n", cpu, err);
+			pr_warning("WARNING: Cannot create thread for cpu %d (%d)\n",
+				   cpu, err);
 	}
 
 	if (list_empty(&pktgen_threads)) {
-		printk(KERN_ERR "pktgen: ERROR: Initialization failed for "
-		       "all threads\n");
+		pr_err("ERROR: Initialization failed for all threads\n");
 		unregister_netdevice_notifier(&pktgen_notifier_block);
 		remove_proc_entry(PGCTRL, pg_proc_dir);
 		proc_net_remove(&init_net, PG_PROC_DIR);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1a2af24..5e773ea 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -579,7 +579,7 @@
 }
 
 static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
-				 const struct net_device_stats *b)
+				 const struct rtnl_link_stats64 *b)
 {
 	a->rx_packets = b->rx_packets;
 	a->tx_packets = b->tx_packets;
@@ -610,7 +610,7 @@
 	a->tx_compressed = b->tx_compressed;
 }
 
-static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b)
+static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
 {
 	struct rtnl_link_stats64 a;
 
@@ -791,7 +791,8 @@
 {
 	struct ifinfomsg *ifm;
 	struct nlmsghdr *nlh;
-	const struct net_device_stats *stats;
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *stats;
 	struct nlattr *attr;
 
 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
@@ -847,7 +848,7 @@
 	if (attr == NULL)
 		goto nla_put_failure;
 
-	stats = dev_get_stats(dev);
+	stats = dev_get_stats(dev, &temp);
 	copy_rtnl_link_stats(nla_data(attr), stats);
 
 	attr = nla_reserve(skb, IFLA_STATS64,
diff --git a/net/core/scm.c b/net/core/scm.c
index b88f6f9..681c976 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -170,6 +170,30 @@
 			err = scm_check_creds(&p->creds);
 			if (err)
 				goto error;
+
+			if (pid_vnr(p->pid) != p->creds.pid) {
+				struct pid *pid;
+				err = -ESRCH;
+				pid = find_get_pid(p->creds.pid);
+				if (!pid)
+					goto error;
+				put_pid(p->pid);
+				p->pid = pid;
+			}
+
+			if ((p->cred->euid != p->creds.uid) ||
+				(p->cred->egid != p->creds.gid)) {
+				struct cred *cred;
+				err = -ENOMEM;
+				cred = prepare_creds();
+				if (!cred)
+					goto error;
+
+				cred->uid = cred->euid = p->creds.uid;
+				cred->gid = cred->egid = p->creds.gid;
+				put_cred(p->cred);
+				p->cred = cred;
+			}
 			break;
 		default:
 			goto error;
diff --git a/net/core/sock.c b/net/core/sock.c
index 2cf7f9f..fef2434 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -110,6 +110,7 @@
 #include <linux/tcp.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/user_namespace.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -156,7 +157,7 @@
   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
-  "sk_lock-AF_IEEE802154",
+  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
   "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
@@ -172,7 +173,7 @@
   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
-  "slock-AF_IEEE802154",
+  "slock-AF_IEEE802154", "slock-AF_CAIF" ,
   "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
@@ -188,7 +189,7 @@
   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
-  "clock-AF_IEEE802154",
+  "clock-AF_IEEE802154", "clock-AF_CAIF" ,
   "clock-AF_MAX"
 };
 
@@ -749,6 +750,20 @@
 EXPORT_SYMBOL(sock_setsockopt);
 
 
+void cred_to_ucred(struct pid *pid, const struct cred *cred,
+		   struct ucred *ucred)
+{
+	ucred->pid = pid_vnr(pid);
+	ucred->uid = ucred->gid = -1;
+	if (cred) {
+		struct user_namespace *current_ns = current_user_ns();
+
+		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
+		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
+	}
+}
+EXPORT_SYMBOL_GPL(cred_to_ucred);
+
 int sock_getsockopt(struct socket *sock, int level, int optname,
 		    char __user *optval, int __user *optlen)
 {
@@ -901,11 +916,15 @@
 		break;
 
 	case SO_PEERCRED:
-		if (len > sizeof(sk->sk_peercred))
-			len = sizeof(sk->sk_peercred);
-		if (copy_to_user(optval, &sk->sk_peercred, len))
+	{
+		struct ucred peercred;
+		if (len > sizeof(peercred))
+			len = sizeof(peercred);
+		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
+		if (copy_to_user(optval, &peercred, len))
 			return -EFAULT;
 		goto lenout;
+	}
 
 	case SO_PEERNAME:
 	{
@@ -1119,6 +1138,9 @@
 		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
 		       __func__, atomic_read(&sk->sk_omem_alloc));
 
+	if (sk->sk_peer_cred)
+		put_cred(sk->sk_peer_cred);
+	put_pid(sk->sk_peer_pid);
 	put_net(sock_net(sk));
 	sk_prot_free(sk->sk_prot_creator, sk);
 }
@@ -1954,9 +1976,8 @@
 	sk->sk_sndmsg_page	=	NULL;
 	sk->sk_sndmsg_off	=	0;
 
-	sk->sk_peercred.pid 	=	0;
-	sk->sk_peercred.uid	=	-1;
-	sk->sk_peercred.gid	=	-1;
+	sk->sk_peer_pid 	=	NULL;
+	sk->sk_peer_cred	=	NULL;
 	sk->sk_write_pending	=	0;
 	sk->sk_rcvlowat		=	1;
 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 01e4d39..2abddee 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -82,7 +82,7 @@
 	elapsed_time = delta / 10;
 
 	if (elapsed_time != 0 &&
-	    dccp_insert_option_elapsed_time(sk, skb, elapsed_time))
+	    dccp_insert_option_elapsed_time(skb, elapsed_time))
 		return -1;
 
 	avr = dccp_ackvec_record_new();
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index d323589..95f7529 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -715,9 +715,9 @@
 	x_recv = htonl(hc->rx_x_recv);
 	pinv   = htonl(hc->rx_pinv);
 
-	if (dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
+	if (dccp_insert_option(skb, TFRC_OPT_LOSS_EVENT_RATE,
 			       &pinv, sizeof(pinv)) ||
-	    dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
+	    dccp_insert_option(skb, TFRC_OPT_RECEIVE_RATE,
 			       &x_recv, sizeof(x_recv)))
 		return -1;
 
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index a10a61a..3ccef1b 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -446,16 +446,12 @@
 
 extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
 extern int dccp_insert_options_rsk(struct dccp_request_sock*, struct sk_buff*);
-extern int dccp_insert_option_elapsed_time(struct sock *sk,
-					    struct sk_buff *skb,
-					    u32 elapsed_time);
+extern int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
 extern u32 dccp_timestamp(void);
 extern void dccp_timestamping_init(void);
-extern int dccp_insert_option_timestamp(struct sock *sk,
-					 struct sk_buff *skb);
-extern int dccp_insert_option(struct sock *sk, struct sk_buff *skb,
-			       unsigned char option,
-			       const void *value, unsigned char len);
+extern int dccp_insert_option_timestamp(struct sk_buff *skb);
+extern int dccp_insert_option(struct sk_buff *skb, unsigned char option,
+			      const void *value, unsigned char len);
 
 #ifdef CONFIG_SYSCTL
 extern int dccp_sysctl_init(void);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 6beb6a7..10c957a 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -430,7 +430,7 @@
 		if (dccp_parse_options(sk, NULL, skb))
 			return 1;
 
-		/* Obtain usec RTT sample from SYN exchange (used by CCID 3) */
+		/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
 		if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
 			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
 			    dp->dccps_options_received.dccpor_timestamp_echo));
@@ -535,6 +535,8 @@
 						   const struct dccp_hdr *dh,
 						   const unsigned len)
 {
+	struct dccp_sock *dp = dccp_sk(sk);
+	u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
 	int queued = 0;
 
 	switch (dh->dccph_type) {
@@ -559,7 +561,14 @@
 		if (sk->sk_state == DCCP_PARTOPEN)
 			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 
-		dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
+		/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
+		if (likely(sample)) {
+			long delta = dccp_timestamp() - sample;
+
+			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta);
+		}
+
+		dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
 		dccp_set_state(sk, DCCP_OPEN);
 
 		if (dh->dccph_type == DCCP_PKT_DATAACK ||
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index d9b11ef..d4a166f 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -105,7 +105,7 @@
 		goto failure;
 
 	/* OK, now commit destination to socket.  */
-	sk_setup_caps(sk, &rt->u.dst);
+	sk_setup_caps(sk, &rt->dst);
 
 	dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr,
 						    inet->inet_daddr,
@@ -475,7 +475,7 @@
 		return NULL;
 	}
 
-	return &rt->u.dst;
+	return &rt->dst;
 }
 
 static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 0916988..6e3f325 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -248,7 +248,7 @@
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sk_buff *skb;
 	struct ipv6_txoptions *opt = NULL;
-	struct in6_addr *final_p = NULL, final;
+	struct in6_addr *final_p, final;
 	struct flowi fl;
 	int err = -1;
 	struct dst_entry *dst;
@@ -265,13 +265,7 @@
 
 	opt = np->opt;
 
-	if (opt != NULL && opt->srcrt != NULL) {
-		const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
-
-		ipv6_addr_copy(&final, &fl.fl6_dst);
-		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-		final_p = &final;
-	}
+	final_p = fl6_update_dst(&fl, opt, &final);
 
 	err = ip6_dst_lookup(sk, &dst, &fl);
 	if (err)
@@ -545,19 +539,13 @@
 		goto out_overflow;
 
 	if (dst == NULL) {
-		struct in6_addr *final_p = NULL, final;
+		struct in6_addr *final_p, final;
 		struct flowi fl;
 
 		memset(&fl, 0, sizeof(fl));
 		fl.proto = IPPROTO_DCCP;
 		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
-		if (opt != NULL && opt->srcrt != NULL) {
-			const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
-
-			ipv6_addr_copy(&final, &fl.fl6_dst);
-			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-			final_p = &final;
-		}
+		final_p = fl6_update_dst(&fl, opt, &final);
 		ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
 		fl.oif = sk->sk_bound_dev_if;
 		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
@@ -885,7 +873,7 @@
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct dccp_sock *dp = dccp_sk(sk);
-	struct in6_addr *saddr = NULL, *final_p = NULL, final;
+	struct in6_addr *saddr = NULL, *final_p, final;
 	struct flowi fl;
 	struct dst_entry *dst;
 	int addr_type;
@@ -988,13 +976,7 @@
 	fl.fl_ip_sport = inet->inet_sport;
 	security_sk_classify_flow(sk, &fl);
 
-	if (np->opt != NULL && np->opt->srcrt != NULL) {
-		const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
-
-		ipv6_addr_copy(&final, &fl.fl6_dst);
-		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-		final_p = &final;
-	}
+	final_p = fl6_update_dst(&fl, np->opt, &final);
 
 	err = ip6_dst_lookup(sk, &dst, &fl);
 	if (err)
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 07395f8..bfda087 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -299,9 +299,8 @@
 	return likely(ndp <= USHRT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6);
 }
 
-int dccp_insert_option(struct sock *sk, struct sk_buff *skb,
-			const unsigned char option,
-			const void *value, const unsigned char len)
+int dccp_insert_option(struct sk_buff *skb, const unsigned char option,
+		       const void *value, const unsigned char len)
 {
 	unsigned char *to;
 
@@ -354,8 +353,7 @@
 	return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
 }
 
-int dccp_insert_option_elapsed_time(struct sock *sk, struct sk_buff *skb,
-				    u32 elapsed_time)
+int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time)
 {
 	const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
 	const int len = 2 + elapsed_time_len;
@@ -386,13 +384,13 @@
 
 EXPORT_SYMBOL_GPL(dccp_insert_option_elapsed_time);
 
-int dccp_insert_option_timestamp(struct sock *sk, struct sk_buff *skb)
+int dccp_insert_option_timestamp(struct sk_buff *skb)
 {
 	__be32 now = htonl(dccp_timestamp());
 	/* yes this will overflow but that is the point as we want a
 	 * 10 usec 32 bit timer which mean it wraps every 11.9 hours */
 
-	return dccp_insert_option(sk, skb, DCCPO_TIMESTAMP, &now, sizeof(now));
+	return dccp_insert_option(skb, DCCPO_TIMESTAMP, &now, sizeof(now));
 }
 
 EXPORT_SYMBOL_GPL(dccp_insert_option_timestamp);
@@ -531,9 +529,9 @@
 		if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_REQUEST) {
 			/*
 			 * Obtain RTT sample from Request/Response exchange.
-			 * This is currently used in CCID 3 initialisation.
+			 * This is currently used for TFRC initialisation.
 			 */
-			if (dccp_insert_option_timestamp(sk, skb))
+			if (dccp_insert_option_timestamp(skb))
 				return -1;
 
 		} else if (dp->dccps_hc_rx_ackvec != NULL &&
@@ -564,6 +562,10 @@
 	if (dccp_feat_insert_opts(NULL, dreq, skb))
 		return -1;
 
+	/* Obtain RTT sample from Response/Ack exchange (used by TFRC). */
+	if (dccp_insert_option_timestamp(skb))
+		return -1;
+
 	if (dreq->dreq_timestamp_echo != 0 &&
 	    dccp_insert_option_timestamp_echo(NULL, dreq, skb))
 		return -1;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b03ecf6..096250d 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -473,14 +473,9 @@
 	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
 		return -EINVAL;
 
-	val = kmalloc(optlen, GFP_KERNEL);
-	if (val == NULL)
-		return -ENOMEM;
-
-	if (copy_from_user(val, optval, optlen)) {
-		kfree(val);
-		return -EFAULT;
-	}
+	val = memdup_user(optval, optlen);
+	if (IS_ERR(val))
+		return PTR_ERR(val);
 
 	lock_sock(sk);
 	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
@@ -1007,7 +1002,8 @@
 static inline int dccp_mib_init(void)
 {
 	return snmp_mib_init((void __percpu **)dccp_statistics,
-			     sizeof(struct dccp_mib));
+			     sizeof(struct dccp_mib),
+			     __alignof__(struct dccp_mib));
 }
 
 static inline void dccp_mib_exit(void)
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 812e6df..6585ea6 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -146,13 +146,13 @@
 
 static inline void dnrt_free(struct dn_route *rt)
 {
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
 static inline void dnrt_drop(struct dn_route *rt)
 {
-	dst_release(&rt->u.dst);
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+	dst_release(&rt->dst);
+	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
 static void dn_dst_check_expire(unsigned long dummy)
@@ -167,13 +167,13 @@
 
 		spin_lock(&dn_rt_hash_table[i].lock);
 		while((rt=*rtp) != NULL) {
-			if (atomic_read(&rt->u.dst.__refcnt) ||
-					(now - rt->u.dst.lastuse) < expire) {
-				rtp = &rt->u.dst.dn_next;
+			if (atomic_read(&rt->dst.__refcnt) ||
+					(now - rt->dst.lastuse) < expire) {
+				rtp = &rt->dst.dn_next;
 				continue;
 			}
-			*rtp = rt->u.dst.dn_next;
-			rt->u.dst.dn_next = NULL;
+			*rtp = rt->dst.dn_next;
+			rt->dst.dn_next = NULL;
 			dnrt_free(rt);
 		}
 		spin_unlock(&dn_rt_hash_table[i].lock);
@@ -198,13 +198,13 @@
 		rtp = &dn_rt_hash_table[i].chain;
 
 		while((rt=*rtp) != NULL) {
-			if (atomic_read(&rt->u.dst.__refcnt) ||
-					(now - rt->u.dst.lastuse) < expire) {
-				rtp = &rt->u.dst.dn_next;
+			if (atomic_read(&rt->dst.__refcnt) ||
+					(now - rt->dst.lastuse) < expire) {
+				rtp = &rt->dst.dn_next;
 				continue;
 			}
-			*rtp = rt->u.dst.dn_next;
-			rt->u.dst.dn_next = NULL;
+			*rtp = rt->dst.dn_next;
+			rt->dst.dn_next = NULL;
 			dnrt_drop(rt);
 			break;
 		}
@@ -287,25 +287,25 @@
 	while((rth = *rthp) != NULL) {
 		if (compare_keys(&rth->fl, &rt->fl)) {
 			/* Put it first */
-			*rthp = rth->u.dst.dn_next;
-			rcu_assign_pointer(rth->u.dst.dn_next,
+			*rthp = rth->dst.dn_next;
+			rcu_assign_pointer(rth->dst.dn_next,
 					   dn_rt_hash_table[hash].chain);
 			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
 
-			dst_use(&rth->u.dst, now);
+			dst_use(&rth->dst, now);
 			spin_unlock_bh(&dn_rt_hash_table[hash].lock);
 
 			dnrt_drop(rt);
 			*rp = rth;
 			return 0;
 		}
-		rthp = &rth->u.dst.dn_next;
+		rthp = &rth->dst.dn_next;
 	}
 
-	rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain);
+	rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
 	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
 
-	dst_use(&rt->u.dst, now);
+	dst_use(&rt->dst, now);
 	spin_unlock_bh(&dn_rt_hash_table[hash].lock);
 	*rp = rt;
 	return 0;
@@ -323,8 +323,8 @@
 			goto nothing_to_declare;
 
 		for(; rt; rt=next) {
-			next = rt->u.dst.dn_next;
-			rt->u.dst.dn_next = NULL;
+			next = rt->dst.dn_next;
+			rt->dst.dn_next = NULL;
 			dst_free((struct dst_entry *)rt);
 		}
 
@@ -743,7 +743,7 @@
 	/* Ensure that we have enough space for headers */
 	rt = (struct dn_route *)skb_dst(skb);
 	header_len = dn_db->use_long ? 21 : 6;
-	if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len))
+	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
 		goto drop;
 
 	/*
@@ -752,7 +752,7 @@
 	if (++cb->hops > 30)
 		goto drop;
 
-	skb->dev = rt->u.dst.dev;
+	skb->dev = rt->dst.dev;
 
 	/*
 	 * If packet goes out same interface it came in on, then set
@@ -792,7 +792,7 @@
 static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 {
 	struct dn_fib_info *fi = res->fi;
-	struct net_device *dev = rt->u.dst.dev;
+	struct net_device *dev = rt->dst.dev;
 	struct neighbour *n;
 	unsigned mss;
 
@@ -800,25 +800,25 @@
 		if (DN_FIB_RES_GW(*res) &&
 		    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
 			rt->rt_gateway = DN_FIB_RES_GW(*res);
-		memcpy(rt->u.dst.metrics, fi->fib_metrics,
-		       sizeof(rt->u.dst.metrics));
+		memcpy(rt->dst.metrics, fi->fib_metrics,
+		       sizeof(rt->dst.metrics));
 	}
 	rt->rt_type = res->type;
 
-	if (dev != NULL && rt->u.dst.neighbour == NULL) {
+	if (dev != NULL && rt->dst.neighbour == NULL) {
 		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
 		if (IS_ERR(n))
 			return PTR_ERR(n);
-		rt->u.dst.neighbour = n;
+		rt->dst.neighbour = n;
 	}
 
-	if (dst_metric(&rt->u.dst, RTAX_MTU) == 0 ||
-	    dst_metric(&rt->u.dst, RTAX_MTU) > rt->u.dst.dev->mtu)
-		rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
-	mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
-	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0 ||
-	    dst_metric(&rt->u.dst, RTAX_ADVMSS) > mss)
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = mss;
+	if (dst_metric(&rt->dst, RTAX_MTU) == 0 ||
+	    dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
+		rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
+	mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
+	if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 ||
+	    dst_metric(&rt->dst, RTAX_ADVMSS) > mss)
+		rt->dst.metrics[RTAX_ADVMSS-1] = mss;
 	return 0;
 }
 
@@ -1096,8 +1096,8 @@
 	if (rt == NULL)
 		goto e_nobufs;
 
-	atomic_set(&rt->u.dst.__refcnt, 1);
-	rt->u.dst.flags   = DST_HOST;
+	atomic_set(&rt->dst.__refcnt, 1);
+	rt->dst.flags   = DST_HOST;
 
 	rt->fl.fld_src    = oldflp->fld_src;
 	rt->fl.fld_dst    = oldflp->fld_dst;
@@ -1113,17 +1113,17 @@
 	rt->rt_dst_map    = fl.fld_dst;
 	rt->rt_src_map    = fl.fld_src;
 
-	rt->u.dst.dev = dev_out;
+	rt->dst.dev = dev_out;
 	dev_hold(dev_out);
-	rt->u.dst.neighbour = neigh;
+	rt->dst.neighbour = neigh;
 	neigh = NULL;
 
-	rt->u.dst.lastuse = jiffies;
-	rt->u.dst.output  = dn_output;
-	rt->u.dst.input   = dn_rt_bug;
+	rt->dst.lastuse = jiffies;
+	rt->dst.output  = dn_output;
+	rt->dst.input   = dn_rt_bug;
 	rt->rt_flags      = flags;
 	if (flags & RTCF_LOCAL)
-		rt->u.dst.input = dn_nsp_rx;
+		rt->dst.input = dn_nsp_rx;
 
 	err = dn_rt_set_next_hop(rt, &res);
 	if (err)
@@ -1152,7 +1152,7 @@
 	err = -ENOBUFS;
 	goto done;
 e_neighbour:
-	dst_free(&rt->u.dst);
+	dst_free(&rt->dst);
 	goto e_nobufs;
 }
 
@@ -1168,15 +1168,15 @@
 	if (!(flags & MSG_TRYHARD)) {
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
-			rt = rcu_dereference_bh(rt->u.dst.dn_next)) {
+			rt = rcu_dereference_bh(rt->dst.dn_next)) {
 			if ((flp->fld_dst == rt->fl.fld_dst) &&
 			    (flp->fld_src == rt->fl.fld_src) &&
 			    (flp->mark == rt->fl.mark) &&
 			    (rt->fl.iif == 0) &&
 			    (rt->fl.oif == flp->oif)) {
-				dst_use(&rt->u.dst, jiffies);
+				dst_use(&rt->dst, jiffies);
 				rcu_read_unlock_bh();
-				*pprt = &rt->u.dst;
+				*pprt = &rt->dst;
 				return 0;
 			}
 		}
@@ -1375,29 +1375,29 @@
 	rt->fl.iif        = in_dev->ifindex;
 	rt->fl.mark       = fl.mark;
 
-	rt->u.dst.flags = DST_HOST;
-	rt->u.dst.neighbour = neigh;
-	rt->u.dst.dev = out_dev;
-	rt->u.dst.lastuse = jiffies;
-	rt->u.dst.output = dn_rt_bug;
+	rt->dst.flags = DST_HOST;
+	rt->dst.neighbour = neigh;
+	rt->dst.dev = out_dev;
+	rt->dst.lastuse = jiffies;
+	rt->dst.output = dn_rt_bug;
 	switch(res.type) {
 		case RTN_UNICAST:
-			rt->u.dst.input = dn_forward;
+			rt->dst.input = dn_forward;
 			break;
 		case RTN_LOCAL:
-			rt->u.dst.output = dn_output;
-			rt->u.dst.input = dn_nsp_rx;
-			rt->u.dst.dev = in_dev;
+			rt->dst.output = dn_output;
+			rt->dst.input = dn_nsp_rx;
+			rt->dst.dev = in_dev;
 			flags |= RTCF_LOCAL;
 			break;
 		default:
 		case RTN_UNREACHABLE:
 		case RTN_BLACKHOLE:
-			rt->u.dst.input = dst_discard;
+			rt->dst.input = dst_discard;
 	}
 	rt->rt_flags = flags;
-	if (rt->u.dst.dev)
-		dev_hold(rt->u.dst.dev);
+	if (rt->dst.dev)
+		dev_hold(rt->dst.dev);
 
 	err = dn_rt_set_next_hop(rt, &res);
 	if (err)
@@ -1405,7 +1405,7 @@
 
 	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
 	dn_insert_route(rt, hash, &rt);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 done:
 	if (neigh)
@@ -1427,7 +1427,7 @@
 	goto done;
 
 e_neighbour:
-	dst_free(&rt->u.dst);
+	dst_free(&rt->dst);
 	goto done;
 }
 
@@ -1442,13 +1442,13 @@
 
 	rcu_read_lock();
 	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
-	    rt = rcu_dereference(rt->u.dst.dn_next)) {
+	    rt = rcu_dereference(rt->dst.dn_next)) {
 		if ((rt->fl.fld_src == cb->src) &&
 		    (rt->fl.fld_dst == cb->dst) &&
 		    (rt->fl.oif == 0) &&
 		    (rt->fl.mark == skb->mark) &&
 		    (rt->fl.iif == cb->iif)) {
-			dst_use(&rt->u.dst, jiffies);
+			dst_use(&rt->dst, jiffies);
 			rcu_read_unlock();
 			skb_dst_set(skb, (struct dst_entry *)rt);
 			return 0;
@@ -1487,8 +1487,8 @@
 		r->rtm_src_len = 16;
 		RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
 	}
-	if (rt->u.dst.dev)
-		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
+	if (rt->dst.dev)
+		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex);
 	/*
 	 * Note to self - change this if input routes reverse direction when
 	 * they deal only with inputs and not with replies like they do
@@ -1497,11 +1497,11 @@
 	RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
 	if (rt->rt_daddr != rt->rt_gateway)
 		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
-	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
+	if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
 		goto rtattr_failure;
-	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
-	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires,
-			       rt->u.dst.error) < 0)
+	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
+	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
+			       rt->dst.error) < 0)
 		goto rtattr_failure;
 	if (rt->fl.iif)
 		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
@@ -1568,8 +1568,8 @@
 		local_bh_enable();
 		memset(cb, 0, sizeof(struct dn_skb_cb));
 		rt = (struct dn_route *)skb_dst(skb);
-		if (!err && -rt->u.dst.error)
-			err = rt->u.dst.error;
+		if (!err && -rt->dst.error)
+			err = rt->dst.error;
 	} else {
 		int oif = 0;
 		if (rta[RTA_OIF - 1])
@@ -1583,7 +1583,7 @@
 	skb->dev = NULL;
 	if (err)
 		goto out_free;
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 	if (rtm->rtm_flags & RTM_F_NOTIFY)
 		rt->rt_flags |= RTCF_NOTIFY;
 
@@ -1632,10 +1632,10 @@
 		rcu_read_lock_bh();
 		for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
 			rt;
-			rt = rcu_dereference_bh(rt->u.dst.dn_next), idx++) {
+			rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
 			if (idx < s_idx)
 				continue;
-			skb_dst_set(skb, dst_clone(&rt->u.dst));
+			skb_dst_set(skb, dst_clone(&rt->dst));
 			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
 					cb->nlh->nlmsg_seq, RTM_NEWROUTE,
 					1, NLM_F_MULTI) <= 0) {
@@ -1678,7 +1678,7 @@
 {
 	struct dn_rt_cache_iter_state *s = seq->private;
 
-	rt = rt->u.dst.dn_next;
+	rt = rt->dst.dn_next;
 	while(!rt) {
 		rcu_read_unlock_bh();
 		if (--s->bucket < 0)
@@ -1719,12 +1719,12 @@
 	char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
 
 	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
-			rt->u.dst.dev ? rt->u.dst.dev->name : "*",
+			rt->dst.dev ? rt->dst.dev->name : "*",
 			dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
 			dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
-			atomic_read(&rt->u.dst.__refcnt),
-			rt->u.dst.__use,
-			(int) dst_metric(&rt->u.dst, RTAX_RTT));
+			atomic_read(&rt->dst.__refcnt),
+			rt->dst.__use,
+			(int) dst_metric(&rt->dst, RTAX_RTT));
 	return 0;
 }
 
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 2a5a805..dc54bd0 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -48,7 +48,7 @@
 
 static const struct proto_ops econet_ops;
 static struct hlist_head econet_sklist;
-static DEFINE_RWLOCK(econet_lock);
+static DEFINE_SPINLOCK(econet_lock);
 static DEFINE_MUTEX(econet_mutex);
 
 /* Since there are only 256 possible network numbers (or fewer, depends
@@ -98,16 +98,16 @@
 
 static void econet_remove_socket(struct hlist_head *list, struct sock *sk)
 {
-	write_lock_bh(&econet_lock);
+	spin_lock_bh(&econet_lock);
 	sk_del_node_init(sk);
-	write_unlock_bh(&econet_lock);
+	spin_unlock_bh(&econet_lock);
 }
 
 static void econet_insert_socket(struct hlist_head *list, struct sock *sk)
 {
-	write_lock_bh(&econet_lock);
+	spin_lock_bh(&econet_lock);
 	sk_add_node(sk, list);
-	write_unlock_bh(&econet_lock);
+	spin_unlock_bh(&econet_lock);
 }
 
 /*
@@ -782,15 +782,19 @@
 	struct sock *sk;
 	struct hlist_node *node;
 
+	spin_lock(&econet_lock);
 	sk_for_each(sk, node, &econet_sklist) {
 		struct econet_sock *opt = ec_sk(sk);
 		if ((opt->port == port || opt->port == 0) &&
 		    (opt->station == station || opt->station == 0) &&
-		    (opt->net == net || opt->net == 0))
+		    (opt->net == net || opt->net == 0)) {
+			sock_hold(sk);
 			goto found;
+		}
 	}
 	sk = NULL;
 found:
+	spin_unlock(&econet_lock);
 	return sk;
 }
 
@@ -852,7 +856,7 @@
 {
 	struct iphdr *ip = ip_hdr(skb);
 	unsigned char stn = ntohl(ip->saddr) & 0xff;
-	struct sock *sk;
+	struct sock *sk = NULL;
 	struct sk_buff *newskb;
 	struct ec_device *edev = skb->dev->ec_ptr;
 
@@ -882,10 +886,13 @@
 	}
 
 	aun_send_response(ip->saddr, ah->handle, 3, 0);
+	sock_put(sk);
 	return;
 
 bad:
 	aun_send_response(ip->saddr, ah->handle, 4, 0);
+	if (sk)
+		sock_put(sk);
 }
 
 /*
@@ -1050,7 +1057,7 @@
 static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct ec_framehdr *hdr;
-	struct sock *sk;
+	struct sock *sk = NULL;
 	struct ec_device *edev = dev->ec_ptr;
 
 	if (!net_eq(dev_net(dev), &init_net))
@@ -1085,10 +1092,12 @@
 	if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb,
 			    hdr->port))
 		goto drop;
-
+	sock_put(sk);
 	return NET_RX_SUCCESS;
 
 drop:
+	if (sk)
+		sock_put(sk);
 	kfree_skb(skb);
 	return NET_RX_DROP;
 }
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 61ec032..215c839 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -158,7 +158,6 @@
 __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ethhdr *eth;
-	unsigned char *rawp;
 
 	skb->dev = dev;
 	skb_reset_mac_header(skb);
@@ -199,15 +198,13 @@
 	if (ntohs(eth->h_proto) >= 1536)
 		return eth->h_proto;
 
-	rawp = skb->data;
-
 	/*
 	 *      This is a magic hack to spot IPX packets. Older Novell breaks
 	 *      the protocol design and runs IPX over 802.3 without an 802.2 LLC
 	 *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
 	 *      won't work for fault tolerant netware but does for the rest.
 	 */
-	if (*(unsigned short *)rawp == 0xFFFF)
+	if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF)
 		return htons(ETH_P_802_3);
 
 	/*
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 551ce56..3ceb025 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -355,6 +355,8 @@
 	inet = inet_sk(sk);
 	inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
 
+	inet->nodefrag = 0;
+
 	if (SOCK_RAW == sock->type) {
 		inet->inet_num = protocol;
 		if (IPPROTO_RAW == protocol)
@@ -1100,7 +1102,7 @@
 	if (err)
 		return err;
 
-	sk_setup_caps(sk, &rt->u.dst);
+	sk_setup_caps(sk, &rt->dst);
 
 	new_saddr = rt->rt_src;
 
@@ -1166,7 +1168,7 @@
 	err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0);
 }
 	if (!err)
-		sk_setup_caps(sk, &rt->u.dst);
+		sk_setup_caps(sk, &rt->dst);
 	else {
 		/* Routing failed... */
 		sk->sk_route_caps = 0;
@@ -1425,13 +1427,49 @@
 }
 EXPORT_SYMBOL_GPL(snmp_fold_field);
 
-int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
+#if BITS_PER_LONG==32
+
+u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
+{
+	u64 res = 0;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		void *bhptr, *userptr;
+		struct u64_stats_sync *syncp;
+		u64 v_bh, v_user;
+		unsigned int start;
+
+		/* first mib used by softirq context, we must use _bh() accessors */
+		bhptr = per_cpu_ptr(SNMP_STAT_BHPTR(mib), cpu);
+		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
+		do {
+			start = u64_stats_fetch_begin_bh(syncp);
+			v_bh = *(((u64 *) bhptr) + offt);
+		} while (u64_stats_fetch_retry_bh(syncp, start));
+
+		/* second mib used in USER context */
+		userptr = per_cpu_ptr(SNMP_STAT_USRPTR(mib), cpu);
+		syncp = (struct u64_stats_sync *)(userptr + syncp_offset);
+		do {
+			start = u64_stats_fetch_begin(syncp);
+			v_user = *(((u64 *) userptr) + offt);
+		} while (u64_stats_fetch_retry(syncp, start));
+
+		res += v_bh + v_user;
+	}
+	return res;
+}
+EXPORT_SYMBOL_GPL(snmp_fold_field64);
+#endif
+
+int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
 {
 	BUG_ON(ptr == NULL);
-	ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long));
+	ptr[0] = __alloc_percpu(mibsize, align);
 	if (!ptr[0])
 		goto err0;
-	ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long));
+	ptr[1] = __alloc_percpu(mibsize, align);
 	if (!ptr[1])
 		goto err1;
 	return 0;
@@ -1488,25 +1526,32 @@
 static __net_init int ipv4_mib_init_net(struct net *net)
 {
 	if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
-			  sizeof(struct tcp_mib)) < 0)
+			  sizeof(struct tcp_mib),
+			  __alignof__(struct tcp_mib)) < 0)
 		goto err_tcp_mib;
 	if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
-			  sizeof(struct ipstats_mib)) < 0)
+			  sizeof(struct ipstats_mib),
+			  __alignof__(struct ipstats_mib)) < 0)
 		goto err_ip_mib;
 	if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
-			  sizeof(struct linux_mib)) < 0)
+			  sizeof(struct linux_mib),
+			  __alignof__(struct linux_mib)) < 0)
 		goto err_net_mib;
 	if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
-			  sizeof(struct udp_mib)) < 0)
+			  sizeof(struct udp_mib),
+			  __alignof__(struct udp_mib)) < 0)
 		goto err_udp_mib;
 	if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
-			  sizeof(struct udp_mib)) < 0)
+			  sizeof(struct udp_mib),
+			  __alignof__(struct udp_mib)) < 0)
 		goto err_udplite_mib;
 	if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
-			  sizeof(struct icmp_mib)) < 0)
+			  sizeof(struct icmp_mib),
+			  __alignof__(struct icmp_mib)) < 0)
 		goto err_icmp_mib;
 	if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
-			  sizeof(struct icmpmsg_mib)) < 0)
+			  sizeof(struct icmpmsg_mib),
+			  __alignof__(struct icmpmsg_mib)) < 0)
 		goto err_icmpmsg_mib;
 
 	tcp_mib_init(net);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index f094b75..09ead1b 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -333,11 +333,14 @@
 	struct net_device *dev = neigh->dev;
 	__be32 target = *(__be32*)neigh->primary_key;
 	int probes = atomic_read(&neigh->probes);
-	struct in_device *in_dev = in_dev_get(dev);
+	struct in_device *in_dev;
 
-	if (!in_dev)
+	rcu_read_lock();
+	in_dev = __in_dev_get_rcu(dev);
+	if (!in_dev) {
+		rcu_read_unlock();
 		return;
-
+	}
 	switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
 	default:
 	case 0:		/* By default announce any local IP */
@@ -358,9 +361,8 @@
 	case 2:		/* Avoid secondary IPs, get a primary/preferred one */
 		break;
 	}
+	rcu_read_unlock();
 
-	if (in_dev)
-		in_dev_put(in_dev);
 	if (!saddr)
 		saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);
 
@@ -427,7 +429,7 @@
 
 	if (ip_route_output_key(net, &rt, &fl) < 0)
 		return 1;
-	if (rt->u.dst.dev != dev) {
+	if (rt->dst.dev != dev) {
 		NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
 		flag = 1;
 	}
@@ -532,7 +534,7 @@
 	struct in_device *out_dev;
 	int imi, omi = -1;
 
-	if (rt->u.dst.dev == dev)
+	if (rt->dst.dev == dev)
 		return 0;
 
 	if (!IN_DEV_PROXY_ARP(in_dev))
@@ -545,10 +547,10 @@
 
 	/* place to check for proxy_arp for routes */
 
-	if ((out_dev = in_dev_get(rt->u.dst.dev)) != NULL) {
+	out_dev = __in_dev_get_rcu(rt->dst.dev);
+	if (out_dev)
 		omi = IN_DEV_MEDIUM_ID(out_dev);
-		in_dev_put(out_dev);
-	}
+
 	return (omi != imi && omi != -1);
 }
 
@@ -576,7 +578,7 @@
 				__be32 sip, __be32 tip)
 {
 	/* Private VLAN is only concerned about the same ethernet segment */
-	if (rt->u.dst.dev != dev)
+	if (rt->dst.dev != dev)
 		return 0;
 
 	/* Don't reply on self probes (often done by windowz boxes)*/
@@ -741,7 +743,7 @@
 static int arp_process(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
-	struct in_device *in_dev = in_dev_get(dev);
+	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	struct arphdr *arp;
 	unsigned char *arp_ptr;
 	struct rtable *rt;
@@ -890,7 +892,6 @@
 					arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
 				} else {
 					pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb);
-					in_dev_put(in_dev);
 					return 0;
 				}
 				goto out;
@@ -936,8 +937,6 @@
 	}
 
 out:
-	if (in_dev)
-		in_dev_put(in_dev);
 	consume_skb(skb);
 	return 0;
 }
@@ -1045,7 +1044,7 @@
 		struct rtable * rt;
 		if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
 			return err;
-		dev = rt->u.dst.dev;
+		dev = rt->dst.dev;
 		ip_rt_put(rt);
 		if (!dev)
 			return -EINVAL;
@@ -1152,7 +1151,7 @@
 		struct rtable * rt;
 		if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
 			return err;
-		dev = rt->u.dst.dev;
+		dev = rt->dst.dev;
 		ip_rt_put(rt);
 		if (!dev)
 			return -EINVAL;
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index fb24658..fe3daa7 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -69,7 +69,7 @@
 	sk->sk_state = TCP_ESTABLISHED;
 	inet->inet_id = jiffies;
 
-	sk_dst_set(sk, &rt->u.dst);
+	sk_dst_set(sk, &rt->dst);
 	return(0);
 }
 
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 382bc76..da14c49 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1081,6 +1081,7 @@
 		}
 		ip_mc_up(in_dev);
 		/* fall through */
+	case NETDEV_NOTIFY_PEERS:
 	case NETDEV_CHANGEADDR:
 		/* Send gratuitous ARP to notify of link change */
 		if (IN_DEV_ARP_NOTIFY(in_dev)) {
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4f0ed45..e830f7a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -284,7 +284,7 @@
 	if (no_addr)
 		goto last_resort;
 	if (rpf == 1)
-		goto e_inval;
+		goto e_rpf;
 	fl.oif = dev->ifindex;
 
 	ret = 0;
@@ -299,7 +299,7 @@
 
 last_resort:
 	if (rpf)
-		goto e_inval;
+		goto e_rpf;
 	*spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
 	*itag = 0;
 	return 0;
@@ -308,6 +308,8 @@
 	fib_res_put(&res);
 e_inval:
 	return -EINVAL;
+e_rpf:
+	return -EXDEV;
 }
 
 static inline __be32 sk_extract_addr(struct sockaddr *addr)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index d65e9215..7569b21 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -271,7 +271,7 @@
 static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
 		int type, int code)
 {
-	struct dst_entry *dst = &rt->u.dst;
+	struct dst_entry *dst = &rt->dst;
 	int rc = 1;
 
 	if (type > NR_ICMP_TYPES)
@@ -327,7 +327,7 @@
 	struct sock *sk;
 	struct sk_buff *skb;
 
-	sk = icmp_sk(dev_net((*rt)->u.dst.dev));
+	sk = icmp_sk(dev_net((*rt)->dst.dev));
 	if (ip_append_data(sk, icmp_glue_bits, icmp_param,
 			   icmp_param->data_len+icmp_param->head_len,
 			   icmp_param->head_len,
@@ -359,7 +359,7 @@
 {
 	struct ipcm_cookie ipc;
 	struct rtable *rt = skb_rtable(skb);
-	struct net *net = dev_net(rt->u.dst.dev);
+	struct net *net = dev_net(rt->dst.dev);
 	struct sock *sk;
 	struct inet_sock *inet;
 	__be32 daddr;
@@ -427,7 +427,7 @@
 
 	if (!rt)
 		goto out;
-	net = dev_net(rt->u.dst.dev);
+	net = dev_net(rt->dst.dev);
 
 	/*
 	 *	Find the original header. It is expected to be valid, of course.
@@ -596,9 +596,9 @@
 			/* Ugh! */
 			orefdst = skb_in->_skb_refdst; /* save old refdst */
 			err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
-					     RT_TOS(tos), rt2->u.dst.dev);
+					     RT_TOS(tos), rt2->dst.dev);
 
-			dst_release(&rt2->u.dst);
+			dst_release(&rt2->dst);
 			rt2 = skb_rtable(skb_in);
 			skb_in->_skb_refdst = orefdst; /* restore old refdst */
 		}
@@ -610,7 +610,7 @@
 				  XFRM_LOOKUP_ICMP);
 		switch (err) {
 		case 0:
-			dst_release(&rt->u.dst);
+			dst_release(&rt->dst);
 			rt = rt2;
 			break;
 		case -EPERM:
@@ -629,7 +629,7 @@
 
 	/* RFC says return as much as we can without exceeding 576 bytes. */
 
-	room = dst_mtu(&rt->u.dst);
+	room = dst_mtu(&rt->dst);
 	if (room > 576)
 		room = 576;
 	room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
@@ -925,6 +925,7 @@
 /*
  * RFC1812 (4.3.3.9).	A router SHOULD listen all replies, and complain
  *			loudly if an inconsistency is found.
+ * called with rcu_read_lock()
  */
 
 static void icmp_address_reply(struct sk_buff *skb)
@@ -935,12 +936,12 @@
 	struct in_ifaddr *ifa;
 
 	if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
-		goto out;
+		return;
 
-	in_dev = in_dev_get(dev);
+	in_dev = __in_dev_get_rcu(dev);
 	if (!in_dev)
-		goto out;
-	rcu_read_lock();
+		return;
+
 	if (in_dev->ifa_list &&
 	    IN_DEV_LOG_MARTIANS(in_dev) &&
 	    IN_DEV_FORWARD(in_dev)) {
@@ -958,9 +959,6 @@
 			       mp, dev->name, &rt->rt_src);
 		}
 	}
-	rcu_read_unlock();
-	in_dev_put(in_dev);
-out:;
 }
 
 static void icmp_discard(struct sk_buff *skb)
@@ -974,7 +972,7 @@
 {
 	struct icmphdr *icmph;
 	struct rtable *rt = skb_rtable(skb);
-	struct net *net = dev_net(rt->u.dst.dev);
+	struct net *net = dev_net(rt->dst.dev);
 
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 		struct sec_path *sp = skb_sec_path(skb);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 5fff865..b5580d4 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -312,7 +312,7 @@
 		return NULL;
 	}
 
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 	skb->dev = dev;
 
 	skb_reserve(skb, LL_RESERVED_SPACE(dev));
@@ -330,7 +330,7 @@
 	pip->saddr    = rt->rt_src;
 	pip->protocol = IPPROTO_IGMP;
 	pip->tot_len  = 0;	/* filled in later */
-	ip_select_ident(pip, &rt->u.dst, NULL);
+	ip_select_ident(pip, &rt->dst, NULL);
 	((u8*)&pip[1])[0] = IPOPT_RA;
 	((u8*)&pip[1])[1] = 4;
 	((u8*)&pip[1])[2] = 0;
@@ -660,7 +660,7 @@
 		return -1;
 	}
 
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
@@ -676,7 +676,7 @@
 	iph->daddr    = dst;
 	iph->saddr    = rt->rt_src;
 	iph->protocol = IPPROTO_IGMP;
-	ip_select_ident(iph, &rt->u.dst, NULL);
+	ip_select_ident(iph, &rt->dst, NULL);
 	((u8*)&iph[1])[0] = IPOPT_RA;
 	((u8*)&iph[1])[1] = 4;
 	((u8*)&iph[1])[2] = 0;
@@ -916,18 +916,19 @@
 	read_unlock(&in_dev->mc_list_lock);
 }
 
+/* called in rcu_read_lock() section */
 int igmp_rcv(struct sk_buff *skb)
 {
 	/* This basically follows the spec line by line -- see RFC1112 */
 	struct igmphdr *ih;
-	struct in_device *in_dev = in_dev_get(skb->dev);
+	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
 	int len = skb->len;
 
 	if (in_dev == NULL)
 		goto drop;
 
 	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
-		goto drop_ref;
+		goto drop;
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
@@ -937,7 +938,7 @@
 	case CHECKSUM_NONE:
 		skb->csum = 0;
 		if (__skb_checksum_complete(skb))
-			goto drop_ref;
+			goto drop;
 	}
 
 	ih = igmp_hdr(skb);
@@ -957,7 +958,6 @@
 		break;
 	case IGMP_PIM:
 #ifdef CONFIG_IP_PIMSM_V1
-		in_dev_put(in_dev);
 		return pim_rcv_v1(skb);
 #endif
 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -971,8 +971,6 @@
 		break;
 	}
 
-drop_ref:
-	in_dev_put(in_dev);
 drop:
 	kfree_skb(skb);
 	return 0;
@@ -1427,7 +1425,7 @@
 	}
 
 	if (!dev && !ip_route_output_key(net, &rt, &fl)) {
-		dev = rt->u.dst.dev;
+		dev = rt->dst.dev;
 		ip_rt_put(rt);
 	}
 	if (dev) {
@@ -1646,8 +1644,7 @@
 				if (dpsf->sf_inaddr == psf->sf_inaddr)
 					break;
 			if (!dpsf) {
-				dpsf = (struct ip_sf_list *)
-					kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
 				if (!dpsf)
 					continue;
 				*dpsf = *psf;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 70eb350..57c9e4d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -383,7 +383,7 @@
 		goto no_route;
 	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
 		goto route_err;
-	return &rt->u.dst;
+	return &rt->dst;
 
 route_err:
 	ip_rt_put(rt);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 6bcfe52..9ffa24b 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -51,8 +51,8 @@
  *  lookups performed with disabled BHs.
  *
  *  Serialisation issues.
- *  1.  Nodes may appear in the tree only with the pool write lock held.
- *  2.  Nodes may disappear from the tree only with the pool write lock held
+ *  1.  Nodes may appear in the tree only with the pool lock held.
+ *  2.  Nodes may disappear from the tree only with the pool lock held
  *      AND reference count being 0.
  *  3.  Nodes appears and disappears from unused node list only under
  *      "inet_peer_unused_lock".
@@ -64,23 +64,31 @@
  *		   usually under some other lock to prevent node disappearing
  *		dtime: unused node list lock
  *		v4daddr: unchangeable
- *		ip_id_count: idlock
+ *		ip_id_count: atomic value (no lock needed)
  */
 
 static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
-static struct inet_peer peer_fake_node = {
-	.avl_left	= &peer_fake_node,
-	.avl_right	= &peer_fake_node,
+
+#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+static const struct inet_peer peer_fake_node = {
+	.avl_left	= peer_avl_empty,
+	.avl_right	= peer_avl_empty,
 	.avl_height	= 0
 };
-#define peer_avl_empty (&peer_fake_node)
-static struct inet_peer *peer_root = peer_avl_empty;
-static DEFINE_RWLOCK(peer_pool_lock);
+
+static struct {
+	struct inet_peer *root;
+	spinlock_t	lock;
+	int		total;
+} peers = {
+	.root		= peer_avl_empty,
+	.lock		= __SPIN_LOCK_UNLOCKED(peers.lock),
+	.total		= 0,
+};
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
-static int peer_total;
 /* Exported for sysctl_net_ipv4.  */
 int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
 					 * aggressively at this stage */
@@ -89,8 +97,13 @@
 int inet_peer_gc_mintime __read_mostly = 10 * HZ;
 int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
 
-static LIST_HEAD(unused_peers);
-static DEFINE_SPINLOCK(inet_peer_unused_lock);
+static struct {
+	struct list_head	list;
+	spinlock_t		lock;
+} unused_peers = {
+	.list			= LIST_HEAD_INIT(unused_peers.list),
+	.lock			= __SPIN_LOCK_UNLOCKED(unused_peers.lock),
+};
 
 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -116,7 +129,7 @@
 
 	peer_cachep = kmem_cache_create("inet_peer_cache",
 			sizeof(struct inet_peer),
-			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
 			NULL);
 
 	/* All the timers, started at system startup tend
@@ -131,38 +144,69 @@
 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
-	spin_lock_bh(&inet_peer_unused_lock);
-	list_del_init(&p->unused);
-	spin_unlock_bh(&inet_peer_unused_lock);
+	if (!list_empty(&p->unused)) {
+		spin_lock_bh(&unused_peers.lock);
+		list_del_init(&p->unused);
+		spin_unlock_bh(&unused_peers.lock);
+	}
 }
 
 /*
  * Called with local BH disabled and the pool lock held.
- * _stack is known to be NULL or not at compile time,
- * so compiler will optimize the if (_stack) tests.
  */
 #define lookup(_daddr, _stack) 					\
 ({								\
 	struct inet_peer *u, **v;				\
-	if (_stack != NULL) {					\
-		stackptr = _stack;				\
-		*stackptr++ = &peer_root;			\
-	}							\
-	for (u = peer_root; u != peer_avl_empty; ) {		\
+								\
+	stackptr = _stack;					\
+	*stackptr++ = &peers.root;				\
+	for (u = peers.root; u != peer_avl_empty; ) {		\
 		if (_daddr == u->v4daddr)			\
 			break;					\
 		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
 			v = &u->avl_left;			\
 		else						\
 			v = &u->avl_right;			\
-		if (_stack != NULL)				\
-			*stackptr++ = v;			\
+		*stackptr++ = v;				\
 		u = *v;						\
 	}							\
 	u;							\
 })
 
-/* Called with local BH disabled and the pool write lock held. */
+/*
+ * Called with rcu_read_lock_bh()
+ * Because we hold no lock against a writer, it's quite possible we fall
+ * into an endless loop.
+ * But every pointer we follow is guaranteed to be valid thanks to RCU.
+ * We exit from this function if the number of links exceeds PEER_MAXDEPTH.
+ */
+static struct inet_peer *lookup_rcu_bh(__be32 daddr)
+{
+	struct inet_peer *u = rcu_dereference_bh(peers.root);
+	int count = 0;
+
+	while (u != peer_avl_empty) {
+		if (daddr == u->v4daddr) {
+			/* Before taking a reference, check if this entry was
+			 * deleted: unlink_from_pool() sets refcnt=-1 to
+			 * distinguish between an unused entry (refcnt=0) and
+			 * a freed one.
+			 */
+			if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+				u = NULL;
+			return u;
+		}
+		if ((__force __u32)daddr < (__force __u32)u->v4daddr)
+			u = rcu_dereference_bh(u->avl_left);
+		else
+			u = rcu_dereference_bh(u->avl_right);
+		if (unlikely(++count == PEER_MAXDEPTH))
+			break;
+	}
+	return NULL;
+}
+
+/* Called with local BH disabled and the pool lock held. */
 #define lookup_rightempty(start)				\
 ({								\
 	struct inet_peer *u, **v;				\
@@ -176,9 +220,10 @@
 	u;							\
 })
 
-/* Called with local BH disabled and the pool write lock held.
+/* Called with local BH disabled and the pool lock held.
  * Variable names are the proof of operation correctness.
- * Look into mm/map_avl.c for more detail description of the ideas.  */
+ * Look into mm/map_avl.c for a more detailed description of the ideas.
+ */
 static void peer_avl_rebalance(struct inet_peer **stack[],
 		struct inet_peer ***stackend)
 {
@@ -254,15 +299,21 @@
 	}
 }
 
-/* Called with local BH disabled and the pool write lock held. */
+/* Called with local BH disabled and the pool lock held. */
 #define link_to_pool(n)						\
 do {								\
 	n->avl_height = 1;					\
 	n->avl_left = peer_avl_empty;				\
 	n->avl_right = peer_avl_empty;				\
+	smp_wmb(); /* lockless readers can catch us now */	\
 	**--stackptr = n;					\
 	peer_avl_rebalance(stack, stackptr);			\
-} while(0)
+} while (0)
+
+static void inetpeer_free_rcu(struct rcu_head *head)
+{
+	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
+}
 
 /* May be called with local BH enabled. */
 static void unlink_from_pool(struct inet_peer *p)
@@ -271,13 +322,14 @@
 
 	do_free = 0;
 
-	write_lock_bh(&peer_pool_lock);
+	spin_lock_bh(&peers.lock);
 	/* Check the reference counter.  It was artificially incremented by 1
-	 * in cleanup() function to prevent sudden disappearing.  If the
-	 * reference count is still 1 then the node is referenced only as `p'
-	 * here and from the pool.  So under the exclusive pool lock it's safe
-	 * to remove the node and free it later. */
-	if (atomic_read(&p->refcnt) == 1) {
+	 * in the cleanup() function to prevent it from suddenly disappearing.
+	 * If we can atomically (because of lockless readers) take this last
+	 * reference, it's safe to remove the node and free it later.
+	 * We use refcnt=-1 to alert lockless readers that this entry is deleted.
+	 */
+	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
 		struct inet_peer **stack[PEER_MAXDEPTH];
 		struct inet_peer ***stackptr, ***delp;
 		if (lookup(p->v4daddr, stack) != p)
@@ -303,20 +355,21 @@
 			delp[1] = &t->avl_left; /* was &p->avl_left */
 		}
 		peer_avl_rebalance(stack, stackptr);
-		peer_total--;
+		peers.total--;
 		do_free = 1;
 	}
-	write_unlock_bh(&peer_pool_lock);
+	spin_unlock_bh(&peers.lock);
 
 	if (do_free)
-		kmem_cache_free(peer_cachep, p);
+		call_rcu_bh(&p->rcu, inetpeer_free_rcu);
 	else
 		/* The node is used again.  Decrease the reference counter
 		 * back.  The loop "cleanup -> unlink_from_unused
 		 *   -> unlink_from_pool -> putpeer -> link_to_unused
 		 *   -> cleanup (for the same node)"
 		 * doesn't really exist because the entry will have a
-		 * recent deletion time and will not be cleaned again soon. */
+		 * recent deletion time and will not be cleaned again soon.
+		 */
 		inet_putpeer(p);
 }
 
@@ -326,16 +379,16 @@
 	struct inet_peer *p = NULL;
 
 	/* Remove the first entry from the list of unused nodes. */
-	spin_lock_bh(&inet_peer_unused_lock);
-	if (!list_empty(&unused_peers)) {
+	spin_lock_bh(&unused_peers.lock);
+	if (!list_empty(&unused_peers.list)) {
 		__u32 delta;
 
-		p = list_first_entry(&unused_peers, struct inet_peer, unused);
+		p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
 		delta = (__u32)jiffies - p->dtime;
 
 		if (delta < ttl) {
 			/* Do not prune fresh entries. */
-			spin_unlock_bh(&inet_peer_unused_lock);
+			spin_unlock_bh(&unused_peers.lock);
 			return -1;
 		}
 
@@ -345,7 +398,7 @@
 		 * before unlink_from_pool() call. */
 		atomic_inc(&p->refcnt);
 	}
-	spin_unlock_bh(&inet_peer_unused_lock);
+	spin_unlock_bh(&unused_peers.lock);
 
 	if (p == NULL)
 		/* It means that the total number of USED entries has
@@ -360,62 +413,56 @@
 /* Called with or without local BH being disabled. */
 struct inet_peer *inet_getpeer(__be32 daddr, int create)
 {
-	struct inet_peer *p, *n;
+	struct inet_peer *p;
 	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
 
-	/* Look up for the address quickly. */
-	read_lock_bh(&peer_pool_lock);
-	p = lookup(daddr, NULL);
-	if (p != peer_avl_empty)
-		atomic_inc(&p->refcnt);
-	read_unlock_bh(&peer_pool_lock);
+	/* Look up the address quickly, without taking the lock.
+	 * Because of a concurrent writer, we might not find an existing entry.
+	 */
+	rcu_read_lock_bh();
+	p = lookup_rcu_bh(daddr);
+	rcu_read_unlock_bh();
 
-	if (p != peer_avl_empty) {
-		/* The existing node has been found. */
-		/* Remove the entry from unused list if it was there. */
+	if (p) {
+		/* The existing node has been found.
+		 * Remove the entry from the unused list if it was there.
+		 */
 		unlink_from_unused(p);
 		return p;
 	}
 
-	if (!create)
-		return NULL;
-
-	/* Allocate the space outside the locked region. */
-	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
-	if (n == NULL)
-		return NULL;
-	n->v4daddr = daddr;
-	atomic_set(&n->refcnt, 1);
-	atomic_set(&n->rid, 0);
-	atomic_set(&n->ip_id_count, secure_ip_id(daddr));
-	n->tcp_ts_stamp = 0;
-
-	write_lock_bh(&peer_pool_lock);
-	/* Check if an entry has suddenly appeared. */
+	/* Retry an exact lookup, this time taking the lock first.
+	 * At least the nodes should now be hot in our cache.
+	 */
+	spin_lock_bh(&peers.lock);
 	p = lookup(daddr, stack);
-	if (p != peer_avl_empty)
-		goto out_free;
+	if (p != peer_avl_empty) {
+		atomic_inc(&p->refcnt);
+		spin_unlock_bh(&peers.lock);
+		/* Remove the entry from the unused list if it was there. */
+		unlink_from_unused(p);
+		return p;
+	}
+	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
+	if (p) {
+		p->v4daddr = daddr;
+		atomic_set(&p->refcnt, 1);
+		atomic_set(&p->rid, 0);
+		atomic_set(&p->ip_id_count, secure_ip_id(daddr));
+		p->tcp_ts_stamp = 0;
+		INIT_LIST_HEAD(&p->unused);
 
-	/* Link the node. */
-	link_to_pool(n);
-	INIT_LIST_HEAD(&n->unused);
-	peer_total++;
-	write_unlock_bh(&peer_pool_lock);
 
-	if (peer_total >= inet_peer_threshold)
+		/* Link the node. */
+		link_to_pool(p);
+		peers.total++;
+	}
+	spin_unlock_bh(&peers.lock);
+
+	if (peers.total >= inet_peer_threshold)
 		/* Remove one less-recently-used entry. */
 		cleanup_once(0);
 
-	return n;
-
-out_free:
-	/* The appropriate node is already in the pool. */
-	atomic_inc(&p->refcnt);
-	write_unlock_bh(&peer_pool_lock);
-	/* Remove the entry from unused list if it was there. */
-	unlink_from_unused(p);
-	/* Free preallocated the preallocated node. */
-	kmem_cache_free(peer_cachep, n);
 	return p;
 }
 
@@ -425,12 +472,12 @@
 	unsigned long now = jiffies;
 	int ttl;
 
-	if (peer_total >= inet_peer_threshold)
+	if (peers.total >= inet_peer_threshold)
 		ttl = inet_peer_minttl;
 	else
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
-					peer_total / inet_peer_threshold * HZ;
+					peers.total / inet_peer_threshold * HZ;
 	while (!cleanup_once(ttl)) {
 		if (jiffies != now)
 			break;
@@ -439,22 +486,25 @@
 	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
 	 * interval depending on the total number of entries (more entries,
 	 * less interval). */
-	if (peer_total >= inet_peer_threshold)
+	if (peers.total >= inet_peer_threshold)
 		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
 	else
 		peer_periodic_timer.expires = jiffies
 			+ inet_peer_gc_maxtime
 			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
-				peer_total / inet_peer_threshold * HZ;
+				peers.total / inet_peer_threshold * HZ;
 	add_timer(&peer_periodic_timer);
 }
 
 void inet_putpeer(struct inet_peer *p)
 {
-	spin_lock_bh(&inet_peer_unused_lock);
-	if (atomic_dec_and_test(&p->refcnt)) {
-		list_add_tail(&p->unused, &unused_peers);
+	local_bh_disable();
+
+	if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
+		list_add_tail(&p->unused, &unused_peers.list);
 		p->dtime = (__u32)jiffies;
+		spin_unlock(&unused_peers.lock);
 	}
-	spin_unlock_bh(&inet_peer_unused_lock);
+
+	local_bh_enable();
 }
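
The refcount trick in the new unlink_from_pool()/lookup_rcu_bh() pair is easy to misread, so here is a minimal userspace sketch of the same idea in C11 atomics (names are illustrative, not kernel API): a lockless reader may take a reference only while the entry has not been marked deleted (refcnt == -1), and the deleter claims the last reference atomically, so the two cannot race.

#include <stdatomic.h>
#include <stdbool.h>

struct peer {
	atomic_int refcnt;		/* -1 means "being freed, do not touch" */
};

/* reader side: the moral equivalent of atomic_add_unless(&p->refcnt, 1, -1) */
static bool peer_tryget(struct peer *p)
{
	int old = atomic_load(&p->refcnt);

	while (old != -1) {
		if (atomic_compare_exchange_weak(&p->refcnt, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* entry is being deleted */
}

/* unlink side: the moral equivalent of atomic_cmpxchg(&p->refcnt, 1, -1) == 1 */
static bool peer_try_delete(struct peer *p)
{
	int expected = 1;		/* only the pool's own reference is left */

	return atomic_compare_exchange_strong(&p->refcnt, &expected, -1);
}
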
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 56cdf68..99461f0 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -87,16 +87,16 @@
 	if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
 		goto sr_failed;
 
-	if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) &&
+	if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
 		     (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
-		IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS);
+		IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
-			  htonl(dst_mtu(&rt->u.dst)));
+			  htonl(dst_mtu(&rt->dst)));
 		goto drop;
 	}
 
 	/* We are about to mangle packet. Copy it! */
-	if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len))
+	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+rt->dst.header_len))
 		goto drop;
 	iph = ip_hdr(skb);
 
@@ -113,7 +113,7 @@
 	skb->priority = rt_tos2priority(iph->tos);
 
 	return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
-		       rt->u.dst.dev, ip_forward_finish);
+		       rt->dst.dev, ip_forward_finish);
 
 sr_failed:
 	/*
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 75347ea..dd0dbf0 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -124,11 +124,8 @@
 }
 
 /* Memory Tracking Functions. */
-static __inline__ void frag_kfree_skb(struct netns_frags *nf,
-		struct sk_buff *skb, int *work)
+static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
 {
-	if (work)
-		*work -= skb->truesize;
 	atomic_sub(skb->truesize, &nf->mem);
 	kfree_skb(skb);
 }
@@ -309,7 +306,7 @@
 	fp = qp->q.fragments;
 	do {
 		struct sk_buff *xp = fp->next;
-		frag_kfree_skb(qp->q.net, fp, NULL);
+		frag_kfree_skb(qp->q.net, fp);
 		fp = xp;
 	} while (fp);
 
@@ -317,6 +314,7 @@
 	qp->q.len = 0;
 	qp->q.meat = 0;
 	qp->q.fragments = NULL;
+	qp->q.fragments_tail = NULL;
 	qp->iif = 0;
 
 	return 0;
@@ -389,6 +387,11 @@
 	 * in the chain of fragments so far.  We must know where to put
 	 * this fragment, right?
 	 */
+	prev = qp->q.fragments_tail;
+	if (!prev || FRAG_CB(prev)->offset < offset) {
+		next = NULL;
+		goto found;
+	}
 	prev = NULL;
 	for (next = qp->q.fragments; next != NULL; next = next->next) {
 		if (FRAG_CB(next)->offset >= offset)
@@ -396,6 +399,7 @@
 		prev = next;
 	}
 
+found:
 	/* We found where to put this one.  Check for overlap with
 	 * preceding fragment, and, if needed, align things so that
 	 * any overlaps are eliminated.
@@ -446,7 +450,7 @@
 				qp->q.fragments = next;
 
 			qp->q.meat -= free_it->len;
-			frag_kfree_skb(qp->q.net, free_it, NULL);
+			frag_kfree_skb(qp->q.net, free_it);
 		}
 	}
 
@@ -454,6 +458,8 @@
 
 	/* Insert this fragment in the chain of fragments. */
 	skb->next = next;
+	if (!next)
+		qp->q.fragments_tail = skb;
 	if (prev)
 		prev->next = skb;
 	else
@@ -507,6 +513,8 @@
 			goto out_nomem;
 
 		fp->next = head->next;
+		if (!fp->next)
+			qp->q.fragments_tail = fp;
 		prev->next = fp;
 
 		skb_morph(head, qp->q.fragments);
@@ -556,7 +564,6 @@
 
 	skb_shinfo(head)->frag_list = head->next;
 	skb_push(head, head->data - skb_network_header(head));
-	atomic_sub(head->truesize, &qp->q.net->mem);
 
 	for (fp=head->next; fp; fp = fp->next) {
 		head->data_len += fp->len;
@@ -566,8 +573,8 @@
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
-		atomic_sub(fp->truesize, &qp->q.net->mem);
 	}
+	atomic_sub(head->truesize, &qp->q.net->mem);
 
 	head->next = NULL;
 	head->dev = dev;
@@ -578,6 +585,7 @@
 	iph->tot_len = htons(len);
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
 	qp->q.fragments = NULL;
+	qp->q.fragments_tail = NULL;
 	return 0;
 
 out_nomem:
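
For reference, a stripped-down sketch of the fragments_tail fast path added above (plain C, illustrative structures only; overlap trimming and memory accounting are omitted): in-order fragments, the common case, append in O(1), and only out-of-order arrivals fall back to the linear scan.

#include <stddef.h>

struct frag {
	struct frag *next;
	unsigned int offset;
};

struct frag_queue {
	struct frag *fragments;
	struct frag *fragments_tail;
};

/* insert nfrag in offset order; O(1) when fragments arrive in order */
static void fq_insert(struct frag_queue *q, struct frag *nfrag)
{
	struct frag *prev, *next;

	prev = q->fragments_tail;
	if (!prev || prev->offset < nfrag->offset) {
		next = NULL;				/* fast path: append at the tail */
		goto found;
	}
	prev = NULL;					/* out of order: linear scan */
	for (next = q->fragments; next; next = next->next) {
		if (next->offset >= nfrag->offset)
			break;
		prev = next;
	}
found:
	nfrag->next = next;
	if (!next)
		q->fragments_tail = nfrag;
	if (prev)
		prev->next = nfrag;
	else
		q->fragments = nfrag;
}
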
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 32618e1..749e548 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -745,7 +745,7 @@
 			goto tx_error;
 		}
 	}
-	tdev = rt->u.dst.dev;
+	tdev = rt->dst.dev;
 
 	if (tdev == dev) {
 		ip_rt_put(rt);
@@ -755,7 +755,7 @@
 
 	df = tiph->frag_off;
 	if (df)
-		mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen;
+		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
 	else
 		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
@@ -803,7 +803,7 @@
 			tunnel->err_count = 0;
 	}
 
-	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len;
+	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;
 
 	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
@@ -830,7 +830,7 @@
 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
 			      IPSKB_REROUTED);
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/*
 	 *	Push down and install the IPIP header.
@@ -853,7 +853,7 @@
 			iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
 #endif
 		else
-			iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
+			iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
 	}
 
 	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
@@ -915,7 +915,7 @@
 				    .proto = IPPROTO_GRE };
 		struct rtable *rt;
 		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
-			tdev = rt->u.dst.dev;
+			tdev = rt->dst.dev;
 			ip_rt_put(rt);
 		}
 
@@ -1174,7 +1174,7 @@
 		struct rtable *rt;
 		if (ip_route_output_key(dev_net(dev), &rt, &fl))
 			return -EADDRNOTAVAIL;
-		dev = rt->u.dst.dev;
+		dev = rt->dst.dev;
 		ip_rt_put(rt);
 		if (__in_dev_get_rtnl(dev) == NULL)
 			return -EADDRNOTAVAIL;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d930dc5..d859bcc 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -146,7 +146,7 @@
 #include <linux/netlink.h>
 
 /*
- *	Process Router Attention IP option
+ *	Process Router Attention IP option (RFC 2113)
  */
 int ip_call_ra_chain(struct sk_buff *skb)
 {
@@ -155,8 +155,7 @@
 	struct sock *last = NULL;
 	struct net_device *dev = skb->dev;
 
-	read_lock(&ip_ra_lock);
-	for (ra = ip_ra_chain; ra; ra = ra->next) {
+	for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
 		struct sock *sk = ra->sk;
 
 		/* If socket is bound to an interface, only report
@@ -167,10 +166,8 @@
 		     sk->sk_bound_dev_if == dev->ifindex) &&
 		    net_eq(sock_net(sk), dev_net(dev))) {
 			if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
-				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) {
-					read_unlock(&ip_ra_lock);
+				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
 					return 1;
-				}
 			}
 			if (last) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
@@ -183,10 +180,8 @@
 
 	if (last) {
 		raw_rcv(last, skb);
-		read_unlock(&ip_ra_lock);
 		return 1;
 	}
-	read_unlock(&ip_ra_lock);
 	return 0;
 }
 
@@ -298,18 +293,16 @@
 	}
 
 	if (unlikely(opt->srr)) {
-		struct in_device *in_dev = in_dev_get(dev);
+		struct in_device *in_dev = __in_dev_get_rcu(dev);
+
 		if (in_dev) {
 			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
 				if (IN_DEV_LOG_MARTIANS(in_dev) &&
 				    net_ratelimit())
 					printk(KERN_INFO "source route option %pI4 -> %pI4\n",
 					       &iph->saddr, &iph->daddr);
-				in_dev_put(in_dev);
 				goto drop;
 			}
-
-			in_dev_put(in_dev);
 		}
 
 		if (ip_options_rcv_srr(skb))
@@ -340,13 +333,16 @@
 			else if (err == -ENETUNREACH)
 				IP_INC_STATS_BH(dev_net(skb->dev),
 						IPSTATS_MIB_INNOROUTES);
+			else if (err == -EXDEV)
+				NET_INC_STATS_BH(dev_net(skb->dev),
+						 LINUX_MIB_IPRPFILTER);
 			goto drop;
 		}
 	}
 
 #ifdef CONFIG_NET_CLS_ROUTE
 	if (unlikely(skb_dst(skb)->tclassid)) {
-		struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id());
+		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
 		u32 idx = skb_dst(skb)->tclassid;
 		st[idx&0xFF].o_packets++;
 		st[idx&0xFF].o_bytes += skb->len;
@@ -360,10 +356,10 @@
 
 	rt = skb_rtable(skb);
 	if (rt->rt_type == RTN_MULTICAST) {
-		IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST,
+		IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST,
 				skb->len);
 	} else if (rt->rt_type == RTN_BROADCAST)
-		IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCAST,
+		IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST,
 				skb->len);
 
 	return dst_input(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 041d41d..663cb2a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -151,15 +151,15 @@
 	iph->version  = 4;
 	iph->ihl      = 5;
 	iph->tos      = inet->tos;
-	if (ip_dont_fragment(sk, &rt->u.dst))
+	if (ip_dont_fragment(sk, &rt->dst))
 		iph->frag_off = htons(IP_DF);
 	else
 		iph->frag_off = 0;
-	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
+	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 	iph->daddr    = rt->rt_dst;
 	iph->saddr    = rt->rt_src;
 	iph->protocol = sk->sk_protocol;
-	ip_select_ident(iph, &rt->u.dst, sk);
+	ip_select_ident(iph, &rt->dst, sk);
 
 	if (opt && opt->optlen) {
 		iph->ihl += opt->optlen>>2;
@@ -240,7 +240,7 @@
 {
 	struct sock *sk = skb->sk;
 	struct rtable *rt = skb_rtable(skb);
-	struct net_device *dev = rt->u.dst.dev;
+	struct net_device *dev = rt->dst.dev;
 
 	/*
 	 *	If the indicated interface is up and running, send the packet.
@@ -359,9 +359,9 @@
 			if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
 				goto no_route;
 		}
-		sk_setup_caps(sk, &rt->u.dst);
+		sk_setup_caps(sk, &rt->dst);
 	}
-	skb_dst_set_noref(skb, &rt->u.dst);
+	skb_dst_set_noref(skb, &rt->dst);
 
 packet_routed:
 	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
@@ -372,11 +372,11 @@
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
 	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
-	if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df)
+	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
 		iph->frag_off = htons(IP_DF);
 	else
 		iph->frag_off = 0;
-	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
+	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 	iph->protocol = sk->sk_protocol;
 	iph->saddr    = rt->rt_src;
 	iph->daddr    = rt->rt_dst;
@@ -387,7 +387,7 @@
 		ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
 	}
 
-	ip_select_ident_more(iph, &rt->u.dst, sk,
+	ip_select_ident_more(iph, &rt->dst, sk,
 			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
 	skb->priority = sk->sk_priority;
@@ -411,7 +411,7 @@
 	to->priority = from->priority;
 	to->protocol = from->protocol;
 	skb_dst_drop(to);
-	skb_dst_set(to, dst_clone(skb_dst(from)));
+	skb_dst_copy(to, from);
 	to->dev = from->dev;
 	to->mark = from->mark;
 
@@ -442,7 +442,6 @@
 int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
 	struct iphdr *iph;
-	int raw = 0;
 	int ptr;
 	struct net_device *dev;
 	struct sk_buff *skb2;
@@ -452,7 +451,7 @@
 	struct rtable *rt = skb_rtable(skb);
 	int err = 0;
 
-	dev = rt->u.dst.dev;
+	dev = rt->dst.dev;
 
 	/*
 	 *	Point into the IP datagram header.
@@ -473,7 +472,7 @@
 	 */
 
 	hlen = iph->ihl * 4;
-	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
+	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
 #ifdef CONFIG_BRIDGE_NETFILTER
 	if (skb->nf_bridge)
 		mtu -= nf_bridge_mtu_reduction(skb);
@@ -580,13 +579,13 @@
 
 slow_path:
 	left = skb->len - hlen;		/* Space per frame */
-	ptr = raw + hlen;		/* Where to start from */
+	ptr = hlen;		/* Where to start from */
 
 	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
 	 * we need to make room for the encapsulating header
 	 */
 	pad = nf_bridge_pad(skb);
-	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
+	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, pad);
 	mtu -= pad;
 
 	/*
@@ -833,13 +832,13 @@
 		 */
 		*rtp = NULL;
 		inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
-					    rt->u.dst.dev->mtu :
-					    dst_mtu(rt->u.dst.path);
-		inet->cork.dst = &rt->u.dst;
+					    rt->dst.dev->mtu :
+					    dst_mtu(rt->dst.path);
+		inet->cork.dst = &rt->dst;
 		inet->cork.length = 0;
 		sk->sk_sndmsg_page = NULL;
 		sk->sk_sndmsg_off = 0;
-		if ((exthdrlen = rt->u.dst.header_len) != 0) {
+		if ((exthdrlen = rt->dst.header_len) != 0) {
 			length += exthdrlen;
 			transhdrlen += exthdrlen;
 		}
@@ -852,7 +851,7 @@
 		exthdrlen = 0;
 		mtu = inet->cork.fragsize;
 	}
-	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
+	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
@@ -869,7 +868,7 @@
 	 */
 	if (transhdrlen &&
 	    length + fragheaderlen <= mtu &&
-	    rt->u.dst.dev->features & NETIF_F_V4_CSUM &&
+	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
 	    !exthdrlen)
 		csummode = CHECKSUM_PARTIAL;
 
@@ -878,7 +877,7 @@
 	inet->cork.length += length;
 	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
+	    (rt->dst.dev->features & NETIF_F_UFO)) {
 		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
 					 fragheaderlen, transhdrlen, mtu,
 					 flags);
@@ -926,7 +925,7 @@
 			fraglen = datalen + fragheaderlen;
 
 			if ((flags & MSG_MORE) &&
-			    !(rt->u.dst.dev->features&NETIF_F_SG))
+			    !(rt->dst.dev->features&NETIF_F_SG))
 				alloclen = mtu;
 			else
 				alloclen = datalen + fragheaderlen;
@@ -937,7 +936,7 @@
 			 * the last.
 			 */
 			if (datalen == length + fraggap)
-				alloclen += rt->u.dst.trailer_len;
+				alloclen += rt->dst.trailer_len;
 
 			if (transhdrlen) {
 				skb = sock_alloc_send_skb(sk,
@@ -1010,7 +1009,7 @@
 		if (copy > length)
 			copy = length;
 
-		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
+		if (!(rt->dst.dev->features&NETIF_F_SG)) {
 			unsigned int off;
 
 			off = skb->len;
@@ -1105,10 +1104,10 @@
 	if (inet->cork.flags & IPCORK_OPT)
 		opt = inet->cork.opt;
 
-	if (!(rt->u.dst.dev->features&NETIF_F_SG))
+	if (!(rt->dst.dev->features&NETIF_F_SG))
 		return -EOPNOTSUPP;
 
-	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
+	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 	mtu = inet->cork.fragsize;
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
@@ -1125,7 +1124,7 @@
 	inet->cork.length += size;
 	if ((size + skb->len > mtu) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
+	    (rt->dst.dev->features & NETIF_F_UFO)) {
 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 	}
@@ -1277,8 +1276,8 @@
 	 * If local_df is set too, we still allow to fragment this frame
 	 * locally. */
 	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
-	    (skb->len <= dst_mtu(&rt->u.dst) &&
-	     ip_dont_fragment(sk, &rt->u.dst)))
+	    (skb->len <= dst_mtu(&rt->dst) &&
+	     ip_dont_fragment(sk, &rt->dst)))
 		df = htons(IP_DF);
 
 	if (inet->cork.flags & IPCORK_OPT)
@@ -1287,7 +1286,7 @@
 	if (rt->rt_type == RTN_MULTICAST)
 		ttl = inet->mc_ttl;
 	else
-		ttl = ip_select_ttl(inet, &rt->u.dst);
+		ttl = ip_select_ttl(inet, &rt->dst);
 
 	iph = (struct iphdr *)skb->data;
 	iph->version = 4;
@@ -1298,7 +1297,7 @@
 	}
 	iph->tos = inet->tos;
 	iph->frag_off = df;
-	ip_select_ident(iph, &rt->u.dst, sk);
+	ip_select_ident(iph, &rt->dst, sk);
 	iph->ttl = ttl;
 	iph->protocol = sk->sk_protocol;
 	iph->saddr = rt->rt_src;
@@ -1311,7 +1310,7 @@
 	 * on dst refcount
 	 */
 	inet->cork.dst = NULL;
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	if (iph->protocol == IPPROTO_ICMP)
 		icmp_out_count(net, ((struct icmphdr *)
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ce23178..6c40a8c 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -239,7 +239,16 @@
    sent to multicast group to reach destination designated router.
  */
 struct ip_ra_chain *ip_ra_chain;
-DEFINE_RWLOCK(ip_ra_lock);
+static DEFINE_SPINLOCK(ip_ra_lock);
+
+
+static void ip_ra_destroy_rcu(struct rcu_head *head)
+{
+	struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
+
+	sock_put(ra->saved_sk);
+	kfree(ra);
+}
 
 int ip_ra_control(struct sock *sk, unsigned char on,
 		  void (*destructor)(struct sock *))
@@ -251,35 +260,42 @@
 
 	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
 
-	write_lock_bh(&ip_ra_lock);
+	spin_lock_bh(&ip_ra_lock);
 	for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
 		if (ra->sk == sk) {
 			if (on) {
-				write_unlock_bh(&ip_ra_lock);
+				spin_unlock_bh(&ip_ra_lock);
 				kfree(new_ra);
 				return -EADDRINUSE;
 			}
-			*rap = ra->next;
-			write_unlock_bh(&ip_ra_lock);
+			/* don't let ip_call_ra_chain() use sk again */
+			ra->sk = NULL;
+			rcu_assign_pointer(*rap, ra->next);
+			spin_unlock_bh(&ip_ra_lock);
 
 			if (ra->destructor)
 				ra->destructor(sk);
-			sock_put(sk);
-			kfree(ra);
+			/*
+			 * Delay sock_put(sk) and kfree(ra) until after one RCU
+			 * grace period. This guarantees ip_call_ra_chain()
+			 * doesn't need to mess with socket refcounts.
+			 */
+			ra->saved_sk = sk;
+			call_rcu(&ra->rcu, ip_ra_destroy_rcu);
 			return 0;
 		}
 	}
 	if (new_ra == NULL) {
-		write_unlock_bh(&ip_ra_lock);
+		spin_unlock_bh(&ip_ra_lock);
 		return -ENOBUFS;
 	}
 	new_ra->sk = sk;
 	new_ra->destructor = destructor;
 
 	new_ra->next = ra;
-	*rap = new_ra;
+	rcu_assign_pointer(*rap, new_ra);
 	sock_hold(sk);
-	write_unlock_bh(&ip_ra_lock);
+	spin_unlock_bh(&ip_ra_lock);
 
 	return 0;
 }
@@ -449,7 +465,7 @@
 			     (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
 			     (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
 			     (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
-			     (1<<IP_MINTTL))) ||
+			     (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
 	    optname == IP_MULTICAST_TTL ||
 	    optname == IP_MULTICAST_ALL ||
 	    optname == IP_MULTICAST_LOOP ||
@@ -572,6 +588,13 @@
 		}
 		inet->hdrincl = val ? 1 : 0;
 		break;
+	case IP_NODEFRAG:
+		if (sk->sk_type != SOCK_RAW) {
+			err = -ENOPROTOOPT;
+			break;
+		}
+		inet->nodefrag = val ? 1 : 0;
+		break;
 	case IP_MTU_DISCOVER:
 		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
 			goto e_inval;
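
A possible userspace use of the new IP_NODEFRAG option (a sketch, not part of this patch): the option is only accepted on raw sockets and asks the netfilter defrag hook to leave the socket's packets alone. IP_NODEFRAG is assumed to be 22 here, matching the header update that accompanies this work; adjust if your headers differ.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IP_NODEFRAG
#define IP_NODEFRAG 22		/* assumed value; see include/linux/in.h */
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);	/* needs CAP_NET_RAW */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* fails with ENOPROTOOPT on anything that is not a SOCK_RAW socket */
	if (setsockopt(fd, IPPROTO_IP, IP_NODEFRAG, &one, sizeof(one)) < 0)
		perror("setsockopt(IP_NODEFRAG)");
	return 0;
}
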
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index b9d84e8..3a6e1ec 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -665,6 +665,13 @@
 		memcpy(e, ic_req_params, sizeof(ic_req_params));
 		e += sizeof(ic_req_params);
 
+		if (ic_host_name_set) {
+			*e++ = 12;	/* host-name */
+			len = strlen(utsname()->nodename);
+			*e++ = len;
+			memcpy(e, utsname()->nodename, len);
+			e += len;
+		}
 		if (*vendor_class_identifier) {
 			printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n",
 			       vendor_class_identifier);
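
The ipconfig hunk above emits DHCP option 12 (Host Name, RFC 2132) as a plain type/length/value triple. A self-contained encoder for the same layout, for illustration only (the function name is made up):

#include <string.h>

static unsigned char *dhcp_put_hostname(unsigned char *e, const char *name)
{
	size_t len = strlen(name);

	if (len > 255)			/* the option length field is one octet */
		len = 255;
	*e++ = 12;			/* option code 12: host-name */
	*e++ = (unsigned char)len;
	memcpy(e, name, len);
	return e + len;
}
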
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 7fd6367..ec03673 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -435,7 +435,7 @@
 			goto tx_error_icmp;
 		}
 	}
-	tdev = rt->u.dst.dev;
+	tdev = rt->dst.dev;
 
 	if (tdev == dev) {
 		ip_rt_put(rt);
@@ -446,7 +446,7 @@
 	df |= old_iph->frag_off & htons(IP_DF);
 
 	if (df) {
-		mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
+		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
 
 		if (mtu < 68) {
 			stats->collisions++;
@@ -503,7 +503,7 @@
 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
 			      IPSKB_REROUTED);
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/*
 	 *	Push down and install the IPIP header.
@@ -552,7 +552,7 @@
 				    .proto = IPPROTO_IPIP };
 		struct rtable *rt;
 		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
-			tdev = rt->u.dst.dev;
+			tdev = rt->dst.dev;
 			ip_rt_put(rt);
 		}
 		dev->flags |= IFF_POINTOPOINT;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 757f25eb..5395922 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1553,9 +1553,9 @@
 			goto out_free;
 	}
 
-	dev = rt->u.dst.dev;
+	dev = rt->dst.dev;
 
-	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
+	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
 		/* Do not fragment multicasts. Alas, IPv4 does not
 		   allow to send ICMP, so that packets will disappear
 		   to blackhole.
@@ -1566,7 +1566,7 @@
 		goto out_free;
 	}
 
-	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
+	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
 
 	if (skb_cow(skb, encap)) {
 		ip_rt_put(rt);
@@ -1577,7 +1577,7 @@
 	vif->bytes_out += skb->len;
 
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 	ip_decrease_ttl(ip_hdr(skb));
 
 	/* FIXME: forward and output firewalls used to be called here.
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 07de855..d88a46c 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -43,7 +43,7 @@
 
 		/* Drop old route. */
 		skb_dst_drop(skb);
-		skb_dst_set(skb, &rt->u.dst);
+		skb_dst_set(skb, &rt->dst);
 	} else {
 		/* non-local src, find valid iif to satisfy
 		 * rp-filter when calling ip_route_input. */
@@ -53,11 +53,11 @@
 
 		orefdst = skb->_skb_refdst;
 		if (ip_route_input(skb, iph->daddr, iph->saddr,
-				   RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
-			dst_release(&rt->u.dst);
+				   RT_TOS(iph->tos), rt->dst.dev) != 0) {
+			dst_release(&rt->dst);
 			return -1;
 		}
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		refdst_drop(orefdst);
 	}
 
@@ -212,9 +212,7 @@
 		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
 					       skb->len - dataoff, 0);
 		skb->ip_summed = CHECKSUM_NONE;
-		csum = __skb_checksum_complete_head(skb, dataoff + len);
-		if (!csum)
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		return __skb_checksum_complete_head(skb, dataoff + len);
 	}
 	return csum;
 }
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 1ac01b1..16c0ba0 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -758,7 +758,7 @@
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc_node(countersize, numa_node_id());
+	counters = vmalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1005,8 +1005,7 @@
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
-				numa_node_id());
+	counters = vmalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
@@ -1159,7 +1158,7 @@
 	if (len != size + num_counters * sizeof(struct xt_counters))
 		return -EINVAL;
 
-	paddc = vmalloc_node(len - size, numa_node_id());
+	paddc = vmalloc(len - size);
 	if (!paddc)
 		return -ENOMEM;
 
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index a4e5fc5..d2c1311 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -42,7 +42,7 @@
 
 static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
 static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
 static int peer_pid __read_mostly;
 static unsigned int copy_range __read_mostly;
 static unsigned int queue_total;
@@ -72,10 +72,10 @@
 		break;
 
 	case IPQ_COPY_PACKET:
-		copy_mode = mode;
+		if (range > 0xFFFF)
+			range = 0xFFFF;
 		copy_range = range;
-		if (copy_range > 0xFFFF)
-			copy_range = 0xFFFF;
+		copy_mode = mode;
 		break;
 
 	default:
@@ -101,7 +101,7 @@
 {
 	struct nf_queue_entry *entry = NULL, *i;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	list_for_each_entry(i, &queue_list, list) {
 		if ((unsigned long)i == id) {
@@ -115,7 +115,7 @@
 		queue_total--;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return entry;
 }
 
@@ -136,9 +136,9 @@
 static void
 ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 {
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	__ipq_flush(cmpfn, data);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 }
 
 static struct sk_buff *
@@ -152,9 +152,7 @@
 	struct nlmsghdr *nlh;
 	struct timeval tv;
 
-	read_lock_bh(&queue_lock);
-
-	switch (copy_mode) {
+	switch (ACCESS_ONCE(copy_mode)) {
 	case IPQ_COPY_META:
 	case IPQ_COPY_NONE:
 		size = NLMSG_SPACE(sizeof(*pmsg));
@@ -162,26 +160,21 @@
 
 	case IPQ_COPY_PACKET:
 		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
-		    (*errp = skb_checksum_help(entry->skb))) {
-			read_unlock_bh(&queue_lock);
+		    (*errp = skb_checksum_help(entry->skb)))
 			return NULL;
-		}
-		if (copy_range == 0 || copy_range > entry->skb->len)
+
+		data_len = ACCESS_ONCE(copy_range);
+		if (data_len == 0 || data_len > entry->skb->len)
 			data_len = entry->skb->len;
-		else
-			data_len = copy_range;
 
 		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
 		break;
 
 	default:
 		*errp = -EINVAL;
-		read_unlock_bh(&queue_lock);
 		return NULL;
 	}
 
-	read_unlock_bh(&queue_lock);
-
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb)
 		goto nlmsg_failure;
@@ -242,7 +235,7 @@
 	if (nskb == NULL)
 		return status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (!peer_pid)
 		goto err_out_free_nskb;
@@ -266,14 +259,14 @@
 
 	__ipq_enqueue_entry(entry);
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 
 err_out_free_nskb:
 	kfree_skb(nskb);
 
 err_out_unlock:
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -342,9 +335,9 @@
 {
 	int status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	status = __ipq_set_mode(mode, range);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -440,11 +433,11 @@
 	if (security_netlink_recv(skb, CAP_NET_ADMIN))
 		RCV_SKB_FAIL(-EPERM);
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (peer_pid) {
 		if (peer_pid != pid) {
-			write_unlock_bh(&queue_lock);
+			spin_unlock_bh(&queue_lock);
 			RCV_SKB_FAIL(-EBUSY);
 		}
 	} else {
@@ -452,7 +445,7 @@
 		peer_pid = pid;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 
 	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
 				  nlmsglen - NLMSG_LENGTH(0));
@@ -497,10 +490,10 @@
 	struct netlink_notify *n = ptr;
 
 	if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
-		write_lock_bh(&queue_lock);
+		spin_lock_bh(&queue_lock);
 		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
 			__ipq_reset();
-		write_unlock_bh(&queue_lock);
+		spin_unlock_bh(&queue_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -527,7 +520,7 @@
 #ifdef CONFIG_PROC_FS
 static int ip_queue_show(struct seq_file *m, void *v)
 {
-	read_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	seq_printf(m,
 		      "Peer PID          : %d\n"
@@ -545,7 +538,7 @@
 		      queue_dropped,
 		      queue_user_dropped);
 
-	read_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return 0;
 }
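
The reordering in __ipq_set_mode() (clamp and store copy_range before flipping copy_mode) together with the ACCESS_ONCE() reads is what lets ipq_build_packet_message() run without queue_lock. A stricter userspace rendering of the same publish order, using C11 release/acquire to make the intent explicit (variable names mirror the patch, but this is only a sketch):

#include <stdatomic.h>

enum { COPY_NONE, COPY_META, COPY_PACKET };

static _Atomic int copy_mode = COPY_NONE;
static _Atomic unsigned int copy_range;

static void set_packet_mode(unsigned int range)
{
	if (range > 0xFFFF)
		range = 0xFFFF;			/* clamp before publishing */
	atomic_store_explicit(&copy_range, range, memory_order_relaxed);
	atomic_store_explicit(&copy_mode, COPY_PACKET, memory_order_release);
}

/* lockless reader: a sane range is visible whenever packet mode is */
static unsigned int reader_data_len(unsigned int skb_len)
{
	unsigned int r;

	if (atomic_load_explicit(&copy_mode, memory_order_acquire) != COPY_PACKET)
		return 0;
	r = atomic_load_explicit(&copy_range, memory_order_relaxed);
	return (r == 0 || r > skb_len) ? skb_len : r;
}
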
 
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 4b6c5ca..b38c118 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -928,7 +928,7 @@
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc_node(countersize, numa_node_id());
+	counters = vmalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1352,7 +1352,7 @@
 	if (len != size + num_counters * sizeof(struct xt_counters))
 		return -EINVAL;
 
-	paddc = vmalloc_node(len - size, numa_node_id());
+	paddc = vmalloc(len - size);
 	if (!paddc)
 		return -ENOMEM;
 
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index f91c94b..64d0875 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -53,12 +53,13 @@
 #endif
 	enum clusterip_hashmode hash_mode;	/* which hashing mode */
 	u_int32_t hash_initval;			/* hash initialization */
+	struct rcu_head rcu;
 };
 
 static LIST_HEAD(clusterip_configs);
 
 /* clusterip_lock protects the clusterip_configs list */
-static DEFINE_RWLOCK(clusterip_lock);
+static DEFINE_SPINLOCK(clusterip_lock);
 
 #ifdef CONFIG_PROC_FS
 static const struct file_operations clusterip_proc_fops;
@@ -71,11 +72,17 @@
 	atomic_inc(&c->refcount);
 }
 
+
+static void clusterip_config_rcu_free(struct rcu_head *head)
+{
+	kfree(container_of(head, struct clusterip_config, rcu));
+}
+
 static inline void
 clusterip_config_put(struct clusterip_config *c)
 {
 	if (atomic_dec_and_test(&c->refcount))
-		kfree(c);
+		call_rcu_bh(&c->rcu, clusterip_config_rcu_free);
 }
 
 /* decrease the count of entries using/referencing this config.  If last
@@ -84,10 +91,11 @@
 static inline void
 clusterip_config_entry_put(struct clusterip_config *c)
 {
-	write_lock_bh(&clusterip_lock);
-	if (atomic_dec_and_test(&c->entries)) {
-		list_del(&c->list);
-		write_unlock_bh(&clusterip_lock);
+	local_bh_disable();
+	if (atomic_dec_and_lock(&c->entries, &clusterip_lock)) {
+		list_del_rcu(&c->list);
+		spin_unlock(&clusterip_lock);
+		local_bh_enable();
 
 		dev_mc_del(c->dev, c->clustermac);
 		dev_put(c->dev);
@@ -100,7 +108,7 @@
 #endif
 		return;
 	}
-	write_unlock_bh(&clusterip_lock);
+	local_bh_enable();
 }
 
 static struct clusterip_config *
@@ -108,7 +116,7 @@
 {
 	struct clusterip_config *c;
 
-	list_for_each_entry(c, &clusterip_configs, list) {
+	list_for_each_entry_rcu(c, &clusterip_configs, list) {
 		if (c->clusterip == clusterip)
 			return c;
 	}
@@ -121,16 +129,15 @@
 {
 	struct clusterip_config *c;
 
-	read_lock_bh(&clusterip_lock);
+	rcu_read_lock_bh();
 	c = __clusterip_config_find(clusterip);
-	if (!c) {
-		read_unlock_bh(&clusterip_lock);
-		return NULL;
+	if (c) {
+		if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+			c = NULL;
+		else if (entry)
+			atomic_inc(&c->entries);
 	}
-	atomic_inc(&c->refcount);
-	if (entry)
-		atomic_inc(&c->entries);
-	read_unlock_bh(&clusterip_lock);
+	rcu_read_unlock_bh();
 
 	return c;
 }
@@ -181,9 +188,9 @@
 	}
 #endif
 
-	write_lock_bh(&clusterip_lock);
-	list_add(&c->list, &clusterip_configs);
-	write_unlock_bh(&clusterip_lock);
+	spin_lock_bh(&clusterip_lock);
+	list_add_rcu(&c->list, &clusterip_configs);
+	spin_unlock_bh(&clusterip_lock);
 
 	return c;
 }
@@ -733,6 +740,9 @@
 #endif
 	nf_unregister_hook(&cip_arp_ops);
 	xt_unregister_target(&clusterip_tg_reg);
+
+	/* Wait for completion of pending call_rcu_bh() callbacks (clusterip_config_rcu_free) */
+	rcu_barrier_bh();
 }
 
 module_init(clusterip_tg_init);
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 5234f4f..915fc17 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/skbuff.h>
+#include <linux/if_arp.h>
 #include <linux/ip.h>
 #include <net/icmp.h>
 #include <net/udp.h>
@@ -363,6 +364,42 @@
 	/* maxlen = 230+   91  + 230 + 252 = 803 */
 }
 
+static void dump_mac_header(const struct nf_loginfo *info,
+			    const struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+	unsigned int logflags = 0;
+
+	if (info->type == NF_LOG_TYPE_LOG)
+		logflags = info->u.log.logflags;
+
+	if (!(logflags & IPT_LOG_MACDECODE))
+		goto fallback;
+
+	switch (dev->type) {
+	case ARPHRD_ETHER:
+		printk("MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+		       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+		       ntohs(eth_hdr(skb)->h_proto));
+		return;
+	default:
+		break;
+	}
+
+fallback:
+	printk("MAC=");
+	if (dev->hard_header_len &&
+	    skb->mac_header != skb->network_header) {
+		const unsigned char *p = skb_mac_header(skb);
+		unsigned int i;
+
+		printk("%02x", *p++);
+		for (i = 1; i < dev->hard_header_len; i++, p++)
+			printk(":%02x", *p);
+	}
+	printk(" ");
+}
+
 static struct nf_loginfo default_loginfo = {
 	.type	= NF_LOG_TYPE_LOG,
 	.u = {
@@ -404,20 +441,9 @@
 	}
 #endif
 
-	if (in && !out) {
-		/* MAC logging for input chain only. */
-		printk("MAC=");
-		if (skb->dev && skb->dev->hard_header_len &&
-		    skb->mac_header != skb->network_header) {
-			int i;
-			const unsigned char *p = skb_mac_header(skb);
-			for (i = 0; i < skb->dev->hard_header_len; i++,p++)
-				printk("%02x%c", *p,
-				       i==skb->dev->hard_header_len - 1
-				       ? ' ':':');
-		} else
-			printk(" ");
-	}
+	/* MAC logging for input path only. */
+	if (in && !out)
+		dump_mac_header(loginfo, skb);
 
 	dump_packet(loginfo, skb, 0);
 	printk("\n");
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index f43867d..6cdb298 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -48,7 +48,8 @@
 
 	NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
 		     par->hooknum == NF_INET_POST_ROUTING ||
-		     par->hooknum == NF_INET_LOCAL_OUT);
+		     par->hooknum == NF_INET_LOCAL_OUT ||
+		     par->hooknum == NF_INET_LOCAL_IN);
 	ct = nf_ct_get(skb, &ctinfo);
 
 	netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
@@ -77,7 +78,8 @@
 	.table		= "nat",
 	.hooks		= (1 << NF_INET_PRE_ROUTING) |
 			  (1 << NF_INET_POST_ROUTING) |
-			  (1 << NF_INET_LOCAL_OUT),
+			  (1 << NF_INET_LOCAL_OUT) |
+			  (1 << NF_INET_LOCAL_IN),
 	.checkentry 	= netmap_tg_check,
 	.me 		= THIS_MODULE
 };
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index cb763ae..eab8de3 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -66,6 +66,11 @@
 					  const struct net_device *out,
 					  int (*okfn)(struct sk_buff *))
 {
+	struct inet_sock *inet = inet_sk(skb->sk);
+
+	if (inet && inet->nodefrag)
+		return NF_ACCEPT;
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
 	/* Previously seen (loopback)?  Ignore.  Do this before
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 4f8bddb..c7719b2 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -742,7 +742,7 @@
 	spin_unlock_bh(&nf_nat_lock);
 
 	/* Initialize fake conntrack so that NAT will skip it */
-	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
+	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
 
 	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
 
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 98ed782..ebbd319 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -28,7 +28,8 @@
 
 #define NAT_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
 			 (1 << NF_INET_POST_ROUTING) | \
-			 (1 << NF_INET_LOCAL_OUT))
+			 (1 << NF_INET_LOCAL_OUT) | \
+			 (1 << NF_INET_LOCAL_IN))
 
 static const struct xt_table nat_table = {
 	.name		= "nat",
@@ -45,7 +46,8 @@
 	enum ip_conntrack_info ctinfo;
 	const struct nf_nat_multi_range_compat *mr = par->targinfo;
 
-	NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING);
+	NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING ||
+		     par->hooknum == NF_INET_LOCAL_IN);
 
 	ct = nf_ct_get(skb, &ctinfo);
 
@@ -99,7 +101,7 @@
 	return 0;
 }
 
-unsigned int
+static unsigned int
 alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
 {
 	/* Force range to this IP; let proto decide mapping for
@@ -141,7 +143,7 @@
 	.target		= ipt_snat_target,
 	.targetsize	= sizeof(struct nf_nat_multi_range_compat),
 	.table		= "nat",
-	.hooks		= 1 << NF_INET_POST_ROUTING,
+	.hooks		= (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN),
 	.checkentry	= ipt_snat_checkentry,
 	.family		= AF_INET,
 };
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index beb2581..95481fe 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -98,7 +98,7 @@
 		return NF_ACCEPT;
 
 	/* Don't try to NAT if this packet is not conntracked */
-	if (ct == &nf_conntrack_untracked)
+	if (nf_ct_is_untracked(ct))
 		return NF_ACCEPT;
 
 	nat = nfct_nat(ct);
@@ -131,13 +131,7 @@
 		if (!nf_nat_initialized(ct, maniptype)) {
 			unsigned int ret;
 
-			if (hooknum == NF_INET_LOCAL_IN)
-				/* LOCAL_IN hook doesn't have a chain!  */
-				ret = alloc_null_binding(ct, hooknum);
-			else
-				ret = nf_nat_rule_find(skb, hooknum, in, out,
-						       ct);
-
+			ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
 			if (ret != NF_ACCEPT)
 				return ret;
 		} else
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 3dc9914..4ae1f20 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -252,6 +252,7 @@
 	SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
 	SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
 	SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
+	SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
 	SNMP_MIB_SENTINEL
 };
 
@@ -342,10 +343,12 @@
 		   IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2,
 		   sysctl_ip_default_ttl);
 
+	BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
 	for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
-		seq_printf(seq, " %lu",
-			   snmp_fold_field((void __percpu **)net->mib.ip_statistics,
-					   snmp4_ipstats_list[i].entry));
+		seq_printf(seq, " %llu",
+			   snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+					     snmp4_ipstats_list[i].entry,
+					     offsetof(struct ipstats_mib, syncp)));
 
 	icmp_put(seq);	/* RFC 2011 compatibility */
 	icmpmsg_put(seq);
@@ -431,9 +434,10 @@
 
 	seq_puts(seq, "\nIpExt:");
 	for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
-		seq_printf(seq, " %lu",
-			   snmp_fold_field((void __percpu **)net->mib.ip_statistics,
-					   snmp4_ipextstats_list[i].entry));
+		seq_printf(seq, " %llu",
+			   snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+					     snmp4_ipextstats_list[i].entry,
+					     offsetof(struct ipstats_mib, syncp)));
 
 	seq_putc(seq, '\n');
 	return 0;
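
The %lu to %llu switch above exists because the IP MIBs have become 64-bit per-cpu counters that are folded at read time (snmp_fold_field64() walks every cpu and uses the syncp seqcount so 32-bit readers never see a torn value). A minimal userspace analogue of the fold, with C11 64-bit atomics standing in for the kernel's u64_stats_sync machinery (names are hypothetical):

#include <stdint.h>
#include <stdatomic.h>

#define NR_CPUS 4

/* one 64-bit counter per CPU; each writer only touches its own slot */
static _Atomic uint64_t in_octets[NR_CPUS];

static void count_packet(int cpu, uint64_t len)
{
	atomic_fetch_add_explicit(&in_octets[cpu], len, memory_order_relaxed);
}

/* the /proc read side folds all per-CPU slots into one u64 */
static uint64_t fold_in_octets(void)
{
	uint64_t sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += atomic_load_explicit(&in_octets[cpu], memory_order_relaxed);
	return sum;
}
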
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 2c7a163..009a7b2 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -314,7 +314,7 @@
 }
 
 static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
-			struct rtable *rt,
+			struct rtable **rtp,
 			unsigned int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
@@ -323,25 +323,27 @@
 	struct sk_buff *skb;
 	unsigned int iphlen;
 	int err;
+	struct rtable *rt = *rtp;
 
-	if (length > rt->u.dst.dev->mtu) {
+	if (length > rt->dst.dev->mtu) {
 		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
-			       rt->u.dst.dev->mtu);
+			       rt->dst.dev->mtu);
 		return -EMSGSIZE;
 	}
 	if (flags&MSG_PROBE)
 		goto out;
 
 	skb = sock_alloc_send_skb(sk,
-				  length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+				  length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
 				  flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto error;
-	skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
+	skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
-	skb_dst_set(skb, dst_clone(&rt->u.dst));
+	skb_dst_set(skb, &rt->dst);
+	*rtp = NULL;
 
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
@@ -373,7 +375,7 @@
 		iph->check   = 0;
 		iph->tot_len = htons(length);
 		if (!iph->id)
-			ip_select_ident(iph, &rt->u.dst, NULL);
+			ip_select_ident(iph, &rt->dst, NULL);
 
 		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 	}
@@ -382,7 +384,7 @@
 			skb_transport_header(skb))->type);
 
 	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
-		      rt->u.dst.dev, dst_output);
+		      rt->dst.dev, dst_output);
 	if (err > 0)
 		err = net_xmit_errno(err);
 	if (err)
@@ -576,7 +578,7 @@
 
 	if (inet->hdrincl)
 		err = raw_send_hdrinc(sk, msg->msg_iov, len,
-					rt, msg->msg_flags);
+					&rt, msg->msg_flags);
 
 	 else {
 		if (!ipc.addr)
@@ -604,7 +606,7 @@
 	return len;
 
 do_confirm:
-	dst_confirm(&rt->u.dst);
+	dst_confirm(&rt->dst);
 	if (!(msg->msg_flags & MSG_PROBE) || len)
 		goto back_from_confirm;
 	err = 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 560acc6..03430de 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -253,8 +253,7 @@
 static unsigned int		rt_hash_log  __read_mostly;
 
 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
-#define RT_CACHE_STAT_INC(field) \
-	(__raw_get_cpu_var(rt_cache_stat).field++)
+#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
 
 static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
 				   int genid)
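
The RT_CACHE_STAT_INC() rewrite swaps an explicit per-cpu address lookup plus increment for __this_cpu_inc(), which the architecture can usually turn into a single per-cpu add. The two spellings side by side, using the in_hit counter that appears later in this patch; this is an illustration, not a buildable excerpt of route.c:

#include <linux/percpu.h>

struct rt_cache_stat {
	unsigned int in_hit;
	/* ... */
};

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);

/* Old spelling: materialise this CPU's copy, then bump the field. */
#define RT_CACHE_STAT_INC_OLD(field) \
	(__raw_get_cpu_var(rt_cache_stat).field++)

/* New spelling: one per-cpu increment, e.g. RT_CACHE_STAT_INC(in_hit);
 * on x86 this can compile down to a single segment-relative add. */
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
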
@@ -287,10 +286,10 @@
 		rcu_read_lock_bh();
 		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
 		while (r) {
-			if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
+			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
 			    r->rt_genid == st->genid)
 				return r;
-			r = rcu_dereference_bh(r->u.dst.rt_next);
+			r = rcu_dereference_bh(r->dst.rt_next);
 		}
 		rcu_read_unlock_bh();
 	}
@@ -302,7 +301,7 @@
 {
 	struct rt_cache_iter_state *st = seq->private;
 
-	r = r->u.dst.rt_next;
+	r = r->dst.rt_next;
 	while (!r) {
 		rcu_read_unlock_bh();
 		do {
@@ -320,7 +319,7 @@
 {
 	struct rt_cache_iter_state *st = seq->private;
 	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
-		if (dev_net(r->u.dst.dev) != seq_file_net(seq))
+		if (dev_net(r->dst.dev) != seq_file_net(seq))
 			continue;
 		if (r->rt_genid == st->genid)
 			break;
@@ -378,19 +377,19 @@
 
 		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
 			      "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
-			r->u.dst.dev ? r->u.dst.dev->name : "*",
+			r->dst.dev ? r->dst.dev->name : "*",
 			(__force u32)r->rt_dst,
 			(__force u32)r->rt_gateway,
-			r->rt_flags, atomic_read(&r->u.dst.__refcnt),
-			r->u.dst.__use, 0, (__force u32)r->rt_src,
-			(dst_metric(&r->u.dst, RTAX_ADVMSS) ?
-			     (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
-			dst_metric(&r->u.dst, RTAX_WINDOW),
-			(int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
-			      dst_metric(&r->u.dst, RTAX_RTTVAR)),
+			r->rt_flags, atomic_read(&r->dst.__refcnt),
+			r->dst.__use, 0, (__force u32)r->rt_src,
+			(dst_metric(&r->dst, RTAX_ADVMSS) ?
+			     (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0),
+			dst_metric(&r->dst, RTAX_WINDOW),
+			(int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
+			      dst_metric(&r->dst, RTAX_RTTVAR)),
 			r->fl.fl4_tos,
-			r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
-			r->u.dst.hh ? (r->u.dst.hh->hh_output ==
+			r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
+			r->dst.hh ? (r->dst.hh->hh_output ==
 				       dev_queue_xmit) : 0,
 			r->rt_spec_dst, &len);
 
@@ -609,13 +608,13 @@
 
 static inline void rt_free(struct rtable *rt)
 {
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
 static inline void rt_drop(struct rtable *rt)
 {
 	ip_rt_put(rt);
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
 static inline int rt_fast_clean(struct rtable *rth)
@@ -623,13 +622,13 @@
 	/* Kill broadcast/multicast entries very aggressively, if they
 	   collide in hash table with more useful entries */
 	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-		rth->fl.iif && rth->u.dst.rt_next;
+		rth->fl.iif && rth->dst.rt_next;
 }
 
 static inline int rt_valuable(struct rtable *rth)
 {
 	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
-		rth->u.dst.expires;
+		rth->dst.expires;
 }
 
 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -637,15 +636,15 @@
 	unsigned long age;
 	int ret = 0;
 
-	if (atomic_read(&rth->u.dst.__refcnt))
+	if (atomic_read(&rth->dst.__refcnt))
 		goto out;
 
 	ret = 1;
-	if (rth->u.dst.expires &&
-	    time_after_eq(jiffies, rth->u.dst.expires))
+	if (rth->dst.expires &&
+	    time_after_eq(jiffies, rth->dst.expires))
 		goto out;
 
-	age = jiffies - rth->u.dst.lastuse;
+	age = jiffies - rth->dst.lastuse;
 	ret = 0;
 	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
 	    (age <= tmo2 && rt_valuable(rth)))
@@ -661,7 +660,7 @@
  */
 static inline u32 rt_score(struct rtable *rt)
 {
-	u32 score = jiffies - rt->u.dst.lastuse;
+	u32 score = jiffies - rt->dst.lastuse;
 
 	score = ~score & ~(3<<30);
 
@@ -701,12 +700,12 @@
 
 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
 {
-	return net_eq(dev_net(rt1->u.dst.dev), dev_net(rt2->u.dst.dev));
+	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
 }
 
 static inline int rt_is_expired(struct rtable *rth)
 {
-	return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
+	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
 }
 
 /*
@@ -735,7 +734,7 @@
 		rth = rt_hash_table[i].chain;
 
 		/* defer releasing the head of the list after spin_unlock */
-		for (tail = rth; tail; tail = tail->u.dst.rt_next)
+		for (tail = rth; tail; tail = tail->dst.rt_next)
 			if (!rt_is_expired(tail))
 				break;
 		if (rth != tail)
@@ -744,9 +743,9 @@
 		/* call rt_free on entries after the tail requiring flush */
 		prev = &rt_hash_table[i].chain;
 		for (p = *prev; p; p = next) {
-			next = p->u.dst.rt_next;
+			next = p->dst.rt_next;
 			if (!rt_is_expired(p)) {
-				prev = &p->u.dst.rt_next;
+				prev = &p->dst.rt_next;
 			} else {
 				*prev = next;
 				rt_free(p);
@@ -761,7 +760,7 @@
 		spin_unlock_bh(rt_hash_lock_addr(i));
 
 		for (; rth != tail; rth = next) {
-			next = rth->u.dst.rt_next;
+			next = rth->dst.rt_next;
 			rt_free(rth);
 		}
 	}
@@ -792,7 +791,7 @@
 	while (aux != rth) {
 		if (compare_hash_inputs(&aux->fl, &rth->fl))
 			return 0;
-		aux = aux->u.dst.rt_next;
+		aux = aux->dst.rt_next;
 	}
 	return ONE;
 }
@@ -832,18 +831,18 @@
 		length = 0;
 		spin_lock_bh(rt_hash_lock_addr(i));
 		while ((rth = *rthp) != NULL) {
-			prefetch(rth->u.dst.rt_next);
+			prefetch(rth->dst.rt_next);
 			if (rt_is_expired(rth)) {
-				*rthp = rth->u.dst.rt_next;
+				*rthp = rth->dst.rt_next;
 				rt_free(rth);
 				continue;
 			}
-			if (rth->u.dst.expires) {
+			if (rth->dst.expires) {
 				/* Entry is expired even if it is in use */
-				if (time_before_eq(jiffies, rth->u.dst.expires)) {
+				if (time_before_eq(jiffies, rth->dst.expires)) {
 nofree:
 					tmo >>= 1;
-					rthp = &rth->u.dst.rt_next;
+					rthp = &rth->dst.rt_next;
 					/*
 					 * We only count entries on
 					 * a chain with equal hash inputs once
@@ -859,7 +858,7 @@
 				goto nofree;
 
 			/* Cleanup aged off entries. */
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			rt_free(rth);
 		}
 		spin_unlock_bh(rt_hash_lock_addr(i));
@@ -1000,10 +999,10 @@
 				if (!rt_is_expired(rth) &&
 					!rt_may_expire(rth, tmo, expire)) {
 					tmo >>= 1;
-					rthp = &rth->u.dst.rt_next;
+					rthp = &rth->dst.rt_next;
 					continue;
 				}
-				*rthp = rth->u.dst.rt_next;
+				*rthp = rth->dst.rt_next;
 				rt_free(rth);
 				goal--;
 			}
@@ -1069,7 +1068,7 @@
 
 	while (rth) {
 		length += has_noalias(head, rth);
-		rth = rth->u.dst.rt_next;
+		rth = rth->dst.rt_next;
 	}
 	return length >> FRACT_BITS;
 }
@@ -1091,7 +1090,7 @@
 	candp = NULL;
 	now = jiffies;
 
-	if (!rt_caching(dev_net(rt->u.dst.dev))) {
+	if (!rt_caching(dev_net(rt->dst.dev))) {
 		/*
 		 * If we're not caching, just tell the caller we
 		 * were successful and don't touch the route.  The
@@ -1109,7 +1108,7 @@
 		 */
 
 		if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
-			int err = arp_bind_neighbour(&rt->u.dst);
+			int err = arp_bind_neighbour(&rt->dst);
 			if (err) {
 				if (net_ratelimit())
 					printk(KERN_WARNING
@@ -1128,19 +1127,19 @@
 	spin_lock_bh(rt_hash_lock_addr(hash));
 	while ((rth = *rthp) != NULL) {
 		if (rt_is_expired(rth)) {
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			rt_free(rth);
 			continue;
 		}
 		if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
 			/* Put it first */
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			/*
 			 * Since lookup is lockfree, the deletion
 			 * must be visible to another weakly ordered CPU before
 			 * the insertion at the start of the hash chain.
 			 */
-			rcu_assign_pointer(rth->u.dst.rt_next,
+			rcu_assign_pointer(rth->dst.rt_next,
 					   rt_hash_table[hash].chain);
 			/*
 			 * Since lookup is lockfree, the update writes
@@ -1148,18 +1147,18 @@
 			 */
 			rcu_assign_pointer(rt_hash_table[hash].chain, rth);
 
-			dst_use(&rth->u.dst, now);
+			dst_use(&rth->dst, now);
 			spin_unlock_bh(rt_hash_lock_addr(hash));
 
 			rt_drop(rt);
 			if (rp)
 				*rp = rth;
 			else
-				skb_dst_set(skb, &rth->u.dst);
+				skb_dst_set(skb, &rth->dst);
 			return 0;
 		}
 
-		if (!atomic_read(&rth->u.dst.__refcnt)) {
+		if (!atomic_read(&rth->dst.__refcnt)) {
 			u32 score = rt_score(rth);
 
 			if (score <= min_score) {
@@ -1171,7 +1170,7 @@
 
 		chain_length++;
 
-		rthp = &rth->u.dst.rt_next;
+		rthp = &rth->dst.rt_next;
 	}
 
 	if (cand) {
@@ -1182,17 +1181,17 @@
 		 * only 2 entries per bucket. We will see.
 		 */
 		if (chain_length > ip_rt_gc_elasticity) {
-			*candp = cand->u.dst.rt_next;
+			*candp = cand->dst.rt_next;
 			rt_free(cand);
 		}
 	} else {
 		if (chain_length > rt_chain_length_max &&
 		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
-			struct net *net = dev_net(rt->u.dst.dev);
+			struct net *net = dev_net(rt->dst.dev);
 			int num = ++net->ipv4.current_rt_cache_rebuild_count;
 			if (!rt_caching(net)) {
 				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
-					rt->u.dst.dev->name, num);
+					rt->dst.dev->name, num);
 			}
 			rt_emergency_hash_rebuild(net);
 			spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1207,7 +1206,7 @@
 	   route or unicast forwarding path.
 	 */
 	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
-		int err = arp_bind_neighbour(&rt->u.dst);
+		int err = arp_bind_neighbour(&rt->dst);
 		if (err) {
 			spin_unlock_bh(rt_hash_lock_addr(hash));
 
@@ -1238,14 +1237,14 @@
 		}
 	}
 
-	rt->u.dst.rt_next = rt_hash_table[hash].chain;
+	rt->dst.rt_next = rt_hash_table[hash].chain;
 
 #if RT_CACHE_DEBUG >= 2
-	if (rt->u.dst.rt_next) {
+	if (rt->dst.rt_next) {
 		struct rtable *trt;
 		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
 		       hash, &rt->rt_dst);
-		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
+		for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
 			printk(" . %pI4", &trt->rt_dst);
 		printk("\n");
 	}
@@ -1263,7 +1262,7 @@
 	if (rp)
 		*rp = rt;
 	else
-		skb_dst_set(skb, &rt->u.dst);
+		skb_dst_set(skb, &rt->dst);
 	return 0;
 }
 
@@ -1335,20 +1334,21 @@
 	ip_rt_put(rt);
 	while ((aux = *rthp) != NULL) {
 		if (aux == rt || rt_is_expired(aux)) {
-			*rthp = aux->u.dst.rt_next;
+			*rthp = aux->dst.rt_next;
 			rt_free(aux);
 			continue;
 		}
-		rthp = &aux->u.dst.rt_next;
+		rthp = &aux->dst.rt_next;
 	}
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
+/* called in rcu_read_lock() section */
 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 		    __be32 saddr, struct net_device *dev)
 {
 	int i, k;
-	struct in_device *in_dev = in_dev_get(dev);
+	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	struct rtable *rth, **rthp;
 	__be32  skeys[2] = { saddr, 0 };
 	int  ikeys[2] = { dev->ifindex, 0 };
@@ -1384,7 +1384,6 @@
 
 			rthp=&rt_hash_table[hash].chain;
 
-			rcu_read_lock();
 			while ((rth = rcu_dereference(*rthp)) != NULL) {
 				struct rtable *rt;
 
@@ -1393,44 +1392,42 @@
 				    rth->fl.oif != ikeys[k] ||
 				    rth->fl.iif != 0 ||
 				    rt_is_expired(rth) ||
-				    !net_eq(dev_net(rth->u.dst.dev), net)) {
-					rthp = &rth->u.dst.rt_next;
+				    !net_eq(dev_net(rth->dst.dev), net)) {
+					rthp = &rth->dst.rt_next;
 					continue;
 				}
 
 				if (rth->rt_dst != daddr ||
 				    rth->rt_src != saddr ||
-				    rth->u.dst.error ||
+				    rth->dst.error ||
 				    rth->rt_gateway != old_gw ||
-				    rth->u.dst.dev != dev)
+				    rth->dst.dev != dev)
 					break;
 
-				dst_hold(&rth->u.dst);
-				rcu_read_unlock();
+				dst_hold(&rth->dst);
 
 				rt = dst_alloc(&ipv4_dst_ops);
 				if (rt == NULL) {
 					ip_rt_put(rth);
-					in_dev_put(in_dev);
 					return;
 				}
 
 				/* Copy all the information. */
 				*rt = *rth;
-				rt->u.dst.__use		= 1;
-				atomic_set(&rt->u.dst.__refcnt, 1);
-				rt->u.dst.child		= NULL;
-				if (rt->u.dst.dev)
-					dev_hold(rt->u.dst.dev);
+				rt->dst.__use		= 1;
+				atomic_set(&rt->dst.__refcnt, 1);
+				rt->dst.child		= NULL;
+				if (rt->dst.dev)
+					dev_hold(rt->dst.dev);
 				if (rt->idev)
 					in_dev_hold(rt->idev);
-				rt->u.dst.obsolete	= -1;
-				rt->u.dst.lastuse	= jiffies;
-				rt->u.dst.path		= &rt->u.dst;
-				rt->u.dst.neighbour	= NULL;
-				rt->u.dst.hh		= NULL;
+				rt->dst.obsolete	= -1;
+				rt->dst.lastuse	= jiffies;
+				rt->dst.path		= &rt->dst;
+				rt->dst.neighbour	= NULL;
+				rt->dst.hh		= NULL;
 #ifdef CONFIG_XFRM
-				rt->u.dst.xfrm		= NULL;
+				rt->dst.xfrm		= NULL;
 #endif
 				rt->rt_genid		= rt_genid(net);
 				rt->rt_flags		|= RTCF_REDIRECTED;
@@ -1439,23 +1436,23 @@
 				rt->rt_gateway		= new_gw;
 
 				/* Redirect received -> path was valid */
-				dst_confirm(&rth->u.dst);
+				dst_confirm(&rth->dst);
 
 				if (rt->peer)
 					atomic_inc(&rt->peer->refcnt);
 
-				if (arp_bind_neighbour(&rt->u.dst) ||
-				    !(rt->u.dst.neighbour->nud_state &
+				if (arp_bind_neighbour(&rt->dst) ||
+				    !(rt->dst.neighbour->nud_state &
 					    NUD_VALID)) {
-					if (rt->u.dst.neighbour)
-						neigh_event_send(rt->u.dst.neighbour, NULL);
+					if (rt->dst.neighbour)
+						neigh_event_send(rt->dst.neighbour, NULL);
 					ip_rt_put(rth);
 					rt_drop(rt);
 					goto do_next;
 				}
 
-				netevent.old = &rth->u.dst;
-				netevent.new = &rt->u.dst;
+				netevent.old = &rth->dst;
+				netevent.new = &rt->dst;
 				call_netevent_notifiers(NETEVENT_REDIRECT,
 							&netevent);
 
@@ -1464,12 +1461,10 @@
 					ip_rt_put(rt);
 				goto do_next;
 			}
-			rcu_read_unlock();
 		do_next:
 			;
 		}
 	}
-	in_dev_put(in_dev);
 	return;
 
 reject_redirect:
@@ -1480,7 +1475,7 @@
 		       &old_gw, dev->name, &new_gw,
 		       &saddr, &daddr);
 #endif
-	in_dev_put(in_dev);
+	;
 }
 
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
@@ -1493,8 +1488,8 @@
 			ip_rt_put(rt);
 			ret = NULL;
 		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
-			   (rt->u.dst.expires &&
-			    time_after_eq(jiffies, rt->u.dst.expires))) {
+			   (rt->dst.expires &&
+			    time_after_eq(jiffies, rt->dst.expires))) {
 			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
 						rt->fl.oif,
 						rt_genid(dev_net(dst->dev)));
@@ -1532,7 +1527,7 @@
 	int log_martians;
 
 	rcu_read_lock();
-	in_dev = __in_dev_get_rcu(rt->u.dst.dev);
+	in_dev = __in_dev_get_rcu(rt->dst.dev);
 	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
 		rcu_read_unlock();
 		return;
@@ -1543,30 +1538,30 @@
 	/* No redirected packets during ip_rt_redirect_silence;
 	 * reset the algorithm.
 	 */
-	if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
-		rt->u.dst.rate_tokens = 0;
+	if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
+		rt->dst.rate_tokens = 0;
 
 	/* Too many ignored redirects; do not send anything
-	 * set u.dst.rate_last to the last seen redirected packet.
+	 * set dst.rate_last to the last seen redirected packet.
 	 */
-	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
-		rt->u.dst.rate_last = jiffies;
+	if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
+		rt->dst.rate_last = jiffies;
 		return;
 	}
 
 	/* Check for load limit; set rate_last to the latest sent
 	 * redirect.
 	 */
-	if (rt->u.dst.rate_tokens == 0 ||
+	if (rt->dst.rate_tokens == 0 ||
 	    time_after(jiffies,
-		       (rt->u.dst.rate_last +
-			(ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
+		       (rt->dst.rate_last +
+			(ip_rt_redirect_load << rt->dst.rate_tokens)))) {
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
-		rt->u.dst.rate_last = jiffies;
-		++rt->u.dst.rate_tokens;
+		rt->dst.rate_last = jiffies;
+		++rt->dst.rate_tokens;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
 		if (log_martians &&
-		    rt->u.dst.rate_tokens == ip_rt_redirect_number &&
+		    rt->dst.rate_tokens == ip_rt_redirect_number &&
 		    net_ratelimit())
 			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
 				&rt->rt_src, rt->rt_iif,
@@ -1581,7 +1576,7 @@
 	unsigned long now;
 	int code;
 
-	switch (rt->u.dst.error) {
+	switch (rt->dst.error) {
 		case EINVAL:
 		default:
 			goto out;
@@ -1590,7 +1585,7 @@
 			break;
 		case ENETUNREACH:
 			code = ICMP_NET_UNREACH;
-			IP_INC_STATS_BH(dev_net(rt->u.dst.dev),
+			IP_INC_STATS_BH(dev_net(rt->dst.dev),
 					IPSTATS_MIB_INNOROUTES);
 			break;
 		case EACCES:
@@ -1599,12 +1594,12 @@
 	}
 
 	now = jiffies;
-	rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
-	if (rt->u.dst.rate_tokens > ip_rt_error_burst)
-		rt->u.dst.rate_tokens = ip_rt_error_burst;
-	rt->u.dst.rate_last = now;
-	if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
-		rt->u.dst.rate_tokens -= ip_rt_error_cost;
+	rt->dst.rate_tokens += now - rt->dst.rate_last;
+	if (rt->dst.rate_tokens > ip_rt_error_burst)
+		rt->dst.rate_tokens = ip_rt_error_burst;
+	rt->dst.rate_last = now;
+	if (rt->dst.rate_tokens >= ip_rt_error_cost) {
+		rt->dst.rate_tokens -= ip_rt_error_cost;
 		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
 	}
 
@@ -1649,7 +1644,7 @@
 
 			rcu_read_lock();
 			for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-			     rth = rcu_dereference(rth->u.dst.rt_next)) {
+			     rth = rcu_dereference(rth->dst.rt_next)) {
 				unsigned short mtu = new_mtu;
 
 				if (rth->fl.fl4_dst != daddr ||
@@ -1658,8 +1653,8 @@
 				    rth->rt_src != iph->saddr ||
 				    rth->fl.oif != ikeys[k] ||
 				    rth->fl.iif != 0 ||
-				    dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
-				    !net_eq(dev_net(rth->u.dst.dev), net) ||
+				    dst_metric_locked(&rth->dst, RTAX_MTU) ||
+				    !net_eq(dev_net(rth->dst.dev), net) ||
 				    rt_is_expired(rth))
 					continue;
 
@@ -1667,22 +1662,22 @@
 
 					/* BSD 4.2 compatibility hack :-( */
 					if (mtu == 0 &&
-					    old_mtu >= dst_mtu(&rth->u.dst) &&
+					    old_mtu >= dst_mtu(&rth->dst) &&
 					    old_mtu >= 68 + (iph->ihl << 2))
 						old_mtu -= iph->ihl << 2;
 
 					mtu = guess_mtu(old_mtu);
 				}
-				if (mtu <= dst_mtu(&rth->u.dst)) {
-					if (mtu < dst_mtu(&rth->u.dst)) {
-						dst_confirm(&rth->u.dst);
+				if (mtu <= dst_mtu(&rth->dst)) {
+					if (mtu < dst_mtu(&rth->dst)) {
+						dst_confirm(&rth->dst);
 						if (mtu < ip_rt_min_pmtu) {
 							mtu = ip_rt_min_pmtu;
-							rth->u.dst.metrics[RTAX_LOCK-1] |=
+							rth->dst.metrics[RTAX_LOCK-1] |=
 								(1 << RTAX_MTU);
 						}
-						rth->u.dst.metrics[RTAX_MTU-1] = mtu;
-						dst_set_expires(&rth->u.dst,
+						rth->dst.metrics[RTAX_MTU-1] = mtu;
+						dst_set_expires(&rth->dst,
 							ip_rt_mtu_expires);
 					}
 					est_mtu = mtu;
@@ -1755,7 +1750,7 @@
 
 	rt = skb_rtable(skb);
 	if (rt)
-		dst_set_expires(&rt->u.dst, 0);
+		dst_set_expires(&rt->dst, 0);
 }
 
 static int ip_rt_bug(struct sk_buff *skb)
@@ -1783,11 +1778,11 @@
 
 	if (rt->fl.iif == 0)
 		src = rt->rt_src;
-	else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
+	else if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0) {
 		src = FIB_RES_PREFSRC(res);
 		fib_res_put(&res);
 	} else
-		src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
+		src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
 					RT_SCOPE_UNIVERSE);
 	memcpy(addr, &src, 4);
 }
@@ -1795,10 +1790,10 @@
 #ifdef CONFIG_NET_CLS_ROUTE
 static void set_class_tag(struct rtable *rt, u32 tag)
 {
-	if (!(rt->u.dst.tclassid & 0xFFFF))
-		rt->u.dst.tclassid |= tag & 0xFFFF;
-	if (!(rt->u.dst.tclassid & 0xFFFF0000))
-		rt->u.dst.tclassid |= tag & 0xFFFF0000;
+	if (!(rt->dst.tclassid & 0xFFFF))
+		rt->dst.tclassid |= tag & 0xFFFF;
+	if (!(rt->dst.tclassid & 0xFFFF0000))
+		rt->dst.tclassid |= tag & 0xFFFF0000;
 }
 #endif
 
@@ -1810,30 +1805,30 @@
 		if (FIB_RES_GW(*res) &&
 		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
 			rt->rt_gateway = FIB_RES_GW(*res);
-		memcpy(rt->u.dst.metrics, fi->fib_metrics,
-		       sizeof(rt->u.dst.metrics));
+		memcpy(rt->dst.metrics, fi->fib_metrics,
+		       sizeof(rt->dst.metrics));
 		if (fi->fib_mtu == 0) {
-			rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
-			if (dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
+			rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
+			if (dst_metric_locked(&rt->dst, RTAX_MTU) &&
 			    rt->rt_gateway != rt->rt_dst &&
-			    rt->u.dst.dev->mtu > 576)
-				rt->u.dst.metrics[RTAX_MTU-1] = 576;
+			    rt->dst.dev->mtu > 576)
+				rt->dst.metrics[RTAX_MTU-1] = 576;
 		}
 #ifdef CONFIG_NET_CLS_ROUTE
-		rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
+		rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
 #endif
 	} else
-		rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
+		rt->dst.metrics[RTAX_MTU-1]= rt->dst.dev->mtu;
 
-	if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
-		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
-	if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
-		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
-	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
+	if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
+		rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
+	if (dst_mtu(&rt->dst) > IP_MAX_MTU)
+		rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
+	if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0)
+		rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40,
 				       ip_rt_min_advmss);
-	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40)
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
+	if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40)
+		rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
 
 #ifdef CONFIG_NET_CLS_ROUTE
 #ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -1844,14 +1839,16 @@
 	rt->rt_type = res->type;
 }
 
+/* called in rcu_read_lock() section */
 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 				u8 tos, struct net_device *dev, int our)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct rtable *rth;
 	__be32 spec_dst;
-	struct in_device *in_dev = in_dev_get(dev);
+	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	u32 itag = 0;
+	int err;
 
 	/* Primary sanity checks. */
 
@@ -1866,21 +1863,23 @@
 		if (!ipv4_is_local_multicast(daddr))
 			goto e_inval;
 		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
-	} else if (fib_validate_source(saddr, 0, tos, 0,
-					dev, &spec_dst, &itag, 0) < 0)
-		goto e_inval;
-
+	} else {
+		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
+					  &itag, 0);
+		if (err < 0)
+			goto e_err;
+	}
 	rth = dst_alloc(&ipv4_dst_ops);
 	if (!rth)
 		goto e_nobufs;
 
-	rth->u.dst.output = ip_rt_bug;
-	rth->u.dst.obsolete = -1;
+	rth->dst.output = ip_rt_bug;
+	rth->dst.obsolete = -1;
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+	atomic_set(&rth->dst.__refcnt, 1);
+	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->u.dst.flags |= DST_NOPOLICY;
+		rth->dst.flags |= DST_NOPOLICY;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
@@ -1888,13 +1887,13 @@
 	rth->fl.fl4_src	= saddr;
 	rth->rt_src	= saddr;
 #ifdef CONFIG_NET_CLS_ROUTE
-	rth->u.dst.tclassid = itag;
+	rth->dst.tclassid = itag;
 #endif
 	rth->rt_iif	=
 	rth->fl.iif	= dev->ifindex;
-	rth->u.dst.dev	= init_net.loopback_dev;
-	dev_hold(rth->u.dst.dev);
-	rth->idev	= in_dev_get(rth->u.dst.dev);
+	rth->dst.dev	= init_net.loopback_dev;
+	dev_hold(rth->dst.dev);
+	rth->idev	= in_dev_get(rth->dst.dev);
 	rth->fl.oif	= 0;
 	rth->rt_gateway	= daddr;
 	rth->rt_spec_dst= spec_dst;
@@ -1902,27 +1901,25 @@
 	rth->rt_flags	= RTCF_MULTICAST;
 	rth->rt_type	= RTN_MULTICAST;
 	if (our) {
-		rth->u.dst.input= ip_local_deliver;
+		rth->dst.input= ip_local_deliver;
 		rth->rt_flags |= RTCF_LOCAL;
 	}
 
 #ifdef CONFIG_IP_MROUTE
 	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
-		rth->u.dst.input = ip_mr_input;
+		rth->dst.input = ip_mr_input;
 #endif
 	RT_CACHE_STAT_INC(in_slow_mc);
 
-	in_dev_put(in_dev);
 	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
 	return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
 
 e_nobufs:
-	in_dev_put(in_dev);
 	return -ENOBUFS;
-
 e_inval:
-	in_dev_put(in_dev);
 	return -EINVAL;
+e_err:
+	return err;
 }
 
 
@@ -1956,22 +1953,22 @@
 #endif
 }
 
+/* called in rcu_read_lock() section */
 static int __mkroute_input(struct sk_buff *skb,
 			   struct fib_result *res,
 			   struct in_device *in_dev,
 			   __be32 daddr, __be32 saddr, u32 tos,
 			   struct rtable **result)
 {
-
 	struct rtable *rth;
 	int err;
 	struct in_device *out_dev;
-	unsigned flags = 0;
+	unsigned int flags = 0;
 	__be32 spec_dst;
 	u32 itag;
 
 	/* get a working reference to the output device */
-	out_dev = in_dev_get(FIB_RES_DEV(*res));
+	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
 	if (out_dev == NULL) {
 		if (net_ratelimit())
 			printk(KERN_CRIT "Bug in ip_route_input" \
@@ -1986,7 +1983,6 @@
 		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
 					 saddr);
 
-		err = -EINVAL;
 		goto cleanup;
 	}
 
@@ -2020,12 +2016,12 @@
 		goto cleanup;
 	}
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+	atomic_set(&rth->dst.__refcnt, 1);
+	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->u.dst.flags |= DST_NOPOLICY;
+		rth->dst.flags |= DST_NOPOLICY;
 	if (IN_DEV_CONF_GET(out_dev, NOXFRM))
-		rth->u.dst.flags |= DST_NOXFRM;
+		rth->dst.flags |= DST_NOXFRM;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
@@ -2035,16 +2031,16 @@
 	rth->rt_gateway	= daddr;
 	rth->rt_iif 	=
 		rth->fl.iif	= in_dev->dev->ifindex;
-	rth->u.dst.dev	= (out_dev)->dev;
-	dev_hold(rth->u.dst.dev);
-	rth->idev	= in_dev_get(rth->u.dst.dev);
+	rth->dst.dev	= (out_dev)->dev;
+	dev_hold(rth->dst.dev);
+	rth->idev	= in_dev_get(rth->dst.dev);
 	rth->fl.oif 	= 0;
 	rth->rt_spec_dst= spec_dst;
 
-	rth->u.dst.obsolete = -1;
-	rth->u.dst.input = ip_forward;
-	rth->u.dst.output = ip_output;
-	rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
+	rth->dst.obsolete = -1;
+	rth->dst.input = ip_forward;
+	rth->dst.output = ip_output;
+	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
 
 	rt_set_nexthop(rth, res, itag);
 
@@ -2053,8 +2049,6 @@
 	*result = rth;
 	err = 0;
  cleanup:
-	/* release the working reference to the output device */
-	in_dev_put(out_dev);
 	return err;
 }
 
@@ -2080,7 +2074,7 @@
 
 	/* put it into the cache */
 	hash = rt_hash(daddr, saddr, fl->iif,
-		       rt_genid(dev_net(rth->u.dst.dev)));
+		       rt_genid(dev_net(rth->dst.dev)));
 	return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
 }
 
@@ -2098,7 +2092,7 @@
 			       u8 tos, struct net_device *dev)
 {
 	struct fib_result res;
-	struct in_device *in_dev = in_dev_get(dev);
+	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	struct flowi fl = { .nl_u = { .ip4_u =
 				      { .daddr = daddr,
 					.saddr = saddr,
@@ -2158,13 +2152,12 @@
 		goto brd_input;
 
 	if (res.type == RTN_LOCAL) {
-		int result;
-		result = fib_validate_source(saddr, daddr, tos,
+		err = fib_validate_source(saddr, daddr, tos,
 					     net->loopback_dev->ifindex,
 					     dev, &spec_dst, &itag, skb->mark);
-		if (result < 0)
-			goto martian_source;
-		if (result)
+		if (err < 0)
+			goto martian_source_keep_err;
+		if (err)
 			flags |= RTCF_DIRECTSRC;
 		spec_dst = daddr;
 		goto local_input;
@@ -2177,7 +2170,6 @@
 
 	err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
 done:
-	in_dev_put(in_dev);
 	if (free_res)
 		fib_res_put(&res);
 out:	return err;
@@ -2192,7 +2184,7 @@
 		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
 					  &itag, skb->mark);
 		if (err < 0)
-			goto martian_source;
+			goto martian_source_keep_err;
 		if (err)
 			flags |= RTCF_DIRECTSRC;
 	}
@@ -2205,14 +2197,14 @@
 	if (!rth)
 		goto e_nobufs;
 
-	rth->u.dst.output= ip_rt_bug;
-	rth->u.dst.obsolete = -1;
+	rth->dst.output= ip_rt_bug;
+	rth->dst.obsolete = -1;
 	rth->rt_genid = rt_genid(net);
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+	atomic_set(&rth->dst.__refcnt, 1);
+	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->u.dst.flags |= DST_NOPOLICY;
+		rth->dst.flags |= DST_NOPOLICY;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
@@ -2220,20 +2212,20 @@
 	rth->fl.fl4_src	= saddr;
 	rth->rt_src	= saddr;
 #ifdef CONFIG_NET_CLS_ROUTE
-	rth->u.dst.tclassid = itag;
+	rth->dst.tclassid = itag;
 #endif
 	rth->rt_iif	=
 	rth->fl.iif	= dev->ifindex;
-	rth->u.dst.dev	= net->loopback_dev;
-	dev_hold(rth->u.dst.dev);
-	rth->idev	= in_dev_get(rth->u.dst.dev);
+	rth->dst.dev	= net->loopback_dev;
+	dev_hold(rth->dst.dev);
+	rth->idev	= in_dev_get(rth->dst.dev);
 	rth->rt_gateway	= daddr;
 	rth->rt_spec_dst= spec_dst;
-	rth->u.dst.input= ip_local_deliver;
+	rth->dst.input= ip_local_deliver;
 	rth->rt_flags 	= flags|RTCF_LOCAL;
 	if (res.type == RTN_UNREACHABLE) {
-		rth->u.dst.input= ip_error;
-		rth->u.dst.error= -err;
+		rth->dst.input= ip_error;
+		rth->dst.error= -err;
 		rth->rt_flags 	&= ~RTCF_LOCAL;
 	}
 	rth->rt_type	= res.type;
@@ -2273,8 +2265,10 @@
 	goto done;
 
 martian_source:
+	err = -EINVAL;
+martian_source_keep_err:
 	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
-	goto e_inval;
+	goto done;
 }
 
 int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2284,32 +2278,34 @@
 	unsigned	hash;
 	int iif = dev->ifindex;
 	struct net *net;
+	int res;
 
 	net = dev_net(dev);
 
+	rcu_read_lock();
+
 	if (!rt_caching(net))
 		goto skip_cache;
 
 	tos &= IPTOS_RT_MASK;
 	hash = rt_hash(daddr, saddr, iif, rt_genid(net));
 
-	rcu_read_lock();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.dst.rt_next)) {
+	     rth = rcu_dereference(rth->dst.rt_next)) {
 		if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
 		     ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
 		     (rth->fl.iif ^ iif) |
 		     rth->fl.oif |
 		     (rth->fl.fl4_tos ^ tos)) == 0 &&
 		    rth->fl.mark == skb->mark &&
-		    net_eq(dev_net(rth->u.dst.dev), net) &&
+		    net_eq(dev_net(rth->dst.dev), net) &&
 		    !rt_is_expired(rth)) {
 			if (noref) {
-				dst_use_noref(&rth->u.dst, jiffies);
-				skb_dst_set_noref(skb, &rth->u.dst);
+				dst_use_noref(&rth->dst, jiffies);
+				skb_dst_set_noref(skb, &rth->dst);
 			} else {
-				dst_use(&rth->u.dst, jiffies);
-				skb_dst_set(skb, &rth->u.dst);
+				dst_use(&rth->dst, jiffies);
+				skb_dst_set(skb, &rth->dst);
 			}
 			RT_CACHE_STAT_INC(in_hit);
 			rcu_read_unlock();
@@ -2317,7 +2313,6 @@
 		}
 		RT_CACHE_STAT_INC(in_hlist_search);
 	}
-	rcu_read_unlock();
 
 skip_cache:
 	/* Multicast recognition logic is moved from route cache to here.
@@ -2332,12 +2327,11 @@
 	   route cache entry is created eventually.
 	 */
 	if (ipv4_is_multicast(daddr)) {
-		struct in_device *in_dev;
+		struct in_device *in_dev = __in_dev_get_rcu(dev);
 
-		rcu_read_lock();
-		if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
+		if (in_dev) {
 			int our = ip_check_mc(in_dev, daddr, saddr,
-				ip_hdr(skb)->protocol);
+					      ip_hdr(skb)->protocol);
 			if (our
 #ifdef CONFIG_IP_MROUTE
 				||
@@ -2345,15 +2339,18 @@
 			     IN_DEV_MFORWARD(in_dev))
 #endif
 			   ) {
+				int res = ip_route_input_mc(skb, daddr, saddr,
+							    tos, dev, our);
 				rcu_read_unlock();
-				return ip_route_input_mc(skb, daddr, saddr,
-							 tos, dev, our);
+				return res;
 			}
 		}
 		rcu_read_unlock();
 		return -EINVAL;
 	}
-	return ip_route_input_slow(skb, daddr, saddr, tos, dev);
+	res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
+	rcu_read_unlock();
+	return res;
 }
 EXPORT_SYMBOL(ip_route_input_common);
 
@@ -2415,12 +2412,12 @@
 		goto cleanup;
 	}
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+	atomic_set(&rth->dst.__refcnt, 1);
+	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOXFRM))
-		rth->u.dst.flags |= DST_NOXFRM;
+		rth->dst.flags |= DST_NOXFRM;
 	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->u.dst.flags |= DST_NOPOLICY;
+		rth->dst.flags |= DST_NOPOLICY;
 
 	rth->fl.fl4_dst	= oldflp->fl4_dst;
 	rth->fl.fl4_tos	= tos;
@@ -2432,35 +2429,35 @@
 	rth->rt_iif	= oldflp->oif ? : dev_out->ifindex;
 	/* get references to the devices that are to be hold by the routing
 	   cache entry */
-	rth->u.dst.dev	= dev_out;
+	rth->dst.dev	= dev_out;
 	dev_hold(dev_out);
 	rth->idev	= in_dev_get(dev_out);
 	rth->rt_gateway = fl->fl4_dst;
 	rth->rt_spec_dst= fl->fl4_src;
 
-	rth->u.dst.output=ip_output;
-	rth->u.dst.obsolete = -1;
+	rth->dst.output=ip_output;
+	rth->dst.obsolete = -1;
 	rth->rt_genid = rt_genid(dev_net(dev_out));
 
 	RT_CACHE_STAT_INC(out_slow_tot);
 
 	if (flags & RTCF_LOCAL) {
-		rth->u.dst.input = ip_local_deliver;
+		rth->dst.input = ip_local_deliver;
 		rth->rt_spec_dst = fl->fl4_dst;
 	}
 	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
 		rth->rt_spec_dst = fl->fl4_src;
 		if (flags & RTCF_LOCAL &&
 		    !(dev_out->flags & IFF_LOOPBACK)) {
-			rth->u.dst.output = ip_mc_output;
+			rth->dst.output = ip_mc_output;
 			RT_CACHE_STAT_INC(out_slow_mc);
 		}
 #ifdef CONFIG_IP_MROUTE
 		if (res->type == RTN_MULTICAST) {
 			if (IN_DEV_MFORWARD(in_dev) &&
 			    !ipv4_is_local_multicast(oldflp->fl4_dst)) {
-				rth->u.dst.input = ip_mr_input;
-				rth->u.dst.output = ip_mc_output;
+				rth->dst.input = ip_mr_input;
+				rth->dst.output = ip_mc_output;
 			}
 		}
 #endif
@@ -2715,7 +2712,7 @@
 
 	rcu_read_lock_bh();
 	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
-		rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
+		rth = rcu_dereference_bh(rth->dst.rt_next)) {
 		if (rth->fl.fl4_dst == flp->fl4_dst &&
 		    rth->fl.fl4_src == flp->fl4_src &&
 		    rth->fl.iif == 0 &&
@@ -2723,9 +2720,9 @@
 		    rth->fl.mark == flp->mark &&
 		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
 			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
-		    net_eq(dev_net(rth->u.dst.dev), net) &&
+		    net_eq(dev_net(rth->dst.dev), net) &&
 		    !rt_is_expired(rth)) {
-			dst_use(&rth->u.dst, jiffies);
+			dst_use(&rth->dst, jiffies);
 			RT_CACHE_STAT_INC(out_hit);
 			rcu_read_unlock_bh();
 			*rp = rth;
@@ -2762,15 +2759,15 @@
 		dst_alloc(&ipv4_dst_blackhole_ops);
 
 	if (rt) {
-		struct dst_entry *new = &rt->u.dst;
+		struct dst_entry *new = &rt->dst;
 
 		atomic_set(&new->__refcnt, 1);
 		new->__use = 1;
 		new->input = dst_discard;
 		new->output = dst_discard;
-		memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
+		memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
 
-		new->dev = ort->u.dst.dev;
+		new->dev = ort->dst.dev;
 		if (new->dev)
 			dev_hold(new->dev);
 
@@ -2794,7 +2791,7 @@
 		dst_free(new);
 	}
 
-	dst_release(&(*rp)->u.dst);
+	dst_release(&(*rp)->dst);
 	*rp = rt;
 	return (rt ? 0 : -ENOMEM);
 }
@@ -2864,11 +2861,11 @@
 		r->rtm_src_len = 32;
 		NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
 	}
-	if (rt->u.dst.dev)
-		NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
+	if (rt->dst.dev)
+		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
 #ifdef CONFIG_NET_CLS_ROUTE
-	if (rt->u.dst.tclassid)
-		NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
+	if (rt->dst.tclassid)
+		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
 #endif
 	if (rt->fl.iif)
 		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
@@ -2878,12 +2875,13 @@
 	if (rt->rt_dst != rt->rt_gateway)
 		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
 
-	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
+	if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
 		goto nla_put_failure;
 
-	error = rt->u.dst.error;
-	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
+	error = rt->dst.error;
+	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
 	if (rt->peer) {
+		inet_peer_refcheck(rt->peer);
 		id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
 		if (rt->peer->tcp_ts_stamp) {
 			ts = rt->peer->tcp_ts;
@@ -2914,7 +2912,7 @@
 			NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
 	}
 
-	if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
+	if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
 			       expires, error) < 0)
 		goto nla_put_failure;
 
@@ -2979,8 +2977,8 @@
 		local_bh_enable();
 
 		rt = skb_rtable(skb);
-		if (err == 0 && rt->u.dst.error)
-			err = -rt->u.dst.error;
+		if (err == 0 && rt->dst.error)
+			err = -rt->dst.error;
 	} else {
 		struct flowi fl = {
 			.nl_u = {
@@ -2998,7 +2996,7 @@
 	if (err)
 		goto errout_free;
 
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 	if (rtm->rtm_flags & RTM_F_NOTIFY)
 		rt->rt_flags |= RTCF_NOTIFY;
 
@@ -3034,12 +3032,12 @@
 			continue;
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
-		     rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
-			if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
+		     rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
+			if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
 				continue;
 			if (rt_is_expired(rt))
 				continue;
-			skb_dst_set_noref(skb, &rt->u.dst);
+			skb_dst_set_noref(skb, &rt->dst);
 			if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
 					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
 					 1, NLM_F_MULTI) <= 0) {
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 9f6b222..650cace 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -18,8 +18,8 @@
 #include <net/tcp.h>
 #include <net/route.h>
 
-/* Timestamps: lowest 9 bits store TCP options */
-#define TSBITS 9
+/* Timestamps: lowest bits store TCP options */
+#define TSBITS 6
 #define TSMASK (((__u32)1 << TSBITS) - 1)
 
 extern int sysctl_tcp_syncookies;
@@ -58,7 +58,7 @@
 
 /*
  * when syncookies are in effect and tcp timestamps are enabled we encode
- * tcp options in the lowest 9 bits of the timestamp value that will be
+ * tcp options in the lower bits of the timestamp value that will be
  * sent in the syn-ack.
  * Since subsequent timestamps use the normal tcp_time_stamp value, we
  * must make sure that the resulting initial timestamp is <= tcp_time_stamp.
@@ -70,11 +70,10 @@
 	u32 options = 0;
 
 	ireq = inet_rsk(req);
-	if (ireq->wscale_ok) {
-		options = ireq->snd_wscale;
-		options |= ireq->rcv_wscale << 4;
-	}
-	options |= ireq->sack_ok << 8;
+
+	options = ireq->wscale_ok ? ireq->snd_wscale : 0xf;
+	options |= ireq->sack_ok << 4;
+	options |= ireq->ecn_ok << 5;
 
 	ts = ts_now & ~TSMASK;
 	ts |= options;
@@ -138,23 +137,23 @@
 }
 
 /*
- * This table has to be sorted and terminated with (__u16)-1.
- * XXX generate a better table.
- * Unresolved Issues: HIPPI with a 64k MSS is not well supported.
+ * MSS values are taken from the 2009 paper
+ * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
+ *  - values 1440 to 1460 accounted for 80% of observed mss values
+ *  - values outside the 536-1460 range are rare (<0.2%).
+ *
+ * Table must be sorted.
  */
 static __u16 const msstab[] = {
-	64 - 1,
-	256 - 1,
-	512 - 1,
-	536 - 1,
-	1024 - 1,
-	1440 - 1,
-	1460 - 1,
-	4312 - 1,
-	(__u16)-1
+	64,
+	512,
+	536,
+	1024,
+	1440,
+	1460,
+	4312,
+	8960,
 };
-/* The number doesn't include the -1 terminator */
-#define NUM_MSS (ARRAY_SIZE(msstab) - 1)
 
 /*
  * Generate a syncookie.  mssp points to the mss, which is returned
@@ -169,10 +168,10 @@
 
 	tcp_synq_overflow(sk);
 
-	/* XXX sort msstab[] by probability?  Binary search? */
-	for (mssind = 0; mss > msstab[mssind + 1]; mssind++)
-		;
-	*mssp = msstab[mssind] + 1;
+	for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
+		if (mss >= msstab[mssind])
+			break;
+	*mssp = msstab[mssind];
 
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 
@@ -202,7 +201,7 @@
 					    jiffies / (HZ * 60),
 					    COUNTER_TRIES);
 
-	return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
+	return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
 }
 
 static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
@@ -227,26 +226,38 @@
  * additional tcp options in the timestamp.
  * This extracts these options from the timestamp echo.
  *
- * The lowest 4 bits are for snd_wscale
- * The next 4 lsb are for rcv_wscale
- * The next lsb is for sack_ok
+ * The lowest 4 bits store snd_wscale.
+ * The next 2 bits indicate SACK and ECN support.
+ *
+ * Return false if we decode an option that should not be set.
  */
-void cookie_check_timestamp(struct tcp_options_received *tcp_opt)
+bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
 {
-	/* echoed timestamp, 9 lowest bits contain options */
+	/* echoed timestamp, lowest bits contain options */
 	u32 options = tcp_opt->rcv_tsecr & TSMASK;
 
-	tcp_opt->snd_wscale = options & 0xf;
-	options >>= 4;
-	tcp_opt->rcv_wscale = options & 0xf;
+	if (!tcp_opt->saw_tstamp)  {
+		tcp_clear_options(tcp_opt);
+		return true;
+	}
+
+	if (!sysctl_tcp_timestamps)
+		return false;
 
 	tcp_opt->sack_ok = (options >> 4) & 0x1;
+	*ecn_ok = (options >> 5) & 1;
+	if (*ecn_ok && !sysctl_tcp_ecn)
+		return false;
 
-	if (tcp_opt->sack_ok)
-		tcp_sack_reset(tcp_opt);
+	if (tcp_opt->sack_ok && !sysctl_tcp_sack)
+		return false;
 
-	if (tcp_opt->snd_wscale || tcp_opt->rcv_wscale)
-		tcp_opt->wscale_ok = 1;
+	if ((options & 0xf) == 0xf)
+		return true; /* no window scaling */
+
+	tcp_opt->wscale_ok = 1;
+	tcp_opt->snd_wscale = options & 0xf;
+	return sysctl_tcp_window_scaling != 0;
 }
 EXPORT_SYMBOL(cookie_check_timestamp);
 
@@ -265,8 +276,9 @@
 	int mss;
 	struct rtable *rt;
 	__u8 rcv_wscale;
+	bool ecn_ok;
 
-	if (!sysctl_tcp_syncookies || !th->ack)
+	if (!sysctl_tcp_syncookies || !th->ack || th->rst)
 		goto out;
 
 	if (tcp_synq_no_recent_overflow(sk) ||
@@ -281,8 +293,8 @@
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
 	tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
 
-	if (tcp_opt.saw_tstamp)
-		cookie_check_timestamp(&tcp_opt);
+	if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
+		goto out;
 
 	ret = NULL;
 	req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
@@ -298,9 +310,8 @@
 	ireq->rmt_port		= th->source;
 	ireq->loc_addr		= ip_hdr(skb)->daddr;
 	ireq->rmt_addr		= ip_hdr(skb)->saddr;
-	ireq->ecn_ok		= 0;
+	ireq->ecn_ok		= ecn_ok;
 	ireq->snd_wscale	= tcp_opt.snd_wscale;
-	ireq->rcv_wscale	= tcp_opt.rcv_wscale;
 	ireq->sack_ok		= tcp_opt.sack_ok;
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
 	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
@@ -354,15 +365,15 @@
 	}
 
 	/* Try to redo what tcp_v4_send_synack did. */
-	req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
+	req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
 
 	tcp_select_initial_window(tcp_full_space(sk), req->mss,
 				  &req->rcv_wnd, &req->window_clamp,
 				  ireq->wscale_ok, &rcv_wscale,
-				  dst_metric(&rt->u.dst, RTAX_INITRWND));
+				  dst_metric(&rt->dst, RTAX_INITRWND));
 
 	ireq->rcv_wscale  = rcv_wscale;
 
-	ret = get_cookie_sock(sk, skb, req, &rt->u.dst);
+	ret = get_cookie_sock(sk, skb, req, &rt->dst);
 out:	return ret;
 }
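
What cookie_check_timestamp() decodes is plain bit packing in the low TSBITS of the echoed timestamp: four bits of snd_wscale, with 0xf meaning window scaling was off (rcv_wscale no longer needs to be echoed because it is recomputed via tcp_select_initial_window() when the cookie is checked), one SACK bit and one ECN bit. The cookie's MSS index now simply picks the largest msstab[] entry not above the peer's MSS, falling back to msstab[0]. A self-contained userspace sketch of both; the helper names are invented here, only the bit layout and table values come from the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TSBITS	6
#define TSMASK	(((uint32_t)1 << TSBITS) - 1)

static const uint16_t msstab[] = { 64, 512, 536, 1024, 1440, 1460, 4312, 8960 };

/* Pack the negotiated options into the low bits of the timestamp. */
static uint32_t pack_options(uint32_t ts_now, bool wscale_ok,
			     unsigned snd_wscale, bool sack_ok, bool ecn_ok)
{
	uint32_t options = wscale_ok ? (snd_wscale & 0xf) : 0xf;

	options |= (uint32_t)sack_ok << 4;
	options |= (uint32_t)ecn_ok << 5;
	return (ts_now & ~TSMASK) | options;
}

/* Recover them from the echoed timestamp. */
static void unpack_options(uint32_t tsecr, unsigned *snd_wscale,
			   bool *wscale_ok, bool *sack_ok, bool *ecn_ok)
{
	uint32_t options = tsecr & TSMASK;

	*sack_ok = (options >> 4) & 1;
	*ecn_ok = (options >> 5) & 1;
	*wscale_ok = (options & 0xf) != 0xf;
	*snd_wscale = *wscale_ok ? (options & 0xf) : 0;
}

/* Largest table entry not above the peer's MSS (index 0 as a floor). */
static unsigned mss_to_index(unsigned mss)
{
	unsigned i;

	for (i = sizeof(msstab) / sizeof(msstab[0]) - 1; i; i--)
		if (mss >= msstab[i])
			break;
	return i;
}

int main(void)
{
	uint32_t ts = pack_options(0x12345678, true, 7, true, false);
	unsigned wscale, idx = mss_to_index(1400);
	bool wscale_ok, sack_ok, ecn_ok;

	unpack_options(ts, &wscale, &wscale_ok, &sack_ok, &ecn_ok);
	printf("wscale=%u sack=%d ecn=%d mss=%u\n",
	       wscale, sack_ok, ecn_ok, msstab[idx]);
	return 0;
}

Compiled standalone, this prints wscale=7 sack=1 ecn=0 mss=1024 for the sample values.
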
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6596b4f..4e6ddfb 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -511,7 +511,7 @@
 
 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+	TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
 	tp->pushed_seq = tp->write_seq;
 }
 
@@ -527,7 +527,7 @@
 
 	skb->csum    = 0;
 	tcb->seq     = tcb->end_seq = tp->write_seq;
-	tcb->flags   = TCPCB_FLAG_ACK;
+	tcb->flags   = TCPHDR_ACK;
 	tcb->sacked  = 0;
 	skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
@@ -815,7 +815,7 @@
 		skb_shinfo(skb)->gso_segs = 0;
 
 		if (!copied)
-			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+			TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
 
 		copied += copy;
 		poffset += copy;
@@ -1061,7 +1061,7 @@
 			}
 
 			if (!copied)
-				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+				TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
 
 			tp->write_seq += copy;
 			TCP_SKB_CB(skb)->end_seq += copy;
@@ -1898,6 +1898,10 @@
 
 	sk_mem_reclaim(sk);
 
+	/* If the socket has already been reset (e.g. in tcp_reset()), kill it. */
+	if (sk->sk_state == TCP_CLOSE)
+		goto adjudge_to_death;
+
 	/* As outlined in RFC 2525, section 2.17, we send a RST here because
 	 * data was lost. To witness the awful effects of the old behavior of
 	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
@@ -2958,7 +2962,7 @@
 	spin_unlock(&tcp_md5sig_pool_lock);
 
 	if (p)
-		return *per_cpu_ptr(p, smp_processor_id());
+		return *this_cpu_ptr(p);
 
 	local_bh_enable();
 	return NULL;
@@ -2999,6 +3003,7 @@
 	const unsigned head_data_len = skb_headlen(skb) > header_len ?
 				       skb_headlen(skb) - header_len : 0;
 	const struct skb_shared_info *shi = skb_shinfo(skb);
+	struct sk_buff *frag_iter;
 
 	sg_init_table(&sg, 1);
 
@@ -3013,6 +3018,10 @@
 			return 1;
 	}
 
+	skb_walk_frags(skb, frag_iter)
+		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
+			return 1;
+
 	return 0;
 }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 548d575..0433466 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3286,7 +3286,7 @@
 		 * connection startup slow start one packet too
 		 * quickly.  This is severely frowned upon behavior.
 		 */
-		if (!(scb->flags & TCPCB_FLAG_SYN)) {
+		if (!(scb->flags & TCPHDR_SYN)) {
 			flag |= FLAG_DATA_ACKED;
 		} else {
 			flag |= FLAG_SYN_ACKED;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index fe193e5..8fa32f5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -204,10 +204,12 @@
 		 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
 		 * when trying new connection.
 		 */
-		if (peer != NULL &&
-		    (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
-			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
-			tp->rx_opt.ts_recent = peer->tcp_ts;
+		if (peer) {
+			inet_peer_refcheck(peer);
+			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
+				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
+				tp->rx_opt.ts_recent = peer->tcp_ts;
+			}
 		}
 	}
 
@@ -237,7 +239,7 @@
 
 	/* OK, now commit destination to socket.  */
 	sk->sk_gso_type = SKB_GSO_TCPV4;
-	sk_setup_caps(sk, &rt->u.dst);
+	sk_setup_caps(sk, &rt->dst);
 
 	if (!tp->write_seq)
 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
@@ -793,19 +795,20 @@
 	kfree(inet_rsk(req)->opt);
 }
 
-#ifdef CONFIG_SYN_COOKIES
-static void syn_flood_warning(struct sk_buff *skb)
+static void syn_flood_warning(const struct sk_buff *skb)
 {
-	static unsigned long warntime;
+	const char *msg;
 
-	if (time_after(jiffies, (warntime + HZ * 60))) {
-		warntime = jiffies;
-		printk(KERN_INFO
-		       "possible SYN flooding on port %d. Sending cookies.\n",
-		       ntohs(tcp_hdr(skb)->dest));
-	}
-}
+#ifdef CONFIG_SYN_COOKIES
+	if (sysctl_tcp_syncookies)
+		msg = "Sending cookies";
+	else
 #endif
+		msg = "Dropping request";
+
+	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
+				ntohs(tcp_hdr(skb)->dest), msg);
+}
 
 /*
  * Save and compile IPv4 options into the request_sock if needed.
@@ -1243,6 +1246,8 @@
 	 * evidently real one.
 	 */
 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
+		if (net_ratelimit())
+			syn_flood_warning(skb);
 #ifdef CONFIG_SYN_COOKIES
 		if (sysctl_tcp_syncookies) {
 			want_cookie = 1;
@@ -1323,15 +1328,12 @@
 	if (security_inet_conn_request(sk, skb, req))
 		goto drop_and_free;
 
-	if (!want_cookie)
+	if (!want_cookie || tmp_opt.tstamp_ok)
 		TCP_ECN_create_request(req, tcp_hdr(skb));
 
 	if (want_cookie) {
-#ifdef CONFIG_SYN_COOKIES
-		syn_flood_warning(skb);
-		req->cookie_ts = tmp_opt.tstamp_ok;
-#endif
 		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
+		req->cookie_ts = tmp_opt.tstamp_ok;
 	} else if (!isn) {
 		struct inet_peer *peer = NULL;
 
@@ -1349,6 +1351,7 @@
 		    (dst = inet_csk_route_req(sk, req)) != NULL &&
 		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
 		    peer->v4daddr == saddr) {
+			inet_peer_refcheck(peer);
 			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
 			    (s32)(peer->tcp_ts - req->ts_recent) >
 							TCP_PAWS_WINDOW) {
@@ -1504,7 +1507,7 @@
 	}
 
 #ifdef CONFIG_SYN_COOKIES
-	if (!th->rst && !th->syn && th->ack)
+	if (!th->syn)
 		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
 #endif
 	return sk;
@@ -1978,6 +1981,11 @@
 		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
 }
 
+/*
+ * Get the next listener socket following cur.  If cur is NULL, get the first
+ * socket starting from the bucket given in st->bucket; when st->bucket is
+ * zero the very first socket in the hash table is returned.
+ */
 static void *listening_get_next(struct seq_file *seq, void *cur)
 {
 	struct inet_connection_sock *icsk;
@@ -1988,14 +1996,15 @@
 	struct net *net = seq_file_net(seq);
 
 	if (!sk) {
-		st->bucket = 0;
-		ilb = &tcp_hashinfo.listening_hash[0];
+		ilb = &tcp_hashinfo.listening_hash[st->bucket];
 		spin_lock_bh(&ilb->lock);
 		sk = sk_nulls_head(&ilb->head);
+		st->offset = 0;
 		goto get_sk;
 	}
 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
 	++st->num;
+	++st->offset;
 
 	if (st->state == TCP_SEQ_STATE_OPENREQ) {
 		struct request_sock *req = cur;
@@ -2010,6 +2019,7 @@
 				}
 				req = req->dl_next;
 			}
+			st->offset = 0;
 			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
 				break;
 get_req:
@@ -2045,6 +2055,7 @@
 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 	}
 	spin_unlock_bh(&ilb->lock);
+	st->offset = 0;
 	if (++st->bucket < INET_LHTABLE_SIZE) {
 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
 		spin_lock_bh(&ilb->lock);
@@ -2058,7 +2069,12 @@
 
 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
 {
-	void *rc = listening_get_next(seq, NULL);
+	struct tcp_iter_state *st = seq->private;
+	void *rc;
+
+	st->bucket = 0;
+	st->offset = 0;
+	rc = listening_get_next(seq, NULL);
 
 	while (rc && *pos) {
 		rc = listening_get_next(seq, rc);
@@ -2073,13 +2089,18 @@
 		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
 }
 
+/*
+ * Get the first established socket, starting from the bucket given in st->bucket.
+ * If st->bucket is zero, the very first socket in the hash is returned.
+ */
 static void *established_get_first(struct seq_file *seq)
 {
 	struct tcp_iter_state *st = seq->private;
 	struct net *net = seq_file_net(seq);
 	void *rc = NULL;
 
-	for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
+	st->offset = 0;
+	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
 		struct sock *sk;
 		struct hlist_nulls_node *node;
 		struct inet_timewait_sock *tw;
@@ -2124,6 +2145,7 @@
 	struct net *net = seq_file_net(seq);
 
 	++st->num;
+	++st->offset;
 
 	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
 		tw = cur;
@@ -2140,6 +2162,7 @@
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 
 		/* Look for next non empty bucket */
+		st->offset = 0;
 		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
 				empty_bucket(st))
 			;
@@ -2167,7 +2190,11 @@
 
 static void *established_get_idx(struct seq_file *seq, loff_t pos)
 {
-	void *rc = established_get_first(seq);
+	struct tcp_iter_state *st = seq->private;
+	void *rc;
+
+	st->bucket = 0;
+	rc = established_get_first(seq);
 
 	while (rc && pos) {
 		rc = established_get_next(seq, rc);
@@ -2192,24 +2219,72 @@
 	return rc;
 }
 
+static void *tcp_seek_last_pos(struct seq_file *seq)
+{
+	struct tcp_iter_state *st = seq->private;
+	int offset = st->offset;
+	int orig_num = st->num;
+	void *rc = NULL;
+
+	switch (st->state) {
+	case TCP_SEQ_STATE_OPENREQ:
+	case TCP_SEQ_STATE_LISTENING:
+		if (st->bucket >= INET_LHTABLE_SIZE)
+			break;
+		st->state = TCP_SEQ_STATE_LISTENING;
+		rc = listening_get_next(seq, NULL);
+		while (offset-- && rc)
+			rc = listening_get_next(seq, rc);
+		if (rc)
+			break;
+		st->bucket = 0;
+		/* Fallthrough */
+	case TCP_SEQ_STATE_ESTABLISHED:
+	case TCP_SEQ_STATE_TIME_WAIT:
+		st->state = TCP_SEQ_STATE_ESTABLISHED;
+		if (st->bucket > tcp_hashinfo.ehash_mask)
+			break;
+		rc = established_get_first(seq);
+		while (offset-- && rc)
+			rc = established_get_next(seq, rc);
+	}
+
+	st->num = orig_num;
+
+	return rc;
+}
+
 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct tcp_iter_state *st = seq->private;
+	void *rc;
+
+	if (*pos && *pos == st->last_pos) {
+		rc = tcp_seek_last_pos(seq);
+		if (rc)
+			goto out;
+	}
+
 	st->state = TCP_SEQ_STATE_LISTENING;
 	st->num = 0;
-	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+	st->bucket = 0;
+	st->offset = 0;
+	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+
+out:
+	st->last_pos = *pos;
+	return rc;
 }
 
 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
+	struct tcp_iter_state *st = seq->private;
 	void *rc = NULL;
-	struct tcp_iter_state *st;
 
 	if (v == SEQ_START_TOKEN) {
 		rc = tcp_get_idx(seq, 0);
 		goto out;
 	}
-	st = seq->private;
 
 	switch (st->state) {
 	case TCP_SEQ_STATE_OPENREQ:
@@ -2217,6 +2292,8 @@
 		rc = listening_get_next(seq, v);
 		if (!rc) {
 			st->state = TCP_SEQ_STATE_ESTABLISHED;
+			st->bucket = 0;
+			st->offset = 0;
 			rc	  = established_get_first(seq);
 		}
 		break;
@@ -2227,6 +2304,7 @@
 	}
 out:
 	++*pos;
+	st->last_pos = *pos;
 	return rc;
 }
 
@@ -2265,6 +2343,7 @@
 
 	s = ((struct seq_file *)file->private_data)->private;
 	s->family		= afinfo->family;
+	s->last_pos 		= 0;
 	return 0;
 }
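
The st->bucket/st->offset/st->last_pos bookkeeping added here lets a reader that consumes /proc/net/tcp in several read() calls resume where it left off: if *pos still matches the saved last_pos, tcp_seek_last_pos() jumps to the remembered bucket and replays only offset entries inside it instead of rescanning every listening and established bucket from zero. A runnable toy version of that resume-by-(bucket, offset) idea; the table, sizes and names are made up for illustration:

#include <stdio.h>

#define NBUCKETS 4

/* Toy "hash table": each bucket holds up to three entries, 0 = empty slot. */
static const int table[NBUCKETS][3] = {
	{ 1, 2, 3 }, { 4, 0, 0 }, { 0, 0, 0 }, { 5, 6, 0 },
};

struct iter_state {
	int bucket;	/* where the last read stopped */
	int offset;	/* how far into that bucket */
	long last_pos;	/* absolute position reached; the kernel checks this against *pos */
};

/* Emit up to max entries, resuming from st instead of rescanning. */
static void read_chunk(struct iter_state *st, int max)
{
	int emitted = 0;

	for (; st->bucket < NBUCKETS; st->bucket++, st->offset = 0) {
		for (; st->offset < 3; st->offset++) {
			int v = table[st->bucket][st->offset];

			if (!v)
				continue;
			if (emitted++ == max)
				return;
			printf("pos=%ld value=%d\n", st->last_pos++, v);
		}
	}
}

int main(void)
{
	struct iter_state st = { 0, 0, 0 };

	read_chunk(&st, 2);	/* first read() of the file */
	read_chunk(&st, 2);	/* later read() resumes at bucket/offset */
	read_chunk(&st, 2);
	return 0;
}

Each call picks up exactly where the previous one stopped, which is the effect tcp_seek_last_pos() achieves for the real listening and established hash tables.
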
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b4ed957..25ff62e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -294,9 +294,9 @@
 /* Packet ECN state for a SYN-ACK */
 static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
+	TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
 	if (!(tp->ecn_flags & TCP_ECN_OK))
-		TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
+		TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
 }
 
 /* Packet ECN state for a SYN.  */
@@ -306,7 +306,7 @@
 
 	tp->ecn_flags = 0;
 	if (sysctl_tcp_ecn == 1) {
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
 		tp->ecn_flags = TCP_ECN_OK;
 	}
 }
@@ -361,7 +361,7 @@
 	skb_shinfo(skb)->gso_type = 0;
 
 	TCP_SKB_CB(skb)->seq = seq;
-	if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
+	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
 		seq++;
 	TCP_SKB_CB(skb)->end_seq = seq;
 }
@@ -820,7 +820,7 @@
 	tcb = TCP_SKB_CB(skb);
 	memset(&opts, 0, sizeof(opts));
 
-	if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
+	if (unlikely(tcb->flags & TCPHDR_SYN))
 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
 	else
 		tcp_options_size = tcp_established_options(sk, skb, &opts,
@@ -843,7 +843,7 @@
 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
 					tcb->flags);
 
-	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
+	if (unlikely(tcb->flags & TCPHDR_SYN)) {
 		/* RFC1323: The window in SYN & SYN/ACK segments
 		 * is never scaled.
 		 */
@@ -866,7 +866,7 @@
 	}
 
 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
-	if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
+	if (likely((tcb->flags & TCPHDR_SYN) == 0))
 		TCP_ECN_send(sk, skb, tcp_header_size);
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -880,7 +880,7 @@
 
 	icsk->icsk_af_ops->send_check(sk, skb);
 
-	if (likely(tcb->flags & TCPCB_FLAG_ACK))
+	if (likely(tcb->flags & TCPHDR_ACK))
 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
 
 	if (skb->len != tcp_header_size)
@@ -1023,7 +1023,7 @@
 
 	/* PSH and FIN should only be set in the second packet. */
 	flags = TCP_SKB_CB(skb)->flags;
-	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
 	TCP_SKB_CB(buff)->flags = flags;
 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
 
@@ -1328,8 +1328,7 @@
 	u32 in_flight, cwnd;
 
 	/* Don't be strict about the congestion window for the final FIN.  */
-	if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
-	    tcp_skb_pcount(skb) == 1)
+	if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
 		return 1;
 
 	in_flight = tcp_packets_in_flight(tp);
@@ -1398,7 +1397,7 @@
 	 * Nagle can be ignored during F-RTO too (see RFC4138).
 	 */
 	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
-	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
+	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
 		return 1;
 
 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@ -1461,7 +1460,7 @@
  * packet has never been sent out before (and thus is not cloned).
  */
 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
-			unsigned int mss_now)
+			unsigned int mss_now, gfp_t gfp)
 {
 	struct sk_buff *buff;
 	int nlen = skb->len - len;
@@ -1471,7 +1470,7 @@
 	if (skb->len != skb->data_len)
 		return tcp_fragment(sk, skb, len, mss_now);
 
-	buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC);
+	buff = sk_stream_alloc_skb(sk, 0, gfp);
 	if (unlikely(buff == NULL))
 		return -ENOMEM;
 
@@ -1487,7 +1486,7 @@
 
 	/* PSH and FIN should only be set in the second packet. */
 	flags = TCP_SKB_CB(skb)->flags;
-	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
 	TCP_SKB_CB(buff)->flags = flags;
 
 	/* This packet was never sent out yet, so no SACK bits. */
@@ -1518,7 +1517,7 @@
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 send_win, cong_win, limit, in_flight;
 
-	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
+	if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
 		goto send_now;
 
 	if (icsk->icsk_ca_state != TCP_CA_Open)
@@ -1644,7 +1643,7 @@
 
 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
-	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
+	TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
 	TCP_SKB_CB(nskb)->sacked = 0;
 	nskb->csum = 0;
 	nskb->ip_summed = skb->ip_summed;
@@ -1669,7 +1668,7 @@
 			sk_wmem_free_skb(sk, skb);
 		} else {
 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
-						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
+						   ~(TCPHDR_FIN|TCPHDR_PSH);
 			if (!skb_shinfo(skb)->nr_frags) {
 				skb_pull(skb, copy);
 				if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1769,7 +1768,7 @@
 						    cwnd_quota);
 
 		if (skb->len > limit &&
-		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
+		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
 			break;
 
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2020,7 +2019,7 @@
 
 	if (!sysctl_tcp_retrans_collapse)
 		return;
-	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)
+	if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
 		return;
 
 	tcp_for_write_queue_from_safe(skb, tmp, sk) {
@@ -2112,7 +2111,7 @@
 	 * since it is cheap to do so and saves bytes on the network.
 	 */
 	if (skb->len > 0 &&
-	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
 	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
 		if (!pskb_trim(skb, 0)) {
 			/* Reuse, even though it does some unnecessary work */
@@ -2301,7 +2300,7 @@
 	mss_now = tcp_current_mss(sk);
 
 	if (tcp_send_head(sk) != NULL) {
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
 		TCP_SKB_CB(skb)->end_seq++;
 		tp->write_seq++;
 	} else {
@@ -2318,7 +2317,7 @@
 		skb_reserve(skb, MAX_TCP_HEADER);
 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
 		tcp_init_nondata_skb(skb, tp->write_seq,
-				     TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
+				     TCPHDR_ACK | TCPHDR_FIN);
 		tcp_queue_skb(sk, skb);
 	}
 	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
@@ -2343,7 +2342,7 @@
 	/* Reserve space for headers and prepare control bits. */
 	skb_reserve(skb, MAX_TCP_HEADER);
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
-			     TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
+			     TCPHDR_ACK | TCPHDR_RST);
 	/* Send it off. */
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	if (tcp_transmit_skb(sk, skb, 0, priority))
@@ -2363,11 +2362,11 @@
 	struct sk_buff *skb;
 
 	skb = tcp_write_queue_head(sk);
-	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
+	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
 		return -EFAULT;
 	}
-	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
+	if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
 		if (skb_cloned(skb)) {
 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
 			if (nskb == NULL)
@@ -2381,7 +2380,7 @@
 			skb = nskb;
 		}
 
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
 		TCP_ECN_send_synack(tcp_sk(sk), skb);
 	}
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2460,7 +2459,7 @@
 	 * not even correctly set)
 	 */
 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
-			     TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
+			     TCPHDR_SYN | TCPHDR_ACK);
 
 	if (OPTION_COOKIE_EXTENSION & opts.options) {
 		if (s_data_desired) {
@@ -2592,7 +2591,7 @@
 	skb_reserve(buff, MAX_TCP_HEADER);
 
 	tp->snd_nxt = tp->write_seq;
-	tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN);
+	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
 	TCP_ECN_send_syn(sk, buff);
 
 	/* Send it off. */
@@ -2698,7 +2697,7 @@
 
 	/* Reserve space for headers and prepare control bits. */
 	skb_reserve(buff, MAX_TCP_HEADER);
-	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK);
+	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
 
 	/* Send it off, this clears delayed acks for us. */
 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2732,7 +2731,7 @@
 	 * end to send an ack.  Don't queue or clone SKB, just
 	 * send it.
 	 */
-	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK);
+	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
@@ -2762,13 +2761,13 @@
 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
 		    skb->len > mss) {
 			seg_size = min(seg_size, mss);
-			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+			TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
 			if (tcp_fragment(sk, skb, seg_size, mss))
 				return -1;
 		} else if (!tcp_skb_pcount(skb))
 			tcp_set_skb_tso_segs(sk, skb, mss);
 
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 		if (!err)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index eec4ff4..32e0bef 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -914,7 +914,7 @@
 		    !sock_flag(sk, SOCK_BROADCAST))
 			goto out;
 		if (connected)
-			sk_dst_set(sk, dst_clone(&rt->u.dst));
+			sk_dst_set(sk, dst_clone(&rt->dst));
 	}
 
 	if (msg->msg_flags&MSG_CONFIRM)
@@ -978,7 +978,7 @@
 	return err;
 
 do_confirm:
-	dst_confirm(&rt->u.dst);
+	dst_confirm(&rt->dst);
 	if (!(msg->msg_flags&MSG_PROBE) || len)
 		goto back_from_confirm;
 	err = 0;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 23883a4..869078d 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -37,7 +37,7 @@
 		fl.fl4_src = saddr->a4;
 
 	err = __ip_route_output_key(net, &rt, &fl);
-	dst = &rt->u.dst;
+	dst = &rt->dst;
 	if (err)
 		dst = ERR_PTR(err);
 	return dst;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e1a698d..e81155d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -121,8 +121,6 @@
 static int __ipv6_regen_rndid(struct inet6_dev *idev);
 static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
 static void ipv6_regen_rndid(unsigned long data);
-
-static int desync_factor = MAX_DESYNC_FACTOR * HZ;
 #endif
 
 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
@@ -284,13 +282,16 @@
 static int snmp6_alloc_dev(struct inet6_dev *idev)
 {
 	if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
-			  sizeof(struct ipstats_mib)) < 0)
+			  sizeof(struct ipstats_mib),
+			  __alignof__(struct ipstats_mib)) < 0)
 		goto err_ip;
 	if (snmp_mib_init((void __percpu **)idev->stats.icmpv6,
-			  sizeof(struct icmpv6_mib)) < 0)
+			  sizeof(struct icmpv6_mib),
+			  __alignof__(struct icmpv6_mib)) < 0)
 		goto err_icmp;
 	if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg,
-			  sizeof(struct icmpv6msg_mib)) < 0)
+			  sizeof(struct icmpv6msg_mib),
+			  __alignof__(struct icmpv6msg_mib)) < 0)
 		goto err_icmpmsg;
 
 	return 0;
@@ -557,7 +558,7 @@
 		pr_warning("Freeing alive inet6 address %p\n", ifp);
 		return;
 	}
-	dst_release(&ifp->rt->u.dst);
+	dst_release(&ifp->rt->dst);
 
 	call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu);
 }
@@ -823,7 +824,7 @@
 				rt->rt6i_flags |= RTF_EXPIRES;
 			}
 		}
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 	}
 
 out:
@@ -890,7 +891,8 @@
 			      idev->cnf.temp_valid_lft);
 	tmp_prefered_lft = min_t(__u32,
 				 ifp->prefered_lft,
-				 idev->cnf.temp_prefered_lft - desync_factor / HZ);
+				 idev->cnf.temp_prefered_lft -
+				 idev->cnf.max_desync_factor);
 	tmp_plen = ifp->prefix_len;
 	max_addresses = idev->cnf.max_addresses;
 	tmp_cstamp = ifp->cstamp;
@@ -1650,7 +1652,8 @@
 
 	expires = jiffies +
 		idev->cnf.temp_prefered_lft * HZ -
-		idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time - desync_factor;
+		idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
+		idev->cnf.max_desync_factor * HZ;
 	if (time_before(expires, jiffies)) {
 		printk(KERN_WARNING
 			"ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n",
@@ -1863,7 +1866,7 @@
 					      dev, expires, flags);
 		}
 		if (rt)
-			dst_release(&rt->u.dst);
+			dst_release(&rt->dst);
 	}
 
 	/* Try to figure out our local address for this prefix */
@@ -3492,8 +3495,12 @@
 				preferred -= tval;
 			else
 				preferred = 0;
-			if (valid != INFINITY_LIFE_TIME)
-				valid -= tval;
+			if (valid != INFINITY_LIFE_TIME) {
+				if (valid > tval)
+					valid -= tval;
+				else
+					valid = 0;
+			}
 		}
 	} else {
 		preferred = INFINITY_LIFE_TIME;
@@ -3855,12 +3862,28 @@
 	memset(&stats[items], 0, pad);
 }
 
+static inline void __snmp6_fill_stats64(u64 *stats, void __percpu **mib,
+				      int items, int bytes, size_t syncpoff)
+{
+	int i;
+	int pad = bytes - sizeof(u64) * items;
+	BUG_ON(pad < 0);
+
+	/* Use put_unaligned() because stats may not be aligned for u64. */
+	put_unaligned(items, &stats[0]);
+	for (i = 1; i < items; i++)
+		put_unaligned(snmp_fold_field64(mib, i, syncpoff), &stats[i]);
+
+	memset(&stats[items], 0, pad);
+}
+
 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
 			     int bytes)
 {
 	switch (attrtype) {
 	case IFLA_INET6_STATS:
-		__snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
+		__snmp6_fill_stats64(stats, (void __percpu **)idev->stats.ipv6,
+				     IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
 		break;
 	case IFLA_INET6_ICMP6STATS:
 		__snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes);
@@ -4093,11 +4116,11 @@
 		if (ifp->idev->cnf.forwarding)
 			addrconf_leave_anycast(ifp);
 		addrconf_leave_solict(ifp->idev, &ifp->addr);
-		dst_hold(&ifp->rt->u.dst);
+		dst_hold(&ifp->rt->dst);
 
 		if (ifp->state == INET6_IFADDR_STATE_DEAD &&
 		    ip6_del_rt(ifp->rt))
-			dst_free(&ifp->rt->u.dst);
+			dst_free(&ifp->rt->dst);
 		break;
 	}
 }
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 8c4348c..f0e774c 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -53,11 +53,7 @@
 static inline
 struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
 {
-#ifdef CONFIG_NET_NS
-	return lbl->lbl_net;
-#else
-	return &init_net;
-#endif
+	return read_pnet(&lbl->lbl_net);
 }
 
 /*
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e733942d..e830cd4 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -651,7 +651,7 @@
 
 	if (dst == NULL) {
 		struct inet_sock *inet = inet_sk(sk);
-		struct in6_addr *final_p = NULL, final;
+		struct in6_addr *final_p, final;
 		struct flowi fl;
 
 		memset(&fl, 0, sizeof(fl));
@@ -665,12 +665,7 @@
 		fl.fl_ip_sport = inet->inet_sport;
 		security_sk_classify_flow(sk, &fl);
 
-		if (np->opt && np->opt->srcrt) {
-			struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
-			ipv6_addr_copy(&final, &fl.fl6_dst);
-			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-			final_p = &final;
-		}
+		final_p = fl6_update_dst(&fl, np->opt, &final);
 
 		err = ip6_dst_lookup(sk, &dst, &fl);
 		if (err) {
@@ -976,19 +971,24 @@
 static int __net_init ipv6_init_mibs(struct net *net)
 {
 	if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
-			  sizeof (struct udp_mib)) < 0)
+			  sizeof(struct udp_mib),
+			  __alignof__(struct udp_mib)) < 0)
 		return -ENOMEM;
 	if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6,
-			  sizeof (struct udp_mib)) < 0)
+			  sizeof(struct udp_mib),
+			  __alignof__(struct udp_mib)) < 0)
 		goto err_udplite_mib;
 	if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics,
-			  sizeof(struct ipstats_mib)) < 0)
+			  sizeof(struct ipstats_mib),
+			  __alignof__(struct ipstats_mib)) < 0)
 		goto err_ip_mib;
 	if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
-			  sizeof(struct icmpv6_mib)) < 0)
+			  sizeof(struct icmpv6_mib),
+			  __alignof__(struct icmpv6_mib)) < 0)
 		goto err_icmp_mib;
 	if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
-			  sizeof(struct icmpv6msg_mib)) < 0)
+			  sizeof(struct icmpv6msg_mib),
+			  __alignof__(struct icmpv6msg_mib)) < 0)
 		goto err_icmpmsg_mib;
 	return 0;
 
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index b5b0705..0e5e943 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -77,41 +77,40 @@
 	pac->acl_next = NULL;
 	ipv6_addr_copy(&pac->acl_addr, addr);
 
+	rcu_read_lock();
 	if (ifindex == 0) {
 		struct rt6_info *rt;
 
 		rt = rt6_lookup(net, addr, NULL, 0, 0);
 		if (rt) {
 			dev = rt->rt6i_dev;
-			dev_hold(dev);
-			dst_release(&rt->u.dst);
+			dst_release(&rt->dst);
 		} else if (ishost) {
 			err = -EADDRNOTAVAIL;
-			goto out_free_pac;
+			goto error;
 		} else {
 			/* router, no matching interface: just pick one */
-
-			dev = dev_get_by_flags(net, IFF_UP, IFF_UP|IFF_LOOPBACK);
+			dev = dev_get_by_flags_rcu(net, IFF_UP,
+						   IFF_UP | IFF_LOOPBACK);
 		}
 	} else
-		dev = dev_get_by_index(net, ifindex);
+		dev = dev_get_by_index_rcu(net, ifindex);
 
 	if (dev == NULL) {
 		err = -ENODEV;
-		goto out_free_pac;
+		goto error;
 	}
 
-	idev = in6_dev_get(dev);
+	idev = __in6_dev_get(dev);
 	if (!idev) {
 		if (ifindex)
 			err = -ENODEV;
 		else
 			err = -EADDRNOTAVAIL;
-		goto out_dev_put;
+		goto error;
 	}
 	/* reset ishost, now that we have a specific device */
 	ishost = !idev->cnf.forwarding;
-	in6_dev_put(idev);
 
 	pac->acl_ifindex = dev->ifindex;
 
@@ -124,26 +123,22 @@
 		if (ishost)
 			err = -EADDRNOTAVAIL;
 		if (err)
-			goto out_dev_put;
+			goto error;
 	}
 
 	err = ipv6_dev_ac_inc(dev, addr);
-	if (err)
-		goto out_dev_put;
+	if (!err) {
+		write_lock_bh(&ipv6_sk_ac_lock);
+		pac->acl_next = np->ipv6_ac_list;
+		np->ipv6_ac_list = pac;
+		write_unlock_bh(&ipv6_sk_ac_lock);
+		pac = NULL;
+	}
 
-	write_lock_bh(&ipv6_sk_ac_lock);
-	pac->acl_next = np->ipv6_ac_list;
-	np->ipv6_ac_list = pac;
-	write_unlock_bh(&ipv6_sk_ac_lock);
-
-	dev_put(dev);
-
-	return 0;
-
-out_dev_put:
-	dev_put(dev);
-out_free_pac:
-	sock_kfree_s(sk, pac, sizeof(*pac));
+error:
+	rcu_read_unlock();
+	if (pac)
+		sock_kfree_s(sk, pac, sizeof(*pac));
 	return err;
 }
 
@@ -176,11 +171,12 @@
 
 	write_unlock_bh(&ipv6_sk_ac_lock);
 
-	dev = dev_get_by_index(net, pac->acl_ifindex);
-	if (dev) {
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
+	if (dev)
 		ipv6_dev_ac_dec(dev, &pac->acl_addr);
-		dev_put(dev);
-	}
+	rcu_read_unlock();
+
 	sock_kfree_s(sk, pac, sizeof(*pac));
 	return 0;
 }
@@ -199,13 +195,12 @@
 	write_unlock_bh(&ipv6_sk_ac_lock);
 
 	prev_index = 0;
+	rcu_read_lock();
 	while (pac) {
 		struct ipv6_ac_socklist *next = pac->acl_next;
 
 		if (pac->acl_ifindex != prev_index) {
-			if (dev)
-				dev_put(dev);
-			dev = dev_get_by_index(net, pac->acl_ifindex);
+			dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
 			prev_index = pac->acl_ifindex;
 		}
 		if (dev)
@@ -213,8 +208,7 @@
 		sock_kfree_s(sk, pac, sizeof(*pac));
 		pac = next;
 	}
-	if (dev)
-		dev_put(dev);
+	rcu_read_unlock();
 }
 
 #if 0
@@ -250,7 +244,7 @@
 {
 	if (atomic_dec_and_test(&ac->aca_refcnt)) {
 		in6_dev_put(ac->aca_idev);
-		dst_release(&ac->aca_rt->u.dst);
+		dst_release(&ac->aca_rt->dst);
 		kfree(ac);
 	}
 }
@@ -356,40 +350,39 @@
 	write_unlock_bh(&idev->lock);
 	addrconf_leave_solict(idev, &aca->aca_addr);
 
-	dst_hold(&aca->aca_rt->u.dst);
+	dst_hold(&aca->aca_rt->dst);
 	ip6_del_rt(aca->aca_rt);
 
 	aca_put(aca);
 	return 0;
 }
 
+/* called with rcu_read_lock() */
 static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
 {
-	int ret;
-	struct inet6_dev *idev = in6_dev_get(dev);
+	struct inet6_dev *idev = __in6_dev_get(dev);
+
 	if (idev == NULL)
 		return -ENODEV;
-	ret = __ipv6_dev_ac_dec(idev, addr);
-	in6_dev_put(idev);
-	return ret;
+	return __ipv6_dev_ac_dec(idev, addr);
 }
 
 /*
  *	check if the interface has this anycast address
+ *	called with rcu_read_lock()
  */
 static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr)
 {
 	struct inet6_dev *idev;
 	struct ifacaddr6 *aca;
 
-	idev = in6_dev_get(dev);
+	idev = __in6_dev_get(dev);
 	if (idev) {
 		read_lock_bh(&idev->lock);
 		for (aca = idev->ac_list; aca; aca = aca->aca_next)
 			if (ipv6_addr_equal(&aca->aca_addr, addr))
 				break;
 		read_unlock_bh(&idev->lock);
-		in6_dev_put(idev);
 		return aca != NULL;
 	}
 	return 0;
@@ -403,14 +396,15 @@
 {
 	int found = 0;
 
-	if (dev)
-		return ipv6_chk_acast_dev(dev, addr);
 	rcu_read_lock();
-	for_each_netdev_rcu(net, dev)
-		if (ipv6_chk_acast_dev(dev, addr)) {
-			found = 1;
-			break;
-		}
+	if (dev)
+		found = ipv6_chk_acast_dev(dev, addr);
+	else
+		for_each_netdev_rcu(net, dev)
+			if (ipv6_chk_acast_dev(dev, addr)) {
+				found = 1;
+				break;
+			}
 	rcu_read_unlock();
 	return found;
 }
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 7126846..7d929a2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -38,10 +38,11 @@
 	struct sockaddr_in6	*usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock      	*inet = inet_sk(sk);
 	struct ipv6_pinfo      	*np = inet6_sk(sk);
-	struct in6_addr		*daddr, *final_p = NULL, final;
+	struct in6_addr		*daddr, *final_p, final;
 	struct dst_entry	*dst;
 	struct flowi		fl;
 	struct ip6_flowlabel	*flowlabel = NULL;
+	struct ipv6_txoptions   *opt;
 	int			addr_type;
 	int			err;
 
@@ -155,19 +156,8 @@
 
 	security_sk_classify_flow(sk, &fl);
 
-	if (flowlabel) {
-		if (flowlabel->opt && flowlabel->opt->srcrt) {
-			struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
-			ipv6_addr_copy(&final, &fl.fl6_dst);
-			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-			final_p = &final;
-		}
-	} else if (np->opt && np->opt->srcrt) {
-		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
-		ipv6_addr_copy(&final, &fl.fl6_dst);
-		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-		final_p = &final;
-	}
+	opt = flowlabel ? flowlabel->opt : np->opt;
+	final_p = fl6_update_dst(&fl, opt, &final);
 
 	err = ip6_dst_lookup(sk, &dst, &fl);
 	if (err)
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 8a659f9..262f105 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -312,6 +312,7 @@
   Routing header.
  ********************************/
 
+/* called with rcu_read_lock() */
 static int ipv6_rthdr_rcv(struct sk_buff *skb)
 {
 	struct inet6_skb_parm *opt = IP6CB(skb);
@@ -324,12 +325,9 @@
 	struct net *net = dev_net(skb->dev);
 	int accept_source_route = net->ipv6.devconf_all->accept_source_route;
 
-	idev = in6_dev_get(skb->dev);
-	if (idev) {
-		if (accept_source_route > idev->cnf.accept_source_route)
-			accept_source_route = idev->cnf.accept_source_route;
-		in6_dev_put(idev);
-	}
+	idev = __in6_dev_get(skb->dev);
+	if (idev && accept_source_route > idev->cnf.accept_source_route)
+		accept_source_route = idev->cnf.accept_source_route;
 
 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
 	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
@@ -874,3 +872,27 @@
 	return opt;
 }
 
+/**
+ * fl6_update_dst - update flowi destination address with info given
+ *                  by srcrt option, if any.
+ *
+ * @fl: flowi for which fl6_dst is to be updated
+ * @opt: struct ipv6_txoptions in which to look for srcrt opt
+ * @orig: copy of original fl6_dst address if modified
+ *
+ * Returns NULL if no txoptions or no srcrt; otherwise returns orig with the
+ * original value of fl->fl6_dst saved in *orig.
+ */
+struct in6_addr *fl6_update_dst(struct flowi *fl,
+				const struct ipv6_txoptions *opt,
+				struct in6_addr *orig)
+{
+	if (!opt || !opt->srcrt)
+		return NULL;
+
+	ipv6_addr_copy(orig, &fl->fl6_dst);
+	ipv6_addr_copy(&fl->fl6_dst, ((struct rt0_hdr *)opt->srcrt)->addr);
+	return orig;
+}
+
+EXPORT_SYMBOL_GPL(fl6_update_dst);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 8e44f8f..b1108ed 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -43,8 +43,8 @@
 	if (arg.result)
 		return arg.result;
 
-	dst_hold(&net->ipv6.ip6_null_entry->u.dst);
-	return &net->ipv6.ip6_null_entry->u.dst;
+	dst_hold(&net->ipv6.ip6_null_entry->dst);
+	return &net->ipv6.ip6_null_entry->dst;
 }
 
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -86,7 +86,7 @@
 			struct in6_addr saddr;
 
 			if (ipv6_dev_get_saddr(net,
-					       ip6_dst_idev(&rt->u.dst)->dev,
+					       ip6_dst_idev(&rt->dst)->dev,
 					       &flp->fl6_dst,
 					       rt6_flags2srcprefs(flags),
 					       &saddr))
@@ -99,12 +99,12 @@
 		goto out;
 	}
 again:
-	dst_release(&rt->u.dst);
+	dst_release(&rt->dst);
 	rt = NULL;
 	goto out;
 
 discard_pkt:
-	dst_hold(&rt->u.dst);
+	dst_hold(&rt->dst);
 out:
 	arg->result = rt;
 	return rt == NULL ? -EAGAIN : 0;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 0c5e3c3..8a16280 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -185,7 +185,7 @@
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct flowi fl;
 	struct dst_entry *dst;
-	struct in6_addr *final_p = NULL, final;
+	struct in6_addr *final_p, final;
 
 	memset(&fl, 0, sizeof(fl));
 	fl.proto = sk->sk_protocol;
@@ -199,12 +199,7 @@
 	fl.fl_ip_dport = inet->inet_dport;
 	security_sk_classify_flow(sk, &fl);
 
-	if (np->opt && np->opt->srcrt) {
-		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
-		ipv6_addr_copy(&final, &fl.fl6_dst);
-		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-		final_p = &final;
-	}
+	final_p = fl6_update_dst(&fl, np->opt, &final);
 
 	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 92a122b..b6a5859 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -165,7 +165,7 @@
 static __inline__ void rt6_release(struct rt6_info *rt)
 {
 	if (atomic_dec_and_test(&rt->rt6i_ref))
-		dst_free(&rt->u.dst);
+		dst_free(&rt->dst);
 }
 
 static void fib6_link_table(struct net *net, struct fib6_table *tb)
@@ -278,7 +278,7 @@
 	int res;
 	struct rt6_info *rt;
 
-	for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
+	for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
 		res = rt6_dump_route(rt, w->args);
 		if (res < 0) {
 			/* Frame is full, suspend walking */
@@ -619,7 +619,7 @@
 
 	ins = &fn->leaf;
 
-	for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) {
+	for (iter = fn->leaf; iter; iter=iter->dst.rt6_next) {
 		/*
 		 *	Search for duplicates
 		 */
@@ -647,7 +647,7 @@
 		if (iter->rt6i_metric > rt->rt6i_metric)
 			break;
 
-		ins = &iter->u.dst.rt6_next;
+		ins = &iter->dst.rt6_next;
 	}
 
 	/* Reset round-robin state, if necessary */
@@ -658,7 +658,7 @@
 	 *	insert node
 	 */
 
-	rt->u.dst.rt6_next = iter;
+	rt->dst.rt6_next = iter;
 	*ins = rt;
 	rt->rt6i_node = fn;
 	atomic_inc(&rt->rt6i_ref);
@@ -799,7 +799,7 @@
 			atomic_inc(&pn->leaf->rt6i_ref);
 		}
 #endif
-		dst_free(&rt->u.dst);
+		dst_free(&rt->dst);
 	}
 	return err;
 
@@ -810,7 +810,7 @@
 st_failure:
 	if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
 		fib6_repair_tree(info->nl_net, fn);
-	dst_free(&rt->u.dst);
+	dst_free(&rt->dst);
 	return err;
 #endif
 }
@@ -1108,7 +1108,7 @@
 	RT6_TRACE("fib6_del_route\n");
 
 	/* Unlink it */
-	*rtp = rt->u.dst.rt6_next;
+	*rtp = rt->dst.rt6_next;
 	rt->rt6i_node = NULL;
 	net->ipv6.rt6_stats->fib_rt_entries--;
 	net->ipv6.rt6_stats->fib_discarded_routes++;
@@ -1122,14 +1122,14 @@
 	FOR_WALKERS(w) {
 		if (w->state == FWS_C && w->leaf == rt) {
 			RT6_TRACE("walker %p adjusted by delroute\n", w);
-			w->leaf = rt->u.dst.rt6_next;
+			w->leaf = rt->dst.rt6_next;
 			if (w->leaf == NULL)
 				w->state = FWS_U;
 		}
 	}
 	read_unlock(&fib6_walker_lock);
 
-	rt->u.dst.rt6_next = NULL;
+	rt->dst.rt6_next = NULL;
 
 	/* If it was last route, expunge its radix tree node */
 	if (fn->leaf == NULL) {
@@ -1168,7 +1168,7 @@
 	struct rt6_info **rtp;
 
 #if RT6_DEBUG >= 2
-	if (rt->u.dst.obsolete>0) {
+	if (rt->dst.obsolete>0) {
 		WARN_ON(fn != NULL);
 		return -ENOENT;
 	}
@@ -1195,7 +1195,7 @@
 	 *	Walk the leaf entries looking for ourself
 	 */
 
-	for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) {
+	for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->dst.rt6_next) {
 		if (*rtp == rt) {
 			fib6_del_route(fn, rtp, info);
 			return 0;
@@ -1334,7 +1334,7 @@
 		.nl_net = c->net,
 	};
 
-	for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
+	for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
 		res = c->func(rt, c->arg);
 		if (res < 0) {
 			w->leaf = rt;
@@ -1448,8 +1448,8 @@
 		}
 		gc_args.more++;
 	} else if (rt->rt6i_flags & RTF_CACHE) {
-		if (atomic_read(&rt->u.dst.__refcnt) == 0 &&
-		    time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) {
+		if (atomic_read(&rt->dst.__refcnt) == 0 &&
+		    time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) {
 			RT6_TRACE("aging clone %p\n", rt);
 			return -1;
 		} else if ((rt->rt6i_flags & RTF_GATEWAY) &&
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 89425af..d40b330 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -698,7 +698,7 @@
 		ipv6_hdr(skb)->payload_len = htons(first_len -
 						   sizeof(struct ipv6hdr));
 
-		dst_hold(&rt->u.dst);
+		dst_hold(&rt->dst);
 
 		for (;;) {
 			/* Prepare header of the next frame,
@@ -726,7 +726,7 @@
 
 			err = output(skb);
 			if(!err)
-				IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
+				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 					      IPSTATS_MIB_FRAGCREATES);
 
 			if (err || !frag)
@@ -740,9 +740,9 @@
 		kfree(tmp_hdr);
 
 		if (err == 0) {
-			IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
+			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 				      IPSTATS_MIB_FRAGOKS);
-			dst_release(&rt->u.dst);
+			dst_release(&rt->dst);
 			return 0;
 		}
 
@@ -752,9 +752,9 @@
 			frag = skb;
 		}
 
-		IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
+		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 			      IPSTATS_MIB_FRAGFAILS);
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		return err;
 	}
 
@@ -785,7 +785,7 @@
 		 *	Allocate buffer.
 		 */
 
-		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
+		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
 			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
 			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 				      IPSTATS_MIB_FRAGFAILS);
@@ -798,7 +798,7 @@
 		 */
 
 		ip6_copy_metadata(frag, skb);
-		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
+		skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
 		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
 		skb_reset_network_header(frag);
 		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
@@ -1156,24 +1156,24 @@
 
 			/* need source address above miyazawa*/
 		}
-		dst_hold(&rt->u.dst);
-		inet->cork.dst = &rt->u.dst;
+		dst_hold(&rt->dst);
+		inet->cork.dst = &rt->dst;
 		inet->cork.fl = *fl;
 		np->cork.hop_limit = hlimit;
 		np->cork.tclass = tclass;
 		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
-		      rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path);
+		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
 		if (np->frag_size < mtu) {
 			if (np->frag_size)
 				mtu = np->frag_size;
 		}
 		inet->cork.fragsize = mtu;
-		if (dst_allfrag(rt->u.dst.path))
+		if (dst_allfrag(rt->dst.path))
 			inet->cork.flags |= IPCORK_ALLFRAG;
 		inet->cork.length = 0;
 		sk->sk_sndmsg_page = NULL;
 		sk->sk_sndmsg_off = 0;
-		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0) -
+		exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
 			    rt->rt6i_nfheader_len;
 		length += exthdrlen;
 		transhdrlen += exthdrlen;
@@ -1186,7 +1186,7 @@
 		mtu = inet->cork.fragsize;
 	}
 
-	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
+	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 
 	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
 			(opt ? opt->opt_nflen : 0);
@@ -1224,7 +1224,7 @@
 		}
 
 		if (proto == IPPROTO_UDP &&
-		    (rt->u.dst.dev->features & NETIF_F_UFO)) {
+		    (rt->dst.dev->features & NETIF_F_UFO)) {
 
 			err = ip6_ufo_append_data(sk, getfrag, from, length,
 						  hh_len, fragheaderlen,
@@ -1270,7 +1270,7 @@
 
 			fraglen = datalen + fragheaderlen;
 			if ((flags & MSG_MORE) &&
-			    !(rt->u.dst.dev->features&NETIF_F_SG))
+			    !(rt->dst.dev->features&NETIF_F_SG))
 				alloclen = mtu;
 			else
 				alloclen = datalen + fragheaderlen;
@@ -1281,7 +1281,7 @@
 			 * because we have no idea if we're the last one.
 			 */
 			if (datalen == length + fraggap)
-				alloclen += rt->u.dst.trailer_len;
+				alloclen += rt->dst.trailer_len;
 
 			/*
 			 * We just reserve space for fragment header.
@@ -1358,7 +1358,7 @@
 		if (copy > length)
 			copy = length;
 
-		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
+		if (!(rt->dst.dev->features&NETIF_F_SG)) {
 			unsigned int off;
 
 			off = skb->len;
@@ -1503,7 +1503,7 @@
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
 
-	skb_dst_set(skb, dst_clone(&rt->u.dst));
+	skb_dst_set(skb, dst_clone(&rt->dst));
 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
 	if (proto == IPPROTO_ICMPV6) {
 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 8f39893..0fd027f 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -552,7 +552,7 @@
 	if (ip_route_output_key(dev_net(skb->dev), &rt, &fl))
 		goto out;
 
-	skb2->dev = rt->u.dst.dev;
+	skb2->dev = rt->dst.dev;
 
 	/* route "incoming" packet */
 	if (rt->rt_flags & RTCF_LOCAL) {
@@ -562,7 +562,7 @@
 		fl.fl4_src = eiph->saddr;
 		fl.fl4_tos = eiph->tos;
 		if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
-		    rt->u.dst.dev->type != ARPHRD_TUNNEL) {
+		    rt->dst.dev->type != ARPHRD_TUNNEL) {
 			ip_rt_put(rt);
 			goto out;
 		}
@@ -626,7 +626,7 @@
 		icmpv6_send(skb2, rel_type, rel_code, rel_info);
 
 		if (rt)
-			dst_release(&rt->u.dst);
+			dst_release(&rt->dst);
 
 		kfree_skb(skb2);
 	}
@@ -1135,7 +1135,7 @@
 			if (dev->mtu < IPV6_MIN_MTU)
 				dev->mtu = IPV6_MIN_MTU;
 		}
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 	}
 }
 
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index bd43f01..a7f66bc 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -55,8 +55,6 @@
 
 #include <asm/uaccess.h>
 
-DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
-
 struct ip6_ra_chain *ip6_ra_chain;
 DEFINE_RWLOCK(ip6_ra_lock);
 
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index ab1622d..d1444b9 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -152,18 +152,19 @@
 	mc_lst->next = NULL;
 	ipv6_addr_copy(&mc_lst->addr, addr);
 
+	rcu_read_lock();
 	if (ifindex == 0) {
 		struct rt6_info *rt;
 		rt = rt6_lookup(net, addr, NULL, 0, 0);
 		if (rt) {
 			dev = rt->rt6i_dev;
-			dev_hold(dev);
-			dst_release(&rt->u.dst);
+			dst_release(&rt->dst);
 		}
 	} else
-		dev = dev_get_by_index(net, ifindex);
+		dev = dev_get_by_index_rcu(net, ifindex);
 
 	if (dev == NULL) {
+		rcu_read_unlock();
 		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 		return -ENODEV;
 	}
@@ -180,8 +181,8 @@
 	err = ipv6_dev_mc_inc(dev, addr);
 
 	if (err) {
+		rcu_read_unlock();
 		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
-		dev_put(dev);
 		return err;
 	}
 
@@ -190,7 +191,7 @@
 	np->ipv6_mc_list = mc_lst;
 	write_unlock_bh(&ipv6_sk_mc_lock);
 
-	dev_put(dev);
+	rcu_read_unlock();
 
 	return 0;
 }
@@ -213,18 +214,17 @@
 			*lnk = mc_lst->next;
 			write_unlock_bh(&ipv6_sk_mc_lock);
 
-			dev = dev_get_by_index(net, mc_lst->ifindex);
+			rcu_read_lock();
+			dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
 			if (dev != NULL) {
-				struct inet6_dev *idev = in6_dev_get(dev);
+				struct inet6_dev *idev = __in6_dev_get(dev);
 
 				(void) ip6_mc_leave_src(sk, mc_lst, idev);
-				if (idev) {
+				if (idev)
 					__ipv6_dev_mc_dec(idev, &mc_lst->addr);
-					in6_dev_put(idev);
-				}
-				dev_put(dev);
 			} else
 				(void) ip6_mc_leave_src(sk, mc_lst, NULL);
+			rcu_read_unlock();
 			sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 			return 0;
 		}
@@ -234,43 +234,36 @@
 	return -EADDRNOTAVAIL;
 }
 
-static struct inet6_dev *ip6_mc_find_dev(struct net *net,
-					 struct in6_addr *group,
-					 int ifindex)
+/* called with rcu_read_lock() */
+static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
+					     struct in6_addr *group,
+					     int ifindex)
 {
 	struct net_device *dev = NULL;
 	struct inet6_dev *idev = NULL;
 
 	if (ifindex == 0) {
-		struct rt6_info *rt;
+		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);
 
-		rt = rt6_lookup(net, group, NULL, 0, 0);
 		if (rt) {
 			dev = rt->rt6i_dev;
 			dev_hold(dev);
-			dst_release(&rt->u.dst);
+			dst_release(&rt->dst);
 		}
 	} else
-		dev = dev_get_by_index(net, ifindex);
+		dev = dev_get_by_index_rcu(net, ifindex);
 
 	if (!dev)
-		goto nodev;
-	idev = in6_dev_get(dev);
+		return NULL;
+	idev = __in6_dev_get(dev);
 	if (!idev)
-		goto release;
+		return NULL;
 	read_lock_bh(&idev->lock);
-	if (idev->dead)
-		goto unlock_release;
-
+	if (idev->dead) {
+		read_unlock_bh(&idev->lock);
+		return NULL;
+	}
 	return idev;
-
-unlock_release:
-	read_unlock_bh(&idev->lock);
-	in6_dev_put(idev);
-release:
-	dev_put(dev);
-nodev:
-	return NULL;
 }
 
 void ipv6_sock_mc_close(struct sock *sk)
@@ -286,19 +279,17 @@
 		np->ipv6_mc_list = mc_lst->next;
 		write_unlock_bh(&ipv6_sk_mc_lock);
 
-		dev = dev_get_by_index(net, mc_lst->ifindex);
+		rcu_read_lock();
+		dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
 		if (dev) {
-			struct inet6_dev *idev = in6_dev_get(dev);
+			struct inet6_dev *idev = __in6_dev_get(dev);
 
 			(void) ip6_mc_leave_src(sk, mc_lst, idev);
-			if (idev) {
+			if (idev)
 				__ipv6_dev_mc_dec(idev, &mc_lst->addr);
-				in6_dev_put(idev);
-			}
-			dev_put(dev);
 		} else
 			(void) ip6_mc_leave_src(sk, mc_lst, NULL);
-
+		rcu_read_unlock();
 		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 
 		write_lock_bh(&ipv6_sk_mc_lock);
@@ -327,14 +318,17 @@
 	if (!ipv6_addr_is_multicast(group))
 		return -EINVAL;
 
-	idev = ip6_mc_find_dev(net, group, pgsr->gsr_interface);
-	if (!idev)
+	rcu_read_lock();
+	idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
+	if (!idev) {
+		rcu_read_unlock();
 		return -ENODEV;
+	}
 	dev = idev->dev;
 
 	err = -EADDRNOTAVAIL;
 
-	read_lock_bh(&ipv6_sk_mc_lock);
+	read_lock(&ipv6_sk_mc_lock);
 	for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
 		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
 			continue;
@@ -358,7 +352,7 @@
 		pmc->sfmode = omode;
 	}
 
-	write_lock_bh(&pmc->sflock);
+	write_lock(&pmc->sflock);
 	pmclocked = 1;
 
 	psl = pmc->sflist;
@@ -433,11 +427,10 @@
 	ip6_mc_add_src(idev, group, omode, 1, source, 1);
 done:
 	if (pmclocked)
-		write_unlock_bh(&pmc->sflock);
-	read_unlock_bh(&ipv6_sk_mc_lock);
+		write_unlock(&pmc->sflock);
+	read_unlock(&ipv6_sk_mc_lock);
 	read_unlock_bh(&idev->lock);
-	in6_dev_put(idev);
-	dev_put(dev);
+	rcu_read_unlock();
 	if (leavegroup)
 		return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
 	return err;
@@ -463,14 +456,17 @@
 	    gsf->gf_fmode != MCAST_EXCLUDE)
 		return -EINVAL;
 
-	idev = ip6_mc_find_dev(net, group, gsf->gf_interface);
+	rcu_read_lock();
+	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
 
-	if (!idev)
+	if (!idev) {
+		rcu_read_unlock();
 		return -ENODEV;
+	}
 	dev = idev->dev;
 
 	err = 0;
-	read_lock_bh(&ipv6_sk_mc_lock);
+	read_lock(&ipv6_sk_mc_lock);
 
 	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
 		leavegroup = 1;
@@ -512,7 +508,7 @@
 		(void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
 	}
 
-	write_lock_bh(&pmc->sflock);
+	write_lock(&pmc->sflock);
 	psl = pmc->sflist;
 	if (psl) {
 		(void) ip6_mc_del_src(idev, group, pmc->sfmode,
@@ -522,13 +518,12 @@
 		(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
 	pmc->sflist = newpsl;
 	pmc->sfmode = gsf->gf_fmode;
-	write_unlock_bh(&pmc->sflock);
+	write_unlock(&pmc->sflock);
 	err = 0;
 done:
-	read_unlock_bh(&ipv6_sk_mc_lock);
+	read_unlock(&ipv6_sk_mc_lock);
 	read_unlock_bh(&idev->lock);
-	in6_dev_put(idev);
-	dev_put(dev);
+	rcu_read_unlock();
 	if (leavegroup)
 		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
 	return err;
@@ -551,11 +546,13 @@
 	if (!ipv6_addr_is_multicast(group))
 		return -EINVAL;
 
-	idev = ip6_mc_find_dev(net, group, gsf->gf_interface);
+	rcu_read_lock();
+	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
 
-	if (!idev)
+	if (!idev) {
+		rcu_read_unlock();
 		return -ENODEV;
-
+	}
 	dev = idev->dev;
 
 	err = -EADDRNOTAVAIL;
@@ -577,8 +574,7 @@
 	psl = pmc->sflist;
 	count = psl ? psl->sl_count : 0;
 	read_unlock_bh(&idev->lock);
-	in6_dev_put(idev);
-	dev_put(dev);
+	rcu_read_unlock();
 
 	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
 	gsf->gf_numsrc = count;
@@ -604,8 +600,7 @@
 	return 0;
 done:
 	read_unlock_bh(&idev->lock);
-	in6_dev_put(idev);
-	dev_put(dev);
+	rcu_read_unlock();
 	return err;
 }
 
@@ -822,6 +817,7 @@
 	struct ifmcaddr6 *mc;
 	struct inet6_dev *idev;
 
+	/* we need to take a reference on idev */
 	idev = in6_dev_get(dev);
 
 	if (idev == NULL)
@@ -860,7 +856,7 @@
 	setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
 
 	ipv6_addr_copy(&mc->mca_addr, addr);
-	mc->idev = idev;
+	mc->idev = idev; /* (reference taken) */
 	mc->mca_users = 1;
 	/* mca_stamp should be updated upon changes */
 	mc->mca_cstamp = mc->mca_tstamp = jiffies;
@@ -915,16 +911,18 @@
 
 int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
 {
-	struct inet6_dev *idev = in6_dev_get(dev);
+	struct inet6_dev *idev;
 	int err;
 
+	rcu_read_lock();
+
+	idev = __in6_dev_get(dev);
 	if (!idev)
-		return -ENODEV;
+		err = -ENODEV;
+	else
+		err = __ipv6_dev_mc_dec(idev, addr);
 
-	err = __ipv6_dev_mc_dec(idev, addr);
-
-	in6_dev_put(idev);
-
+	rcu_read_unlock();
 	return err;
 }
 
@@ -965,7 +963,8 @@
 	struct ifmcaddr6 *mc;
 	int rv = 0;
 
-	idev = in6_dev_get(dev);
+	rcu_read_lock();
+	idev = __in6_dev_get(dev);
 	if (idev) {
 		read_lock_bh(&idev->lock);
 		for (mc = idev->mc_list; mc; mc=mc->next) {
@@ -992,8 +991,8 @@
 				rv = 1; /* don't filter unspecified source */
 		}
 		read_unlock_bh(&idev->lock);
-		in6_dev_put(idev);
 	}
+	rcu_read_unlock();
 	return rv;
 }
 
@@ -1104,6 +1103,7 @@
 	return 1;
 }
 
+/* called with rcu_read_lock() */
 int igmp6_event_query(struct sk_buff *skb)
 {
 	struct mld2_query *mlh2 = NULL;
@@ -1127,7 +1127,7 @@
 	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
 		return -EINVAL;
 
-	idev = in6_dev_get(skb->dev);
+	idev = __in6_dev_get(skb->dev);
 
 	if (idev == NULL)
 		return 0;
@@ -1137,10 +1137,8 @@
 	group_type = ipv6_addr_type(group);
 
 	if (group_type != IPV6_ADDR_ANY &&
-	    !(group_type&IPV6_ADDR_MULTICAST)) {
-		in6_dev_put(idev);
+	    !(group_type&IPV6_ADDR_MULTICAST))
 		return -EINVAL;
-	}
 
 	if (len == 24) {
 		int switchback;
@@ -1161,10 +1159,9 @@
 	} else if (len >= 28) {
 		int srcs_offset = sizeof(struct mld2_query) -
 				  sizeof(struct icmp6hdr);
-		if (!pskb_may_pull(skb, srcs_offset)) {
-			in6_dev_put(idev);
+		if (!pskb_may_pull(skb, srcs_offset))
 			return -EINVAL;
-		}
+
 		mlh2 = (struct mld2_query *)skb_transport_header(skb);
 		max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
 		if (!max_delay)
@@ -1173,28 +1170,23 @@
 		if (mlh2->mld2q_qrv)
 			idev->mc_qrv = mlh2->mld2q_qrv;
 		if (group_type == IPV6_ADDR_ANY) { /* general query */
-			if (mlh2->mld2q_nsrcs) {
-				in6_dev_put(idev);
+			if (mlh2->mld2q_nsrcs)
 				return -EINVAL; /* no sources allowed */
-			}
+
 			mld_gq_start_timer(idev);
-			in6_dev_put(idev);
 			return 0;
 		}
 		/* mark sources to include, if group & source-specific */
 		if (mlh2->mld2q_nsrcs != 0) {
 			if (!pskb_may_pull(skb, srcs_offset +
-			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) {
-				in6_dev_put(idev);
+			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
 				return -EINVAL;
-			}
+
 			mlh2 = (struct mld2_query *)skb_transport_header(skb);
 			mark = 1;
 		}
-	} else {
-		in6_dev_put(idev);
+	} else
 		return -EINVAL;
-	}
 
 	read_lock_bh(&idev->lock);
 	if (group_type == IPV6_ADDR_ANY) {
@@ -1227,12 +1219,11 @@
 		}
 	}
 	read_unlock_bh(&idev->lock);
-	in6_dev_put(idev);
 
 	return 0;
 }
 
-
+/* called with rcu_read_lock() */
 int igmp6_event_report(struct sk_buff *skb)
 {
 	struct ifmcaddr6 *ma;
@@ -1260,7 +1251,7 @@
 	    !(addr_type&IPV6_ADDR_LINKLOCAL))
 		return -EINVAL;
 
-	idev = in6_dev_get(skb->dev);
+	idev = __in6_dev_get(skb->dev);
 	if (idev == NULL)
 		return -ENODEV;
 
@@ -1280,7 +1271,6 @@
 		}
 	}
 	read_unlock_bh(&idev->lock);
-	in6_dev_put(idev);
 	return 0;
 }
 
@@ -1396,12 +1386,14 @@
 	struct mld2_report *pmr =
 			      (struct mld2_report *)skb_transport_header(skb);
 	int payload_len, mldlen;
-	struct inet6_dev *idev = in6_dev_get(skb->dev);
+	struct inet6_dev *idev;
 	struct net *net = dev_net(skb->dev);
 	int err;
 	struct flowi fl;
 	struct dst_entry *dst;
 
+	rcu_read_lock();
+	idev = __in6_dev_get(skb->dev);
 	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
 
 	payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
@@ -1441,8 +1433,7 @@
 	} else
 		IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
 
-	if (likely(idev != NULL))
-		in6_dev_put(idev);
+	rcu_read_unlock();
 	return;
 
 err_out:
@@ -1779,7 +1770,8 @@
 					 IPPROTO_ICMPV6,
 					 csum_partial(hdr, len, 0));
 
-	idev = in6_dev_get(skb->dev);
+	rcu_read_lock();
+	idev = __in6_dev_get(skb->dev);
 
 	dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
 	if (!dst) {
@@ -1806,8 +1798,7 @@
 	} else
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 
-	if (likely(idev != NULL))
-		in6_dev_put(idev);
+	rcu_read_unlock();
 	return;
 
 err_out:
@@ -1998,8 +1989,7 @@
 				    &psf->sf_addr))
 					break;
 			if (!dpsf) {
-				dpsf = (struct ip6_sf_list *)
-					kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
 				if (!dpsf)
 					continue;
 				*dpsf = *psf;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2efef52..58841c4 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1229,7 +1229,7 @@
 			ND_PRINTK0(KERN_ERR
 				   "ICMPv6 RA: %s() got default router without neighbour.\n",
 				   __func__);
-			dst_release(&rt->u.dst);
+			dst_release(&rt->dst);
 			in6_dev_put(in6_dev);
 			return;
 		}
@@ -1244,7 +1244,7 @@
 	if (ra_msg->icmph.icmp6_hop_limit) {
 		in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
 		if (rt)
-			rt->u.dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit;
+			rt->dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit;
 	}
 
 skip_defrtr:
@@ -1363,7 +1363,7 @@
 			in6_dev->cnf.mtu6 = mtu;
 
 			if (rt)
-				rt->u.dst.metrics[RTAX_MTU-1] = mtu;
+				rt->dst.metrics[RTAX_MTU-1] = mtu;
 
 			rt6_mtu_change(skb->dev, mtu);
 		}
@@ -1384,7 +1384,7 @@
 	}
 out:
 	if (rt)
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 	else if (neigh)
 		neigh_release(neigh);
 	in6_dev_put(in6_dev);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index a74951c..7155b24 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -151,9 +151,7 @@
 							 protocol,
 							 csum_sub(0, hsum)));
 		skb->ip_summed = CHECKSUM_NONE;
-		csum = __skb_checksum_complete_head(skb, dataoff + len);
-		if (!csum)
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		return __skb_checksum_complete_head(skb, dataoff + len);
 	}
 	return csum;
 };
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 8c20174..413ab07 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -43,7 +43,7 @@
 
 static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
 static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
 static int peer_pid __read_mostly;
 static unsigned int copy_range __read_mostly;
 static unsigned int queue_total;
@@ -73,10 +73,10 @@
 		break;
 
 	case IPQ_COPY_PACKET:
-		copy_mode = mode;
+		if (range > 0xFFFF)
+			range = 0xFFFF;
 		copy_range = range;
-		if (copy_range > 0xFFFF)
-			copy_range = 0xFFFF;
+		copy_mode = mode;
 		break;
 
 	default:
@@ -102,7 +102,7 @@
 {
 	struct nf_queue_entry *entry = NULL, *i;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	list_for_each_entry(i, &queue_list, list) {
 		if ((unsigned long)i == id) {
@@ -116,7 +116,7 @@
 		queue_total--;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return entry;
 }
 
@@ -137,9 +137,9 @@
 static void
 ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 {
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	__ipq_flush(cmpfn, data);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 }
 
 static struct sk_buff *
@@ -153,9 +153,7 @@
 	struct nlmsghdr *nlh;
 	struct timeval tv;
 
-	read_lock_bh(&queue_lock);
-
-	switch (copy_mode) {
+	switch (ACCESS_ONCE(copy_mode)) {
 	case IPQ_COPY_META:
 	case IPQ_COPY_NONE:
 		size = NLMSG_SPACE(sizeof(*pmsg));
@@ -163,26 +161,21 @@
 
 	case IPQ_COPY_PACKET:
 		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
-		    (*errp = skb_checksum_help(entry->skb))) {
-			read_unlock_bh(&queue_lock);
+		    (*errp = skb_checksum_help(entry->skb)))
 			return NULL;
-		}
-		if (copy_range == 0 || copy_range > entry->skb->len)
+
+		data_len = ACCESS_ONCE(copy_range);
+		if (data_len == 0 || data_len > entry->skb->len)
 			data_len = entry->skb->len;
-		else
-			data_len = copy_range;
 
 		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
 		break;
 
 	default:
 		*errp = -EINVAL;
-		read_unlock_bh(&queue_lock);
 		return NULL;
 	}
 
-	read_unlock_bh(&queue_lock);
-
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb)
 		goto nlmsg_failure;
@@ -242,7 +235,7 @@
 	if (nskb == NULL)
 		return status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (!peer_pid)
 		goto err_out_free_nskb;
@@ -266,14 +259,14 @@
 
 	__ipq_enqueue_entry(entry);
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 
 err_out_free_nskb:
 	kfree_skb(nskb);
 
 err_out_unlock:
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -342,9 +335,9 @@
 {
 	int status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	status = __ipq_set_mode(mode, range);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -441,11 +434,11 @@
 	if (security_netlink_recv(skb, CAP_NET_ADMIN))
 		RCV_SKB_FAIL(-EPERM);
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (peer_pid) {
 		if (peer_pid != pid) {
-			write_unlock_bh(&queue_lock);
+			spin_unlock_bh(&queue_lock);
 			RCV_SKB_FAIL(-EBUSY);
 		}
 	} else {
@@ -453,7 +446,7 @@
 		peer_pid = pid;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 
 	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
 				  nlmsglen - NLMSG_LENGTH(0));
@@ -498,10 +491,10 @@
 	struct netlink_notify *n = ptr;
 
 	if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
-		write_lock_bh(&queue_lock);
+		spin_lock_bh(&queue_lock);
 		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
 			__ipq_reset();
-		write_unlock_bh(&queue_lock);
+		spin_unlock_bh(&queue_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -528,7 +521,7 @@
 #ifdef CONFIG_PROC_FS
 static int ip6_queue_show(struct seq_file *m, void *v)
 {
-	read_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	seq_printf(m,
 		      "Peer PID          : %d\n"
@@ -546,7 +539,7 @@
 		      queue_dropped,
 		      queue_user_dropped);
 
-	read_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return 0;
 }
 
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 9d2d68f..dc41d6d 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -943,7 +943,7 @@
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc_node(countersize, numa_node_id());
+	counters = vmalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1213,8 +1213,7 @@
 	struct ip6t_entry *iter;
 
 	ret = 0;
-	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
-				numa_node_id());
+	counters = vmalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
@@ -1368,7 +1367,7 @@
 	if (len != size + num_counters * sizeof(struct xt_counters))
 		return -EINVAL;
 
-	paddc = vmalloc_node(len - size, numa_node_id());
+	paddc = vmalloc(len - size);
 	if (!paddc)
 		return -ENOMEM;
 
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index af4ee11..0a07ae7 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -373,6 +373,56 @@
 		printk("MARK=0x%x ", skb->mark);
 }
 
+static void dump_mac_header(const struct nf_loginfo *info,
+			    const struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+	unsigned int logflags = 0;
+
+	if (info->type == NF_LOG_TYPE_LOG)
+		logflags = info->u.log.logflags;
+
+	if (!(logflags & IP6T_LOG_MACDECODE))
+		goto fallback;
+
+	switch (dev->type) {
+	case ARPHRD_ETHER:
+		printk("MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+		       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+		       ntohs(eth_hdr(skb)->h_proto));
+		return;
+	default:
+		break;
+	}
+
+fallback:
+	printk("MAC=");
+	if (dev->hard_header_len &&
+	    skb->mac_header != skb->network_header) {
+		const unsigned char *p = skb_mac_header(skb);
+		unsigned int len = dev->hard_header_len;
+		unsigned int i;
+
+		if (dev->type == ARPHRD_SIT &&
+		    (p -= ETH_HLEN) < skb->head)
+			p = NULL;
+
+		if (p != NULL) {
+			printk("%02x", *p++);
+			for (i = 1; i < len; i++)
+				printk(":%02x", p[i]);
+		}
+		printk(" ");
+
+		if (dev->type == ARPHRD_SIT) {
+			const struct iphdr *iph =
+				(struct iphdr *)skb_mac_header(skb);
+			printk("TUNNEL=%pI4->%pI4 ", &iph->saddr, &iph->daddr);
+		}
+	} else
+		printk(" ");
+}
+
 static struct nf_loginfo default_loginfo = {
 	.type	= NF_LOG_TYPE_LOG,
 	.u = {
@@ -400,35 +450,10 @@
 		prefix,
 		in ? in->name : "",
 		out ? out->name : "");
-	if (in && !out) {
-		unsigned int len;
-		/* MAC logging for input chain only. */
-		printk("MAC=");
-		if (skb->dev && (len = skb->dev->hard_header_len) &&
-		    skb->mac_header != skb->network_header) {
-			const unsigned char *p = skb_mac_header(skb);
-			int i;
 
-			if (skb->dev->type == ARPHRD_SIT &&
-			    (p -= ETH_HLEN) < skb->head)
-				p = NULL;
-
-			if (p != NULL) {
-				for (i = 0; i < len; i++)
-					printk("%02x%s", p[i],
-					       i == len - 1 ? "" : ":");
-			}
-			printk(" ");
-
-			if (skb->dev->type == ARPHRD_SIT) {
-				const struct iphdr *iph =
-					(struct iphdr *)skb_mac_header(skb);
-				printk("TUNNEL=%pI4->%pI4 ",
-				       &iph->saddr, &iph->daddr);
-			}
-		} else
-			printk(" ");
-	}
+	/* MAC logging for input path only. */
+	if (in && !out)
+		dump_mac_header(loginfo, skb);
 
 	dump_packet(loginfo, skb, skb_network_offset(skb), 1);
 	printk("\n");
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 9be8177..1df3c8b 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -208,7 +208,7 @@
 	type = icmp6h->icmp6_type - 130;
 	if (type >= 0 && type < sizeof(noct_valid_new) &&
 	    noct_valid_new[type]) {
-		skb->nfct = &nf_conntrack_untracked.ct_general;
+		skb->nfct = &nf_ct_untracked_get()->ct_general;
 		skb->nfctinfo = IP_CT_NEW;
 		nf_conntrack_get(skb->nfct);
 		return NF_ACCEPT;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6fb8901..9254008 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -114,10 +114,8 @@
 }
 
 /* Memory Tracking Functions. */
-static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work)
+static void frag_kfree_skb(struct sk_buff *skb)
 {
-	if (work)
-		*work -= skb->truesize;
 	atomic_sub(skb->truesize, &nf_init_frags.mem);
 	nf_skb_free(skb);
 	kfree_skb(skb);
@@ -335,7 +333,7 @@
 				fq->q.fragments = next;
 
 			fq->q.meat -= free_it->len;
-			frag_kfree_skb(free_it, NULL);
+			frag_kfree_skb(free_it);
 		}
 	}
 
@@ -442,7 +440,6 @@
 	skb_shinfo(head)->frag_list = head->next;
 	skb_reset_transport_header(head);
 	skb_push(head, head->data - skb_network_header(head));
-	atomic_sub(head->truesize, &nf_init_frags.mem);
 
 	for (fp=head->next; fp; fp = fp->next) {
 		head->data_len += fp->len;
@@ -452,8 +449,8 @@
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
-		atomic_sub(fp->truesize, &nf_init_frags.mem);
 	}
+	atomic_sub(head->truesize, &nf_init_frags.mem);
 
 	head->next = NULL;
 	head->dev = dev;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 566798d..d082eae 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -174,17 +174,28 @@
 				const struct snmp_mib *itemlist)
 {
 	int i;
-	for (i=0; itemlist[i].name; i++)
+
+	for (i = 0; itemlist[i].name; i++)
 		seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
 			   snmp_fold_field(mib, itemlist[i].entry));
 }
 
+static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib,
+				  const struct snmp_mib *itemlist, size_t syncpoff)
+{
+	int i;
+
+	for (i = 0; itemlist[i].name; i++)
+		seq_printf(seq, "%-32s\t%llu\n", itemlist[i].name,
+			   snmp_fold_field64(mib, itemlist[i].entry, syncpoff));
+}
+
 static int snmp6_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = (struct net *)seq->private;
 
-	snmp6_seq_show_item(seq, (void __percpu **)net->mib.ipv6_statistics,
-			    snmp6_ipstats_list);
+	snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics,
+			    snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
 	snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
 			    snmp6_icmp6_list);
 	snmp6_seq_show_icmpv6msg(seq,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4a4dcbe..e677937 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -602,31 +602,33 @@
 }
 
 static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
-			struct flowi *fl, struct rt6_info *rt,
+			struct flowi *fl, struct dst_entry **dstp,
 			unsigned int flags)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct ipv6hdr *iph;
 	struct sk_buff *skb;
 	int err;
+	struct rt6_info *rt = (struct rt6_info *)*dstp;
 
-	if (length > rt->u.dst.dev->mtu) {
-		ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu);
+	if (length > rt->dst.dev->mtu) {
+		ipv6_local_error(sk, EMSGSIZE, fl, rt->dst.dev->mtu);
 		return -EMSGSIZE;
 	}
 	if (flags&MSG_PROBE)
 		goto out;
 
 	skb = sock_alloc_send_skb(sk,
-				  length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+				  length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
 				  flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto error;
-	skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
+	skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
-	skb_dst_set(skb, dst_clone(&rt->u.dst));
+	skb_dst_set(skb, &rt->dst);
+	*dstp = NULL;
 
 	skb_put(skb, length);
 	skb_reset_network_header(skb);
@@ -641,7 +643,7 @@
 
 	IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
-		      rt->u.dst.dev, dst_output);
+		      rt->dst.dev, dst_output);
 	if (err > 0)
 		err = net_xmit_errno(err);
 	if (err)
@@ -725,7 +727,7 @@
 {
 	struct ipv6_txoptions opt_space;
 	struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
-	struct in6_addr *daddr, *final_p = NULL, final;
+	struct in6_addr *daddr, *final_p, final;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct raw6_sock *rp = raw6_sk(sk);
@@ -847,13 +849,7 @@
 	if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
 		ipv6_addr_copy(&fl.fl6_src, &np->saddr);
 
-	/* merge ip6_build_xmit from ip6_output */
-	if (opt && opt->srcrt) {
-		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
-		ipv6_addr_copy(&final, &fl.fl6_dst);
-		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-		final_p = &final;
-	}
+	final_p = fl6_update_dst(&fl, opt, &final);
 
 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
 		fl.oif = np->mcast_oif;
@@ -892,9 +888,9 @@
 		goto do_confirm;
 
 back_from_confirm:
-	if (inet->hdrincl) {
-		err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info*)dst, msg->msg_flags);
-	} else {
+	if (inet->hdrincl)
+		err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, &dst, msg->msg_flags);
+	else {
 		lock_sock(sk);
 		err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
 			len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst,
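
The rawv6_send_hdrinc() change above switches from cloning the route to transferring the
caller's reference into the skb. The idiom, as a standalone sketch with an illustrative
helper name:

#include <linux/skbuff.h>
#include <net/dst.h>

/*
 * Illustrative helper showing the reference-transfer idiom used above:
 * the skb takes over the caller's dst reference instead of cloning it,
 * and the caller's pointer is cleared so the exit paths release the
 * route only when it was never attached.
 */
static void attach_route_sketch(struct sk_buff *skb, struct dst_entry **dstp)
{
	skb_dst_set(skb, *dstp);	/* skb now owns the reference */
	*dstp = NULL;			/* caller must not dst_release() it */
}

The caller's cleanup then only calls dst_release() when the pointer is still non-NULL,
which is exactly what the new *dstp = NULL in rawv6_send_hdrinc() arranges.
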
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 6d4292f..545c414 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -150,11 +150,8 @@
 EXPORT_SYMBOL(ip6_frag_match);
 
 /* Memory Tracking Functions. */
-static inline void frag_kfree_skb(struct netns_frags *nf,
-		struct sk_buff *skb, int *work)
+static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
 {
-	if (work)
-		*work -= skb->truesize;
 	atomic_sub(skb->truesize, &nf->mem);
 	kfree_skb(skb);
 }
@@ -336,6 +333,11 @@
 	 * in the chain of fragments so far.  We must know where to put
 	 * this fragment, right?
 	 */
+	prev = fq->q.fragments_tail;
+	if (!prev || FRAG6_CB(prev)->offset < offset) {
+		next = NULL;
+		goto found;
+	}
 	prev = NULL;
 	for(next = fq->q.fragments; next != NULL; next = next->next) {
 		if (FRAG6_CB(next)->offset >= offset)
@@ -343,6 +345,7 @@
 		prev = next;
 	}
 
+found:
 	/* We found where to put this one.  Check for overlap with
 	 * preceding fragment, and, if needed, align things so that
 	 * any overlaps are eliminated.
@@ -392,7 +395,7 @@
 				fq->q.fragments = next;
 
 			fq->q.meat -= free_it->len;
-			frag_kfree_skb(fq->q.net, free_it, NULL);
+			frag_kfree_skb(fq->q.net, free_it);
 		}
 	}
 
@@ -400,6 +403,8 @@
 
 	/* Insert this fragment in the chain of fragments. */
 	skb->next = next;
+	if (!next)
+		fq->q.fragments_tail = skb;
 	if (prev)
 		prev->next = skb;
 	else
@@ -466,6 +471,8 @@
 			goto out_oom;
 
 		fp->next = head->next;
+		if (!fp->next)
+			fq->q.fragments_tail = fp;
 		prev->next = fp;
 
 		skb_morph(head, fq->q.fragments);
@@ -524,7 +531,6 @@
 	skb_shinfo(head)->frag_list = head->next;
 	skb_reset_transport_header(head);
 	skb_push(head, head->data - skb_network_header(head));
-	atomic_sub(head->truesize, &fq->q.net->mem);
 
 	for (fp=head->next; fp; fp = fp->next) {
 		head->data_len += fp->len;
@@ -534,8 +540,8 @@
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
-		atomic_sub(fp->truesize, &fq->q.net->mem);
 	}
+	atomic_sub(head->truesize, &fq->q.net->mem);
 
 	head->next = NULL;
 	head->dev = dev;
@@ -553,6 +559,7 @@
 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
 	rcu_read_unlock();
 	fq->q.fragments = NULL;
+	fq->q.fragments_tail = NULL;
 	return 1;
 
 out_oversize:
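
The reassembly.c hunks above add a fragments_tail pointer so that in-order fragments are
appended in O(1) instead of walking the whole fragment list. A condensed sketch of that
fast path, with stand-in names for struct inet_frag_queue and FRAG6_CB() and with the
overlap trimming omitted:

#include <linux/skbuff.h>

struct frag_queue_sketch {			/* stand-in for struct inet_frag_queue */
	struct sk_buff *fragments;
	struct sk_buff *fragments_tail;
};

static int frag_offset(const struct sk_buff *skb)	/* stand-in for FRAG6_CB(skb)->offset */
{
	return *(const int *)skb->cb;
}

static void queue_frag_sketch(struct frag_queue_sketch *q, struct sk_buff *skb, int offset)
{
	struct sk_buff *prev = q->fragments_tail;
	struct sk_buff *next;

	if (!prev || frag_offset(prev) < offset) {
		next = NULL;				/* common case: append at the tail */
	} else {
		prev = NULL;				/* out of order: fall back to the walk */
		for (next = q->fragments; next; next = next->next) {
			if (frag_offset(next) >= offset)
				break;
			prev = next;
		}
	}

	skb->next = next;
	if (!next)
		q->fragments_tail = skb;		/* keep the tail pointer up to date */
	if (prev)
		prev->next = skb;
	else
		q->fragments = skb;
}
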
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 252d761..8f2d040 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -126,16 +126,14 @@
 };
 
 static struct rt6_info ip6_null_entry_template = {
-	.u = {
-		.dst = {
-			.__refcnt	= ATOMIC_INIT(1),
-			.__use		= 1,
-			.obsolete	= -1,
-			.error		= -ENETUNREACH,
-			.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
-			.input		= ip6_pkt_discard,
-			.output		= ip6_pkt_discard_out,
-		}
+	.dst = {
+		.__refcnt	= ATOMIC_INIT(1),
+		.__use		= 1,
+		.obsolete	= -1,
+		.error		= -ENETUNREACH,
+		.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
+		.input		= ip6_pkt_discard,
+		.output		= ip6_pkt_discard_out,
 	},
 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
 	.rt6i_protocol  = RTPROT_KERNEL,
@@ -149,16 +147,14 @@
 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
 
 static struct rt6_info ip6_prohibit_entry_template = {
-	.u = {
-		.dst = {
-			.__refcnt	= ATOMIC_INIT(1),
-			.__use		= 1,
-			.obsolete	= -1,
-			.error		= -EACCES,
-			.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
-			.input		= ip6_pkt_prohibit,
-			.output		= ip6_pkt_prohibit_out,
-		}
+	.dst = {
+		.__refcnt	= ATOMIC_INIT(1),
+		.__use		= 1,
+		.obsolete	= -1,
+		.error		= -EACCES,
+		.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
+		.input		= ip6_pkt_prohibit,
+		.output		= ip6_pkt_prohibit_out,
 	},
 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
 	.rt6i_protocol  = RTPROT_KERNEL,
@@ -167,16 +163,14 @@
 };
 
 static struct rt6_info ip6_blk_hole_entry_template = {
-	.u = {
-		.dst = {
-			.__refcnt	= ATOMIC_INIT(1),
-			.__use		= 1,
-			.obsolete	= -1,
-			.error		= -EINVAL,
-			.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
-			.input		= dst_discard,
-			.output		= dst_discard,
-		}
+	.dst = {
+		.__refcnt	= ATOMIC_INIT(1),
+		.__use		= 1,
+		.obsolete	= -1,
+		.error		= -EINVAL,
+		.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
+		.input		= dst_discard,
+		.output		= dst_discard,
 	},
 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
 	.rt6i_protocol  = RTPROT_KERNEL,
@@ -249,7 +243,7 @@
 	if (!oif && ipv6_addr_any(saddr))
 		goto out;
 
-	for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
+	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
 		struct net_device *dev = sprt->rt6i_dev;
 
 		if (oif) {
@@ -407,10 +401,10 @@
 
 	match = NULL;
 	for (rt = rr_head; rt && rt->rt6i_metric == metric;
-	     rt = rt->u.dst.rt6_next)
+	     rt = rt->dst.rt6_next)
 		match = find_match(rt, oif, strict, &mpri, match);
 	for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
-	     rt = rt->u.dst.rt6_next)
+	     rt = rt->dst.rt6_next)
 		match = find_match(rt, oif, strict, &mpri, match);
 
 	return match;
@@ -432,7 +426,7 @@
 
 	if (!match &&
 	    (strict & RT6_LOOKUP_F_REACHABLE)) {
-		struct rt6_info *next = rt0->u.dst.rt6_next;
+		struct rt6_info *next = rt0->dst.rt6_next;
 
 		/* no entries matched; do round-robin */
 		if (!next || next->rt6i_metric != rt0->rt6i_metric)
@@ -517,7 +511,7 @@
 			rt->rt6i_expires = jiffies + HZ * lifetime;
 			rt->rt6i_flags |= RTF_EXPIRES;
 		}
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 	}
 	return 0;
 }
@@ -555,7 +549,7 @@
 	rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags);
 	BACKTRACK(net, &fl->fl6_src);
 out:
-	dst_use(&rt->u.dst, jiffies);
+	dst_use(&rt->dst, jiffies);
 	read_unlock_bh(&table->tb6_lock);
 	return rt;
 
@@ -643,7 +637,7 @@
 		ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
 		rt->rt6i_dst.plen = 128;
 		rt->rt6i_flags |= RTF_CACHE;
-		rt->u.dst.flags |= DST_HOST;
+		rt->dst.flags |= DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
 		if (rt->rt6i_src.plen && saddr) {
@@ -677,7 +671,7 @@
 			if (net_ratelimit())
 				printk(KERN_WARNING
 				       "Neighbour table overflow.\n");
-			dst_free(&rt->u.dst);
+			dst_free(&rt->dst);
 			return NULL;
 		}
 		rt->rt6i_nexthop = neigh;
@@ -694,7 +688,7 @@
 		ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
 		rt->rt6i_dst.plen = 128;
 		rt->rt6i_flags |= RTF_CACHE;
-		rt->u.dst.flags |= DST_HOST;
+		rt->dst.flags |= DST_HOST;
 		rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
 	}
 	return rt;
@@ -726,7 +720,7 @@
 	    rt->rt6i_flags & RTF_CACHE)
 		goto out;
 
-	dst_hold(&rt->u.dst);
+	dst_hold(&rt->dst);
 	read_unlock_bh(&table->tb6_lock);
 
 	if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
@@ -739,10 +733,10 @@
 #endif
 	}
 
-	dst_release(&rt->u.dst);
+	dst_release(&rt->dst);
 	rt = nrt ? : net->ipv6.ip6_null_entry;
 
-	dst_hold(&rt->u.dst);
+	dst_hold(&rt->dst);
 	if (nrt) {
 		err = ip6_ins_rt(nrt);
 		if (!err)
@@ -756,7 +750,7 @@
 	 * Race condition! In the gap, when table->tb6_lock was
 	 * released someone could insert this route.  Relookup.
 	 */
-	dst_release(&rt->u.dst);
+	dst_release(&rt->dst);
 	goto relookup;
 
 out:
@@ -764,11 +758,11 @@
 		reachable = 0;
 		goto restart_2;
 	}
-	dst_hold(&rt->u.dst);
+	dst_hold(&rt->dst);
 	read_unlock_bh(&table->tb6_lock);
 out2:
-	rt->u.dst.lastuse = jiffies;
-	rt->u.dst.__use++;
+	rt->dst.lastuse = jiffies;
+	rt->dst.__use++;
 
 	return rt;
 }
@@ -835,15 +829,15 @@
 	struct dst_entry *new = NULL;
 
 	if (rt) {
-		new = &rt->u.dst;
+		new = &rt->dst;
 
 		atomic_set(&new->__refcnt, 1);
 		new->__use = 1;
 		new->input = dst_discard;
 		new->output = dst_discard;
 
-		memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
-		new->dev = ort->u.dst.dev;
+		memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+		new->dev = ort->dst.dev;
 		if (new->dev)
 			dev_hold(new->dev);
 		rt->rt6i_idev = ort->rt6i_idev;
@@ -912,7 +906,7 @@
 	rt = (struct rt6_info *) skb_dst(skb);
 	if (rt) {
 		if (rt->rt6i_flags&RTF_CACHE) {
-			dst_set_expires(&rt->u.dst, 0);
+			dst_set_expires(&rt->dst, 0);
 			rt->rt6i_flags |= RTF_EXPIRES;
 		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
 			rt->rt6i_node->fn_sernum = -1;
@@ -986,14 +980,14 @@
 	rt->rt6i_dev	  = dev;
 	rt->rt6i_idev     = idev;
 	rt->rt6i_nexthop  = neigh;
-	atomic_set(&rt->u.dst.__refcnt, 1);
-	rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
-	rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
-	rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
-	rt->u.dst.output  = ip6_output;
+	atomic_set(&rt->dst.__refcnt, 1);
+	rt->dst.metrics[RTAX_HOPLIMIT-1] = 255;
+	rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
+	rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+	rt->dst.output  = ip6_output;
 
 #if 0	/* there's no chance to use these for ndisc */
-	rt->u.dst.flags   = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
+	rt->dst.flags   = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
 				? DST_HOST
 				: 0;
 	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
@@ -1001,14 +995,14 @@
 #endif
 
 	spin_lock_bh(&icmp6_dst_lock);
-	rt->u.dst.next = icmp6_dst_gc_list;
-	icmp6_dst_gc_list = &rt->u.dst;
+	rt->dst.next = icmp6_dst_gc_list;
+	icmp6_dst_gc_list = &rt->dst;
 	spin_unlock_bh(&icmp6_dst_lock);
 
 	fib6_force_start_gc(net);
 
 out:
-	return &rt->u.dst;
+	return &rt->dst;
 }
 
 int icmp6_dst_gc(void)
@@ -1090,11 +1084,11 @@
 	int mtu = IPV6_MIN_MTU;
 	struct inet6_dev *idev;
 
-	idev = in6_dev_get(dev);
-	if (idev) {
+	rcu_read_lock();
+	idev = __in6_dev_get(dev);
+	if (idev)
 		mtu = idev->cnf.mtu6;
-		in6_dev_put(idev);
-	}
+	rcu_read_unlock();
 	return mtu;
 }
 
@@ -1103,12 +1097,15 @@
 	int hoplimit = dst_metric(dst, RTAX_HOPLIMIT);
 	if (hoplimit < 0) {
 		struct net_device *dev = dst->dev;
-		struct inet6_dev *idev = in6_dev_get(dev);
-		if (idev) {
+		struct inet6_dev *idev;
+
+		rcu_read_lock();
+		idev = __in6_dev_get(dev);
+		if (idev)
 			hoplimit = idev->cnf.hop_limit;
-			in6_dev_put(idev);
-		} else
+		else
 			hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
+		rcu_read_unlock();
 	}
 	return hoplimit;
 }
@@ -1159,7 +1156,7 @@
 		goto out;
 	}
 
-	rt->u.dst.obsolete = -1;
+	rt->dst.obsolete = -1;
 	rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
 				jiffies + clock_t_to_jiffies(cfg->fc_expires) :
 				0;
@@ -1171,16 +1168,16 @@
 	addr_type = ipv6_addr_type(&cfg->fc_dst);
 
 	if (addr_type & IPV6_ADDR_MULTICAST)
-		rt->u.dst.input = ip6_mc_input;
+		rt->dst.input = ip6_mc_input;
 	else
-		rt->u.dst.input = ip6_forward;
+		rt->dst.input = ip6_forward;
 
-	rt->u.dst.output = ip6_output;
+	rt->dst.output = ip6_output;
 
 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
 	rt->rt6i_dst.plen = cfg->fc_dst_len;
 	if (rt->rt6i_dst.plen == 128)
-	       rt->u.dst.flags = DST_HOST;
+	       rt->dst.flags = DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1208,9 +1205,9 @@
 				goto out;
 			}
 		}
-		rt->u.dst.output = ip6_pkt_discard_out;
-		rt->u.dst.input = ip6_pkt_discard;
-		rt->u.dst.error = -ENETUNREACH;
+		rt->dst.output = ip6_pkt_discard_out;
+		rt->dst.input = ip6_pkt_discard;
+		rt->dst.error = -ENETUNREACH;
 		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
 		goto install_route;
 	}
@@ -1244,7 +1241,7 @@
 				goto out;
 			if (dev) {
 				if (dev != grt->rt6i_dev) {
-					dst_release(&grt->u.dst);
+					dst_release(&grt->dst);
 					goto out;
 				}
 			} else {
@@ -1255,7 +1252,7 @@
 			}
 			if (!(grt->rt6i_flags&RTF_GATEWAY))
 				err = 0;
-			dst_release(&grt->u.dst);
+			dst_release(&grt->dst);
 
 			if (err)
 				goto out;
@@ -1294,18 +1291,18 @@
 					goto out;
 				}
 
-				rt->u.dst.metrics[type - 1] = nla_get_u32(nla);
+				rt->dst.metrics[type - 1] = nla_get_u32(nla);
 			}
 		}
 	}
 
-	if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
-		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
-	if (!dst_mtu(&rt->u.dst))
-		rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
-	if (!dst_metric(&rt->u.dst, RTAX_ADVMSS))
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
-	rt->u.dst.dev = dev;
+	if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
+		rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
+	if (!dst_mtu(&rt->dst))
+		rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
+	if (!dst_metric(&rt->dst, RTAX_ADVMSS))
+		rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+	rt->dst.dev = dev;
 	rt->rt6i_idev = idev;
 	rt->rt6i_table = table;
 
@@ -1319,7 +1316,7 @@
 	if (idev)
 		in6_dev_put(idev);
 	if (rt)
-		dst_free(&rt->u.dst);
+		dst_free(&rt->dst);
 	return err;
 }
 
@@ -1336,7 +1333,7 @@
 	write_lock_bh(&table->tb6_lock);
 
 	err = fib6_del(rt, info);
-	dst_release(&rt->u.dst);
+	dst_release(&rt->dst);
 
 	write_unlock_bh(&table->tb6_lock);
 
@@ -1369,7 +1366,7 @@
 			 &cfg->fc_src, cfg->fc_src_len);
 
 	if (fn) {
-		for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
+		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
 			if (cfg->fc_ifindex &&
 			    (rt->rt6i_dev == NULL ||
 			     rt->rt6i_dev->ifindex != cfg->fc_ifindex))
@@ -1379,7 +1376,7 @@
 				continue;
 			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
 				continue;
-			dst_hold(&rt->u.dst);
+			dst_hold(&rt->dst);
 			read_unlock_bh(&table->tb6_lock);
 
 			return __ip6_del_rt(rt, &cfg->fc_nlinfo);
@@ -1421,7 +1418,7 @@
 	read_lock_bh(&table->tb6_lock);
 	fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
 restart:
-	for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
+	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
 		/*
 		 * Current route is on-link; redirect is always invalid.
 		 *
@@ -1445,7 +1442,7 @@
 		rt = net->ipv6.ip6_null_entry;
 	BACKTRACK(net, &fl->fl6_src);
 out:
-	dst_hold(&rt->u.dst);
+	dst_hold(&rt->dst);
 
 	read_unlock_bh(&table->tb6_lock);
 
@@ -1513,10 +1510,10 @@
 	 * Look, redirects are sent only in response to data packets,
 	 * so that this nexthop apparently is reachable. --ANK
 	 */
-	dst_confirm(&rt->u.dst);
+	dst_confirm(&rt->dst);
 
 	/* Duplicate redirect: silently ignore. */
-	if (neigh == rt->u.dst.neighbour)
+	if (neigh == rt->dst.neighbour)
 		goto out;
 
 	nrt = ip6_rt_copy(rt);
@@ -1529,20 +1526,20 @@
 
 	ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
 	nrt->rt6i_dst.plen = 128;
-	nrt->u.dst.flags |= DST_HOST;
+	nrt->dst.flags |= DST_HOST;
 
 	ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
 	nrt->rt6i_nexthop = neigh_clone(neigh);
 	/* Reset pmtu, it may be better */
-	nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
-	nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
-							dst_mtu(&nrt->u.dst));
+	nrt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
+	nrt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
+							dst_mtu(&nrt->dst));
 
 	if (ip6_ins_rt(nrt))
 		goto out;
 
-	netevent.old = &rt->u.dst;
-	netevent.new = &nrt->u.dst;
+	netevent.old = &rt->dst;
+	netevent.new = &nrt->dst;
 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
 
 	if (rt->rt6i_flags&RTF_CACHE) {
@@ -1551,7 +1548,7 @@
 	}
 
 out:
-	dst_release(&rt->u.dst);
+	dst_release(&rt->dst);
 }
 
 /*
@@ -1570,7 +1567,7 @@
 	if (rt == NULL)
 		return;
 
-	if (pmtu >= dst_mtu(&rt->u.dst))
+	if (pmtu >= dst_mtu(&rt->dst))
 		goto out;
 
 	if (pmtu < IPV6_MIN_MTU) {
@@ -1588,7 +1585,7 @@
 	   They are sent only in response to data packets,
 	   so that this nexthop apparently is reachable. --ANK
 	 */
-	dst_confirm(&rt->u.dst);
+	dst_confirm(&rt->dst);
 
 	/* Host route. If it is static, it would be better
 	   not to override it, but add new one, so that
@@ -1596,10 +1593,10 @@
 	   would return automatically.
 	 */
 	if (rt->rt6i_flags & RTF_CACHE) {
-		rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
+		rt->dst.metrics[RTAX_MTU-1] = pmtu;
 		if (allfrag)
-			rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
-		dst_set_expires(&rt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
+			rt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+		dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
 		rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
 		goto out;
 	}
@@ -1615,9 +1612,9 @@
 		nrt = rt6_alloc_clone(rt, daddr);
 
 	if (nrt) {
-		nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
+		nrt->dst.metrics[RTAX_MTU-1] = pmtu;
 		if (allfrag)
-			nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+			nrt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
 
 		/* According to RFC 1981, a PMTU increase should not be
 		 * detected within 5 minutes; the recommended timer is 10
 		 * minutes. Here the route expiry time is set to
 		 * ip6_rt_mtu_expires, which is 10 minutes. After 10 minutes
 		 * the decreased pmtu expires and detection of a PMTU
 		 * increase happens automatically.
 		 */
-		dst_set_expires(&nrt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
+		dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
 		nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
 
 		ip6_ins_rt(nrt);
 	}
 out:
-	dst_release(&rt->u.dst);
+	dst_release(&rt->dst);
 }
 
 /*
@@ -1644,18 +1641,18 @@
 	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
 
 	if (rt) {
-		rt->u.dst.input = ort->u.dst.input;
-		rt->u.dst.output = ort->u.dst.output;
+		rt->dst.input = ort->dst.input;
+		rt->dst.output = ort->dst.output;
 
-		memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
-		rt->u.dst.error = ort->u.dst.error;
-		rt->u.dst.dev = ort->u.dst.dev;
-		if (rt->u.dst.dev)
-			dev_hold(rt->u.dst.dev);
+		memcpy(rt->dst.metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+		rt->dst.error = ort->dst.error;
+		rt->dst.dev = ort->dst.dev;
+		if (rt->dst.dev)
+			dev_hold(rt->dst.dev);
 		rt->rt6i_idev = ort->rt6i_idev;
 		if (rt->rt6i_idev)
 			in6_dev_hold(rt->rt6i_idev);
-		rt->u.dst.lastuse = jiffies;
+		rt->dst.lastuse = jiffies;
 		rt->rt6i_expires = 0;
 
 		ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
@@ -1689,14 +1686,14 @@
 	if (!fn)
 		goto out;
 
-	for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
+	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
 		if (rt->rt6i_dev->ifindex != ifindex)
 			continue;
 		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
 			continue;
 		if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
 			continue;
-		dst_hold(&rt->u.dst);
+		dst_hold(&rt->dst);
 		break;
 	}
 out:
@@ -1744,14 +1741,14 @@
 		return NULL;
 
 	write_lock_bh(&table->tb6_lock);
-	for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) {
+	for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
 		if (dev == rt->rt6i_dev &&
 		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
 		    ipv6_addr_equal(&rt->rt6i_gateway, addr))
 			break;
 	}
 	if (rt)
-		dst_hold(&rt->u.dst);
+		dst_hold(&rt->dst);
 	write_unlock_bh(&table->tb6_lock);
 	return rt;
 }
@@ -1790,9 +1787,9 @@
 
 restart:
 	read_lock_bh(&table->tb6_lock);
-	for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) {
+	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
 		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
-			dst_hold(&rt->u.dst);
+			dst_hold(&rt->dst);
 			read_unlock_bh(&table->tb6_lock);
 			ip6_del_rt(rt);
 			goto restart;
@@ -1930,15 +1927,15 @@
 	dev_hold(net->loopback_dev);
 	in6_dev_hold(idev);
 
-	rt->u.dst.flags = DST_HOST;
-	rt->u.dst.input = ip6_input;
-	rt->u.dst.output = ip6_output;
+	rt->dst.flags = DST_HOST;
+	rt->dst.input = ip6_input;
+	rt->dst.output = ip6_output;
 	rt->rt6i_dev = net->loopback_dev;
 	rt->rt6i_idev = idev;
-	rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
-	rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
-	rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
-	rt->u.dst.obsolete = -1;
+	rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
+	rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+	rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
+	rt->dst.obsolete = -1;
 
 	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
 	if (anycast)
@@ -1947,7 +1944,7 @@
 		rt->rt6i_flags |= RTF_LOCAL;
 	neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
 	if (IS_ERR(neigh)) {
-		dst_free(&rt->u.dst);
+		dst_free(&rt->dst);
 
 		/* We are casting this because that is the return
 		 * value type.  But an errno encoded pointer is the
@@ -1962,7 +1959,7 @@
 	rt->rt6i_dst.plen = 128;
 	rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
 
-	atomic_set(&rt->u.dst.__refcnt, 1);
+	atomic_set(&rt->dst.__refcnt, 1);
 
 	return rt;
 }
@@ -2033,12 +2030,12 @@
 	   PMTU discovery.
 	 */
 	if (rt->rt6i_dev == arg->dev &&
-	    !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
-	    (dst_mtu(&rt->u.dst) >= arg->mtu ||
-	     (dst_mtu(&rt->u.dst) < arg->mtu &&
-	      dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
-		rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
+	    !dst_metric_locked(&rt->dst, RTAX_MTU) &&
+	    (dst_mtu(&rt->dst) >= arg->mtu ||
+	     (dst_mtu(&rt->dst) < arg->mtu &&
+	      dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
+		rt->dst.metrics[RTAX_MTU-1] = arg->mtu;
+		rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
 	}
 	return 0;
 }
@@ -2252,20 +2249,20 @@
 #endif
 			NLA_PUT_U32(skb, RTA_IIF, iif);
 	} else if (dst) {
-		struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
+		struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
 		struct in6_addr saddr_buf;
 		if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
 				       dst, 0, &saddr_buf) == 0)
 			NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
 	}
 
-	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
+	if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
 		goto nla_put_failure;
 
-	if (rt->u.dst.neighbour)
-		NLA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key);
+	if (rt->dst.neighbour)
+		NLA_PUT(skb, RTA_GATEWAY, 16, &rt->dst.neighbour->primary_key);
 
-	if (rt->u.dst.dev)
+	if (rt->dst.dev)
 		NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
 
 	NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
@@ -2277,8 +2274,8 @@
 	else
 		expires = INT_MAX;
 
-	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
-			       expires, rt->u.dst.error) < 0)
+	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
+			       expires, rt->dst.error) < 0)
 		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
@@ -2364,7 +2361,7 @@
 	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
 
 	rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
 			    RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
@@ -2416,12 +2413,12 @@
 	struct net *net = dev_net(dev);
 
 	if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
-		net->ipv6.ip6_null_entry->u.dst.dev = dev;
+		net->ipv6.ip6_null_entry->dst.dev = dev;
 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
-		net->ipv6.ip6_prohibit_entry->u.dst.dev = dev;
+		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
-		net->ipv6.ip6_blk_hole_entry->u.dst.dev = dev;
+		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
 #endif
 	}
@@ -2464,8 +2461,8 @@
 		seq_puts(m, "00000000000000000000000000000000");
 	}
 	seq_printf(m, " %08x %08x %08x %08x %8s\n",
-		   rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
-		   rt->u.dst.__use, rt->rt6i_flags,
+		   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
+		   rt->dst.__use, rt->rt6i_flags,
 		   rt->rt6i_dev ? rt->rt6i_dev->name : "");
 	return 0;
 }
@@ -2646,9 +2643,9 @@
 					   GFP_KERNEL);
 	if (!net->ipv6.ip6_null_entry)
 		goto out_ip6_dst_ops;
-	net->ipv6.ip6_null_entry->u.dst.path =
+	net->ipv6.ip6_null_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_null_entry;
-	net->ipv6.ip6_null_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
+	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2656,18 +2653,18 @@
 					       GFP_KERNEL);
 	if (!net->ipv6.ip6_prohibit_entry)
 		goto out_ip6_null_entry;
-	net->ipv6.ip6_prohibit_entry->u.dst.path =
+	net->ipv6.ip6_prohibit_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
-	net->ipv6.ip6_prohibit_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
+	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
 
 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
 					       GFP_KERNEL);
 	if (!net->ipv6.ip6_blk_hole_entry)
 		goto out_ip6_prohibit_entry;
-	net->ipv6.ip6_blk_hole_entry->u.dst.path =
+	net->ipv6.ip6_blk_hole_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
-	net->ipv6.ip6_blk_hole_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
+	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
 #endif
 
 	net->ipv6.sysctl.flush_delay = 0;
@@ -2742,12 +2739,12 @@
 	/* Registration of the loopback device is done before this portion
 	 * of code, so the loopback reference in rt6_info will not be taken;
 	 * take it manually for init_net */
-	init_net.ipv6.ip6_null_entry->u.dst.dev = init_net.loopback_dev;
+	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
 	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
   #ifdef CONFIG_IPV6_MULTIPLE_TABLES
-	init_net.ipv6.ip6_prohibit_entry->u.dst.dev = init_net.loopback_dev;
+	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
 	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
-	init_net.ipv6.ip6_blk_hole_entry->u.dst.dev = init_net.loopback_dev;
+	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
 	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
   #endif
 	ret = fib6_init();
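
Most of the route.c churn above is the mechanical rt->u.dst to rt->dst rename. It is safe
only because struct dst_entry sits at the start of struct rt6_info, so the pointer casts
sprinkled through this file keep working. A sketch of that layout assumption, with
illustrative names:

#include <linux/types.h>
#include <net/dst.h>

struct rt6_info_sketch {		/* stand-in for struct rt6_info */
	struct dst_entry dst;		/* must remain the first member */
	/* ... IPv6-specific routing fields ... */
	u32 rt6i_flags;
};

static struct rt6_info_sketch *to_rt6_sketch(struct dst_entry *dst)
{
	/* Valid only because 'dst' is the first member; container_of()
	 * would express the same relationship without relying on member
	 * ordering. */
	return (struct rt6_info_sketch *)dst;
}
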
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e51e650..4699cd3 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -249,8 +249,6 @@
 	return NULL;
 }
 
-static DEFINE_SPINLOCK(ipip6_prl_lock);
-
 #define for_each_prl_rcu(start)			\
 	for (prl = rcu_dereference(start);	\
 	     prl;				\
@@ -340,7 +338,7 @@
 	if (a->addr == htonl(INADDR_ANY))
 		return -EINVAL;
 
-	spin_lock(&ipip6_prl_lock);
+	ASSERT_RTNL();
 
 	for (p = t->prl; p; p = p->next) {
 		if (p->addr == a->addr) {
@@ -370,7 +368,6 @@
 	t->prl_count++;
 	rcu_assign_pointer(t->prl, p);
 out:
-	spin_unlock(&ipip6_prl_lock);
 	return err;
 }
 
@@ -397,7 +394,7 @@
 	struct ip_tunnel_prl_entry *x, **p;
 	int err = 0;
 
-	spin_lock(&ipip6_prl_lock);
+	ASSERT_RTNL();
 
 	if (a && a->addr != htonl(INADDR_ANY)) {
 		for (p = &t->prl; *p; p = &(*p)->next) {
@@ -419,7 +416,6 @@
 		}
 	}
 out:
-	spin_unlock(&ipip6_prl_lock);
 	return err;
 }
 
@@ -716,7 +712,7 @@
 		stats->tx_carrier_errors++;
 		goto tx_error_icmp;
 	}
-	tdev = rt->u.dst.dev;
+	tdev = rt->dst.dev;
 
 	if (tdev == dev) {
 		ip_rt_put(rt);
@@ -725,7 +721,7 @@
 	}
 
 	if (df) {
-		mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
+		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
 
 		if (mtu < 68) {
 			stats->collisions++;
@@ -784,7 +780,7 @@
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 	IPCB(skb)->flags = 0;
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/*
 	 *	Push down and install the IPIP header.
@@ -833,7 +829,7 @@
 				    .proto = IPPROTO_IPV6 };
 		struct rtable *rt;
 		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
-			tdev = rt->u.dst.dev;
+			tdev = rt->dst.dev;
 			ip_rt_put(rt);
 		}
 		dev->flags |= IFF_POINTOPOINT;
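
Sketch of the locking change in sit.c above, with an illustrative function name: the PRL
add/delete paths are only reached with the RTNL held, so the private spinlock is dropped
and ASSERT_RTNL() both documents and checks that assumption at runtime.

#include <linux/rtnetlink.h>

static int prl_modify_sketch(void)
{
	ASSERT_RTNL();		/* caller must hold rtnl_lock() */

	/* ... mutate the potential-router list; new entries are still
	 * published with rcu_assign_pointer() for lockless readers ... */

	return 0;
}
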
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 34d1f06..09fd34f 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -27,28 +27,17 @@
 #define COOKIEBITS 24	/* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
 
-/*
- * This table has to be sorted and terminated with (__u16)-1.
- * XXX generate a better table.
- * Unresolved Issues: HIPPI with a 64k MSS is not well supported.
- *
- * Taken directly from ipv4 implementation.
- * Should this list be modified for ipv6 use or is it close enough?
- * rfc 2460 8.3 suggests mss values 20 bytes less than ipv4 counterpart
- */
+/* Table must be sorted. */
 static __u16 const msstab[] = {
-	64 - 1,
-	256 - 1,
-	512 - 1,
-	536 - 1,
-	1024 - 1,
-	1440 - 1,
-	1460 - 1,
-	4312 - 1,
-	(__u16)-1
+	64,
+	512,
+	536,
+	1280 - 60,
+	1480 - 60,
+	1500 - 60,
+	4460 - 60,
+	9000 - 60,
 };
-/* The number doesn't include the -1 terminator */
-#define NUM_MSS (ARRAY_SIZE(msstab) - 1)
 
 /*
  * This (misnamed) value is the age of syncookie which is permitted.
@@ -134,9 +123,11 @@
 
 	tcp_synq_overflow(sk);
 
-	for (mssind = 0; mss > msstab[mssind + 1]; mssind++)
-		;
-	*mssp = msstab[mssind] + 1;
+	for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
+		if (mss >= msstab[mssind])
+			break;
+
+	*mssp = msstab[mssind];
 
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 
@@ -154,7 +145,7 @@
 					    th->source, th->dest, seq,
 					    jiffies / (HZ * 60), COUNTER_TRIES);
 
-	return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
+	return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
 }
 
 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
@@ -173,8 +164,9 @@
 	int mss;
 	struct dst_entry *dst;
 	__u8 rcv_wscale;
+	bool ecn_ok;
 
-	if (!sysctl_tcp_syncookies || !th->ack)
+	if (!sysctl_tcp_syncookies || !th->ack || th->rst)
 		goto out;
 
 	if (tcp_synq_no_recent_overflow(sk) ||
@@ -189,8 +181,8 @@
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
 	tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
 
-	if (tcp_opt.saw_tstamp)
-		cookie_check_timestamp(&tcp_opt);
+	if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
+		goto out;
 
 	ret = NULL;
 	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
@@ -224,9 +216,8 @@
 
 	req->expires = 0UL;
 	req->retrans = 0;
-	ireq->ecn_ok		= 0;
+	ireq->ecn_ok		= ecn_ok;
 	ireq->snd_wscale	= tcp_opt.snd_wscale;
-	ireq->rcv_wscale	= tcp_opt.rcv_wscale;
 	ireq->sack_ok		= tcp_opt.sack_ok;
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
 	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
@@ -240,17 +231,12 @@
 	 * me if there is a preferred way.
 	 */
 	{
-		struct in6_addr *final_p = NULL, final;
+		struct in6_addr *final_p, final;
 		struct flowi fl;
 		memset(&fl, 0, sizeof(fl));
 		fl.proto = IPPROTO_TCP;
 		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
-		if (np->opt && np->opt->srcrt) {
-			struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
-			ipv6_addr_copy(&final, &fl.fl6_dst);
-			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-			final_p = &final;
-		}
+		final_p = fl6_update_dst(&fl, np->opt, &final);
 		ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
 		fl.oif = sk->sk_bound_dev_if;
 		fl.mark = sk->sk_mark;
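
The msstab change above also inverts the lookup: instead of scanning upward past a
sentinel, the cookie code now picks the largest table value that does not exceed the
advertised MSS. Restated on its own, using the same values as the patch:

#include <linux/kernel.h>
#include <linux/types.h>

static const __u16 msstab_sketch[] = {
	64, 512, 536, 1280 - 60, 1480 - 60, 1500 - 60, 4460 - 60, 9000 - 60,
};

/* Encode: index of the largest entry <= mss (index 0 is the floor). */
static unsigned int mss_to_index(__u16 mss)
{
	unsigned int i;

	for (i = ARRAY_SIZE(msstab_sketch) - 1; i; i--)
		if (mss >= msstab_sketch[i])
			break;
	return i;
}

/* Decode: just index the table, so the MSS can only be rounded down. */
static __u16 index_to_mss(unsigned int i)
{
	return i < ARRAY_SIZE(msstab_sketch) ? msstab_sketch[i] : 0;
}
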
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2b7c3a1..5ebc27e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -129,7 +129,7 @@
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct in6_addr *saddr = NULL, *final_p = NULL, final;
+	struct in6_addr *saddr = NULL, *final_p, final;
 	struct flowi fl;
 	struct dst_entry *dst;
 	int addr_type;
@@ -250,12 +250,7 @@
 	fl.fl_ip_dport = usin->sin6_port;
 	fl.fl_ip_sport = inet->inet_sport;
 
-	if (np->opt && np->opt->srcrt) {
-		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
-		ipv6_addr_copy(&final, &fl.fl6_dst);
-		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-		final_p = &final;
-	}
+	final_p = fl6_update_dst(&fl, np->opt, &final);
 
 	security_sk_classify_flow(sk, &fl);
 
@@ -477,7 +472,7 @@
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sk_buff * skb;
 	struct ipv6_txoptions *opt = NULL;
-	struct in6_addr * final_p = NULL, final;
+	struct in6_addr * final_p, final;
 	struct flowi fl;
 	struct dst_entry *dst;
 	int err = -1;
@@ -494,12 +489,7 @@
 	security_req_classify_flow(req, &fl);
 
 	opt = np->opt;
-	if (opt && opt->srcrt) {
-		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
-		ipv6_addr_copy(&final, &fl.fl6_dst);
-		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-		final_p = &final;
-	}
+	final_p = fl6_update_dst(&fl, opt, &final);
 
 	err = ip6_dst_lookup(sk, &dst, &fl);
 	if (err)
@@ -1167,7 +1157,7 @@
 	}
 
 #ifdef CONFIG_SYN_COOKIES
-	if (!th->rst && !th->syn && th->ack)
+	if (!th->syn)
 		sk = cookie_v6_check(sk, skb);
 #endif
 	return sk;
@@ -1279,13 +1269,10 @@
 	treq = inet6_rsk(req);
 	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
 	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
-	if (!want_cookie)
+	if (!want_cookie || tmp_opt.tstamp_ok)
 		TCP_ECN_create_request(req, tcp_hdr(skb));
 
-	if (want_cookie) {
-		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
-		req->cookie_ts = tmp_opt.tstamp_ok;
-	} else if (!isn) {
+	if (!isn) {
 		if (ipv6_opt_accepted(sk, skb) ||
 		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1298,8 +1285,12 @@
 		if (!sk->sk_bound_dev_if &&
 		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
 			treq->iif = inet6_iif(skb);
-
-		isn = tcp_v6_init_sequence(skb);
+		if (!want_cookie) {
+			isn = tcp_v6_init_sequence(skb);
+		} else {
+			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
+			req->cookie_ts = tmp_opt.tstamp_ok;
+		}
 	}
 	tcp_rsk(req)->snt_isn = isn;
 
@@ -1392,18 +1383,13 @@
 		goto out_overflow;
 
 	if (dst == NULL) {
-		struct in6_addr *final_p = NULL, final;
+		struct in6_addr *final_p, final;
 		struct flowi fl;
 
 		memset(&fl, 0, sizeof(fl));
 		fl.proto = IPPROTO_TCP;
 		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
-		if (opt && opt->srcrt) {
-			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
-			ipv6_addr_copy(&final, &fl.fl6_dst);
-			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-			final_p = &final;
-		}
+		final_p = fl6_update_dst(&fl, opt, &final);
 		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
 		fl.oif = sk->sk_bound_dev_if;
 		fl.mark = sk->sk_mark;
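
The repeated "if (opt && opt->srcrt)" blocks deleted in raw.c, syncookies.c and tcp_ipv6.c
above are all replaced by one helper, fl6_update_dst(). Judging purely from the code being
removed, the helper amounts to the following sketch (not a quote of the merged
implementation): if a type-0 routing header is present, remember the real destination in
*orig and aim the flow at the first hop.

#include <net/ipv6.h>

static struct in6_addr *fl6_update_dst_sketch(struct flowi *fl,
					      const struct ipv6_txoptions *opt,
					      struct in6_addr *orig)
{
	if (!opt || !opt->srcrt)
		return NULL;		/* callers treat NULL as "no override" */

	ipv6_addr_copy(orig, &fl->fl6_dst);
	ipv6_addr_copy(&fl->fl6_dst, ((struct rt0_hdr *)opt->srcrt)->addr);
	return orig;
}
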
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 87be586..1dd1aff 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -927,7 +927,7 @@
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
-	struct in6_addr *daddr, *final_p = NULL, final;
+	struct in6_addr *daddr, *final_p, final;
 	struct ipv6_txoptions *opt = NULL;
 	struct ip6_flowlabel *flowlabel = NULL;
 	struct flowi fl;
@@ -1097,14 +1097,9 @@
 		ipv6_addr_copy(&fl.fl6_src, &np->saddr);
 	fl.fl_ip_sport = inet->inet_sport;
 
-	/* merge ip6_build_xmit from ip6_output */
-	if (opt && opt->srcrt) {
-		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
-		ipv6_addr_copy(&final, &fl.fl6_dst);
-		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-		final_p = &final;
+	final_p = fl6_update_dst(&fl, opt, &final);
+	if (final_p)
 		connected = 0;
-	}
 
 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
 		fl.oif = np->mcast_oif;
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 47db1d8..285761e 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1853,23 +1853,23 @@
 		   self->remote_credit);
 	seq_printf(seq, "send credit: %d\n",
 		   self->send_credit);
-	seq_printf(seq, "  tx packets: %ld, ",
+	seq_printf(seq, "  tx packets: %lu, ",
 		   self->stats.tx_packets);
-	seq_printf(seq, "rx packets: %ld, ",
+	seq_printf(seq, "rx packets: %lu, ",
 		   self->stats.rx_packets);
-	seq_printf(seq, "tx_queue len: %d ",
+	seq_printf(seq, "tx_queue len: %u ",
 		   skb_queue_len(&self->tx_queue));
-	seq_printf(seq, "rx_queue len: %d\n",
+	seq_printf(seq, "rx_queue len: %u\n",
 		   skb_queue_len(&self->rx_queue));
 	seq_printf(seq, "  tx_sdu_busy: %s, ",
 		   self->tx_sdu_busy? "TRUE":"FALSE");
 	seq_printf(seq, "rx_sdu_busy: %s\n",
 		   self->rx_sdu_busy? "TRUE":"FALSE");
-	seq_printf(seq, "  max_seg_size: %d, ",
+	seq_printf(seq, "  max_seg_size: %u, ",
 		   self->max_seg_size);
-	seq_printf(seq, "tx_max_sdu_size: %d, ",
+	seq_printf(seq, "tx_max_sdu_size: %u, ",
 		   self->tx_max_sdu_size);
-	seq_printf(seq, "rx_max_sdu_size: %d\n",
+	seq_printf(seq, "rx_max_sdu_size: %u\n",
 		   self->rx_max_sdu_size);
 
 	seq_printf(seq, "  Used by (%s)\n\n",
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index f28ad2cc..499c045 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1463,7 +1463,7 @@
 	u32 res3;
 	u8  ippollfg;
 	u8  res4[3];
-} __attribute__ ((packed));
+} __packed;
 
 static void iucv_path_pending(struct iucv_irq_data *data)
 {
@@ -1524,7 +1524,7 @@
 	u32 res3;
 	u8  ippollfg;
 	u8  res4[3];
-} __attribute__ ((packed));
+} __packed;
 
 static void iucv_path_complete(struct iucv_irq_data *data)
 {
@@ -1554,7 +1554,7 @@
 	u32 res4;
 	u8  ippollfg;
 	u8  res5[3];
-} __attribute__ ((packed));
+} __packed;
 
 static void iucv_path_severed(struct iucv_irq_data *data)
 {
@@ -1590,7 +1590,7 @@
 	u32 res4;
 	u8  ippollfg;
 	u8  res5[3];
-} __attribute__ ((packed));
+} __packed;
 
 static void iucv_path_quiesced(struct iucv_irq_data *data)
 {
@@ -1618,7 +1618,7 @@
 	u32 res4;
 	u8  ippollfg;
 	u8  res5[3];
-} __attribute__ ((packed));
+} __packed;
 
 static void iucv_path_resumed(struct iucv_irq_data *data)
 {
@@ -1649,7 +1649,7 @@
 	u32 ipbfln2f;
 	u8  ippollfg;
 	u8  res2[3];
-} __attribute__ ((packed));
+} __packed;
 
 static void iucv_message_complete(struct iucv_irq_data *data)
 {
@@ -1694,7 +1694,7 @@
 	u32 ipbfln2f;
 	u8  ippollfg;
 	u8  res2[3];
-} __attribute__ ((packed));
+} __packed;
 
 static void iucv_message_pending(struct iucv_irq_data *data)
 {
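
The __attribute__((packed)) to __packed conversions above are purely cosmetic: __packed is
the kernel's wrapper for the same attribute, kept in the compiler headers so it can be
adapted per compiler. For reference, usage looks like this (the structure shown is
illustrative, not a real IUCV layout):

#include <linux/compiler.h>	/* provides __packed */
#include <linux/types.h>

/* Same effect as the open-coded attribute: no padding between members. */
struct iucv_like_hdr {
	u8  type;
	u32 value;
	u8  flags[3];
} __packed;
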
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 0852512..226a0ae 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -348,7 +348,7 @@
 	sk->sk_state = TCP_ESTABLISHED;
 	inet->inet_id = jiffies;
 
-	sk_dst_set(sk, &rt->u.dst);
+	sk_dst_set(sk, &rt->dst);
 
 	write_lock_bh(&l2tp_ip_lock);
 	hlist_del_init(&sk->sk_bind_node);
@@ -496,9 +496,9 @@
 			if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
 				goto no_route;
 		}
-		sk_setup_caps(sk, &rt->u.dst);
+		sk_setup_caps(sk, &rt->dst);
 	}
-	skb_dst_set(skb, dst_clone(&rt->u.dst));
+	skb_dst_set(skb, dst_clone(&rt->dst));
 
 	/* Queue the packet to IP for output */
 	rc = ip_queue_xmit(skb);
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 8a91f6c..4d6f865 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -33,6 +33,13 @@
 	---help---
 	  This option enables the 'minstrel' TX rate control algorithm
 
+config MAC80211_RC_MINSTREL_HT
+	bool "Minstrel 802.11n support" if EMBEDDED
+	depends on MAC80211_RC_MINSTREL
+	default y
+	---help---
+	  This option enables the 'minstrel_ht' TX rate control algorithm
+
 choice
 	prompt "Default rate control algorithm"
 	depends on MAC80211_HAS_RC
@@ -62,6 +69,7 @@
 
 config MAC80211_RC_DEFAULT
 	string
+	default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL && MAC80211_RC_MINSTREL_HT
 	default "minstrel" if MAC80211_RC_DEFAULT_MINSTREL
 	default "pid" if MAC80211_RC_DEFAULT_PID
 	default ""
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 84b48ba..fdb54e6 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -51,7 +51,11 @@
 rc80211_minstrel-y := rc80211_minstrel.o
 rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o
 
+rc80211_minstrel_ht-y := rc80211_minstrel_ht.o
+rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o
+
 mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y)
 mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
+mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
 
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 6bb9a9a..965b272 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -6,39 +6,70 @@
  * Copyright 2005-2006, Devicescape Software, Inc.
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007-2008, Intel Corporation
+ * Copyright 2007-2010, Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
 
+/**
+ * DOC: RX A-MPDU aggregation
+ *
+ * Aggregation on the RX side requires only implementing the
+ * @ampdu_action callback that is invoked to start/stop any
+ * block-ack sessions for RX aggregation.
+ *
+ * When RX aggregation is started by the peer, the driver is
+ * notified via the @ampdu_action callback, with the
+ * %IEEE80211_AMPDU_RX_START action, and may reject the request,
+ * in which case a negative response is sent to the peer; if it
+ * accepts the request, a positive response is sent.
+ *
+ * While the session is active, the device/driver are required
+ * to de-aggregate frames and pass them up one by one to mac80211,
+ * which will handle the reorder buffer.
+ *
+ * When the aggregation session is stopped again by the peer or
+ * ourselves, the driver's @ampdu_action function will be called
+ * with the action %IEEE80211_AMPDU_RX_STOP. In this case, the
+ * call must not fail.
+ */
+
 #include <linux/ieee80211.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 
-static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
-					    u16 initiator, u16 reason,
-					    bool from_timer)
+static void ieee80211_free_tid_rx(struct rcu_head *h)
+{
+	struct tid_ampdu_rx *tid_rx =
+		container_of(h, struct tid_ampdu_rx, rcu_head);
+	int i;
+
+	for (i = 0; i < tid_rx->buf_size; i++)
+		dev_kfree_skb(tid_rx->reorder_buf[i]);
+	kfree(tid_rx->reorder_buf);
+	kfree(tid_rx->reorder_time);
+	kfree(tid_rx);
+}
+
+void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+				     u16 initiator, u16 reason)
 {
 	struct ieee80211_local *local = sta->local;
 	struct tid_ampdu_rx *tid_rx;
-	int i;
 
-	spin_lock_bh(&sta->lock);
-
-	/* check if TID is in operational state */
-	if (!sta->ampdu_mlme.tid_active_rx[tid]) {
-		spin_unlock_bh(&sta->lock);
-		return;
-	}
-
-	sta->ampdu_mlme.tid_active_rx[tid] = false;
+	lockdep_assert_held(&sta->ampdu_mlme.mtx);
 
 	tid_rx = sta->ampdu_mlme.tid_rx[tid];
 
+	if (!tid_rx)
+		return;
+
+	rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL);
+
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
 	       sta->sta.addr, tid);
@@ -54,32 +85,17 @@
 		ieee80211_send_delba(sta->sdata, sta->sta.addr,
 				     tid, 0, reason);
 
-	/* free the reordering buffer */
-	for (i = 0; i < tid_rx->buf_size; i++) {
-		if (tid_rx->reorder_buf[i]) {
-			/* release the reordered frames */
-			dev_kfree_skb(tid_rx->reorder_buf[i]);
-			tid_rx->stored_mpdu_num--;
-			tid_rx->reorder_buf[i] = NULL;
-		}
-	}
+	del_timer_sync(&tid_rx->session_timer);
 
-	/* free resources */
-	kfree(tid_rx->reorder_buf);
-	kfree(tid_rx->reorder_time);
-	sta->ampdu_mlme.tid_rx[tid] = NULL;
-
-	spin_unlock_bh(&sta->lock);
-
-	if (!from_timer)
-		del_timer_sync(&tid_rx->session_timer);
-	kfree(tid_rx);
+	call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
 }
 
 void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				    u16 initiator, u16 reason)
 {
-	___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, false);
+	mutex_lock(&sta->ampdu_mlme.mtx);
+	___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason);
+	mutex_unlock(&sta->ampdu_mlme.mtx);
 }
 
 /*
@@ -100,8 +116,8 @@
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
 #endif
-	___ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT,
-					WLAN_REASON_QSTA_TIMEOUT, true);
+	set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
+	ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
 }
 
 static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
@@ -212,9 +228,9 @@
 
 
 	/* examine state machine */
-	spin_lock_bh(&sta->lock);
+	mutex_lock(&sta->ampdu_mlme.mtx);
 
-	if (sta->ampdu_mlme.tid_active_rx[tid]) {
+	if (sta->ampdu_mlme.tid_rx[tid]) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		if (net_ratelimit())
 			printk(KERN_DEBUG "unexpected AddBA Req from "
@@ -225,9 +241,8 @@
 	}
 
 	/* prepare A-MPDU MLME for Rx aggregation */
-	sta->ampdu_mlme.tid_rx[tid] =
-			kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
-	if (!sta->ampdu_mlme.tid_rx[tid]) {
+	tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
+	if (!tid_agg_rx) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		if (net_ratelimit())
 			printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
@@ -235,14 +250,11 @@
 #endif
 		goto end;
 	}
-	/* rx timer */
-	sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
-				sta_rx_agg_session_timer_expired;
-	sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
-				(unsigned long)&sta->timer_to_tid[tid];
-	init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
 
-	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+	/* rx timer */
+	tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
+	tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+	init_timer(&tid_agg_rx->session_timer);
 
 	/* prepare reordering buffer */
 	tid_agg_rx->reorder_buf =
@@ -257,8 +269,7 @@
 #endif
 		kfree(tid_agg_rx->reorder_buf);
 		kfree(tid_agg_rx->reorder_time);
-		kfree(sta->ampdu_mlme.tid_rx[tid]);
-		sta->ampdu_mlme.tid_rx[tid] = NULL;
+		kfree(tid_agg_rx);
 		goto end;
 	}
 
@@ -270,13 +281,12 @@
 
 	if (ret) {
 		kfree(tid_agg_rx->reorder_buf);
+		kfree(tid_agg_rx->reorder_time);
 		kfree(tid_agg_rx);
-		sta->ampdu_mlme.tid_rx[tid] = NULL;
 		goto end;
 	}
 
-	/* change state and send addba resp */
-	sta->ampdu_mlme.tid_active_rx[tid] = true;
+	/* update data */
 	tid_agg_rx->dialog_token = dialog_token;
 	tid_agg_rx->ssn = start_seq_num;
 	tid_agg_rx->head_seq_num = start_seq_num;
@@ -284,8 +294,15 @@
 	tid_agg_rx->timeout = timeout;
 	tid_agg_rx->stored_mpdu_num = 0;
 	status = WLAN_STATUS_SUCCESS;
+
+	/* activate it for RX */
+	rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
+
+	if (timeout)
+		mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
+
 end:
-	spin_unlock_bh(&sta->lock);
+	mutex_unlock(&sta->ampdu_mlme.mtx);
 
 end_no_lock:
 	ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
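
The agg-rx.c rework above moves tid_rx teardown to RCU: the pointer is unpublished first
and the structure is freed only after a grace period, so the lockless RX path can keep
dereferencing it safely. The bare pattern, with illustrative names and assuming the caller
holds the update-side lock (ampdu_mlme.mtx in the patch):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct session_sketch {			/* stand-in for struct tid_ampdu_rx */
	struct rcu_head rcu_head;
	/* ... reorder buffers, timers, ... */
};

static void session_free_rcu(struct rcu_head *h)
{
	kfree(container_of(h, struct session_sketch, rcu_head));
}

static void session_stop_sketch(struct session_sketch **slot)
{
	struct session_sketch *s = *slot;	/* update-side lock held */

	if (!s)
		return;

	rcu_assign_pointer(*slot, NULL);	  /* readers stop finding it */
	call_rcu(&s->rcu_head, session_free_rcu); /* freed after a grace period */
}
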
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 98258b7..c893f23 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -6,7 +6,7 @@
  * Copyright 2005-2006, Devicescape Software, Inc.
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007-2009, Intel Corporation
+ * Copyright 2007-2010, Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -21,28 +21,39 @@
 #include "wme.h"
 
 /**
- * DOC: TX aggregation
+ * DOC: TX A-MPDU aggregation
  *
  * Aggregation on the TX side requires setting the hardware flag
- * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues
- * hardware parameter to the number of hardware AMPDU queues. If there are no
- * hardware queues then the driver will (currently) have to do all frame
- * buffering.
+ * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
+ * packets with a flag indicating A-MPDU aggregation. The driver
+ * or device is responsible for actually aggregating the frames,
+ * as well as deciding how many and which to aggregate.
  *
- * When TX aggregation is started by some subsystem (usually the rate control
- * algorithm would be appropriate) by calling the
- * ieee80211_start_tx_ba_session() function, the driver will be notified via
- * its @ampdu_action function, with the %IEEE80211_AMPDU_TX_START action.
+ * When TX aggregation is started by some subsystem (usually the rate
+ * control algorithm would be appropriate) by calling the
+ * ieee80211_start_tx_ba_session() function, the driver will be
+ * notified via its @ampdu_action function, with the
+ * %IEEE80211_AMPDU_TX_START action.
  *
  * In response to that, the driver is later required to call the
- * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe())
- * function, which will start the aggregation session.
+ * ieee80211_start_tx_ba_cb_irqsafe() function, which will really
+ * start the aggregation session after the peer has also responded.
+ * If the peer responds negatively, the session will be stopped
+ * again right away. Note that it is possible for the aggregation
+ * session to be stopped before the driver has indicated that it
+ * is done setting it up, in which case it must not indicate the
+ * setup completion.
  *
- * Similarly, when the aggregation session is stopped by
- * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will
- * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the
- * call must not fail, and the driver must later call ieee80211_stop_tx_ba_cb()
- * (or ieee80211_stop_tx_ba_cb_irqsafe()).
+ * Also note that, since we need to wait for a response from
+ * the peer, the driver is notified of the completion of the
+ * handshake via the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
+ * @ampdu_action callback.
+ *
+ * Similarly, when the aggregation session is stopped by the peer
+ * or something calling ieee80211_stop_tx_ba_session(), the driver's
+ * @ampdu_action function will be called with the action
+ * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
+ * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
  */
 
 static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
@@ -125,25 +136,53 @@
 	ieee80211_tx_skb(sdata, skb);
 }
 
+static void kfree_tid_tx(struct rcu_head *rcu_head)
+{
+	struct tid_ampdu_tx *tid_tx =
+	    container_of(rcu_head, struct tid_ampdu_tx, rcu_head);
+
+	kfree(tid_tx);
+}
+
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				    enum ieee80211_back_parties initiator)
 {
 	struct ieee80211_local *local = sta->local;
+	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
 	int ret;
-	u8 *state;
+
+	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
+	if (!tid_tx)
+		return -ENOENT;
+
+	spin_lock_bh(&sta->lock);
+
+	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
+		/* not even started yet! */
+		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
+		spin_unlock_bh(&sta->lock);
+		call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
+		return 0;
+	}
+
+	spin_unlock_bh(&sta->lock);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
 	       sta->sta.addr, tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
+	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
 
-	if (*state == HT_AGG_STATE_OPERATIONAL)
-		sta->ampdu_mlme.addba_req_num[tid] = 0;
+	/*
+	 * After this packets are no longer handed right through
+	 * to the driver but are put onto tid_tx->pending instead,
+	 * with locking to ensure proper access.
+	 */
+	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
 
-	*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
-		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
+	tid_tx->stop_initiator = initiator;
 
 	ret = drv_ampdu_action(local, sta->sdata,
 			       IEEE80211_AMPDU_TX_STOP,
@@ -174,16 +213,14 @@
 	u16 tid = *(u8 *)data;
 	struct sta_info *sta = container_of((void *)data,
 		struct sta_info, timer_to_tid[tid]);
-	u8 *state;
-
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
+	struct tid_ampdu_tx *tid_tx;
 
 	/* check if the TID waits for addBA response */
-	spin_lock_bh(&sta->lock);
-	if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
-		       HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
-						HT_ADDBA_REQUESTED_MSK) {
-		spin_unlock_bh(&sta->lock);
+	rcu_read_lock();
+	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+	if (!tid_tx ||
+	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
+		rcu_read_unlock();
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "timer expired on tid %d but we are not "
 				"(or no longer) expecting addBA response there\n",
@@ -196,8 +233,8 @@
 	printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
 #endif
 
-	___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
-	spin_unlock_bh(&sta->lock);
+	ieee80211_stop_tx_ba_session(&sta->sta, tid);
+	rcu_read_unlock();
 }
 
 static inline int ieee80211_ac_from_tid(int tid)
@@ -205,14 +242,112 @@
 	return ieee802_1d_to_ac[tid & 7];
 }
 
+/*
+ * When multiple aggregation sessions on multiple stations
+ * are being created/destroyed simultaneously, we need to
+ * refcount the global queue stop caused by that so that one
+ * aggregation setup or teardown does not re-enable the queues
+ * before the other is ready to handle that.
+ *
+ * These two functions take care of this issue by keeping
+ * a global "agg_queue_stop" refcount.
+ */
+static void __acquires(agg_queue)
+ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
+{
+	int queue = ieee80211_ac_from_tid(tid);
+
+	if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
+		ieee80211_stop_queue_by_reason(
+			&local->hw, queue,
+			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	__acquire(agg_queue);
+}
+
+static void __releases(agg_queue)
+ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
+{
+	int queue = ieee80211_ac_from_tid(tid);
+
+	if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
+		ieee80211_wake_queue_by_reason(
+			&local->hw, queue,
+			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	__release(agg_queue);
+}
+
+void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+{
+	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
+	struct ieee80211_local *local = sta->local;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	u16 start_seq_num;
+	int ret;
+
+	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
+	/*
+	 * While we're asking the driver about the aggregation,
+	 * stop the AC queue so that we don't have to worry
+	 * about frames that came in while we were doing that,
+	 * which would require us to put them on the AC pending
+	 * queue afterwards, which just makes the code more complex.
+	 */
+	ieee80211_stop_queue_agg(local, tid);
+
+	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+
+	/*
+	 * make sure no packets are being processed to get
+	 * valid starting sequence number
+	 */
+	synchronize_net();
+
+	start_seq_num = sta->tid_seq[tid] >> 4;
+
+	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
+			       &sta->sta, tid, &start_seq_num);
+	if (ret) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "BA request denied - HW unavailable for"
+					" tid %d\n", tid);
+#endif
+		spin_lock_bh(&sta->lock);
+		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
+		spin_unlock_bh(&sta->lock);
+
+		ieee80211_wake_queue_agg(local, tid);
+		call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
+		return;
+	}
+
+	/* we can take packets again now */
+	ieee80211_wake_queue_agg(local, tid);
+
+	/* activate the timer for the recipient's addBA response */
+	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
+#endif
+
+	spin_lock_bh(&sta->lock);
+	sta->ampdu_mlme.addba_req_num[tid]++;
+	spin_unlock_bh(&sta->lock);
+
+	/* send AddBA request */
+	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
+				     tid_tx->dialog_token, start_seq_num,
+				     0x40, 5000);
+}
+
 int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 {
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
-	u8 *state;
+	struct tid_ampdu_tx *tid_tx;
 	int ret = 0;
-	u16 start_seq_num;
 
 	trace_api_start_tx_ba_session(pubsta, tid);
 
@@ -239,24 +374,15 @@
 	    sdata->vif.type != NL80211_IFTYPE_AP)
 		return -EINVAL;
 
-	if (test_sta_flags(sta, WLAN_STA_DISASSOC)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Disassociation is in progress. "
-		       "Denying BA session request\n");
-#endif
-		return -EINVAL;
-	}
-
 	if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "Suspend in progress. "
+		printk(KERN_DEBUG "BA sessions blocked. "
 		       "Denying BA session request\n");
 #endif
 		return -EINVAL;
 	}
 
 	spin_lock_bh(&sta->lock);
-	spin_lock(&local->ampdu_lock);
 
 	/* we have tried too many times, receiver does not want A-MPDU */
 	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
@@ -264,9 +390,9 @@
 		goto err_unlock_sta;
 	}
 
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
+	tid_tx = sta->ampdu_mlme.tid_tx[tid];
 	/* check if the TID is not in aggregation flow already */
-	if (*state != HT_AGG_STATE_IDLE) {
+	if (tid_tx) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "BA request denied - session is not "
 				 "idle on tid %u\n", tid);
@@ -275,96 +401,37 @@
 		goto err_unlock_sta;
 	}
 
-	/*
-	 * While we're asking the driver about the aggregation,
-	 * stop the AC queue so that we don't have to worry
-	 * about frames that came in while we were doing that,
-	 * which would require us to put them to the AC pending
-	 * afterwards which just makes the code more complex.
-	 */
-	ieee80211_stop_queue_by_reason(
-		&local->hw, ieee80211_ac_from_tid(tid),
-		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-
 	/* prepare A-MPDU MLME for Tx aggregation */
-	sta->ampdu_mlme.tid_tx[tid] =
-			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
-	if (!sta->ampdu_mlme.tid_tx[tid]) {
+	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
+	if (!tid_tx) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		if (net_ratelimit())
 			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
 					tid);
 #endif
 		ret = -ENOMEM;
-		goto err_wake_queue;
+		goto err_unlock_sta;
 	}
 
-	skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending);
+	skb_queue_head_init(&tid_tx->pending);
+	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
 
 	/* Tx timer */
-	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
-			sta_addba_resp_timer_expired;
-	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
-			(unsigned long)&sta->timer_to_tid[tid];
-	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
+	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+	init_timer(&tid_tx->addba_resp_timer);
 
-	/* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
-	 * call back right away, it must see that the flow has begun */
-	*state |= HT_ADDBA_REQUESTED_MSK;
-
-	start_seq_num = sta->tid_seq[tid] >> 4;
-
-	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
-			       pubsta, tid, &start_seq_num);
-
-	if (ret) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - HW unavailable for"
-					" tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-		*state = HT_AGG_STATE_IDLE;
-		goto err_free;
-	}
-
-	/* Driver vetoed or OKed, but we can take packets again now */
-	ieee80211_wake_queue_by_reason(
-		&local->hw, ieee80211_ac_from_tid(tid),
-		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-
-	spin_unlock(&local->ampdu_lock);
-
-	/* prepare tid data */
+	/* assign a dialog token */
 	sta->ampdu_mlme.dialog_token_allocator++;
-	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
-			sta->ampdu_mlme.dialog_token_allocator;
-	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
+	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
 
-	spin_unlock_bh(&sta->lock);
+	/* finally, assign it to the array */
+	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
 
-	/* send AddBA request */
-	ieee80211_send_addba_request(sdata, pubsta->addr, tid,
-			 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
-			 sta->ampdu_mlme.tid_tx[tid]->ssn,
-			 0x40, 5000);
-	sta->ampdu_mlme.addba_req_num[tid]++;
-	/* activate the timer for the recipient's addBA response */
-	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
-				jiffies + ADDBA_RESP_INTERVAL;
-	add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
-#endif
-	return 0;
+	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
 
- err_free:
-	kfree(sta->ampdu_mlme.tid_tx[tid]);
-	sta->ampdu_mlme.tid_tx[tid] = NULL;
- err_wake_queue:
-	ieee80211_wake_queue_by_reason(
-		&local->hw, ieee80211_ac_from_tid(tid),
-		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	/* this flow continues off the work */
  err_unlock_sta:
-	spin_unlock(&local->ampdu_lock);
 	spin_unlock_bh(&sta->lock);
 	return ret;
 }
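
The allocation path above fills in tid_tx completely (pending queue, WANT_START bit, response timer, dialog token) before rcu_assign_pointer() publishes it, so a reader that picks up the pointer without the lock never sees a half-initialized structure. A simplified user-space analogue of that initialize-then-publish ordering, using a C11 release store for the publish and an acquire load on the reader side (the struct and field names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct tid_tx_state {
	int dialog_token;
	int want_start;
};

static _Atomic(struct tid_tx_state *) tid_tx_ptr;

static int start_session(int token)
{
	struct tid_tx_state *tx = calloc(1, sizeof(*tx));

	if (!tx)
		return -1;

	/* initialize everything first ... */
	tx->dialog_token = token;
	tx->want_start = 1;

	/* ... then publish; release ordering makes the init visible */
	atomic_store_explicit(&tid_tx_ptr, tx, memory_order_release);
	return 0;
}

static void reader(void)
{
	struct tid_tx_state *tx =
		atomic_load_explicit(&tid_tx_ptr, memory_order_acquire);

	if (tx)
		printf("token %d want_start %d\n", tx->dialog_token, tx->want_start);
}

int main(void)
{
	start_session(1);
	reader();
	return 0;
}
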
@@ -372,69 +439,65 @@
 
 /*
  * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish and holding
- * local->ampdu_lock across both calls.
+ * requires a call to ieee80211_agg_splice_finish later
  */
-static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
-					 struct sta_info *sta, u16 tid)
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_local *local,
+			     struct tid_ampdu_tx *tid_tx, u16 tid)
 {
+	int queue = ieee80211_ac_from_tid(tid);
 	unsigned long flags;
-	u16 queue = ieee80211_ac_from_tid(tid);
 
-	ieee80211_stop_queue_by_reason(
-		&local->hw, queue,
-		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	ieee80211_stop_queue_agg(local, tid);
 
-	if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK))
+	if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+			  " from the pending queue\n", tid))
 		return;
 
-	if (WARN(!sta->ampdu_mlme.tid_tx[tid],
-		 "TID %d gone but expected when splicing aggregates from"
-		 "the pending queue\n", tid))
-		return;
-
-	if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
+	if (!skb_queue_empty(&tid_tx->pending)) {
 		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 		/* copy over remaining packets */
-		skb_queue_splice_tail_init(
-			&sta->ampdu_mlme.tid_tx[tid]->pending,
-			&local->pending[queue]);
+		skb_queue_splice_tail_init(&tid_tx->pending,
+					   &local->pending[queue]);
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 	}
 }
 
-static void ieee80211_agg_splice_finish(struct ieee80211_local *local,
-					struct sta_info *sta, u16 tid)
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
 {
-	u16 queue = ieee80211_ac_from_tid(tid);
-
-	ieee80211_wake_queue_by_reason(
-		&local->hw, queue,
-		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	ieee80211_wake_queue_agg(local, tid);
 }
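
ieee80211_agg_splice_packets() above moves whatever is still sitting on the per-TID pending queue onto the per-AC local pending queue while the queue is stopped, and ieee80211_agg_splice_finish() wakes the queue afterwards. A tiny singly-linked-queue analogue of the tail splice itself (the pkt/pkt_queue types are illustrative, not the kernel's sk_buff queues):

#include <stddef.h>
#include <stdio.h>

struct pkt {
	int id;
	struct pkt *next;
};

struct pkt_queue {
	struct pkt *head;
	struct pkt *tail;
};

static void enqueue(struct pkt_queue *q, struct pkt *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
}

/* append everything from src to the tail of dst and reinitialize src */
static void splice_tail_init(struct pkt_queue *src, struct pkt_queue *dst)
{
	if (!src->head)
		return;
	if (dst->tail)
		dst->tail->next = src->head;
	else
		dst->head = src->head;
	dst->tail = src->tail;
	src->head = src->tail = NULL;
}

int main(void)
{
	struct pkt a = { 1 }, b = { 2 }, c = { 3 };
	struct pkt_queue tid_pending = { 0 }, local_pending = { 0 };

	enqueue(&local_pending, &a);
	enqueue(&tid_pending, &b);
	enqueue(&tid_pending, &c);

	splice_tail_init(&tid_pending, &local_pending);

	for (struct pkt *p = local_pending.head; p; p = p->next)
		printf("%d\n", p->id);	/* prints 1 2 3 */
	return 0;
}
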
 
-/* caller must hold sta->lock */
 static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 					 struct sta_info *sta, u16 tid)
 {
+	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
 #endif
 
-	spin_lock(&local->ampdu_lock);
-	ieee80211_agg_splice_packets(local, sta, tid);
-	/*
-	 * NB: we rely on sta->lock being taken in the TX
-	 * processing here when adding to the pending queue,
-	 * otherwise we could only change the state of the
-	 * session to OPERATIONAL _here_.
-	 */
-	ieee80211_agg_splice_finish(local, sta, tid);
-	spin_unlock(&local->ampdu_lock);
-
 	drv_ampdu_action(local, sta->sdata,
 			 IEEE80211_AMPDU_TX_OPERATIONAL,
 			 &sta->sta, tid, NULL);
+
+	/*
+	 * synchronize with the TX path; while we are splicing,
+	 * the TX path should be blocked so it won't put more
+	 * packets onto the pending queue.
+	 */
+	spin_lock_bh(&sta->lock);
+
+	ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid);
+	/*
+	 * Now mark as operational. This will be visible
+	 * in the TX path, and lets it go lock-free in
+	 * the common case.
+	 */
+	set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state);
+	ieee80211_agg_splice_finish(local, tid);
+
+	spin_unlock_bh(&sta->lock);
 }
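
As the comments above say, HT_AGG_STATE_OPERATIONAL is set under sta->lock precisely so that the TX hot path can afterwards test the bit without taking the lock, only falling back to the locked path while a session is being set up or torn down. A reduced user-space sketch of that fast-path/slow-path split (all names here are illustrative):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int operational;
static pthread_mutex_t session_lock = PTHREAD_MUTEX_INITIALIZER;

static void tx_one_frame(int frame)
{
	/* common case: session operational, no lock needed */
	if (atomic_load_explicit(&operational, memory_order_acquire)) {
		printf("frame %d sent on aggregation session\n", frame);
		return;
	}

	/* slow path: session being set up or torn down, serialize */
	pthread_mutex_lock(&session_lock);
	printf("frame %d queued pending session state change\n", frame);
	pthread_mutex_unlock(&session_lock);
}

static void make_operational(void)
{
	pthread_mutex_lock(&session_lock);
	/* splice pending frames here, then flip the bit */
	atomic_store_explicit(&operational, 1, memory_order_release);
	pthread_mutex_unlock(&session_lock);
}

int main(void)
{
	tx_one_frame(1);	/* slow path */
	make_operational();
	tx_one_frame(2);	/* fast path */
	return 0;
}
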
 
 void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
@@ -442,7 +505,7 @@
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
-	u8 *state;
+	struct tid_ampdu_tx *tid_tx;
 
 	trace_api_start_tx_ba_cb(sdata, ra, tid);
 
@@ -454,42 +517,36 @@
 		return;
 	}
 
-	rcu_read_lock();
+	mutex_lock(&local->sta_mtx);
 	sta = sta_info_get(sdata, ra);
 	if (!sta) {
-		rcu_read_unlock();
+		mutex_unlock(&local->sta_mtx);
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
 #endif
 		return;
 	}
 
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
-	spin_lock_bh(&sta->lock);
+	mutex_lock(&sta->ampdu_mlme.mtx);
+	tid_tx = sta->ampdu_mlme.tid_tx[tid];
 
-	if (WARN_ON(!(*state & HT_ADDBA_REQUESTED_MSK))) {
+	if (WARN_ON(!tid_tx)) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
-				*state);
+		printk(KERN_DEBUG "addBA was not requested!\n");
 #endif
-		spin_unlock_bh(&sta->lock);
-		rcu_read_unlock();
-		return;
+		goto unlock;
 	}
 
-	if (WARN_ON(*state & HT_ADDBA_DRV_READY_MSK))
-		goto out;
+	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
+		goto unlock;
 
-	*state |= HT_ADDBA_DRV_READY_MSK;
-
-	if (*state == HT_AGG_STATE_OPERATIONAL)
+	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
 		ieee80211_agg_tx_operational(local, sta, tid);
 
- out:
-	spin_unlock_bh(&sta->lock);
-	rcu_read_unlock();
+ unlock:
+	mutex_unlock(&sta->ampdu_mlme.mtx);
+	mutex_unlock(&local->sta_mtx);
 }
-EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
 
 void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 				      const u8 *ra, u16 tid)
@@ -510,44 +567,36 @@
 	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
 	memcpy(&ra_tid->ra, ra, ETH_ALEN);
 	ra_tid->tid = tid;
-	ra_tid->vif = vif;
 
-	skb->pkt_type = IEEE80211_ADDBA_MSG;
-	skb_queue_tail(&local->skb_queue, skb);
-	tasklet_schedule(&local->tasklet);
+	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
+	skb_queue_tail(&sdata->skb_queue, skb);
+	ieee80211_queue_work(&local->hw, &sdata->work);
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
 int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				   enum ieee80211_back_parties initiator)
 {
-	u8 *state;
 	int ret;
 
-	/* check if the TID is in aggregation */
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
-	spin_lock_bh(&sta->lock);
-
-	if (*state != HT_AGG_STATE_OPERATIONAL) {
-		ret = -ENOENT;
-		goto unlock;
-	}
+	mutex_lock(&sta->ampdu_mlme.mtx);
 
 	ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);
 
- unlock:
-	spin_unlock_bh(&sta->lock);
+	mutex_unlock(&sta->ampdu_mlme.mtx);
+
 	return ret;
 }
 
-int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
-				 enum ieee80211_back_parties initiator)
+int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 {
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
+	struct tid_ampdu_tx *tid_tx;
+	int ret = 0;
 
-	trace_api_stop_tx_ba_session(pubsta, tid, initiator);
+	trace_api_stop_tx_ba_session(pubsta, tid);
 
 	if (!local->ops->ampdu_action)
 		return -EINVAL;
@@ -555,7 +604,26 @@
 	if (tid >= STA_TID_NUM)
 		return -EINVAL;
 
-	return __ieee80211_stop_tx_ba_session(sta, tid, initiator);
+	spin_lock_bh(&sta->lock);
+	tid_tx = sta->ampdu_mlme.tid_tx[tid];
+
+	if (!tid_tx) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+		/* stopping it is already in progress */
+		ret = 0;
+		goto unlock;
+	}
+
+	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
+	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+
+ unlock:
+	spin_unlock_bh(&sta->lock);
+	return ret;
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
 
@@ -564,7 +632,7 @@
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
-	u8 *state;
+	struct tid_ampdu_tx *tid_tx;
 
 	trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
@@ -581,51 +649,56 @@
 	       ra, tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
-	rcu_read_lock();
+	mutex_lock(&local->sta_mtx);
+
 	sta = sta_info_get(sdata, ra);
 	if (!sta) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
 #endif
-		rcu_read_unlock();
-		return;
+		goto unlock;
 	}
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
 
-	/* NOTE: no need to use sta->lock in this state check, as
-	 * ieee80211_stop_tx_ba_session will let only one stop call to
-	 * pass through per sta/tid
-	 */
-	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
+	mutex_lock(&sta->ampdu_mlme.mtx);
+	spin_lock_bh(&sta->lock);
+	tid_tx = sta->ampdu_mlme.tid_tx[tid];
+
+	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
 #endif
-		rcu_read_unlock();
-		return;
+		goto unlock_sta;
 	}
 
-	if (*state & HT_AGG_STATE_INITIATOR_MSK)
+	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR)
 		ieee80211_send_delba(sta->sdata, ra, tid,
 			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
 
-	spin_lock_bh(&sta->lock);
-	spin_lock(&local->ampdu_lock);
+	/*
+	 * When we get here, the TX path is no longer lockless with
+	 * respect to aggregation, since the OPERATIONAL bit has long
+	 * been cleared. Any TX path that runs will therefore block on
+	 * taking the lock. So if we stop the queue now, we will not get
+	 * any more packets, and anything currently being processed will
+	 * wait for us here, thereby guaranteeing that no more packets
+	 * go to the tid_tx pending queue.
+	 */
 
-	ieee80211_agg_splice_packets(local, sta, tid);
+	ieee80211_agg_splice_packets(local, tid_tx, tid);
 
-	*state = HT_AGG_STATE_IDLE;
-	/* from now on packets are no longer put onto sta->pending */
-	kfree(sta->ampdu_mlme.tid_tx[tid]);
-	sta->ampdu_mlme.tid_tx[tid] = NULL;
+	/* future packets must not find the tid_tx struct any more */
+	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
 
-	ieee80211_agg_splice_finish(local, sta, tid);
+	ieee80211_agg_splice_finish(local, tid);
 
-	spin_unlock(&local->ampdu_lock);
+	call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
+
+ unlock_sta:
 	spin_unlock_bh(&sta->lock);
-
-	rcu_read_unlock();
+	mutex_unlock(&sta->ampdu_mlme.mtx);
+ unlock:
+	mutex_unlock(&local->sta_mtx);
 }
-EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
 
 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 				     const u8 *ra, u16 tid)
@@ -646,11 +719,10 @@
 	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
 	memcpy(&ra_tid->ra, ra, ETH_ALEN);
 	ra_tid->tid = tid;
-	ra_tid->vif = vif;
 
-	skb->pkt_type = IEEE80211_DELBA_MSG;
-	skb_queue_tail(&local->skb_queue, skb);
-	tasklet_schedule(&local->tasklet);
+	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
+	skb_queue_tail(&sdata->skb_queue, skb);
+	ieee80211_queue_work(&local->hw, &sdata->work);
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
 
@@ -660,40 +732,40 @@
 				  struct ieee80211_mgmt *mgmt,
 				  size_t len)
 {
+	struct tid_ampdu_tx *tid_tx;
 	u16 capab, tid;
-	u8 *state;
 
 	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
 	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
 
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
+	mutex_lock(&sta->ampdu_mlme.mtx);
 
-	spin_lock_bh(&sta->lock);
-
-	if (!(*state & HT_ADDBA_REQUESTED_MSK))
+	tid_tx = sta->ampdu_mlme.tid_tx[tid];
+	if (!tid_tx)
 		goto out;
 
-	if (mgmt->u.action.u.addba_resp.dialog_token !=
-		sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
+	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+#endif
 		goto out;
 	}
 
-	del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+	del_timer(&tid_tx->addba_resp_timer);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+#endif
 
 	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
 			== WLAN_STATUS_SUCCESS) {
-		u8 curstate = *state;
+		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
+				     &tid_tx->state)) {
+			/* ignore duplicate response */
+			goto out;
+		}
 
-		*state |= HT_ADDBA_RECEIVED_MSK;
-
-		if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL)
+		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
 			ieee80211_agg_tx_operational(local, sta, tid);
 
 		sta->ampdu_mlme.addba_req_num[tid] = 0;
@@ -702,5 +774,5 @@
 	}
 
  out:
-	spin_unlock_bh(&sta->lock);
+	mutex_unlock(&sta->ampdu_mlme.mtx);
 }
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c7000a6..9eb02a3 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -120,6 +120,9 @@
 	struct ieee80211_key *key;
 	int err;
 
+	if (!netif_running(dev))
+		return -ENETDOWN;
+
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	switch (params->cipher) {
@@ -145,7 +148,7 @@
 	if (!key)
 		return -ENOMEM;
 
-	rcu_read_lock();
+	mutex_lock(&sdata->local->sta_mtx);
 
 	if (mac_addr) {
 		sta = sta_info_get_bss(sdata, mac_addr);
@@ -160,7 +163,7 @@
 
 	err = 0;
  out_unlock:
-	rcu_read_unlock();
+	mutex_unlock(&sdata->local->sta_mtx);
 
 	return err;
 }
@@ -174,7 +177,7 @@
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	rcu_read_lock();
+	mutex_lock(&sdata->local->sta_mtx);
 
 	if (mac_addr) {
 		ret = -ENOENT;
@@ -202,7 +205,7 @@
 
 	ret = 0;
  out_unlock:
-	rcu_read_unlock();
+	mutex_unlock(&sdata->local->sta_mtx);
 
 	return ret;
 }
@@ -305,15 +308,10 @@
 					struct net_device *dev,
 					u8 key_idx)
 {
-	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	rcu_read_lock();
-
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	ieee80211_set_default_key(sdata, key_idx);
 
-	rcu_read_unlock();
-
 	return 0;
 }
 
@@ -415,9 +413,6 @@
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 
-	if (!local->ops->get_survey)
-		return -EOPNOTSUPP;
-
 	return drv_get_survey(local, idx, survey);
 }
 
@@ -600,7 +595,7 @@
 	u8 ssap;		/* 0 */
 	u8 control;
 	u8 xid_info[3];
-} __attribute__ ((packed));
+} __packed;
 
 static void ieee80211_send_layer2_update(struct sta_info *sta)
 {
@@ -1331,28 +1326,28 @@
 }
 
 static int ieee80211_set_tx_power(struct wiphy *wiphy,
-				  enum tx_power_setting type, int dbm)
+				  enum nl80211_tx_power_setting type, int mbm)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_channel *chan = local->hw.conf.channel;
 	u32 changes = 0;
 
 	switch (type) {
-	case TX_POWER_AUTOMATIC:
+	case NL80211_TX_POWER_AUTOMATIC:
 		local->user_power_level = -1;
 		break;
-	case TX_POWER_LIMITED:
-		if (dbm < 0)
-			return -EINVAL;
-		local->user_power_level = dbm;
+	case NL80211_TX_POWER_LIMITED:
+		if (mbm < 0 || (mbm % 100))
+			return -EOPNOTSUPP;
+		local->user_power_level = MBM_TO_DBM(mbm);
 		break;
-	case TX_POWER_FIXED:
-		if (dbm < 0)
-			return -EINVAL;
+	case NL80211_TX_POWER_FIXED:
+		if (mbm < 0 || (mbm % 100))
+			return -EOPNOTSUPP;
 		/* TODO: move to cfg80211 when it knows the channel */
-		if (dbm > chan->max_power)
+		if (MBM_TO_DBM(mbm) > chan->max_power)
 			return -EINVAL;
-		local->user_power_level = dbm;
+		local->user_power_level = MBM_TO_DBM(mbm);
 		break;
 	}
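
The converted ieee80211_set_tx_power() above now receives the requested power in mBm (hundredths of a dBm), rejects negative values and anything that is not a whole dBm, and converts with MBM_TO_DBM() before storing user_power_level in dBm. A small stand-alone version of that validation and conversion (the helper name and error value merely mirror the patch):

#include <stdio.h>
#include <errno.h>

/* cfg80211 expresses TX power in mBm: 100 mBm == 1 dBm */
#define MBM_TO_DBM(mbm)	((mbm) / 100)

static int set_user_power_level_mbm(int mbm, int *dbm_out)
{
	/* only non-negative, whole-dBm values are accepted */
	if (mbm < 0 || (mbm % 100))
		return -EOPNOTSUPP;

	*dbm_out = MBM_TO_DBM(mbm);
	return 0;
}

int main(void)
{
	int dbm;

	if (set_user_power_level_mbm(1500, &dbm) == 0)
		printf("user power level: %d dBm\n", dbm);	/* 15 dBm */

	if (set_user_power_level_mbm(1550, &dbm) != 0)
		printf("1550 mBm rejected (not a whole dBm)\n");
	return 0;
}
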
 
@@ -1448,7 +1443,6 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_conf *conf = &local->hw.conf;
 
 	if (sdata->vif.type != NL80211_IFTYPE_STATION)
 		return -EOPNOTSUPP;
@@ -1457,11 +1451,11 @@
 		return -EOPNOTSUPP;
 
 	if (enabled == sdata->u.mgd.powersave &&
-	    timeout == conf->dynamic_ps_forced_timeout)
+	    timeout == local->dynamic_ps_forced_timeout)
 		return 0;
 
 	sdata->u.mgd.powersave = enabled;
-	conf->dynamic_ps_forced_timeout = timeout;
+	local->dynamic_ps_forced_timeout = timeout;
 
 	/* no change, but if automatic follow powersave */
 	mutex_lock(&sdata->u.mgd.mtx);
@@ -1554,10 +1548,58 @@
 static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
 			    struct ieee80211_channel *chan,
 			    enum nl80211_channel_type channel_type,
+			    bool channel_type_valid,
 			    const u8 *buf, size_t len, u64 *cookie)
 {
-	return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan,
-				    channel_type, buf, len, cookie);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	struct sk_buff *skb;
+	struct sta_info *sta;
+	const struct ieee80211_mgmt *mgmt = (void *)buf;
+	u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+		    IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+	/* Check that we are on the requested channel for transmission */
+	if (chan != local->tmp_channel &&
+	    chan != local->oper_channel)
+		return -EBUSY;
+	if (channel_type_valid &&
+	    (channel_type != local->tmp_channel_type &&
+	     channel_type != local->_oper_channel_type))
+		return -EBUSY;
+
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_ADHOC:
+		if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)
+			break;
+		rcu_read_lock();
+		sta = sta_info_get(sdata, mgmt->da);
+		rcu_read_unlock();
+		if (!sta)
+			return -ENOLINK;
+		break;
+	case NL80211_IFTYPE_STATION:
+		if (!(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
+			flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+
+	memcpy(skb_put(skb, len), buf, len);
+
+	IEEE80211_SKB_CB(skb)->flags = flags;
+
+	skb->dev = sdata->dev;
+	ieee80211_tx_skb(sdata, skb);
+
+	*cookie = (unsigned long) skb;
+	return 0;
 }
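
The new ieee80211_action() handler above allocates a buffer with the hardware's extra TX headroom in front, copies the caller's frame in after the reserved space, and returns the skb pointer as the cookie that later identifies the TX status report. A simplified user-space analogue of the reserve-headroom-then-copy step (the frame_buf type and the example bytes are illustrative):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct frame_buf {
	uint8_t *head;		/* start of allocation */
	uint8_t *data;		/* start of frame payload */
	size_t len;		/* payload length */
};

static struct frame_buf *alloc_frame(size_t headroom, const void *buf, size_t len)
{
	struct frame_buf *fb = malloc(sizeof(*fb));

	if (!fb)
		return NULL;
	fb->head = malloc(headroom + len);
	if (!fb->head) {
		free(fb);
		return NULL;
	}

	/* reserve headroom so the driver can prepend its own headers */
	fb->data = fb->head + headroom;
	memcpy(fb->data, buf, len);
	fb->len = len;
	return fb;
}

int main(void)
{
	static const uint8_t frame[] = { 0xd0, 0x00, 0x3c, 0x00 };	/* example bytes */
	struct frame_buf *fb = alloc_frame(64, frame, sizeof(frame));

	if (!fb)
		return 1;

	/* the buffer address doubles as the cookie reported back later */
	uint64_t cookie = (uintptr_t)fb;
	printf("queued %zu-byte frame, cookie %#llx\n",
	       fb->len, (unsigned long long)cookie);

	free(fb->head);
	free(fb);
	return 0;
}
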
 
 struct cfg80211_ops mac80211_config_ops = {
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 637929b..a694c59 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -307,9 +307,6 @@
 
 /* statistics stuff */
 
-#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...)			\
-	DEBUGFS_READONLY_FILE(stats_ ##name, buflen, fmt, ##value)
-
 static ssize_t format_devstat_counter(struct ieee80211_local *local,
 	char __user *userbuf,
 	size_t count, loff_t *ppos,
@@ -351,75 +348,16 @@
 	.open = mac80211_open_file_generic,				\
 };
 
-#define DEBUGFS_STATS_ADD(name)						\
+#define DEBUGFS_STATS_ADD(name, field)					\
+	debugfs_create_u32(#name, 0400, statsd, (u32 *) &field);
+#define DEBUGFS_DEVSTATS_ADD(name)					\
 	debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops);
 
-DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u",
-		   local->dot11TransmittedFragmentCount);
-DEBUGFS_STATS_FILE(multicast_transmitted_frame_count, 20, "%u",
-		   local->dot11MulticastTransmittedFrameCount);
-DEBUGFS_STATS_FILE(failed_count, 20, "%u",
-		   local->dot11FailedCount);
-DEBUGFS_STATS_FILE(retry_count, 20, "%u",
-		   local->dot11RetryCount);
-DEBUGFS_STATS_FILE(multiple_retry_count, 20, "%u",
-		   local->dot11MultipleRetryCount);
-DEBUGFS_STATS_FILE(frame_duplicate_count, 20, "%u",
-		   local->dot11FrameDuplicateCount);
-DEBUGFS_STATS_FILE(received_fragment_count, 20, "%u",
-		   local->dot11ReceivedFragmentCount);
-DEBUGFS_STATS_FILE(multicast_received_frame_count, 20, "%u",
-		   local->dot11MulticastReceivedFrameCount);
-DEBUGFS_STATS_FILE(transmitted_frame_count, 20, "%u",
-		   local->dot11TransmittedFrameCount);
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
-DEBUGFS_STATS_FILE(tx_handlers_drop, 20, "%u",
-		   local->tx_handlers_drop);
-DEBUGFS_STATS_FILE(tx_handlers_queued, 20, "%u",
-		   local->tx_handlers_queued);
-DEBUGFS_STATS_FILE(tx_handlers_drop_unencrypted, 20, "%u",
-		   local->tx_handlers_drop_unencrypted);
-DEBUGFS_STATS_FILE(tx_handlers_drop_fragment, 20, "%u",
-		   local->tx_handlers_drop_fragment);
-DEBUGFS_STATS_FILE(tx_handlers_drop_wep, 20, "%u",
-		   local->tx_handlers_drop_wep);
-DEBUGFS_STATS_FILE(tx_handlers_drop_not_assoc, 20, "%u",
-		   local->tx_handlers_drop_not_assoc);
-DEBUGFS_STATS_FILE(tx_handlers_drop_unauth_port, 20, "%u",
-		   local->tx_handlers_drop_unauth_port);
-DEBUGFS_STATS_FILE(rx_handlers_drop, 20, "%u",
-		   local->rx_handlers_drop);
-DEBUGFS_STATS_FILE(rx_handlers_queued, 20, "%u",
-		   local->rx_handlers_queued);
-DEBUGFS_STATS_FILE(rx_handlers_drop_nullfunc, 20, "%u",
-		   local->rx_handlers_drop_nullfunc);
-DEBUGFS_STATS_FILE(rx_handlers_drop_defrag, 20, "%u",
-		   local->rx_handlers_drop_defrag);
-DEBUGFS_STATS_FILE(rx_handlers_drop_short, 20, "%u",
-		   local->rx_handlers_drop_short);
-DEBUGFS_STATS_FILE(rx_handlers_drop_passive_scan, 20, "%u",
-		   local->rx_handlers_drop_passive_scan);
-DEBUGFS_STATS_FILE(tx_expand_skb_head, 20, "%u",
-		   local->tx_expand_skb_head);
-DEBUGFS_STATS_FILE(tx_expand_skb_head_cloned, 20, "%u",
-		   local->tx_expand_skb_head_cloned);
-DEBUGFS_STATS_FILE(rx_expand_skb_head, 20, "%u",
-		   local->rx_expand_skb_head);
-DEBUGFS_STATS_FILE(rx_expand_skb_head2, 20, "%u",
-		   local->rx_expand_skb_head2);
-DEBUGFS_STATS_FILE(rx_handlers_fragments, 20, "%u",
-		   local->rx_handlers_fragments);
-DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u",
-		   local->tx_status_drop);
-
-#endif
-
 DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount);
 DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount);
 DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount);
 DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount);
 
-
 void debugfs_hw_add(struct ieee80211_local *local)
 {
 	struct dentry *phyd = local->hw.wiphy->debugfsdir;
@@ -448,38 +386,60 @@
 	if (!statsd)
 		return;
 
-	DEBUGFS_STATS_ADD(transmitted_fragment_count);
-	DEBUGFS_STATS_ADD(multicast_transmitted_frame_count);
-	DEBUGFS_STATS_ADD(failed_count);
-	DEBUGFS_STATS_ADD(retry_count);
-	DEBUGFS_STATS_ADD(multiple_retry_count);
-	DEBUGFS_STATS_ADD(frame_duplicate_count);
-	DEBUGFS_STATS_ADD(received_fragment_count);
-	DEBUGFS_STATS_ADD(multicast_received_frame_count);
-	DEBUGFS_STATS_ADD(transmitted_frame_count);
+	DEBUGFS_STATS_ADD(transmitted_fragment_count,
+		local->dot11TransmittedFragmentCount);
+	DEBUGFS_STATS_ADD(multicast_transmitted_frame_count,
+		local->dot11MulticastTransmittedFrameCount);
+	DEBUGFS_STATS_ADD(failed_count, local->dot11FailedCount);
+	DEBUGFS_STATS_ADD(retry_count, local->dot11RetryCount);
+	DEBUGFS_STATS_ADD(multiple_retry_count,
+		local->dot11MultipleRetryCount);
+	DEBUGFS_STATS_ADD(frame_duplicate_count,
+		local->dot11FrameDuplicateCount);
+	DEBUGFS_STATS_ADD(received_fragment_count,
+		local->dot11ReceivedFragmentCount);
+	DEBUGFS_STATS_ADD(multicast_received_frame_count,
+		local->dot11MulticastReceivedFrameCount);
+	DEBUGFS_STATS_ADD(transmitted_frame_count,
+		local->dot11TransmittedFrameCount);
 #ifdef CONFIG_MAC80211_DEBUG_COUNTERS
-	DEBUGFS_STATS_ADD(tx_handlers_drop);
-	DEBUGFS_STATS_ADD(tx_handlers_queued);
-	DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted);
-	DEBUGFS_STATS_ADD(tx_handlers_drop_fragment);
-	DEBUGFS_STATS_ADD(tx_handlers_drop_wep);
-	DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc);
-	DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port);
-	DEBUGFS_STATS_ADD(rx_handlers_drop);
-	DEBUGFS_STATS_ADD(rx_handlers_queued);
-	DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc);
-	DEBUGFS_STATS_ADD(rx_handlers_drop_defrag);
-	DEBUGFS_STATS_ADD(rx_handlers_drop_short);
-	DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan);
-	DEBUGFS_STATS_ADD(tx_expand_skb_head);
-	DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned);
-	DEBUGFS_STATS_ADD(rx_expand_skb_head);
-	DEBUGFS_STATS_ADD(rx_expand_skb_head2);
-	DEBUGFS_STATS_ADD(rx_handlers_fragments);
-	DEBUGFS_STATS_ADD(tx_status_drop);
+	DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop);
+	DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued);
+	DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted,
+		local->tx_handlers_drop_unencrypted);
+	DEBUGFS_STATS_ADD(tx_handlers_drop_fragment,
+		local->tx_handlers_drop_fragment);
+	DEBUGFS_STATS_ADD(tx_handlers_drop_wep,
+		local->tx_handlers_drop_wep);
+	DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc,
+		local->tx_handlers_drop_not_assoc);
+	DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port,
+		local->tx_handlers_drop_unauth_port);
+	DEBUGFS_STATS_ADD(rx_handlers_drop, local->rx_handlers_drop);
+	DEBUGFS_STATS_ADD(rx_handlers_queued, local->rx_handlers_queued);
+	DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc,
+		local->rx_handlers_drop_nullfunc);
+	DEBUGFS_STATS_ADD(rx_handlers_drop_defrag,
+		local->rx_handlers_drop_defrag);
+	DEBUGFS_STATS_ADD(rx_handlers_drop_short,
+		local->rx_handlers_drop_short);
+	DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan,
+		local->rx_handlers_drop_passive_scan);
+	DEBUGFS_STATS_ADD(tx_expand_skb_head,
+		local->tx_expand_skb_head);
+	DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned,
+		local->tx_expand_skb_head_cloned);
+	DEBUGFS_STATS_ADD(rx_expand_skb_head,
+		local->rx_expand_skb_head);
+	DEBUGFS_STATS_ADD(rx_expand_skb_head2,
+		local->rx_expand_skb_head2);
+	DEBUGFS_STATS_ADD(rx_handlers_fragments,
+		local->rx_handlers_fragments);
+	DEBUGFS_STATS_ADD(tx_status_drop,
+		local->tx_status_drop);
 #endif
-	DEBUGFS_STATS_ADD(dot11ACKFailureCount);
-	DEBUGFS_STATS_ADD(dot11RTSFailureCount);
-	DEBUGFS_STATS_ADD(dot11FCSErrorCount);
-	DEBUGFS_STATS_ADD(dot11RTSSuccessCount);
+	DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount);
+	DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount);
+	DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount);
+	DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount);
 }
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 97c9e46..fa5e76e 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -143,7 +143,7 @@
 		len = p - buf;
 		break;
 	case ALG_CCMP:
-		for (i = 0; i < NUM_RX_DATA_QUEUES; i++) {
+		for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) {
 			rpn = key->u.ccmp.rx_pn[i];
 			p += scnprintf(p, sizeof(buf)+buf-p,
 				       "%02x%02x%02x%02x%02x%02x\n",
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index e763f15..76839d4 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -30,7 +30,6 @@
 }
 #define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n")
 #define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n")
-#define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n")
 #define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n")
 
 #define STA_OPS(name)							\
@@ -52,19 +51,7 @@
 
 STA_FILE(aid, sta.aid, D);
 STA_FILE(dev, sdata->name, S);
-STA_FILE(rx_packets, rx_packets, LU);
-STA_FILE(tx_packets, tx_packets, LU);
-STA_FILE(rx_bytes, rx_bytes, LU);
-STA_FILE(tx_bytes, tx_bytes, LU);
-STA_FILE(rx_duplicates, num_duplicates, LU);
-STA_FILE(rx_fragments, rx_fragments, LU);
-STA_FILE(rx_dropped, rx_dropped, LU);
-STA_FILE(tx_fragments, tx_fragments, LU);
-STA_FILE(tx_filtered, tx_filtered_count, LU);
-STA_FILE(tx_retry_failed, tx_retry_failed, LU);
-STA_FILE(tx_retry_count, tx_retry_count, LU);
 STA_FILE(last_signal, last_signal, D);
-STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
 
 static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
 			      size_t count, loff_t *ppos)
@@ -134,28 +121,25 @@
 	p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
 			sta->ampdu_mlme.dialog_token_allocator + 1);
 	p += scnprintf(p, sizeof(buf) + buf - p,
-		       "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
+		       "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
 	for (i = 0; i < STA_TID_NUM; i++) {
 		p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
 		p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
-				sta->ampdu_mlme.tid_active_rx[i]);
+				!!sta->ampdu_mlme.tid_rx[i]);
 		p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
-				sta->ampdu_mlme.tid_active_rx[i] ?
+				sta->ampdu_mlme.tid_rx[i] ?
 				sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
 		p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
-				sta->ampdu_mlme.tid_active_rx[i] ?
+				sta->ampdu_mlme.tid_rx[i] ?
 				sta->ampdu_mlme.tid_rx[i]->ssn : 0);
 
 		p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
-				sta->ampdu_mlme.tid_state_tx[i]);
+				!!sta->ampdu_mlme.tid_tx[i]);
 		p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
-				sta->ampdu_mlme.tid_state_tx[i] ?
+				sta->ampdu_mlme.tid_tx[i] ?
 				sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
-		p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
-				sta->ampdu_mlme.tid_state_tx[i] ?
-				sta->ampdu_mlme.tid_tx[i]->ssn : 0);
 		p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
-				sta->ampdu_mlme.tid_state_tx[i] ?
+				sta->ampdu_mlme.tid_tx[i] ?
 				skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0);
 		p += scnprintf(p, sizeof(buf) + buf - p, "\n");
 	}
@@ -210,8 +194,7 @@
 		if (start)
 			ret = ieee80211_start_tx_ba_session(&sta->sta, tid);
 		else
-			ret = ieee80211_stop_tx_ba_session(&sta->sta, tid,
-							   WLAN_BACK_RECIPIENT);
+			ret = ieee80211_stop_tx_ba_session(&sta->sta, tid);
 	} else {
 		__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3);
 		ret = 0;
@@ -307,6 +290,13 @@
 	debugfs_create_file(#name, 0400, \
 		sta->debugfs.dir, sta, &sta_ ##name## _ops);
 
+#define DEBUGFS_ADD_COUNTER(name, field)				\
+	if (sizeof(sta->field) == sizeof(u32))				\
+		debugfs_create_u32(#name, 0400, sta->debugfs.dir,	\
+			(u32 *) &sta->field);				\
+	else								\
+		debugfs_create_u64(#name, 0400, sta->debugfs.dir,	\
+			(u64 *) &sta->field);
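
DEBUGFS_ADD_COUNTER above picks debugfs_create_u32() or debugfs_create_u64() based on sizeof() the station field, so counters whose width depends on the architecture (e.g. unsigned long) are exported correctly without per-field annotations. A stand-alone sketch of the same sizeof() dispatch, with printf stand-ins for the debugfs registration calls:

#include <stdint.h>
#include <stdio.h>

static void register_u32(const char *name, uint32_t *val)
{
	printf("%s -> 32-bit counter (%u)\n", name, *val);
}

static void register_u64(const char *name, uint64_t *val)
{
	printf("%s -> 64-bit counter (%llu)\n", name, (unsigned long long)*val);
}

/* pick the registration helper that matches the field's width */
#define ADD_COUNTER(obj, field)						\
	do {								\
		if (sizeof((obj)->field) == sizeof(uint32_t))		\
			register_u32(#field, (uint32_t *)&(obj)->field); \
		else							\
			register_u64(#field, (uint64_t *)&(obj)->field); \
	} while (0)

struct sta_counters {
	unsigned long rx_packets;	/* width depends on the platform */
	uint64_t tx_bytes;
};

int main(void)
{
	struct sta_counters sta = { .rx_packets = 10, .tx_bytes = 2048 };

	ADD_COUNTER(&sta, rx_packets);
	ADD_COUNTER(&sta, tx_bytes);
	return 0;
}
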
 
 void ieee80211_sta_debugfs_add(struct sta_info *sta)
 {
@@ -339,20 +329,21 @@
 	DEBUGFS_ADD(last_seq_ctrl);
 	DEBUGFS_ADD(agg_status);
 	DEBUGFS_ADD(dev);
-	DEBUGFS_ADD(rx_packets);
-	DEBUGFS_ADD(tx_packets);
-	DEBUGFS_ADD(rx_bytes);
-	DEBUGFS_ADD(tx_bytes);
-	DEBUGFS_ADD(rx_duplicates);
-	DEBUGFS_ADD(rx_fragments);
-	DEBUGFS_ADD(rx_dropped);
-	DEBUGFS_ADD(tx_fragments);
-	DEBUGFS_ADD(tx_filtered);
-	DEBUGFS_ADD(tx_retry_failed);
-	DEBUGFS_ADD(tx_retry_count);
 	DEBUGFS_ADD(last_signal);
-	DEBUGFS_ADD(wep_weak_iv_count);
 	DEBUGFS_ADD(ht_capa);
+
+	DEBUGFS_ADD_COUNTER(rx_packets, rx_packets);
+	DEBUGFS_ADD_COUNTER(tx_packets, tx_packets);
+	DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes);
+	DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes);
+	DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates);
+	DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments);
+	DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped);
+	DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments);
+	DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count);
+	DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed);
+	DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
+	DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count);
 }
 
 void ieee80211_sta_debugfs_remove(struct sta_info *sta)
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 9c1da08..14123dc 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -16,10 +16,11 @@
 
 	might_sleep();
 
+	trace_drv_start(local);
 	local->started = true;
 	smp_mb();
 	ret = local->ops->start(&local->hw);
-	trace_drv_start(local, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -27,8 +28,9 @@
 {
 	might_sleep();
 
-	local->ops->stop(&local->hw);
 	trace_drv_stop(local);
+	local->ops->stop(&local->hw);
+	trace_drv_return_void(local);
 
 	/* sync away all work on the tasklet before clearing started */
 	tasklet_disable(&local->tasklet);
@@ -46,8 +48,9 @@
 
 	might_sleep();
 
+	trace_drv_add_interface(local, vif_to_sdata(vif));
 	ret = local->ops->add_interface(&local->hw, vif);
-	trace_drv_add_interface(local, vif_to_sdata(vif), ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -56,8 +59,9 @@
 {
 	might_sleep();
 
-	local->ops->remove_interface(&local->hw, vif);
 	trace_drv_remove_interface(local, vif_to_sdata(vif));
+	local->ops->remove_interface(&local->hw, vif);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_config(struct ieee80211_local *local, u32 changed)
@@ -66,8 +70,9 @@
 
 	might_sleep();
 
+	trace_drv_config(local, changed);
 	ret = local->ops->config(&local->hw, changed);
-	trace_drv_config(local, changed, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -78,9 +83,10 @@
 {
 	might_sleep();
 
+	trace_drv_bss_info_changed(local, sdata, info, changed);
 	if (local->ops->bss_info_changed)
 		local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
-	trace_drv_bss_info_changed(local, sdata, info, changed);
+	trace_drv_return_void(local);
 }
 
 static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
@@ -88,10 +94,12 @@
 {
 	u64 ret = 0;
 
+	trace_drv_prepare_multicast(local, mc_list->count);
+
 	if (local->ops->prepare_multicast)
 		ret = local->ops->prepare_multicast(&local->hw, mc_list);
 
-	trace_drv_prepare_multicast(local, mc_list->count, ret);
+	trace_drv_return_u64(local, ret);
 
 	return ret;
 }
@@ -103,19 +111,21 @@
 {
 	might_sleep();
 
-	local->ops->configure_filter(&local->hw, changed_flags, total_flags,
-				     multicast);
 	trace_drv_configure_filter(local, changed_flags, total_flags,
 				   multicast);
+	local->ops->configure_filter(&local->hw, changed_flags, total_flags,
+				     multicast);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_set_tim(struct ieee80211_local *local,
 			      struct ieee80211_sta *sta, bool set)
 {
 	int ret = 0;
+	trace_drv_set_tim(local, sta, set);
 	if (local->ops->set_tim)
 		ret = local->ops->set_tim(&local->hw, sta, set);
-	trace_drv_set_tim(local, sta, set, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -129,8 +139,9 @@
 
 	might_sleep();
 
+	trace_drv_set_key(local, cmd, sdata, sta, key);
 	ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
-	trace_drv_set_key(local, cmd, sdata, sta, key, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -145,10 +156,11 @@
 	if (sta)
 		ista = &sta->sta;
 
+	trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
 	if (local->ops->update_tkip_key)
 		local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
 					    ista, iv32, phase1key);
-	trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_hw_scan(struct ieee80211_local *local,
@@ -159,8 +171,9 @@
 
 	might_sleep();
 
+	trace_drv_hw_scan(local, sdata, req);
 	ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
-	trace_drv_hw_scan(local, sdata, req, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -168,18 +181,20 @@
 {
 	might_sleep();
 
+	trace_drv_sw_scan_start(local);
 	if (local->ops->sw_scan_start)
 		local->ops->sw_scan_start(&local->hw);
-	trace_drv_sw_scan_start(local);
+	trace_drv_return_void(local);
 }
 
 static inline void drv_sw_scan_complete(struct ieee80211_local *local)
 {
 	might_sleep();
 
+	trace_drv_sw_scan_complete(local);
 	if (local->ops->sw_scan_complete)
 		local->ops->sw_scan_complete(&local->hw);
-	trace_drv_sw_scan_complete(local);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_get_stats(struct ieee80211_local *local,
@@ -211,9 +226,10 @@
 
 	might_sleep();
 
+	trace_drv_set_rts_threshold(local, value);
 	if (local->ops->set_rts_threshold)
 		ret = local->ops->set_rts_threshold(&local->hw, value);
-	trace_drv_set_rts_threshold(local, value, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -223,12 +239,13 @@
 	int ret = 0;
 	might_sleep();
 
+	trace_drv_set_coverage_class(local, value);
 	if (local->ops->set_coverage_class)
 		local->ops->set_coverage_class(&local->hw, value);
 	else
 		ret = -EOPNOTSUPP;
 
-	trace_drv_set_coverage_class(local, value, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -237,9 +254,10 @@
 				  enum sta_notify_cmd cmd,
 				  struct ieee80211_sta *sta)
 {
+	trace_drv_sta_notify(local, sdata, cmd, sta);
 	if (local->ops->sta_notify)
 		local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
-	trace_drv_sta_notify(local, sdata, cmd, sta);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_sta_add(struct ieee80211_local *local,
@@ -250,13 +268,11 @@
 
 	might_sleep();
 
+	trace_drv_sta_add(local, sdata, sta);
 	if (local->ops->sta_add)
 		ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
-	else if (local->ops->sta_notify)
-		local->ops->sta_notify(&local->hw, &sdata->vif,
-					STA_NOTIFY_ADD, sta);
 
-	trace_drv_sta_add(local, sdata, sta, ret);
+	trace_drv_return_int(local, ret);
 
 	return ret;
 }
@@ -267,13 +283,11 @@
 {
 	might_sleep();
 
+	trace_drv_sta_remove(local, sdata, sta);
 	if (local->ops->sta_remove)
 		local->ops->sta_remove(&local->hw, &sdata->vif, sta);
-	else if (local->ops->sta_notify)
-		local->ops->sta_notify(&local->hw, &sdata->vif,
-					STA_NOTIFY_REMOVE, sta);
 
-	trace_drv_sta_remove(local, sdata, sta);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
@@ -283,9 +297,10 @@
 
 	might_sleep();
 
+	trace_drv_conf_tx(local, queue, params);
 	if (local->ops->conf_tx)
 		ret = local->ops->conf_tx(&local->hw, queue, params);
-	trace_drv_conf_tx(local, queue, params, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -295,9 +310,10 @@
 
 	might_sleep();
 
+	trace_drv_get_tsf(local);
 	if (local->ops->get_tsf)
 		ret = local->ops->get_tsf(&local->hw);
-	trace_drv_get_tsf(local, ret);
+	trace_drv_return_u64(local, ret);
 	return ret;
 }
 
@@ -305,18 +321,20 @@
 {
 	might_sleep();
 
+	trace_drv_set_tsf(local, tsf);
 	if (local->ops->set_tsf)
 		local->ops->set_tsf(&local->hw, tsf);
-	trace_drv_set_tsf(local, tsf);
+	trace_drv_return_void(local);
 }
 
 static inline void drv_reset_tsf(struct ieee80211_local *local)
 {
 	might_sleep();
 
+	trace_drv_reset_tsf(local);
 	if (local->ops->reset_tsf)
 		local->ops->reset_tsf(&local->hw);
-	trace_drv_reset_tsf(local);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_tx_last_beacon(struct ieee80211_local *local)
@@ -325,9 +343,10 @@
 
 	might_sleep();
 
+	trace_drv_tx_last_beacon(local);
 	if (local->ops->tx_last_beacon)
 		ret = local->ops->tx_last_beacon(&local->hw);
-	trace_drv_tx_last_beacon(local, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -338,10 +357,17 @@
 				   u16 *ssn)
 {
 	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn);
+
 	if (local->ops->ampdu_action)
 		ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
 					       sta, tid, ssn);
-	trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret);
+
+	trace_drv_return_int(local, ret);
+
 	return ret;
 }
 
@@ -349,9 +375,14 @@
 				struct survey_info *survey)
 {
 	int ret = -EOPNOTSUPP;
+
+	trace_drv_get_survey(local, idx, survey);
+
 	if (local->ops->get_survey)
 		ret = local->ops->get_survey(&local->hw, idx, survey);
-	/* trace_drv_get_survey(local, idx, survey, ret); */
+
+	trace_drv_return_int(local, ret);
+
 	return ret;
 }
 
@@ -370,6 +401,7 @@
 	trace_drv_flush(local, drop);
 	if (local->ops->flush)
 		local->ops->flush(&local->hw, drop);
+	trace_drv_return_void(local);
 }
 
 static inline void drv_channel_switch(struct ieee80211_local *local,
@@ -377,9 +409,9 @@
 {
 	might_sleep();
 
-	local->ops->channel_switch(&local->hw, ch_switch);
-
 	trace_drv_channel_switch(local, ch_switch);
+	local->ops->channel_switch(&local->hw, ch_switch);
+	trace_drv_return_void(local);
 }
 
 #endif /* __MAC80211_DRIVER_OPS */
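
The driver-ops changes above consistently move the tracepoint to just before the driver callback and report the result through a shared drv_return_*() event afterwards, presumably so that the trace still records the call even if the driver hangs in it, and so that return values are logged uniformly. A reduced sketch of that call-around pattern (trace_entry()/trace_return_int() are printf stand-ins, not the real tracepoints):

#include <stddef.h>
#include <stdio.h>

struct driver_ops {
	int (*set_rts_threshold)(void *hw, unsigned int value);
};

static void trace_entry(const char *op, unsigned int value)
{
	printf("trace: %s value:%u\n", op, value);
}

static void trace_return_int(const char *op, int ret)
{
	printf("trace: %s return %d\n", op, ret);
}

static int drv_set_rts_threshold(const struct driver_ops *ops, void *hw,
				 unsigned int value)
{
	int ret = 0;

	/* trace the call before invoking the driver ... */
	trace_entry("set_rts_threshold", value);
	if (ops->set_rts_threshold)
		ret = ops->set_rts_threshold(hw, value);
	/* ... and report the result in a shared "return" event afterwards */
	trace_return_int("set_rts_threshold", ret);
	return ret;
}

static int demo_set_rts(void *hw, unsigned int value)
{
	(void)hw;
	return value > 2347 ? -1 : 0;
}

int main(void)
{
	struct driver_ops ops = { .set_rts_threshold = demo_set_rts };

	drv_set_rts_threshold(&ops, NULL, 500);
	return 0;
}
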
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 6a9b234..5d5d2a9 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -36,20 +36,58 @@
  * Tracing for driver callbacks.
  */
 
-TRACE_EVENT(drv_start,
+TRACE_EVENT(drv_return_void,
+	TP_PROTO(struct ieee80211_local *local),
+	TP_ARGS(local),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+	),
+	TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG)
+);
+
+TRACE_EVENT(drv_return_int,
 	TP_PROTO(struct ieee80211_local *local, int ret),
-
 	TP_ARGS(local, ret),
-
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(int, ret)
 	),
-
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		__entry->ret = ret;
 	),
+	TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret)
+);
+
+TRACE_EVENT(drv_return_u64,
+	TP_PROTO(struct ieee80211_local *local, u64 ret),
+	TP_ARGS(local, ret),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(u64, ret)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->ret = ret;
+	),
+	TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret)
+);
+
+TRACE_EVENT(drv_start,
+	TP_PROTO(struct ieee80211_local *local),
+
+	TP_ARGS(local),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+	),
 
 	TP_printk(
 		LOCAL_PR_FMT, LOCAL_PR_ARG
@@ -76,28 +114,25 @@
 
 TRACE_EVENT(drv_add_interface,
 	TP_PROTO(struct ieee80211_local *local,
-		 struct ieee80211_sub_if_data *sdata,
-		 int ret),
+		 struct ieee80211_sub_if_data *sdata),
 
-	TP_ARGS(local, sdata, ret),
+	TP_ARGS(local, sdata),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		VIF_ENTRY
 		__array(char, addr, 6)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
 		memcpy(__entry->addr, sdata->vif.addr, 6);
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT  VIF_PR_FMT " addr:%pM ret:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr, __entry->ret
+		LOCAL_PR_FMT  VIF_PR_FMT " addr:%pM",
+		LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr
 	)
 );
 
@@ -126,15 +161,13 @@
 
 TRACE_EVENT(drv_config,
 	TP_PROTO(struct ieee80211_local *local,
-		 u32 changed,
-		 int ret),
+		 u32 changed),
 
-	TP_ARGS(local, changed, ret),
+	TP_ARGS(local, changed),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(u32, changed)
-		__field(int, ret)
 		__field(u32, flags)
 		__field(int, power_level)
 		__field(int, dynamic_ps_timeout)
@@ -150,7 +183,6 @@
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		__entry->changed = changed;
-		__entry->ret = ret;
 		__entry->flags = local->hw.conf.flags;
 		__entry->power_level = local->hw.conf.power_level;
 		__entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
@@ -164,8 +196,8 @@
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " ch:%#x freq:%d ret:%d",
-		LOCAL_PR_ARG, __entry->changed, __entry->center_freq, __entry->ret
+		LOCAL_PR_FMT " ch:%#x freq:%d",
+		LOCAL_PR_ARG, __entry->changed, __entry->center_freq
 	)
 );
 
@@ -220,26 +252,23 @@
 );
 
 TRACE_EVENT(drv_prepare_multicast,
-	TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret),
+	TP_PROTO(struct ieee80211_local *local, int mc_count),
 
-	TP_ARGS(local, mc_count, ret),
+	TP_ARGS(local, mc_count),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(int, mc_count)
-		__field(u64, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		__entry->mc_count = mc_count;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " prepare mc (%d): %llx",
-		LOCAL_PR_ARG, __entry->mc_count,
-		(unsigned long long) __entry->ret
+		LOCAL_PR_FMT " prepare mc (%d)",
+		LOCAL_PR_ARG, __entry->mc_count
 	)
 );
 
@@ -273,27 +302,25 @@
 
 TRACE_EVENT(drv_set_tim,
 	TP_PROTO(struct ieee80211_local *local,
-		 struct ieee80211_sta *sta, bool set, int ret),
+		 struct ieee80211_sta *sta, bool set),
 
-	TP_ARGS(local, sta, set, ret),
+	TP_ARGS(local, sta, set),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		STA_ENTRY
 		__field(bool, set)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		STA_ASSIGN;
 		__entry->set = set;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT STA_PR_FMT " set:%d ret:%d",
-		LOCAL_PR_ARG, STA_PR_FMT, __entry->set, __entry->ret
+		LOCAL_PR_FMT STA_PR_FMT " set:%d",
+		LOCAL_PR_ARG, STA_PR_FMT, __entry->set
 	)
 );
 
@@ -301,9 +328,9 @@
 	TP_PROTO(struct ieee80211_local *local,
 		 enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata,
 		 struct ieee80211_sta *sta,
-		 struct ieee80211_key_conf *key, int ret),
+		 struct ieee80211_key_conf *key),
 
-	TP_ARGS(local, cmd, sdata, sta, key, ret),
+	TP_ARGS(local, cmd, sdata, sta, key),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
@@ -313,7 +340,6 @@
 		__field(u8, hw_key_idx)
 		__field(u8, flags)
 		__field(s8, keyidx)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
@@ -324,12 +350,11 @@
 		__entry->flags = key->flags;
 		__entry->keyidx = key->keyidx;
 		__entry->hw_key_idx = key->hw_key_idx;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT  VIF_PR_FMT  STA_PR_FMT " ret:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
+		LOCAL_PR_FMT  VIF_PR_FMT  STA_PR_FMT,
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
 	)
 );
 
@@ -364,25 +389,23 @@
 TRACE_EVENT(drv_hw_scan,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sub_if_data *sdata,
-		 struct cfg80211_scan_request *req, int ret),
+		 struct cfg80211_scan_request *req),
 
-	TP_ARGS(local, sdata, req, ret),
+	TP_ARGS(local, sdata, req),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		VIF_ENTRY
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT " ret:%d",
-		LOCAL_PR_ARG,VIF_PR_ARG, __entry->ret
+		LOCAL_PR_FMT VIF_PR_FMT,
+		LOCAL_PR_ARG,VIF_PR_ARG
 	)
 );
 
@@ -479,48 +502,44 @@
 );
 
 TRACE_EVENT(drv_set_rts_threshold,
-	TP_PROTO(struct ieee80211_local *local, u32 value, int ret),
+	TP_PROTO(struct ieee80211_local *local, u32 value),
 
-	TP_ARGS(local, value, ret),
+	TP_ARGS(local, value),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(u32, value)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
-		__entry->ret = ret;
 		__entry->value = value;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " value:%d ret:%d",
-		LOCAL_PR_ARG, __entry->value, __entry->ret
+		LOCAL_PR_FMT " value:%d",
+		LOCAL_PR_ARG, __entry->value
 	)
 );
 
 TRACE_EVENT(drv_set_coverage_class,
-	TP_PROTO(struct ieee80211_local *local, u8 value, int ret),
+	TP_PROTO(struct ieee80211_local *local, u8 value),
 
-	TP_ARGS(local, value, ret),
+	TP_ARGS(local, value),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(u8, value)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
-		__entry->ret = ret;
 		__entry->value = value;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " value:%d ret:%d",
-		LOCAL_PR_ARG, __entry->value, __entry->ret
+		LOCAL_PR_FMT " value:%d",
+		LOCAL_PR_ARG, __entry->value
 	)
 );
 
@@ -555,27 +574,25 @@
 TRACE_EVENT(drv_sta_add,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sub_if_data *sdata,
-		 struct ieee80211_sta *sta, int ret),
+		 struct ieee80211_sta *sta),
 
-	TP_ARGS(local, sdata, sta, ret),
+	TP_ARGS(local, sdata, sta),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		VIF_ENTRY
 		STA_ENTRY
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
 		STA_ASSIGN;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT  VIF_PR_FMT  STA_PR_FMT " ret:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
+		LOCAL_PR_FMT  VIF_PR_FMT  STA_PR_FMT,
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
 	)
 );
 
@@ -606,10 +623,9 @@
 
 TRACE_EVENT(drv_conf_tx,
 	TP_PROTO(struct ieee80211_local *local, u16 queue,
-		 const struct ieee80211_tx_queue_params *params,
-		 int ret),
+		 const struct ieee80211_tx_queue_params *params),
 
-	TP_ARGS(local, queue, params, ret),
+	TP_ARGS(local, queue, params),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
@@ -618,13 +634,11 @@
 		__field(u16, cw_min)
 		__field(u16, cw_max)
 		__field(u8, aifs)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		__entry->queue = queue;
-		__entry->ret = ret;
 		__entry->txop = params->txop;
 		__entry->cw_max = params->cw_max;
 		__entry->cw_min = params->cw_min;
@@ -632,29 +646,27 @@
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " queue:%d ret:%d",
-		LOCAL_PR_ARG, __entry->queue, __entry->ret
+		LOCAL_PR_FMT " queue:%d",
+		LOCAL_PR_ARG, __entry->queue
 	)
 );
 
 TRACE_EVENT(drv_get_tsf,
-	TP_PROTO(struct ieee80211_local *local, u64 ret),
+	TP_PROTO(struct ieee80211_local *local),
 
-	TP_ARGS(local, ret),
+	TP_ARGS(local),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
-		__field(u64, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " ret:%llu",
-		LOCAL_PR_ARG, (unsigned long long)__entry->ret
+		LOCAL_PR_FMT,
+		LOCAL_PR_ARG
 	)
 );
 
@@ -698,23 +710,21 @@
 );
 
 TRACE_EVENT(drv_tx_last_beacon,
-	TP_PROTO(struct ieee80211_local *local, int ret),
+	TP_PROTO(struct ieee80211_local *local),
 
-	TP_ARGS(local, ret),
+	TP_ARGS(local),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " ret:%d",
-		LOCAL_PR_ARG, __entry->ret
+		LOCAL_PR_FMT,
+		LOCAL_PR_ARG
 	)
 );
 
@@ -723,9 +733,9 @@
 		 struct ieee80211_sub_if_data *sdata,
 		 enum ieee80211_ampdu_mlme_action action,
 		 struct ieee80211_sta *sta, u16 tid,
-		 u16 *ssn, int ret),
+		 u16 *ssn),
 
-	TP_ARGS(local, sdata, action, sta, tid, ssn, ret),
+	TP_ARGS(local, sdata, action, sta, tid, ssn),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
@@ -733,7 +743,6 @@
 		__field(u32, action)
 		__field(u16, tid)
 		__field(u16, ssn)
-		__field(int, ret)
 		VIF_ENTRY
 	),
 
@@ -741,15 +750,36 @@
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
 		STA_ASSIGN;
-		__entry->ret = ret;
 		__entry->action = action;
 		__entry->tid = tid;
 		__entry->ssn = ssn ? *ssn : 0;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
+		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d",
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid
+	)
+);
+
+TRACE_EVENT(drv_get_survey,
+	TP_PROTO(struct ieee80211_local *local, int idx,
+		 struct survey_info *survey),
+
+	TP_ARGS(local, idx, survey),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(int, idx)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->idx = idx;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT " idx:%d",
+		LOCAL_PR_ARG, __entry->idx
 	)
 );
 
@@ -851,25 +881,23 @@
 );
 
 TRACE_EVENT(api_stop_tx_ba_session,
-	TP_PROTO(struct ieee80211_sta *sta, u16 tid, u16 initiator),
+	TP_PROTO(struct ieee80211_sta *sta, u16 tid),
 
-	TP_ARGS(sta, tid, initiator),
+	TP_ARGS(sta, tid),
 
 	TP_STRUCT__entry(
 		STA_ENTRY
 		__field(u16, tid)
-		__field(u16, initiator)
 	),
 
 	TP_fast_assign(
 		STA_ASSIGN;
 		__entry->tid = tid;
-		__entry->initiator = initiator;
 	),
 
 	TP_printk(
-		STA_PR_FMT " tid:%d initiator:%d",
-		STA_PR_ARG, __entry->tid, __entry->initiator
+		STA_PR_FMT " tid:%d",
+		STA_PR_ARG, __entry->tid
 	)
 );
 
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 2ab106a..be928ef 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -6,7 +6,7 @@
  * Copyright 2005-2006, Devicescape Software, Inc.
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007-2008, Intel Corporation
+ * Copyright 2007-2010, Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -105,6 +105,8 @@
 {
 	int i;
 
+	cancel_work_sync(&sta->ampdu_mlme.work);
+
 	for (i = 0; i <  STA_TID_NUM; i++) {
 		__ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR);
 		__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -112,6 +114,43 @@
 	}
 }
 
+void ieee80211_ba_session_work(struct work_struct *work)
+{
+	struct sta_info *sta =
+		container_of(work, struct sta_info, ampdu_mlme.work);
+	struct tid_ampdu_tx *tid_tx;
+	int tid;
+
+	/*
+	 * When this flag is set, new sessions should be
+	 * blocked, and existing sessions will be torn
+	 * down by the code that set the flag, so this
+	 * need not run.
+	 */
+	if (test_sta_flags(sta, WLAN_STA_BLOCK_BA))
+		return;
+
+	mutex_lock(&sta->ampdu_mlme.mtx);
+	for (tid = 0; tid < STA_TID_NUM; tid++) {
+		if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired))
+			___ieee80211_stop_rx_ba_session(
+				sta, tid, WLAN_BACK_RECIPIENT,
+				WLAN_REASON_QSTA_TIMEOUT);
+
+		tid_tx = sta->ampdu_mlme.tid_tx[tid];
+		if (!tid_tx)
+			continue;
+
+		if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state))
+			ieee80211_tx_ba_session_handle_start(sta, tid);
+		else if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
+					    &tid_tx->state))
+			___ieee80211_stop_tx_ba_session(sta, tid,
+							WLAN_BACK_INITIATOR);
+	}
+	mutex_unlock(&sta->ampdu_mlme.mtx);
+}
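
ieee80211_ba_session_work() above funnels all per-TID aggregation state changes through a single per-station work item: each request is recorded as a bit, and the worker walks every TID under the ampdu mutex and dispatches on the bits it finds. A compact user-space sketch of that dispatch loop (the handlers and flag handling are simplified relative to the patch):

#include <stdatomic.h>
#include <stdio.h>

#define NUM_TIDS 16

enum { WANT_START, WANT_STOP };

static atomic_uint tid_flags[NUM_TIDS];

static void request(int tid, int what)
{
	atomic_fetch_or(&tid_flags[tid], 1u << what);
}

static int test_and_clear(int tid, int what)
{
	unsigned int bit = 1u << what;

	return atomic_fetch_and(&tid_flags[tid], ~bit) & bit;
}

/* the single worker walks every TID and acts on the recorded requests */
static void ba_session_work(void)
{
	for (int tid = 0; tid < NUM_TIDS; tid++) {
		if (test_and_clear(tid, WANT_START))
			printf("tid %d: starting TX BA session\n", tid);
		else if (test_and_clear(tid, WANT_STOP))
			printf("tid %d: stopping TX BA session\n", tid);
	}
}

int main(void)
{
	request(3, WANT_START);
	request(7, WANT_STOP);
	ba_session_work();
	return 0;
}
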
+
 void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
 			  const u8 *da, u16 tid,
 			  u16 initiator, u16 reason_code)
@@ -176,13 +215,8 @@
 
 	if (initiator == WLAN_BACK_INITIATOR)
 		__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0);
-	else { /* WLAN_BACK_RECIPIENT */
-		spin_lock_bh(&sta->lock);
-		if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
-			___ieee80211_stop_tx_ba_session(sta, tid,
-							WLAN_BACK_RECIPIENT);
-		spin_unlock_bh(&sta->lock);
-	}
+	else
+		__ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_RECIPIENT);
 }
 
 int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index b2cc1fd..d4e84b2 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -172,11 +172,13 @@
 	rcu_assign_pointer(ifibss->presp, skb);
 
 	sdata->vif.bss_conf.beacon_int = beacon_int;
+	sdata->vif.bss_conf.basic_rates = basic_rates;
 	bss_change = BSS_CHANGED_BEACON_INT;
 	bss_change |= ieee80211_reset_erp_info(sdata);
 	bss_change |= BSS_CHANGED_BSSID;
 	bss_change |= BSS_CHANGED_BEACON;
 	bss_change |= BSS_CHANGED_BEACON_ENABLED;
+	bss_change |= BSS_CHANGED_BASIC_RATES;
 	bss_change |= BSS_CHANGED_IBSS;
 	sdata->vif.bss_conf.ibss_joined = true;
 	ieee80211_bss_info_change_notify(sdata, bss_change);
@@ -529,7 +531,7 @@
 		sdata->drop_unencrypted = 0;
 
 	__ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
-				  ifibss->channel, 3, /* first two are basic */
+				  ifibss->channel, ifibss->basic_rates,
 				  capability, 0);
 }
 
@@ -727,8 +729,8 @@
 	ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true);
 }
 
-static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
-					  struct sk_buff *skb)
+void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+				   struct sk_buff *skb)
 {
 	struct ieee80211_rx_status *rx_status;
 	struct ieee80211_mgmt *mgmt;
@@ -754,33 +756,11 @@
 		ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len);
 		break;
 	}
-
-	kfree_skb(skb);
 }
 
-static void ieee80211_ibss_work(struct work_struct *work)
+void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata =
-		container_of(work, struct ieee80211_sub_if_data, u.ibss.work);
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_if_ibss *ifibss;
-	struct sk_buff *skb;
-
-	if (WARN_ON(local->suspended))
-		return;
-
-	if (!ieee80211_sdata_running(sdata))
-		return;
-
-	if (local->scanning)
-		return;
-
-	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_ADHOC))
-		return;
-	ifibss = &sdata->u.ibss;
-
-	while ((skb = skb_dequeue(&ifibss->skb_queue)))
-		ieee80211_ibss_rx_queued_mgmt(sdata, skb);
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
 	if (!test_and_clear_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request))
 		return;
@@ -798,6 +778,15 @@
 	}
 }
 
+static void ieee80211_queue_ibss_work(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+	struct ieee80211_local *local = sdata->local;
+
+	set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
+	ieee80211_queue_work(&local->hw, &sdata->work);
+}
+
 static void ieee80211_ibss_timer(unsigned long data)
 {
 	struct ieee80211_sub_if_data *sdata =
@@ -810,8 +799,7 @@
 		return;
 	}
 
-	set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
-	ieee80211_queue_work(&local->hw, &ifibss->work);
+	ieee80211_queue_ibss_work(sdata);
 }
 
 #ifdef CONFIG_PM
@@ -819,7 +807,6 @@
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-	cancel_work_sync(&ifibss->work);
 	if (del_timer_sync(&ifibss->timer))
 		ifibss->timer_running = true;
 }
@@ -839,10 +826,8 @@
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-	INIT_WORK(&ifibss->work, ieee80211_ibss_work);
 	setup_timer(&ifibss->timer, ieee80211_ibss_timer,
 		    (unsigned long) sdata);
-	skb_queue_head_init(&ifibss->skb_queue);
 }
 
 /* scan finished notification */
@@ -859,37 +844,11 @@
 		if (!sdata->u.ibss.ssid_len)
 			continue;
 		sdata->u.ibss.last_scan_completed = jiffies;
-		mod_timer(&sdata->u.ibss.timer, 0);
+		ieee80211_queue_ibss_work(sdata);
 	}
 	mutex_unlock(&local->iflist_mtx);
 }
 
-ieee80211_rx_result
-ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
-{
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_mgmt *mgmt;
-	u16 fc;
-
-	if (skb->len < 24)
-		return RX_DROP_MONITOR;
-
-	mgmt = (struct ieee80211_mgmt *) skb->data;
-	fc = le16_to_cpu(mgmt->frame_control);
-
-	switch (fc & IEEE80211_FCTL_STYPE) {
-	case IEEE80211_STYPE_PROBE_RESP:
-	case IEEE80211_STYPE_BEACON:
-	case IEEE80211_STYPE_PROBE_REQ:
-	case IEEE80211_STYPE_AUTH:
-		skb_queue_tail(&sdata->u.ibss.skb_queue, skb);
-		ieee80211_queue_work(&local->hw, &sdata->u.ibss.work);
-		return RX_QUEUED;
-	}
-
-	return RX_DROP_MONITOR;
-}
-
 int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 			struct cfg80211_ibss_params *params)
 {
@@ -902,6 +861,7 @@
 		sdata->u.ibss.fixed_bssid = false;
 
 	sdata->u.ibss.privacy = params->privacy;
+	sdata->u.ibss.basic_rates = params->basic_rates;
 
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
@@ -949,7 +909,7 @@
 	ieee80211_recalc_idle(sdata->local);
 
 	set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
-	ieee80211_queue_work(&sdata->local->hw, &sdata->u.ibss.work);
+	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 
 	return 0;
 }
@@ -957,10 +917,35 @@
 int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 {
 	struct sk_buff *skb;
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+	struct ieee80211_local *local = sdata->local;
+	struct cfg80211_bss *cbss;
+	u16 capability;
+	int active_ibss = 0;
+
+	active_ibss = ieee80211_sta_active_ibss(sdata);
+
+	if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+		capability = WLAN_CAPABILITY_IBSS;
+
+		if (ifibss->privacy)
+			capability |= WLAN_CAPABILITY_PRIVACY;
+
+		cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->channel,
+					ifibss->bssid, ifibss->ssid,
+					ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+					WLAN_CAPABILITY_PRIVACY,
+					capability);
+
+		if (cbss) {
+			cfg80211_unlink_bss(local->hw.wiphy, cbss);
+			cfg80211_put_bss(cbss);
+		}
+	}
 
 	del_timer_sync(&sdata->u.ibss.timer);
 	clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
-	cancel_work_sync(&sdata->u.ibss.work);
+	cancel_work_sync(&sdata->work);
 	clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
 
 	sta_info_flush(sdata->local, sdata);
@@ -975,7 +960,7 @@
 	synchronize_rcu();
 	kfree_skb(skb);
 
-	skb_queue_purge(&sdata->u.ibss.skb_queue);
+	skb_queue_purge(&sdata->skb_queue);
 	memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
 	sdata->u.ibss.ssid_len = 0;
 
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 1a9e2da..a3649a8 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -325,7 +325,6 @@
 	struct timer_list conn_mon_timer;
 	struct timer_list bcn_mon_timer;
 	struct timer_list chswitch_timer;
-	struct work_struct work;
 	struct work_struct monitor_work;
 	struct work_struct chswitch_work;
 	struct work_struct beacon_connection_loss_work;
@@ -340,8 +339,6 @@
 
 	u16 aid;
 
-	struct sk_buff_head skb_queue;
-
 	unsigned long timers_running; /* used for quiesce/restart */
 	bool powersave; /* powersave requested for this iface */
 	enum ieee80211_smps_mode req_smps, /* requested smps mode */
@@ -386,13 +383,12 @@
 
 struct ieee80211_if_ibss {
 	struct timer_list timer;
-	struct work_struct work;
-
-	struct sk_buff_head skb_queue;
 
 	unsigned long request;
 	unsigned long last_scan_completed;
 
+	u32 basic_rates;
+
 	bool timer_running;
 
 	bool fixed_bssid;
@@ -416,11 +412,9 @@
 };
 
 struct ieee80211_if_mesh {
-	struct work_struct work;
 	struct timer_list housekeeping_timer;
 	struct timer_list mesh_path_timer;
 	struct timer_list mesh_path_root_timer;
-	struct sk_buff_head skb_queue;
 
 	unsigned long timers_running;
 
@@ -517,6 +511,11 @@
 
 	u16 sequence_number;
 
+	struct work_struct work;
+	struct sk_buff_head skb_queue;
+
+	bool arp_filter_state;
+
 	/*
 	 * AP this belongs to: self in AP mode and
 	 * corresponding AP in VLAN mode, NULL for
@@ -569,11 +568,15 @@
 #endif
 }
 
+enum sdata_queue_type {
+	IEEE80211_SDATA_QUEUE_TYPE_FRAME	= 0,
+	IEEE80211_SDATA_QUEUE_AGG_START		= 1,
+	IEEE80211_SDATA_QUEUE_AGG_STOP		= 2,
+};
+
 enum {
 	IEEE80211_RX_MSG	= 1,
 	IEEE80211_TX_STATUS_MSG	= 2,
-	IEEE80211_DELBA_MSG	= 3,
-	IEEE80211_ADDBA_MSG	= 4,
 };
 
 enum queue_stop_reason {
@@ -724,13 +727,7 @@
 	struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
 	struct tasklet_struct tx_pending_tasklet;
 
-	/*
-	 * This lock is used to prevent concurrent A-MPDU
-	 * session start/stop processing, this thus also
-	 * synchronises the ->ampdu_action() callback to
-	 * drivers and limits it to one at a time.
-	 */
-	spinlock_t ampdu_lock;
+	atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES];
 
 	/* number of interfaces with corresponding IFF_ flags */
 	atomic_t iff_allmultis, iff_promiscs;
@@ -746,10 +743,10 @@
 	struct mutex iflist_mtx;
 
 	/*
-	 * Key lock, protects sdata's key_list and sta_info's
+	 * Key mutex, protects sdata's key_list and sta_info's
 	 * key pointers (write access, they're RCU.)
 	 */
-	spinlock_t key_lock;
+	struct mutex key_mtx;
 
 
 	/* Scanning and BSS list */
@@ -851,6 +848,15 @@
 	struct work_struct dynamic_ps_disable_work;
 	struct timer_list dynamic_ps_timer;
 	struct notifier_block network_latency_notifier;
+	struct notifier_block ifa_notifier;
+
+	/*
+	 * The dynamic ps timeout configured from user space via WEXT -
+	 * this will override whatever chosen by mac80211 internally.
+	 */
+	int dynamic_ps_forced_timeout;
+	int dynamic_ps_user_timeout;
+	bool disable_dynamic_ps;
 
 	int user_power_level; /* in dBm */
 	int power_constr_level; /* in dBm */
@@ -874,9 +880,8 @@
 	return netdev_priv(dev);
 }
 
-/* this struct represents 802.11n's RA/TID combination along with our vif */
+/* this struct represents 802.11n's RA/TID combination */
 struct ieee80211_ra_tid {
-	struct ieee80211_vif *vif;
 	u8 ra[ETH_ALEN];
 	u16 tid;
 };
@@ -985,29 +990,25 @@
 int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 			   struct cfg80211_disassoc_request *req,
 			   void *cookie);
-int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
-			 struct ieee80211_channel *chan,
-			 enum nl80211_channel_type channel_type,
-			 const u8 *buf, size_t len, u64 *cookie);
-ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
-					  struct sk_buff *skb);
 void ieee80211_send_pspoll(struct ieee80211_local *local,
 			   struct ieee80211_sub_if_data *sdata);
 void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
 int ieee80211_max_network_latency(struct notifier_block *nb,
 				  unsigned long data, void *dummy);
+int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 				      struct ieee80211_channel_sw_ie *sw_elem,
 				      struct ieee80211_bss *bss,
 				      u64 timestamp);
 void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+				  struct sk_buff *skb);
 
 /* IBSS code */
 void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
 void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
-ieee80211_rx_result
-ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
 struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
 					u8 *bssid, u8 *addr, u32 supp_rates,
 					gfp_t gfp);
@@ -1016,6 +1017,14 @@
 int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
 void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata);
 void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+				   struct sk_buff *skb);
+
+/* mesh code */
+void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+				   struct sk_buff *skb);
 
 /* scan/BSS handling */
 void ieee80211_scan_work(struct work_struct *work);
@@ -1084,7 +1093,7 @@
 	u8 padding_for_rate;
 	__le16 tx_flags;
 	u8 data_retries;
-} __attribute__ ((packed));
+} __packed;
 
 
 /* HT */
@@ -1099,6 +1108,8 @@
 			       enum ieee80211_smps_mode smps, const u8 *da,
 			       const u8 *bssid);
 
+void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+				     u16 initiator, u16 reason);
 void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				    u16 initiator, u16 reason);
 void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta);
@@ -1118,6 +1129,10 @@
 				   enum ieee80211_back_parties initiator);
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				    enum ieee80211_back_parties initiator);
+void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
+void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_ba_session_work(struct work_struct *work);
+void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
 
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 50deb01..910729f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -268,7 +268,6 @@
 
 		changed |= ieee80211_reset_erp_info(sdata);
 		ieee80211_bss_info_change_notify(sdata, changed);
-		ieee80211_enable_keys(sdata);
 
 		if (sdata->vif.type == NL80211_IFTYPE_STATION)
 			netif_carrier_off(dev);
@@ -321,15 +320,6 @@
 
 	ieee80211_recalc_ps(local, -1);
 
-	/*
-	 * ieee80211_sta_work is disabled while network interface
-	 * is down. Therefore, some configuration changes may not
-	 * yet be effective. Trigger execution of ieee80211_sta_work
-	 * to fix this.
-	 */
-	if (sdata->vif.type == NL80211_IFTYPE_STATION)
-		ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
-
 	netif_tx_start_all_queues(dev);
 
 	return 0;
@@ -349,7 +339,6 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
-	struct sta_info *sta;
 	unsigned long flags;
 	struct sk_buff *skb, *tmp;
 	u32 hw_reconf_flags = 0;
@@ -366,18 +355,6 @@
 	ieee80211_work_purge(sdata);
 
 	/*
-	 * Now delete all active aggregation sessions.
-	 */
-	rcu_read_lock();
-
-	list_for_each_entry_rcu(sta, &local->sta_list, list) {
-		if (sta->sdata == sdata)
-			ieee80211_sta_tear_down_BA_sessions(sta);
-	}
-
-	rcu_read_unlock();
-
-	/*
 	 * Remove all stations associated with this interface.
 	 *
 	 * This must be done before calling ops->remove_interface()
@@ -483,27 +460,14 @@
 		 * whether the interface is running, which, at this point,
 		 * it no longer is.
 		 */
-		cancel_work_sync(&sdata->u.mgd.work);
 		cancel_work_sync(&sdata->u.mgd.chswitch_work);
 		cancel_work_sync(&sdata->u.mgd.monitor_work);
 		cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work);
 
-		/*
-		 * When we get here, the interface is marked down.
-		 * Call synchronize_rcu() to wait for the RX path
-		 * should it be using the interface and enqueuing
-		 * frames at this very time on another CPU.
-		 */
-		synchronize_rcu();
-		skb_queue_purge(&sdata->u.mgd.skb_queue);
 		/* fall through */
 	case NL80211_IFTYPE_ADHOC:
-		if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+		if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
 			del_timer_sync(&sdata->u.ibss.timer);
-			cancel_work_sync(&sdata->u.ibss.work);
-			synchronize_rcu();
-			skb_queue_purge(&sdata->u.ibss.skb_queue);
-		}
 		/* fall through */
 	case NL80211_IFTYPE_MESH_POINT:
 		if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -518,6 +482,16 @@
 		}
 		/* fall through */
 	default:
+		flush_work(&sdata->work);
+		/*
+		 * When we get here, the interface is marked down.
+		 * Call synchronize_rcu() to wait for the RX path
+		 * should it be using the interface and enqueuing
+		 * frames at this very time on another CPU.
+		 */
+		synchronize_rcu();
+		skb_queue_purge(&sdata->skb_queue);
+
 		if (local->scan_sdata == sdata)
 			ieee80211_scan_cancel(local);
 
@@ -531,8 +505,8 @@
 				BSS_CHANGED_BEACON_ENABLED);
 		}
 
-		/* disable all keys for as long as this netdev is down */
-		ieee80211_disable_keys(sdata);
+		/* free all remaining keys, there shouldn't be any */
+		ieee80211_free_keys(sdata);
 		drv_remove_interface(local, &sdata->vif);
 	}
 
@@ -727,6 +701,136 @@
 	dev->destructor = free_netdev;
 }
 
+static void ieee80211_iface_work(struct work_struct *work)
+{
+	struct ieee80211_sub_if_data *sdata =
+		container_of(work, struct ieee80211_sub_if_data, work);
+	struct ieee80211_local *local = sdata->local;
+	struct sk_buff *skb;
+	struct sta_info *sta;
+	struct ieee80211_ra_tid *ra_tid;
+
+	if (!ieee80211_sdata_running(sdata))
+		return;
+
+	if (local->scanning)
+		return;
+
+	/*
+	 * ieee80211_queue_work() should have picked up most cases,
+	 * here we'll pick the rest.
+	 */
+	if (WARN(local->suspended,
+		 "interface work scheduled while going to suspend\n"))
+		return;
+
+	/* first process frames */
+	while ((skb = skb_dequeue(&sdata->skb_queue))) {
+		struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+		if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
+			ra_tid = (void *)&skb->cb;
+			ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
+						 ra_tid->tid);
+		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
+			ra_tid = (void *)&skb->cb;
+			ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
+						ra_tid->tid);
+		} else if (ieee80211_is_action(mgmt->frame_control) &&
+			   mgmt->u.action.category == WLAN_CATEGORY_BACK) {
+			int len = skb->len;
+
+			mutex_lock(&local->sta_mtx);
+			sta = sta_info_get(sdata, mgmt->sa);
+			if (sta) {
+				switch (mgmt->u.action.u.addba_req.action_code) {
+				case WLAN_ACTION_ADDBA_REQ:
+					ieee80211_process_addba_request(
+							local, sta, mgmt, len);
+					break;
+				case WLAN_ACTION_ADDBA_RESP:
+					ieee80211_process_addba_resp(local, sta,
+								     mgmt, len);
+					break;
+				case WLAN_ACTION_DELBA:
+					ieee80211_process_delba(sdata, sta,
+								mgmt, len);
+					break;
+				default:
+					WARN_ON(1);
+					break;
+				}
+			}
+			mutex_unlock(&local->sta_mtx);
+		} else if (ieee80211_is_data_qos(mgmt->frame_control)) {
+			struct ieee80211_hdr *hdr = (void *)mgmt;
+			/*
+			 * The frame isn't mgmt, but frame_control sits
+			 * at the same offset in data frames, so the
+			 * check above is still correct.
+			 *
+			 * Warn if we see other data frame types here;
+			 * they must not get here.
+			 */
+			WARN_ON(hdr->frame_control &
+					cpu_to_le16(IEEE80211_STYPE_NULLFUNC));
+			WARN_ON(!(hdr->seq_ctrl &
+					cpu_to_le16(IEEE80211_SCTL_FRAG)));
+			/*
+			 * This was a fragment of a frame, received while
+			 * a block-ack session was active. That cannot be
+			 * right, so terminate the session.
+			 */
+			mutex_lock(&local->sta_mtx);
+			sta = sta_info_get(sdata, mgmt->sa);
+			if (sta) {
+				u16 tid = *ieee80211_get_qos_ctl(hdr) &
+						IEEE80211_QOS_CTL_TID_MASK;
+
+				__ieee80211_stop_rx_ba_session(
+					sta, tid, WLAN_BACK_RECIPIENT,
+					WLAN_REASON_QSTA_REQUIRE_SETUP);
+			}
+			mutex_unlock(&local->sta_mtx);
+		} else switch (sdata->vif.type) {
+		case NL80211_IFTYPE_STATION:
+			ieee80211_sta_rx_queued_mgmt(sdata, skb);
+			break;
+		case NL80211_IFTYPE_ADHOC:
+			ieee80211_ibss_rx_queued_mgmt(sdata, skb);
+			break;
+		case NL80211_IFTYPE_MESH_POINT:
+			if (!ieee80211_vif_is_mesh(&sdata->vif))
+				break;
+			ieee80211_mesh_rx_queued_mgmt(sdata, skb);
+			break;
+		default:
+			WARN(1, "frame for unexpected interface type");
+			break;
+		}
+
+		kfree_skb(skb);
+	}
+
+	/* then other type-dependent work */
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_STATION:
+		ieee80211_sta_work(sdata);
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		ieee80211_ibss_work(sdata);
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		if (!ieee80211_vif_is_mesh(&sdata->vif))
+			break;
+		ieee80211_mesh_work(sdata);
+		break;
+	default:
+		break;
+	}
+}
+
+
 /*
  * Helper function to initialise an interface to a specific type.
  */
@@ -744,6 +848,9 @@
 	/* only monitor differs */
 	sdata->dev->type = ARPHRD_ETHER;
 
+	skb_queue_head_init(&sdata->skb_queue);
+	INIT_WORK(&sdata->work, ieee80211_iface_work);
+
 	switch (type) {
 	case NL80211_IFTYPE_AP:
 		skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
@@ -969,6 +1076,9 @@
 	sdata->wdev.wiphy = local->hw.wiphy;
 	sdata->local = local;
 	sdata->dev = ndev;
+#ifdef CONFIG_INET
+	sdata->arp_filter_state = true;
+#endif
 
 	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
 		skb_queue_head_init(&sdata->fragments[i].skb_list);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index e8f6e3b..50d1cff 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -36,80 +36,20 @@
  * There is currently no way of knowing this except by looking into
  * debugfs.
  *
- * All key operations are protected internally so you can call them at
- * any time.
+ * All key operations are protected internally.
  *
  * Within mac80211, key references are, just as STA structure references,
  * protected by RCU. Note, however, that some things are unprotected,
  * namely the key->sta dereferences within the hardware acceleration
- * functions. This means that sta_info_destroy() must flush the key todo
- * list.
- *
- * All the direct key list manipulation functions must not sleep because
- * they can operate on STA info structs that are protected by RCU.
+ * functions. This means that sta_info_destroy() must remove the key,
+ * which waits for an RCU grace period.
  */
 
 static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
 
-/* key mutex: used to synchronise todo runners */
-static DEFINE_MUTEX(key_mutex);
-static DEFINE_SPINLOCK(todo_lock);
-static LIST_HEAD(todo_list);
-
-static void key_todo(struct work_struct *work)
+static void assert_key_lock(struct ieee80211_local *local)
 {
-	ieee80211_key_todo();
-}
-
-static DECLARE_WORK(todo_work, key_todo);
-
-/**
- * add_todo - add todo item for a key
- *
- * @key: key to add to do item for
- * @flag: todo flag(s)
- *
- * Must be called with IRQs or softirqs disabled.
- */
-static void add_todo(struct ieee80211_key *key, u32 flag)
-{
-	if (!key)
-		return;
-
-	spin_lock(&todo_lock);
-	key->flags |= flag;
-	/*
-	 * Remove again if already on the list so that we move it to the end.
-	 */
-	if (!list_empty(&key->todo))
-		list_del(&key->todo);
-	list_add_tail(&key->todo, &todo_list);
-	schedule_work(&todo_work);
-	spin_unlock(&todo_lock);
-}
-
-/**
- * ieee80211_key_lock - lock the mac80211 key operation lock
- *
- * This locks the (global) mac80211 key operation lock, all
- * key operations must be done under this lock.
- */
-static void ieee80211_key_lock(void)
-{
-	mutex_lock(&key_mutex);
-}
-
-/**
- * ieee80211_key_unlock - unlock the mac80211 key operation lock
- */
-static void ieee80211_key_unlock(void)
-{
-	mutex_unlock(&key_mutex);
-}
-
-static void assert_key_lock(void)
-{
-	WARN_ON(!mutex_is_locked(&key_mutex));
+	WARN_ON(!mutex_is_locked(&local->key_mtx));
 }
 
 static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
@@ -126,12 +66,13 @@
 	struct ieee80211_sta *sta;
 	int ret;
 
-	assert_key_lock();
 	might_sleep();
 
 	if (!key->local->ops->set_key)
 		return;
 
+	assert_key_lock(key->local);
+
 	sta = get_sta_for_key(key);
 
 	sdata = key->sdata;
@@ -142,11 +83,8 @@
 
 	ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
 
-	if (!ret) {
-		spin_lock_bh(&todo_lock);
+	if (!ret)
 		key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
-		spin_unlock_bh(&todo_lock);
-	}
 
 	if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP)
 		printk(KERN_ERR "mac80211-%s: failed to set key "
@@ -161,18 +99,15 @@
 	struct ieee80211_sta *sta;
 	int ret;
 
-	assert_key_lock();
 	might_sleep();
 
 	if (!key || !key->local->ops->set_key)
 		return;
 
-	spin_lock_bh(&todo_lock);
-	if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) {
-		spin_unlock_bh(&todo_lock);
+	assert_key_lock(key->local);
+
+	if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
 		return;
-	}
-	spin_unlock_bh(&todo_lock);
 
 	sta = get_sta_for_key(key);
 	sdata = key->sdata;
@@ -191,9 +126,7 @@
 		       wiphy_name(key->local->hw.wiphy),
 		       key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
 
-	spin_lock_bh(&todo_lock);
 	key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
-	spin_unlock_bh(&todo_lock);
 }
 
 static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
@@ -201,22 +134,24 @@
 {
 	struct ieee80211_key *key = NULL;
 
+	assert_key_lock(sdata->local);
+
 	if (idx >= 0 && idx < NUM_DEFAULT_KEYS)
 		key = sdata->keys[idx];
 
 	rcu_assign_pointer(sdata->default_key, key);
 
-	if (key)
-		add_todo(key, KEY_FLAG_TODO_DEFKEY);
+	if (key) {
+		ieee80211_debugfs_key_remove_default(key->sdata);
+		ieee80211_debugfs_key_add_default(key->sdata);
+	}
 }
 
 void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdata->local->key_lock, flags);
+	mutex_lock(&sdata->local->key_mtx);
 	__ieee80211_set_default_key(sdata, idx);
-	spin_unlock_irqrestore(&sdata->local->key_lock, flags);
+	mutex_unlock(&sdata->local->key_mtx);
 }
 
 static void
@@ -224,24 +159,26 @@
 {
 	struct ieee80211_key *key = NULL;
 
+	assert_key_lock(sdata->local);
+
 	if (idx >= NUM_DEFAULT_KEYS &&
 	    idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
 		key = sdata->keys[idx];
 
 	rcu_assign_pointer(sdata->default_mgmt_key, key);
 
-	if (key)
-		add_todo(key, KEY_FLAG_TODO_DEFMGMTKEY);
+	if (key) {
+		ieee80211_debugfs_key_remove_mgmt_default(key->sdata);
+		ieee80211_debugfs_key_add_mgmt_default(key->sdata);
+	}
 }
 
 void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
 				    int idx)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdata->local->key_lock, flags);
+	mutex_lock(&sdata->local->key_mtx);
 	__ieee80211_set_default_mgmt_key(sdata, idx);
-	spin_unlock_irqrestore(&sdata->local->key_lock, flags);
+	mutex_unlock(&sdata->local->key_mtx);
 }
 
 
@@ -336,7 +273,7 @@
 		key->conf.iv_len = CCMP_HDR_LEN;
 		key->conf.icv_len = CCMP_MIC_LEN;
 		if (seq) {
-			for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
+			for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++)
 				for (j = 0; j < CCMP_PN_LEN; j++)
 					key->u.ccmp.rx_pn[i][j] =
 						seq[CCMP_PN_LEN - j - 1];
@@ -352,7 +289,6 @@
 	}
 	memcpy(key->conf.key, key_data, key_len);
 	INIT_LIST_HEAD(&key->list);
-	INIT_LIST_HEAD(&key->todo);
 
 	if (alg == ALG_CCMP) {
 		/*
@@ -382,12 +318,27 @@
 	return key;
 }
 
+static void __ieee80211_key_destroy(struct ieee80211_key *key)
+{
+	if (!key)
+		return;
+
+	ieee80211_key_disable_hw_accel(key);
+
+	if (key->conf.alg == ALG_CCMP)
+		ieee80211_aes_key_free(key->u.ccmp.tfm);
+	if (key->conf.alg == ALG_AES_CMAC)
+		ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
+	ieee80211_debugfs_key_remove(key);
+
+	kfree(key);
+}
+
 void ieee80211_key_link(struct ieee80211_key *key,
 			struct ieee80211_sub_if_data *sdata,
 			struct sta_info *sta)
 {
 	struct ieee80211_key *old_key;
-	unsigned long flags;
 	int idx;
 
 	BUG_ON(!sdata);
@@ -431,7 +382,7 @@
 		}
 	}
 
-	spin_lock_irqsave(&sdata->local->key_lock, flags);
+	mutex_lock(&sdata->local->key_mtx);
 
 	if (sta)
 		old_key = sta->key;
@@ -439,15 +390,13 @@
 		old_key = sdata->keys[idx];
 
 	__ieee80211_key_replace(sdata, sta, old_key, key);
+	__ieee80211_key_destroy(old_key);
 
-	/* free old key later */
-	add_todo(old_key, KEY_FLAG_TODO_DELETE);
+	ieee80211_debugfs_key_add(key);
 
-	add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS);
-	if (ieee80211_sdata_running(sdata))
-		add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
+	ieee80211_key_enable_hw_accel(key);
 
-	spin_unlock_irqrestore(&sdata->local->key_lock, flags);
+	mutex_unlock(&sdata->local->key_mtx);
 }
 
 static void __ieee80211_key_free(struct ieee80211_key *key)
@@ -458,170 +407,65 @@
 	if (key->sdata)
 		__ieee80211_key_replace(key->sdata, key->sta,
 					key, NULL);
-
-	add_todo(key, KEY_FLAG_TODO_DELETE);
+	__ieee80211_key_destroy(key);
 }
 
 void ieee80211_key_free(struct ieee80211_key *key)
 {
-	unsigned long flags;
+	struct ieee80211_local *local;
 
 	if (!key)
 		return;
 
-	if (!key->sdata) {
-		/* The key has not been linked yet, simply free it
-		 * and don't Oops */
-		if (key->conf.alg == ALG_CCMP)
-			ieee80211_aes_key_free(key->u.ccmp.tfm);
-		kfree(key);
-		return;
-	}
+	local = key->sdata->local;
 
-	spin_lock_irqsave(&key->sdata->local->key_lock, flags);
+	mutex_lock(&local->key_mtx);
 	__ieee80211_key_free(key);
-	spin_unlock_irqrestore(&key->sdata->local->key_lock, flags);
-}
-
-/*
- * To be safe against concurrent manipulations of the list (which shouldn't
- * actually happen) we need to hold the spinlock. But under the spinlock we
- * can't actually do much, so we defer processing to the todo list. Then run
- * the todo list to be sure the operation and possibly previously pending
- * operations are completed.
- */
-static void ieee80211_todo_for_each_key(struct ieee80211_sub_if_data *sdata,
-					u32 todo_flags)
-{
-	struct ieee80211_key *key;
-	unsigned long flags;
-
-	might_sleep();
-
-	spin_lock_irqsave(&sdata->local->key_lock, flags);
-	list_for_each_entry(key, &sdata->key_list, list)
-		add_todo(key, todo_flags);
-	spin_unlock_irqrestore(&sdata->local->key_lock, flags);
-
-	ieee80211_key_todo();
+	mutex_unlock(&local->key_mtx);
 }
 
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 {
+	struct ieee80211_key *key;
+
 	ASSERT_RTNL();
 
 	if (WARN_ON(!ieee80211_sdata_running(sdata)))
 		return;
 
-	ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD);
+	mutex_lock(&sdata->local->key_mtx);
+
+	list_for_each_entry(key, &sdata->key_list, list)
+		ieee80211_key_enable_hw_accel(key);
+
+	mutex_unlock(&sdata->local->key_mtx);
 }
 
 void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata)
 {
+	struct ieee80211_key *key;
+
 	ASSERT_RTNL();
 
-	ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_REMOVE);
-}
+	mutex_lock(&sdata->local->key_mtx);
 
-static void __ieee80211_key_destroy(struct ieee80211_key *key)
-{
-	if (!key)
-		return;
+	list_for_each_entry(key, &sdata->key_list, list)
+		ieee80211_key_disable_hw_accel(key);
 
-	ieee80211_key_disable_hw_accel(key);
-
-	if (key->conf.alg == ALG_CCMP)
-		ieee80211_aes_key_free(key->u.ccmp.tfm);
-	if (key->conf.alg == ALG_AES_CMAC)
-		ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
-	ieee80211_debugfs_key_remove(key);
-
-	kfree(key);
-}
-
-static void __ieee80211_key_todo(void)
-{
-	struct ieee80211_key *key;
-	bool work_done;
-	u32 todoflags;
-
-	/*
-	 * NB: sta_info_destroy relies on this!
-	 */
-	synchronize_rcu();
-
-	spin_lock_bh(&todo_lock);
-	while (!list_empty(&todo_list)) {
-		key = list_first_entry(&todo_list, struct ieee80211_key, todo);
-		list_del_init(&key->todo);
-		todoflags = key->flags & (KEY_FLAG_TODO_ADD_DEBUGFS |
-					  KEY_FLAG_TODO_DEFKEY |
-					  KEY_FLAG_TODO_DEFMGMTKEY |
-					  KEY_FLAG_TODO_HWACCEL_ADD |
-					  KEY_FLAG_TODO_HWACCEL_REMOVE |
-					  KEY_FLAG_TODO_DELETE);
-		key->flags &= ~todoflags;
-		spin_unlock_bh(&todo_lock);
-
-		work_done = false;
-
-		if (todoflags & KEY_FLAG_TODO_ADD_DEBUGFS) {
-			ieee80211_debugfs_key_add(key);
-			work_done = true;
-		}
-		if (todoflags & KEY_FLAG_TODO_DEFKEY) {
-			ieee80211_debugfs_key_remove_default(key->sdata);
-			ieee80211_debugfs_key_add_default(key->sdata);
-			work_done = true;
-		}
-		if (todoflags & KEY_FLAG_TODO_DEFMGMTKEY) {
-			ieee80211_debugfs_key_remove_mgmt_default(key->sdata);
-			ieee80211_debugfs_key_add_mgmt_default(key->sdata);
-			work_done = true;
-		}
-		if (todoflags & KEY_FLAG_TODO_HWACCEL_ADD) {
-			ieee80211_key_enable_hw_accel(key);
-			work_done = true;
-		}
-		if (todoflags & KEY_FLAG_TODO_HWACCEL_REMOVE) {
-			ieee80211_key_disable_hw_accel(key);
-			work_done = true;
-		}
-		if (todoflags & KEY_FLAG_TODO_DELETE) {
-			__ieee80211_key_destroy(key);
-			work_done = true;
-		}
-
-		WARN_ON(!work_done);
-
-		spin_lock_bh(&todo_lock);
-	}
-	spin_unlock_bh(&todo_lock);
-}
-
-void ieee80211_key_todo(void)
-{
-	ieee80211_key_lock();
-	__ieee80211_key_todo();
-	ieee80211_key_unlock();
+	mutex_unlock(&sdata->local->key_mtx);
 }
 
 void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_key *key, *tmp;
-	unsigned long flags;
 
-	ieee80211_key_lock();
+	mutex_lock(&sdata->local->key_mtx);
 
 	ieee80211_debugfs_key_remove_default(sdata);
 	ieee80211_debugfs_key_remove_mgmt_default(sdata);
 
-	spin_lock_irqsave(&sdata->local->key_lock, flags);
 	list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
 		__ieee80211_key_free(key);
-	spin_unlock_irqrestore(&sdata->local->key_lock, flags);
 
-	__ieee80211_key_todo();
-
-	ieee80211_key_unlock();
+	mutex_unlock(&sdata->local->key_mtx);
 }
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index bdc2968..a3849fa 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -38,25 +38,9 @@
  *
  * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present
  *	in the hardware for TX crypto hardware acceleration.
- * @KEY_FLAG_TODO_DELETE: Key is marked for deletion and will, after an
- *	RCU grace period, no longer be reachable other than from the
- *	todo list.
- * @KEY_FLAG_TODO_HWACCEL_ADD: Key needs to be added to hardware acceleration.
- * @KEY_FLAG_TODO_HWACCEL_REMOVE: Key needs to be removed from hardware
- *	acceleration.
- * @KEY_FLAG_TODO_DEFKEY: Key is default key and debugfs needs to be updated.
- * @KEY_FLAG_TODO_ADD_DEBUGFS: Key needs to be added to debugfs.
- * @KEY_FLAG_TODO_DEFMGMTKEY: Key is default management key and debugfs needs
- *	to be updated.
  */
 enum ieee80211_internal_key_flags {
 	KEY_FLAG_UPLOADED_TO_HARDWARE	= BIT(0),
-	KEY_FLAG_TODO_DELETE		= BIT(1),
-	KEY_FLAG_TODO_HWACCEL_ADD	= BIT(2),
-	KEY_FLAG_TODO_HWACCEL_REMOVE	= BIT(3),
-	KEY_FLAG_TODO_DEFKEY		= BIT(4),
-	KEY_FLAG_TODO_ADD_DEBUGFS	= BIT(5),
-	KEY_FLAG_TODO_DEFMGMTKEY	= BIT(6),
 };
 
 enum ieee80211_internal_tkip_state {
@@ -79,10 +63,8 @@
 
 	/* for sdata list */
 	struct list_head list;
-	/* for todo list */
-	struct list_head todo;
 
-	/* protected by todo lock! */
+	/* protected by key mutex */
 	unsigned int flags;
 
 	union {
@@ -95,7 +77,13 @@
 		} tkip;
 		struct {
 			u8 tx_pn[6];
-			u8 rx_pn[NUM_RX_DATA_QUEUES][6];
+			/*
+			 * Last received packet number. The first
+			 * NUM_RX_DATA_QUEUES counters are used with Data
+			 * frames and the last counter is used with Robust
+			 * Management frames.
+			 */
+			u8 rx_pn[NUM_RX_DATA_QUEUES + 1][6];
 			struct crypto_cipher *tfm;
 			u32 replays; /* dot11RSNAStatsCCMPReplays */
 			/* scratch buffers for virt_to_page() (crypto API) */
@@ -155,6 +143,4 @@
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
 void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata);
 
-void ieee80211_key_todo(void);
-
 #endif /* IEEE80211_KEY_H */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 22a384d..edf7aff 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -20,6 +20,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/bitmap.h>
 #include <linux/pm_qos_params.h>
+#include <linux/inetdevice.h>
 #include <net/net_namespace.h>
 #include <net/cfg80211.h>
 
@@ -259,7 +260,6 @@
 {
 	struct ieee80211_local *local = (struct ieee80211_local *) data;
 	struct sk_buff *skb;
-	struct ieee80211_ra_tid *ra_tid;
 
 	while ((skb = skb_dequeue(&local->skb_queue)) ||
 	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
@@ -274,18 +274,6 @@
 			skb->pkt_type = 0;
 			ieee80211_tx_status(local_to_hw(local), skb);
 			break;
-		case IEEE80211_DELBA_MSG:
-			ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-			ieee80211_stop_tx_ba_cb(ra_tid->vif, ra_tid->ra,
-						ra_tid->tid);
-			dev_kfree_skb(skb);
-			break;
-		case IEEE80211_ADDBA_MSG:
-			ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-			ieee80211_start_tx_ba_cb(ra_tid->vif, ra_tid->ra,
-						 ra_tid->tid);
-			dev_kfree_skb(skb);
-			break ;
 		default:
 			WARN(1, "mac80211: Packet is of unknown type %d\n",
 			     skb->pkt_type);
@@ -329,6 +317,76 @@
 	mutex_unlock(&local->iflist_mtx);
 }
 
+#ifdef CONFIG_INET
+static int ieee80211_ifa_changed(struct notifier_block *nb,
+				 unsigned long data, void *arg)
+{
+	struct in_ifaddr *ifa = arg;
+	struct ieee80211_local *local =
+		container_of(nb, struct ieee80211_local,
+			     ifa_notifier);
+	struct net_device *ndev = ifa->ifa_dev->dev;
+	struct wireless_dev *wdev = ndev->ieee80211_ptr;
+	struct in_device *idev;
+	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_bss_conf *bss_conf;
+	struct ieee80211_if_managed *ifmgd;
+	int c = 0;
+
+	if (!netif_running(ndev))
+		return NOTIFY_DONE;
+
+	/* Make sure it's our interface that got changed */
+	if (!wdev)
+		return NOTIFY_DONE;
+
+	if (wdev->wiphy != local->hw.wiphy)
+		return NOTIFY_DONE;
+
+	sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
+	bss_conf = &sdata->vif.bss_conf;
+
+	/* ARP filtering is only supported in managed mode */
+	if (sdata->vif.type != NL80211_IFTYPE_STATION)
+		return NOTIFY_DONE;
+
+	idev = sdata->dev->ip_ptr;
+	if (!idev)
+		return NOTIFY_DONE;
+
+	ifmgd = &sdata->u.mgd;
+	mutex_lock(&ifmgd->mtx);
+
+	/* Copy the addresses to the bss_conf list */
+	ifa = idev->ifa_list;
+	while (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN && ifa) {
+		bss_conf->arp_addr_list[c] = ifa->ifa_address;
+		ifa = ifa->ifa_next;
+		c++;
+	}
+
+	/* If not all addresses fit the list, disable filtering */
+	if (ifa) {
+		sdata->arp_filter_state = false;
+		c = 0;
+	} else {
+		sdata->arp_filter_state = true;
+	}
+	bss_conf->arp_addr_cnt = c;
+
+	/* Configure driver only if associated */
+	if (ifmgd->associated) {
+		bss_conf->arp_filter_enabled = sdata->arp_filter_state;
+		ieee80211_bss_info_change_notify(sdata,
+						 BSS_CHANGED_ARP_FILTER);
+	}
+
+	mutex_unlock(&ifmgd->mtx);
+
+	return NOTIFY_DONE;
+}
+#endif
+
 struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 					const struct ieee80211_ops *ops)
 {
@@ -396,7 +454,7 @@
 	mutex_init(&local->iflist_mtx);
 	mutex_init(&local->scan_mtx);
 
-	spin_lock_init(&local->key_lock);
+	mutex_init(&local->key_mtx);
 	spin_lock_init(&local->filter_lock);
 	spin_lock_init(&local->queue_stop_reason_lock);
 
@@ -419,8 +477,10 @@
 
 	sta_info_init(local);
 
-	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
+	for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
 		skb_queue_head_init(&local->pending[i]);
+		atomic_set(&local->agg_queue_stop[i], 0);
+	}
 	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
 		     (unsigned long)local);
 
@@ -431,8 +491,6 @@
 	skb_queue_head_init(&local->skb_queue);
 	skb_queue_head_init(&local->skb_queue_unreliable);
 
-	spin_lock_init(&local->ampdu_lock);
-
 	return local_to_hw(local);
 }
 EXPORT_SYMBOL(ieee80211_alloc_hw);
@@ -572,7 +630,7 @@
 
 	local->hw.conf.listen_interval = local->hw.max_listen_interval;
 
-	local->hw.conf.dynamic_ps_forced_timeout = -1;
+	local->dynamic_ps_forced_timeout = -1;
 
 	result = sta_info_start(local);
 	if (result < 0)
@@ -612,14 +670,24 @@
 		ieee80211_max_network_latency;
 	result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
 				     &local->network_latency_notifier);
-
 	if (result) {
 		rtnl_lock();
 		goto fail_pm_qos;
 	}
 
+#ifdef CONFIG_INET
+	local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
+	result = register_inetaddr_notifier(&local->ifa_notifier);
+	if (result)
+		goto fail_ifa;
+#endif
+
 	return 0;
 
+ fail_ifa:
+	pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
+			       &local->network_latency_notifier);
+	rtnl_lock();
  fail_pm_qos:
 	ieee80211_led_exit(local);
 	ieee80211_remove_interfaces(local);
@@ -647,6 +715,9 @@
 
 	pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
 			       &local->network_latency_notifier);
+#ifdef CONFIG_INET
+	unregister_inetaddr_notifier(&local->ifa_notifier);
+#endif
 
 	rtnl_lock();
 
@@ -704,6 +775,10 @@
 	if (ret)
 		return ret;
 
+	ret = rc80211_minstrel_ht_init();
+	if (ret)
+		goto err_minstrel;
+
 	ret = rc80211_pid_init();
 	if (ret)
 		goto err_pid;
@@ -716,6 +791,8 @@
  err_netdev:
 	rc80211_pid_exit();
  err_pid:
+	rc80211_minstrel_ht_exit();
+ err_minstrel:
 	rc80211_minstrel_exit();
 
 	return ret;
@@ -724,6 +801,7 @@
 static void __exit ieee80211_exit(void)
 {
 	rc80211_pid_exit();
+	rc80211_minstrel_ht_exit();
 	rc80211_minstrel_exit();
 
 	/*
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index bde8103..c8a4f19 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -54,7 +54,7 @@
 		return;
 	}
 
-	ieee80211_queue_work(&local->hw, &ifmsh->work);
+	ieee80211_queue_work(&local->hw, &sdata->work);
 }
 
 /**
@@ -345,7 +345,7 @@
 		return;
 	}
 
-	ieee80211_queue_work(&local->hw, &ifmsh->work);
+	ieee80211_queue_work(&local->hw, &sdata->work);
 }
 
 static void ieee80211_mesh_path_root_timer(unsigned long data)
@@ -362,7 +362,7 @@
 		return;
 	}
 
-	ieee80211_queue_work(&local->hw, &ifmsh->work);
+	ieee80211_queue_work(&local->hw, &sdata->work);
 }
 
 void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
@@ -484,9 +484,6 @@
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
-	/* might restart the timer but that doesn't matter */
-	cancel_work_sync(&ifmsh->work);
-
 	/* use atomic bitops in case both timers fire at the same time */
 
 	if (del_timer_sync(&ifmsh->housekeeping_timer))
@@ -518,7 +515,7 @@
 
 	set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
 	ieee80211_mesh_root_setup(ifmsh);
-	ieee80211_queue_work(&local->hw, &ifmsh->work);
+	ieee80211_queue_work(&local->hw, &sdata->work);
 	sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
 						BSS_CHANGED_BEACON_ENABLED |
@@ -536,16 +533,7 @@
 	 * whether the interface is running, which, at this point,
 	 * it no longer is.
 	 */
-	cancel_work_sync(&sdata->u.mesh.work);
-
-	/*
-	 * When we get here, the interface is marked down.
-	 * Call synchronize_rcu() to wait for the RX path
-	 * should it be using the interface and enqueuing
-	 * frames at this very time on another CPU.
-	 */
-	rcu_barrier(); /* Wait for RX path and call_rcu()'s */
-	skb_queue_purge(&sdata->u.mesh.skb_queue);
+	cancel_work_sync(&sdata->work);
 }
 
 static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
@@ -608,8 +596,8 @@
 	}
 }
 
-static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
-					  struct sk_buff *skb)
+void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+				   struct sk_buff *skb)
 {
 	struct ieee80211_rx_status *rx_status;
 	struct ieee80211_if_mesh *ifmsh;
@@ -632,26 +620,11 @@
 		ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
 		break;
 	}
-
-	kfree_skb(skb);
 }
 
-static void ieee80211_mesh_work(struct work_struct *work)
+void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata =
-		container_of(work, struct ieee80211_sub_if_data, u.mesh.work);
-	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	struct sk_buff *skb;
-
-	if (!ieee80211_sdata_running(sdata))
-		return;
-
-	if (local->scanning)
-		return;
-
-	while ((skb = skb_dequeue(&ifmsh->skb_queue)))
-		ieee80211_mesh_rx_queued_mgmt(sdata, skb);
 
 	if (ifmsh->preq_queue_len &&
 	    time_after(jiffies,
@@ -678,7 +651,7 @@
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdata, &local->interfaces, list)
 		if (ieee80211_vif_is_mesh(&sdata->vif))
-			ieee80211_queue_work(&local->hw, &sdata->u.mesh.work);
+			ieee80211_queue_work(&local->hw, &sdata->work);
 	rcu_read_unlock();
 }
 
@@ -686,11 +659,9 @@
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
-	INIT_WORK(&ifmsh->work, ieee80211_mesh_work);
 	setup_timer(&ifmsh->housekeeping_timer,
 		    ieee80211_mesh_housekeeping_timer,
 		    (unsigned long) sdata);
-	skb_queue_head_init(&sdata->u.mesh.skb_queue);
 
 	ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
 	ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
@@ -731,29 +702,3 @@
 	INIT_LIST_HEAD(&ifmsh->preq_queue.list);
 	spin_lock_init(&ifmsh->mesh_preq_queue_lock);
 }
-
-ieee80211_rx_result
-ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
-{
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	struct ieee80211_mgmt *mgmt;
-	u16 fc;
-
-	if (skb->len < 24)
-		return RX_DROP_MONITOR;
-
-	mgmt = (struct ieee80211_mgmt *) skb->data;
-	fc = le16_to_cpu(mgmt->frame_control);
-
-	switch (fc & IEEE80211_FCTL_STYPE) {
-	case IEEE80211_STYPE_ACTION:
-	case IEEE80211_STYPE_PROBE_RESP:
-	case IEEE80211_STYPE_BEACON:
-		skb_queue_tail(&ifmsh->skb_queue, skb);
-		ieee80211_queue_work(&local->hw, &ifmsh->work);
-		return RX_QUEUED;
-	}
-
-	return RX_CONTINUE;
-}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index c88087f..ebd3f1d 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -237,8 +237,6 @@
 		struct sta_info *stainfo, struct sk_buff *skb);
 void ieee80211s_stop(void);
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
-ieee80211_rx_result
-ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
 void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 0705018..829e08a 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -805,14 +805,14 @@
 	spin_unlock(&ifmsh->mesh_preq_queue_lock);
 
 	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
-		ieee80211_queue_work(&sdata->local->hw, &ifmsh->work);
+		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 
 	else if (time_before(jiffies, ifmsh->last_preq)) {
 		/* avoid long wait if did not send preqs for a long time
 		 * and jiffies wrapped around
 		 */
 		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
-		ieee80211_queue_work(&sdata->local->hw, &ifmsh->work);
+		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 	} else
 		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
 						min_preq_int_jiff(sdata));
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 181ffd6..349e466 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -315,7 +315,7 @@
 	read_unlock(&pathtbl_resize_lock);
 	if (grow) {
 		set_bit(MESH_WORK_GROW_MPATH_TABLE,  &ifmsh->wrkq_flags);
-		ieee80211_queue_work(&local->hw, &ifmsh->work);
+		ieee80211_queue_work(&local->hw, &sdata->work);
 	}
 	return 0;
 
@@ -425,7 +425,7 @@
 	read_unlock(&pathtbl_resize_lock);
 	if (grow) {
 		set_bit(MESH_WORK_GROW_MPP_TABLE,  &ifmsh->wrkq_flags);
-		ieee80211_queue_work(&local->hw, &ifmsh->work);
+		ieee80211_queue_work(&local->hw, &sdata->work);
 	}
 	return 0;
 
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 3cd5f7b..ea13a80 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -65,7 +65,6 @@
 {
 	atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
 	mesh_accept_plinks_update(sdata);
-	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
 }
 
 static inline
@@ -73,7 +72,6 @@
 {
 	atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
 	mesh_accept_plinks_update(sdata);
-	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
 }
 
 /**
@@ -115,7 +113,7 @@
 }
 
 /**
- * mesh_plink_deactivate - deactivate mesh peer link
+ * __mesh_plink_deactivate - deactivate mesh peer link
  *
  * @sta: mesh peer link to deactivate
  *
@@ -123,18 +121,23 @@
  *
  * Locking: the caller must hold sta->lock
  */
-static void __mesh_plink_deactivate(struct sta_info *sta)
+static bool __mesh_plink_deactivate(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	bool deactivated = false;
 
-	if (sta->plink_state == PLINK_ESTAB)
+	if (sta->plink_state == PLINK_ESTAB) {
 		mesh_plink_dec_estab_count(sdata);
+		deactivated = true;
+	}
 	sta->plink_state = PLINK_BLOCKED;
 	mesh_path_flush_by_nexthop(sta);
+
+	return deactivated;
 }
 
 /**
- * __mesh_plink_deactivate - deactivate mesh peer link
+ * mesh_plink_deactivate - deactivate mesh peer link
  *
  * @sta: mesh peer link to deactivate
  *
@@ -142,9 +145,15 @@
  */
 void mesh_plink_deactivate(struct sta_info *sta)
 {
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	bool deactivated;
+
 	spin_lock_bh(&sta->lock);
-	__mesh_plink_deactivate(sta);
+	deactivated = __mesh_plink_deactivate(sta);
 	spin_unlock_bh(&sta->lock);
+
+	if (deactivated)
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
 }
 
 static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
@@ -381,10 +390,16 @@
 
 void mesh_plink_block(struct sta_info *sta)
 {
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	bool deactivated;
+
 	spin_lock_bh(&sta->lock);
-	__mesh_plink_deactivate(sta);
+	deactivated = __mesh_plink_deactivate(sta);
 	sta->plink_state = PLINK_BLOCKED;
 	spin_unlock_bh(&sta->lock);
+
+	if (deactivated)
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
 }
 
 
@@ -397,6 +412,7 @@
 	enum plink_event event;
 	enum plink_frame_type ftype;
 	size_t baselen;
+	bool deactivated;
 	u8 ie_len;
 	u8 *baseaddr;
 	__le16 plid, llid, reason;
@@ -651,8 +667,9 @@
 		case CNF_ACPT:
 			del_timer(&sta->plink_timer);
 			sta->plink_state = PLINK_ESTAB;
-			mesh_plink_inc_estab_count(sdata);
 			spin_unlock_bh(&sta->lock);
+			mesh_plink_inc_estab_count(sdata);
+			ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
 			mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
 				sta->sta.addr);
 			break;
@@ -684,8 +701,9 @@
 		case OPN_ACPT:
 			del_timer(&sta->plink_timer);
 			sta->plink_state = PLINK_ESTAB;
-			mesh_plink_inc_estab_count(sdata);
 			spin_unlock_bh(&sta->lock);
+			mesh_plink_inc_estab_count(sdata);
+			ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
 			mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
 				sta->sta.addr);
 			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
@@ -702,11 +720,13 @@
 		case CLS_ACPT:
 			reason = cpu_to_le16(MESH_CLOSE_RCVD);
 			sta->reason = reason;
-			__mesh_plink_deactivate(sta);
+			deactivated = __mesh_plink_deactivate(sta);
 			sta->plink_state = PLINK_HOLDING;
 			llid = sta->llid;
 			mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
 			spin_unlock_bh(&sta->lock);
+			if (deactivated)
+				ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
 			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
 					    plid, reason);
 			break;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f803f8b..d196265 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -478,6 +478,39 @@
 	}
 }
 
+void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_conf *conf = &local->hw.conf;
+
+	WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
+		!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
+		(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
+
+	local->disable_dynamic_ps = false;
+	conf->dynamic_ps_timeout = local->dynamic_ps_user_timeout;
+}
+EXPORT_SYMBOL(ieee80211_enable_dyn_ps);
+
+void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_conf *conf = &local->hw.conf;
+
+	WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
+		!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
+		(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
+
+	local->disable_dynamic_ps = true;
+	conf->dynamic_ps_timeout = 0;
+	del_timer_sync(&local->dynamic_ps_timer);
+	ieee80211_queue_work(&local->hw,
+			     &local->dynamic_ps_enable_work);
+}
+EXPORT_SYMBOL(ieee80211_disable_dyn_ps);
+
 /* powersave */
 static void ieee80211_enable_ps(struct ieee80211_local *local,
 				struct ieee80211_sub_if_data *sdata)
@@ -553,6 +586,7 @@
 	    found->u.mgd.associated->beacon_ies &&
 	    !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
 				    IEEE80211_STA_CONNECTION_POLL))) {
+		struct ieee80211_conf *conf = &local->hw.conf;
 		s32 beaconint_us;
 
 		if (latency < 0)
@@ -561,25 +595,24 @@
 		beaconint_us = ieee80211_tu_to_usec(
 					found->vif.bss_conf.beacon_int);
 
-		timeout = local->hw.conf.dynamic_ps_forced_timeout;
+		timeout = local->dynamic_ps_forced_timeout;
 		if (timeout < 0) {
 			/*
+			 * Go to full PSM if the user configures a very low
+			 * latency requirement.
 			 * The 2 second value is there for compatibility until
 			 * the PM_QOS_NETWORK_LATENCY is configured with real
 			 * values.
 			 */
-			if (latency == 2000000000)
-				timeout = 100;
-			else if (latency <= 50000)
-				timeout = 300;
-			else if (latency <= 100000)
-				timeout = 100;
-			else if (latency <= 500000)
-				timeout = 50;
-			else
+			if (latency > 1900000000 && latency != 2000000000)
 				timeout = 0;
+			else
+				timeout = 100;
 		}
-		local->hw.conf.dynamic_ps_timeout = timeout;
+		local->dynamic_ps_user_timeout = timeout;
+		if (!local->disable_dynamic_ps)
+			conf->dynamic_ps_timeout =
+				local->dynamic_ps_user_timeout;
 
 		if (beaconint_us > latency) {
 			local->ps_sdata = NULL;
@@ -806,11 +839,12 @@
 {
 	struct ieee80211_bss *bss = (void *)cbss->priv;
 	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
 
 	bss_info_changed |= BSS_CHANGED_ASSOC;
 	/* set timing information */
-	sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
-	sdata->vif.bss_conf.timestamp = cbss->tsf;
+	bss_conf->beacon_int = cbss->beacon_interval;
+	bss_conf->timestamp = cbss->tsf;
 
 	bss_info_changed |= BSS_CHANGED_BEACON_INT;
 	bss_info_changed |= ieee80211_handle_bss_capability(sdata,
@@ -835,7 +869,7 @@
 
 	ieee80211_led_assoc(local, 1);
 
-	sdata->vif.bss_conf.assoc = 1;
+	bss_conf->assoc = 1;
 	/*
 	 * For now just always ask the driver to update the basic rateset
 	 * when we have associated, we aren't checking whether it actually
@@ -848,9 +882,15 @@
 
 	/* Tell the driver to monitor connection quality (if supported) */
 	if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) &&
-	    sdata->vif.bss_conf.cqm_rssi_thold)
+	    bss_conf->cqm_rssi_thold)
 		bss_info_changed |= BSS_CHANGED_CQM;
 
+	/* Enable ARP filtering */
+	if (bss_conf->arp_filter_enabled != sdata->arp_filter_state) {
+		bss_conf->arp_filter_enabled = sdata->arp_filter_state;
+		bss_info_changed |= BSS_CHANGED_ARP_FILTER;
+	}
+
 	ieee80211_bss_info_change_notify(sdata, bss_info_changed);
 
 	mutex_lock(&local->iflist_mtx);
@@ -898,13 +938,13 @@
 	netif_tx_stop_all_queues(sdata->dev);
 	netif_carrier_off(sdata->dev);
 
-	rcu_read_lock();
+	mutex_lock(&local->sta_mtx);
 	sta = sta_info_get(sdata, bssid);
 	if (sta) {
-		set_sta_flags(sta, WLAN_STA_DISASSOC);
+		set_sta_flags(sta, WLAN_STA_BLOCK_BA);
 		ieee80211_sta_tear_down_BA_sessions(sta);
 	}
-	rcu_read_unlock();
+	mutex_unlock(&local->sta_mtx);
 
 	changed |= ieee80211_reset_erp_info(sdata);
 
@@ -932,6 +972,12 @@
 
 	ieee80211_hw_config(local, config_changed);
 
+	/* Disable ARP filtering */
+	if (sdata->vif.bss_conf.arp_filter_enabled) {
+		sdata->vif.bss_conf.arp_filter_enabled = false;
+		changed |= BSS_CHANGED_ARP_FILTER;
+	}
+
 	/* The BSSID (not really interesting) and HT changed */
 	changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
 	ieee80211_bss_info_change_notify(sdata, changed);
@@ -1633,35 +1679,8 @@
 	ieee80211_bss_info_change_notify(sdata, changed);
 }
 
-ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
-					  struct sk_buff *skb)
-{
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_mgmt *mgmt;
-	u16 fc;
-
-	if (skb->len < 24)
-		return RX_DROP_MONITOR;
-
-	mgmt = (struct ieee80211_mgmt *) skb->data;
-	fc = le16_to_cpu(mgmt->frame_control);
-
-	switch (fc & IEEE80211_FCTL_STYPE) {
-	case IEEE80211_STYPE_PROBE_RESP:
-	case IEEE80211_STYPE_BEACON:
-	case IEEE80211_STYPE_DEAUTH:
-	case IEEE80211_STYPE_DISASSOC:
-	case IEEE80211_STYPE_ACTION:
-		skb_queue_tail(&sdata->u.mgd.skb_queue, skb);
-		ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
-		return RX_QUEUED;
-	}
-
-	return RX_DROP_MONITOR;
-}
-
-static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
-					 struct sk_buff *skb)
+void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+				  struct sk_buff *skb)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_rx_status *rx_status;
@@ -1693,44 +1712,6 @@
 			break;
 		case IEEE80211_STYPE_ACTION:
 			switch (mgmt->u.action.category) {
-			case WLAN_CATEGORY_BACK: {
-				struct ieee80211_local *local = sdata->local;
-				int len = skb->len;
-				struct sta_info *sta;
-
-				rcu_read_lock();
-				sta = sta_info_get(sdata, mgmt->sa);
-				if (!sta) {
-					rcu_read_unlock();
-					break;
-				}
-
-				local_bh_disable();
-
-				switch (mgmt->u.action.u.addba_req.action_code) {
-				case WLAN_ACTION_ADDBA_REQ:
-					if (len < (IEEE80211_MIN_ACTION_SIZE +
-						   sizeof(mgmt->u.action.u.addba_req)))
-						break;
-					ieee80211_process_addba_request(local, sta, mgmt, len);
-					break;
-				case WLAN_ACTION_ADDBA_RESP:
-					if (len < (IEEE80211_MIN_ACTION_SIZE +
-						   sizeof(mgmt->u.action.u.addba_resp)))
-						break;
-					ieee80211_process_addba_resp(local, sta, mgmt, len);
-					break;
-				case WLAN_ACTION_DELBA:
-					if (len < (IEEE80211_MIN_ACTION_SIZE +
-						   sizeof(mgmt->u.action.u.delba)))
-						break;
-					ieee80211_process_delba(sdata, sta, mgmt, len);
-					break;
-				}
-				local_bh_enable();
-				rcu_read_unlock();
-				break;
-				}
 			case WLAN_CATEGORY_SPECTRUM_MGMT:
 				ieee80211_sta_process_chanswitch(sdata,
 						&mgmt->u.action.u.chan_switch.sw_elem,
@@ -1754,7 +1735,7 @@
 		default:
 			WARN(1, "unexpected: %d", rma);
 		}
-		goto out;
+		return;
 	}
 
 	mutex_unlock(&ifmgd->mtx);
@@ -1799,8 +1780,6 @@
 
 		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
 	}
- out:
-	kfree_skb(skb);
 }
 
 static void ieee80211_sta_timer(unsigned long data)
@@ -1815,39 +1794,13 @@
 		return;
 	}
 
-	ieee80211_queue_work(&local->hw, &ifmgd->work);
+	ieee80211_queue_work(&local->hw, &sdata->work);
 }
 
-static void ieee80211_sta_work(struct work_struct *work)
+void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata =
-		container_of(work, struct ieee80211_sub_if_data, u.mgd.work);
 	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_if_managed *ifmgd;
-	struct sk_buff *skb;
-
-	if (!ieee80211_sdata_running(sdata))
-		return;
-
-	if (local->scanning)
-		return;
-
-	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
-		return;
-
-	/*
-	 * ieee80211_queue_work() should have picked up most cases,
-	 * here we'll pick the the rest.
-	 */
-	if (WARN(local->suspended, "STA MLME work scheduled while "
-		 "going to suspend\n"))
-		return;
-
-	ifmgd = &sdata->u.mgd;
-
-	/* first process frames to avoid timing out while a frame is pending */
-	while ((skb = skb_dequeue(&ifmgd->skb_queue)))
-		ieee80211_sta_rx_queued_mgmt(sdata, skb);
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
 	/* then process the rest of the work */
 	mutex_lock(&ifmgd->mtx);
@@ -1942,8 +1895,7 @@
 		ieee80211_queue_work(&sdata->local->hw,
 			   &sdata->u.mgd.monitor_work);
 		/* and do all the other regular work too */
-		ieee80211_queue_work(&sdata->local->hw,
-			   &sdata->u.mgd.work);
+		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 	}
 }
 
@@ -1958,7 +1910,6 @@
 	 * time -- the code here is properly synchronised.
 	 */
 
-	cancel_work_sync(&ifmgd->work);
 	cancel_work_sync(&ifmgd->beacon_connection_loss_work);
 	if (del_timer_sync(&ifmgd->timer))
 		set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -1990,7 +1941,6 @@
 	struct ieee80211_if_managed *ifmgd;
 
 	ifmgd = &sdata->u.mgd;
-	INIT_WORK(&ifmgd->work, ieee80211_sta_work);
 	INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
 	INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
 	INIT_WORK(&ifmgd->beacon_connection_loss_work,
@@ -2003,7 +1953,6 @@
 		    (unsigned long) sdata);
 	setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
 		    (unsigned long) sdata);
-	skb_queue_head_init(&ifmgd->skb_queue);
 
 	ifmgd->flags = 0;
 
@@ -2153,6 +2102,7 @@
 						    wk->filter_ta);
 			return WORK_DONE_DESTROY;
 		}
+
 		mutex_unlock(&wk->sdata->u.mgd.mtx);
 	}
 
@@ -2282,14 +2232,16 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_work *wk;
-	const u8 *bssid = req->bss->bssid;
+	u8 bssid[ETH_ALEN];
+	bool assoc_bss = false;
 
 	mutex_lock(&ifmgd->mtx);
 
+	memcpy(bssid, req->bss->bssid, ETH_ALEN);
 	if (ifmgd->associated == req->bss) {
-		bssid = req->bss->bssid;
-		ieee80211_set_disassoc(sdata, true);
+		ieee80211_set_disassoc(sdata, false);
 		mutex_unlock(&ifmgd->mtx);
+		assoc_bss = true;
 	} else {
 		bool not_auth_yet = false;
 
@@ -2335,6 +2287,8 @@
 	ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH,
 				       req->reason_code, cookie,
 				       !req->local_state_change);
+	if (assoc_bss)
+		sta_info_destroy_addr(sdata, bssid);
 
 	ieee80211_recalc_idle(sdata->local);
 
@@ -2379,41 +2333,6 @@
 	return 0;
 }
 
-int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
-			 struct ieee80211_channel *chan,
-			 enum nl80211_channel_type channel_type,
-			 const u8 *buf, size_t len, u64 *cookie)
-{
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	struct sk_buff *skb;
-
-	/* Check that we are on the requested channel for transmission */
-	if ((chan != local->tmp_channel ||
-	     channel_type != local->tmp_channel_type) &&
-	    (chan != local->oper_channel ||
-	     channel_type != local->_oper_channel_type))
-		return -EBUSY;
-
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
-	if (!skb)
-		return -ENOMEM;
-	skb_reserve(skb, local->hw.extra_tx_headroom);
-
-	memcpy(skb_put(skb, len), buf, len);
-
-	if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
-		IEEE80211_SKB_CB(skb)->flags |=
-			IEEE80211_TX_INTFL_DONT_ENCRYPT;
-	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX |
-		IEEE80211_TX_CTL_REQ_TX_STATUS;
-	skb->dev = sdata->dev;
-	ieee80211_tx_skb(sdata, skb);
-
-	*cookie = (unsigned long) skb;
-	return 0;
-}
-
 void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
 			       enum nl80211_cqm_rssi_threshold_event rssi_event,
 			       gfp_t gfp)
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 75202b2..d287fde 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -40,22 +40,14 @@
 	list_for_each_entry(sdata, &local->interfaces, list)
 		ieee80211_disable_keys(sdata);
 
-	/* Tear down aggregation sessions */
-
-	rcu_read_lock();
-
-	if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
-		list_for_each_entry_rcu(sta, &local->sta_list, list) {
+	/* tear down aggregation sessions and remove STAs */
+	mutex_lock(&local->sta_mtx);
+	list_for_each_entry(sta, &local->sta_list, list) {
+		if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
 			set_sta_flags(sta, WLAN_STA_BLOCK_BA);
 			ieee80211_sta_tear_down_BA_sessions(sta);
 		}
-	}
 
-	rcu_read_unlock();
-
-	/* remove STAs */
-	mutex_lock(&local->sta_mtx);
-	list_for_each_entry(sta, &local->sta_list, list) {
 		if (sta->uploaded) {
 			sdata = sta->sdata;
 			if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -72,6 +64,8 @@
 
 	/* remove all interfaces */
 	list_for_each_entry(sdata, &local->interfaces, list) {
+		cancel_work_sync(&sdata->work);
+
 		switch(sdata->vif.type) {
 		case NL80211_IFTYPE_STATION:
 			ieee80211_sta_quiesce(sdata);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 065a9619..168427b 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -147,5 +147,18 @@
 }
 #endif
 
+#ifdef CONFIG_MAC80211_RC_MINSTREL_HT
+extern int rc80211_minstrel_ht_init(void);
+extern void rc80211_minstrel_ht_exit(void);
+#else
+static inline int rc80211_minstrel_ht_init(void)
+{
+	return 0;
+}
+static inline void rc80211_minstrel_ht_exit(void)
+{
+}
+#endif
+
 
 #endif /* IEEE80211_RATE_H */
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
new file mode 100644
index 0000000..52c8503
--- /dev/null
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -0,0 +1,825 @@
+/*
+ * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/ieee80211.h>
+#include <net/mac80211.h>
+#include "rate.h"
+#include "rc80211_minstrel.h"
+#include "rc80211_minstrel_ht.h"
+
+#define AVG_PKT_SIZE	1200
+#define SAMPLE_COLUMNS	10
+#define EWMA_LEVEL		75
+
+/* Number of bits for an average sized packet */
+#define MCS_NBITS (AVG_PKT_SIZE << 3)
+
+/* Number of symbols for a packet with (bps) bits per symbol */
+#define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps))
+
+/* Transmission time for a packet containing (syms) symbols */
+#define MCS_SYMBOL_TIME(sgi, syms)					\
+	(sgi ?								\
+	  ((syms) * 18 + 4) / 5 :	/* syms * 3.6 us */		\
+	  (syms) << 2			/* syms * 4 us */		\
+	)
+
+/* Transmit duration for the raw data part of an average sized packet */
+#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
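+
+/*
+ * Worked example: with AVG_PKT_SIZE = 1200, MCS_NBITS is 9600 bits.
+ * A single-stream, long-GI, 20 MHz MCS7 rate carries 260 bits per
+ * symbol, so MCS_NSYMS(260) = (9600 + 259) / 260 = 37 symbols and
+ * MCS_DURATION(1, 0, 260) = 37 << 2 = 148 us of raw airtime for an
+ * average sized packet.
+ */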
+
+/* MCS rate information for an MCS group */
+#define MCS_GROUP(_streams, _sgi, _ht40) {				\
+	.streams = _streams,						\
+	.flags =							\
+		(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |			\
+		(_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0),		\
+	.duration = {							\
+		MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26),		\
+		MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52),		\
+		MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78),		\
+		MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104),	\
+		MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156),	\
+		MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208),	\
+		MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234),	\
+		MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260)		\
+	}								\
+}
+
+/*
+ * To enable sufficiently targeted rate sampling, MCS rates are divided into
+ * groups, based on the number of streams and flags (HT40, SGI) that they
+ * use.
+ */
+const struct mcs_group minstrel_mcs_groups[] = {
+	MCS_GROUP(1, 0, 0),
+	MCS_GROUP(2, 0, 0),
+#if MINSTREL_MAX_STREAMS >= 3
+	MCS_GROUP(3, 0, 0),
+#endif
+
+	MCS_GROUP(1, 1, 0),
+	MCS_GROUP(2, 1, 0),
+#if MINSTREL_MAX_STREAMS >= 3
+	MCS_GROUP(3, 1, 0),
+#endif
+
+	MCS_GROUP(1, 0, 1),
+	MCS_GROUP(2, 0, 1),
+#if MINSTREL_MAX_STREAMS >= 3
+	MCS_GROUP(3, 0, 1),
+#endif
+
+	MCS_GROUP(1, 1, 1),
+	MCS_GROUP(2, 1, 1),
+#if MINSTREL_MAX_STREAMS >= 3
+	MCS_GROUP(3, 1, 1),
+#endif
+};
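+
+/*
+ * Indexing example: the flat rate index used throughout this file encodes
+ * group * MCS_GROUP_RATES + rate. With MINSTREL_MAX_STREAMS == 3, index
+ * 25 selects group 25 / 8 = 3 (single stream, short GI, 20 MHz) and
+ * rate 25 % 8 = 1, i.e. MCS1 sent with a short guard interval.
+ */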
+
+static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
+
+/*
+ * Perform EWMA (Exponentially Weighted Moving Average) calculation
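+ *
+ * Example: with EWMA_LEVEL == 75, minstrel_ewma(60, 100, 75) =
+ * (100 * 25 + 60 * 75) / 100 = 70, i.e. each new sample moves the
+ * average a quarter of the way towards it.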
+ */
+static int
+minstrel_ewma(int old, int new, int weight)
+{
+	return (new * (100 - weight) + old * weight) / 100;
+}
+
+/*
+ * Look up an MCS group index based on mac80211 rate information
+ */
+static int
+minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
+{
+	int streams = (rate->idx / MCS_GROUP_RATES) + 1;
+	u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
+		if (minstrel_mcs_groups[i].streams != streams)
+			continue;
+		if (minstrel_mcs_groups[i].flags != (rate->flags & flags))
+			continue;
+
+		return i;
+	}
+
+	WARN_ON(1);
+	return 0;
+}
+
+static inline struct minstrel_rate_stats *
+minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
+{
+	return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES];
+}
+
+
+/*
+ * Recalculate success probabilities and counters for a rate using EWMA
+ */
+static void
+minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr)
+{
+	if (unlikely(mr->attempts > 0)) {
+		mr->sample_skipped = 0;
+		mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
+		if (!mr->att_hist)
+			mr->probability = mr->cur_prob;
+		else
+			mr->probability = minstrel_ewma(mr->probability,
+				mr->cur_prob, EWMA_LEVEL);
+		mr->att_hist += mr->attempts;
+		mr->succ_hist += mr->success;
+	} else {
+		mr->sample_skipped++;
+	}
+	mr->last_success = mr->success;
+	mr->last_attempts = mr->attempts;
+	mr->success = 0;
+	mr->attempts = 0;
+}
+
+/*
+ * Calculate throughput based on the average A-MPDU length, taking into account
+ * the expected number of retransmissions and their expected length
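+ *
+ * Illustrative numbers: with usecs = 200 and a delivery probability of
+ * MINSTREL_FRAC(3, 4) = 49152, cur_tp = MINSTREL_TRUNC(5000 * 49152) =
+ * MINSTREL_TRUNC(245760000) = 3750.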
+ */
+static void
+minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+                    int group, int rate)
+{
+	struct minstrel_rate_stats *mr;
+	unsigned int usecs;
+
+	mr = &mi->groups[group].rates[rate];
+
+	if (mr->probability < MINSTREL_FRAC(1, 10)) {
+		mr->cur_tp = 0;
+		return;
+	}
+
+	usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
+	usecs += minstrel_mcs_groups[group].duration[rate];
+	mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability);
+}
+
+/*
+ * Update rate statistics and select new primary rates
+ *
+ * Rules for rate selection:
+ *  - max_prob_rate must use only one stream, as a tradeoff between delivery
+ *    probability and throughput during strong fluctuations
+ *  - as long as the max prob rate has a probability of more than 3/4, pick
+ *    higher throughput rates, even if the probability is a bit lower
+ */
+static void
+minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
+{
+	struct minstrel_mcs_group_data *mg;
+	struct minstrel_rate_stats *mr;
+	int cur_prob, cur_prob_tp, cur_tp, cur_tp2;
+	int group, i, index;
+
+	if (mi->ampdu_packets > 0) {
+		mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
+			MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL);
+		mi->ampdu_len = 0;
+		mi->ampdu_packets = 0;
+	}
+
+	mi->sample_slow = 0;
+	mi->sample_count = 0;
+	mi->max_tp_rate = 0;
+	mi->max_tp_rate2 = 0;
+	mi->max_prob_rate = 0;
+
+	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
+		cur_prob = 0;
+		cur_prob_tp = 0;
+		cur_tp = 0;
+		cur_tp2 = 0;
+
+		mg = &mi->groups[group];
+		if (!mg->supported)
+			continue;
+
+		mg->max_tp_rate = 0;
+		mg->max_tp_rate2 = 0;
+		mg->max_prob_rate = 0;
+		mi->sample_count++;
+
+		for (i = 0; i < MCS_GROUP_RATES; i++) {
+			if (!(mg->supported & BIT(i)))
+				continue;
+
+			mr = &mg->rates[i];
+			mr->retry_updated = false;
+			index = MCS_GROUP_RATES * group + i;
+			minstrel_calc_rate_ewma(mp, mr);
+			minstrel_ht_calc_tp(mp, mi, group, i);
+
+			if (!mr->cur_tp)
+				continue;
+
+			/* ignore the lowest rate of each single-stream group */
+			if (!i && minstrel_mcs_groups[group].streams == 1)
+				continue;
+
+			if ((mr->cur_tp > cur_prob_tp && mr->probability >
+			     MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) {
+				mg->max_prob_rate = index;
+				cur_prob = mr->probability;
+			}
+
+			if (mr->cur_tp > cur_tp) {
+				swap(index, mg->max_tp_rate);
+				cur_tp = mr->cur_tp;
+				mr = minstrel_get_ratestats(mi, index);
+			}
+
+			if (index >= mg->max_tp_rate)
+				continue;
+
+			if (mr->cur_tp > cur_tp2) {
+				mg->max_tp_rate2 = index;
+				cur_tp2 = mr->cur_tp;
+			}
+		}
+	}
+
+	/* try to sample up to half of the available rates during each interval */
+	mi->sample_count *= 4;
+
+	cur_prob = 0;
+	cur_prob_tp = 0;
+	cur_tp = 0;
+	cur_tp2 = 0;
+	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
+		mg = &mi->groups[group];
+		if (!mg->supported)
+			continue;
+
+		mr = minstrel_get_ratestats(mi, mg->max_prob_rate);
+		if (cur_prob_tp < mr->cur_tp &&
+		    minstrel_mcs_groups[group].streams == 1) {
+			mi->max_prob_rate = mg->max_prob_rate;
+			cur_prob = mr->cur_prob;
+		}
+
+		mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
+		if (cur_tp < mr->cur_tp) {
+			mi->max_tp_rate = mg->max_tp_rate;
+			cur_tp = mr->cur_tp;
+		}
+
+		mr = minstrel_get_ratestats(mi, mg->max_tp_rate2);
+		if (cur_tp2 < mr->cur_tp) {
+			mi->max_tp_rate2 = mg->max_tp_rate2;
+			cur_tp2 = mr->cur_tp;
+		}
+	}
+
+	mi->stats_update = jiffies;
+}
+
+static bool
+minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
+{
+	if (!rate->count)
+		return false;
+
+	if (rate->idx < 0)
+		return false;
+
+	return !!(rate->flags & IEEE80211_TX_RC_MCS);
+}
+
+static void
+minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
+{
+	struct minstrel_mcs_group_data *mg;
+
+	for (;;) {
+		mi->sample_group++;
+		mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
+		mg = &mi->groups[mi->sample_group];
+
+		if (!mg->supported)
+			continue;
+
+		if (++mg->index >= MCS_GROUP_RATES) {
+			mg->index = 0;
+			if (++mg->column >= ARRAY_SIZE(sample_table))
+				mg->column = 0;
+		}
+		break;
+	}
+}
+
+static void
+minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
+			bool primary)
+{
+	int group, orig_group;
+
+	orig_group = group = *idx / MCS_GROUP_RATES;
+	while (group > 0) {
+		group--;
+
+		if (!mi->groups[group].supported)
+			continue;
+
+		if (minstrel_mcs_groups[group].streams >
+		    minstrel_mcs_groups[orig_group].streams)
+			continue;
+
+		if (primary)
+			*idx = mi->groups[group].max_tp_rate;
+		else
+			*idx = mi->groups[group].max_tp_rate2;
+		break;
+	}
+}
+
+static void
+minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+	u16 tid;
+
+	if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
+		return;
+
+	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+		return;
+
+	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+	if (likely(sta->ampdu_mlme.tid_tx[tid]))
+		return;
+
+	ieee80211_start_tx_ba_session(pubsta, tid);
+}
+
+static void
+minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
+                      struct ieee80211_sta *sta, void *priv_sta,
+                      struct sk_buff *skb)
+{
+	struct minstrel_ht_sta_priv *msp = priv_sta;
+	struct minstrel_ht_sta *mi = &msp->ht;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *ar = info->status.rates;
+	struct minstrel_rate_stats *rate, *rate2;
+	struct minstrel_priv *mp = priv;
+	bool last = false;
+	int group;
+	int i = 0;
+
+	if (!msp->is_ht)
+		return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb);
+
+	/* This packet was aggregated but doesn't carry status info */
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
+		return;
+
+	if (!info->status.ampdu_len) {
+		info->status.ampdu_ack_len = 1;
+		info->status.ampdu_len = 1;
+	}
+
+	mi->ampdu_packets++;
+	mi->ampdu_len += info->status.ampdu_len;
+
+	if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
+		mi->sample_wait = 4 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
+		mi->sample_tries = 3;
+		mi->sample_count--;
+	}
+
+	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+		mi->sample_packets += info->status.ampdu_len;
+		minstrel_next_sample_idx(mi);
+	}
+
+	for (i = 0; !last; i++) {
+		last = (i == IEEE80211_TX_MAX_RATES - 1) ||
+		       !minstrel_ht_txstat_valid(&ar[i + 1]);
+
+		if (!minstrel_ht_txstat_valid(&ar[i]))
+			break;
+
+		group = minstrel_ht_get_group_idx(&ar[i]);
+		rate = &mi->groups[group].rates[ar[i].idx % MCS_GROUP_RATES];
+
+		if (last && (info->flags & IEEE80211_TX_STAT_ACK))
+			rate->success += info->status.ampdu_ack_len;
+
+		rate->attempts += ar[i].count * info->status.ampdu_len;
+	}
+
+	/*
+	 * check for sudden death of spatial multiplexing,
+	 * downgrade to a lower number of streams if necessary.
+	 */
+	rate = minstrel_get_ratestats(mi, mi->max_tp_rate);
+	if (rate->attempts > 30 &&
+	    MINSTREL_FRAC(rate->success, rate->attempts) <
+	    MINSTREL_FRAC(20, 100))
+		minstrel_downgrade_rate(mi, &mi->max_tp_rate, true);
+
+	rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2);
+	if (rate2->attempts > 30 &&
+	    MINSTREL_FRAC(rate2->success, rate2->attempts) <
+	    MINSTREL_FRAC(20, 100))
+		minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false);
+
+	if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
+		minstrel_ht_update_stats(mp, mi);
+		minstrel_aggr_check(mp, sta, skb);
+	}
+}
+
+static void
+minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+                         int index)
+{
+	struct minstrel_rate_stats *mr;
+	const struct mcs_group *group;
+	unsigned int tx_time, tx_time_rtscts, tx_time_data;
+	unsigned int cw = mp->cw_min;
+	unsigned int t_slot = 9; /* FIXME */
+	unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
+
+	mr = minstrel_get_ratestats(mi, index);
+	if (mr->probability < MINSTREL_FRAC(1, 10)) {
+		mr->retry_count = 1;
+		mr->retry_count_rtscts = 1;
+		return;
+	}
+
+	mr->retry_count = 2;
+	mr->retry_count_rtscts = 2;
+	mr->retry_updated = true;
+
+	group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
+	tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len;
+	tx_time = 2 * (t_slot + mi->overhead + tx_time_data);
+	tx_time_rtscts = 2 * (t_slot + mi->overhead_rtscts + tx_time_data);
+	do {
+		cw = (cw << 1) | 1;
+		cw = min(cw, mp->cw_max);
+		tx_time += cw + t_slot + mi->overhead;
+		tx_time_rtscts += cw + t_slot + mi->overhead_rtscts;
+		if (tx_time_rtscts < mp->segment_size)
+			mr->retry_count_rtscts++;
+	} while ((tx_time < mp->segment_size) &&
+	         (++mr->retry_count < mp->max_retry));
+}
+
+
+static void
+minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+                     struct ieee80211_tx_rate *rate, int index,
+                     struct ieee80211_tx_rate_control *txrc,
+                     bool sample, bool rtscts)
+{
+	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
+	struct minstrel_rate_stats *mr;
+
+	mr = minstrel_get_ratestats(mi, index);
+	if (!mr->retry_updated)
+		minstrel_calc_retransmit(mp, mi, index);
+
+	if (mr->probability < MINSTREL_FRAC(20, 100))
+		rate->count = 2;
+	else if (rtscts)
+		rate->count = mr->retry_count_rtscts;
+	else
+		rate->count = mr->retry_count;
+
+	rate->flags = IEEE80211_TX_RC_MCS | group->flags;
+	if (txrc->short_preamble)
+		rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
+	if (txrc->rts || rtscts)
+		rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
+	rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES;
+}
+
+static inline int
+minstrel_get_duration(int index)
+{
+	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
+	return group->duration[index % MCS_GROUP_RATES];
+}
+
+static int
+minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
+{
+	struct minstrel_rate_stats *mr;
+	struct minstrel_mcs_group_data *mg;
+	int sample_idx = 0;
+
+	if (mi->sample_wait > 0) {
+		mi->sample_wait--;
+		return -1;
+	}
+
+	if (!mi->sample_tries)
+		return -1;
+
+	mi->sample_tries--;
+	mg = &mi->groups[mi->sample_group];
+	sample_idx = sample_table[mg->column][mg->index];
+	mr = &mg->rates[sample_idx];
+	sample_idx += mi->sample_group * MCS_GROUP_RATES;
+
+	/*
+	 * When not using MRR, do not sample if the probability is already
+	 * higher than 95% to avoid wasting airtime
+	 */
+	if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100)))
+		goto next;
+
+	/*
+	 * Make sure that lower rates get sampled only occasionally,
+	 * if the link is working perfectly.
+	 */
+	if (minstrel_get_duration(sample_idx) >
+	    minstrel_get_duration(mi->max_tp_rate)) {
+		if (mr->sample_skipped < 10)
+			goto next;
+
+		if (mi->sample_slow++ > 2)
+			goto next;
+	}
+
+	return sample_idx;
+
+next:
+	minstrel_next_sample_idx(mi);
+	return -1;
+}
+
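+/*
+ * The MRR chain filled in below is, for example,
+ * { sample rate, max_tp_rate, max_prob_rate, terminator } while probing,
+ * and { max_tp_rate, max_tp_rate2, max_prob_rate, terminator } otherwise.
+ */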
+static void
+minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
+                     struct ieee80211_tx_rate_control *txrc)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
+	struct ieee80211_tx_rate *ar = info->status.rates;
+	struct minstrel_ht_sta_priv *msp = priv_sta;
+	struct minstrel_ht_sta *mi = &msp->ht;
+	struct minstrel_priv *mp = priv;
+	int sample_idx;
+
+	if (rate_control_send_low(sta, priv_sta, txrc))
+		return;
+
+	if (!msp->is_ht)
+		return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
+
+	info->flags |= mi->tx_flags;
+	sample_idx = minstrel_get_sample_rate(mp, mi);
+	if (sample_idx >= 0) {
+		minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
+			txrc, true, false);
+		minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
+			txrc, false, true);
+		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+	} else {
+		minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
+			txrc, false, false);
+		minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
+			txrc, false, true);
+	}
+	minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, true);
+
+	ar[3].count = 0;
+	ar[3].idx = -1;
+
+	mi->total_packets++;
+
+	/* wraparound */
+	if (mi->total_packets == ~0) {
+		mi->total_packets = 0;
+		mi->sample_packets = 0;
+	}
+}
+
+static void
+minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
+                        struct ieee80211_sta *sta, void *priv_sta,
+			enum nl80211_channel_type oper_chan_type)
+{
+	struct minstrel_priv *mp = priv;
+	struct minstrel_ht_sta_priv *msp = priv_sta;
+	struct minstrel_ht_sta *mi = &msp->ht;
+	struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
+	struct ieee80211_local *local = hw_to_local(mp->hw);
+	u16 sta_cap = sta->ht_cap.cap;
+	int ack_dur;
+	int stbc;
+	int i;
+
+	/* fall back to the old minstrel for legacy stations */
+	if (sta && !sta->ht_cap.ht_supported) {
+		msp->is_ht = false;
+		memset(&msp->legacy, 0, sizeof(msp->legacy));
+		msp->legacy.r = msp->ratelist;
+		msp->legacy.sample_table = msp->sample_table;
+		return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy);
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) !=
+		MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS);
+
+	msp->is_ht = true;
+	memset(mi, 0, sizeof(*mi));
+	mi->stats_update = jiffies;
+
+	ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1);
+	mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur;
+	mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
+
+	mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
+
+	/* When using MRR, sample more on the first attempt, without delay */
+	if (mp->has_mrr) {
+		mi->sample_count = 16;
+		mi->sample_wait = 0;
+	} else {
+		mi->sample_count = 8;
+		mi->sample_wait = 8;
+	}
+	mi->sample_tries = 4;
+
+	stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >>
+		IEEE80211_HT_CAP_RX_STBC_SHIFT;
+	mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
+
+	if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
+		mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
+
+	if (oper_chan_type != NL80211_CHAN_HT40MINUS &&
+	    oper_chan_type != NL80211_CHAN_HT40PLUS)
+		sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+	for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
+		u16 req = 0;
+
+		mi->groups[i].supported = 0;
+		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) {
+			if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+				req |= IEEE80211_HT_CAP_SGI_40;
+			else
+				req |= IEEE80211_HT_CAP_SGI_20;
+		}
+
+		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			req |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+		if ((sta_cap & req) != req)
+			continue;
+
+		mi->groups[i].supported =
+			mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];
+	}
+}
+
+static void
+minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
+                      struct ieee80211_sta *sta, void *priv_sta)
+{
+	struct minstrel_priv *mp = priv;
+
+	minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type);
+}
+
+static void
+minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
+                        struct ieee80211_sta *sta, void *priv_sta,
+                        u32 changed, enum nl80211_channel_type oper_chan_type)
+{
+	minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type);
+}
+
+static void *
+minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
+{
+	struct ieee80211_supported_band *sband;
+	struct minstrel_ht_sta_priv *msp;
+	struct minstrel_priv *mp = priv;
+	struct ieee80211_hw *hw = mp->hw;
+	int max_rates = 0;
+	int i;
+
+	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+		sband = hw->wiphy->bands[i];
+		if (sband && sband->n_bitrates > max_rates)
+			max_rates = sband->n_bitrates;
+	}
+
+	msp = kzalloc(sizeof(*msp), gfp);
+	if (!msp)
+		return NULL;
+
+	msp->ratelist = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp);
+	if (!msp->ratelist)
+		goto error;
+
+	msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
+	if (!msp->sample_table)
+		goto error1;
+
+	return msp;
+
+error1:
+	kfree(msp->ratelist);
+error:
+	kfree(msp);
+	return NULL;
+}
+
+static void
+minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
+{
+	struct minstrel_ht_sta_priv *msp = priv_sta;
+
+	kfree(msp->sample_table);
+	kfree(msp->ratelist);
+	kfree(msp);
+}
+
+static void *
+minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+	return mac80211_minstrel.alloc(hw, debugfsdir);
+}
+
+static void
+minstrel_ht_free(void *priv)
+{
+	mac80211_minstrel.free(priv);
+}
+
+static struct rate_control_ops mac80211_minstrel_ht = {
+	.name = "minstrel_ht",
+	.tx_status = minstrel_ht_tx_status,
+	.get_rate = minstrel_ht_get_rate,
+	.rate_init = minstrel_ht_rate_init,
+	.rate_update = minstrel_ht_rate_update,
+	.alloc_sta = minstrel_ht_alloc_sta,
+	.free_sta = minstrel_ht_free_sta,
+	.alloc = minstrel_ht_alloc,
+	.free = minstrel_ht_free,
+#ifdef CONFIG_MAC80211_DEBUGFS
+	.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
+	.remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
+#endif
+};
+
+
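+/*
+ * Each column of sample_table ends up holding a random permutation of the
+ * rate indices 0..MCS_GROUP_RATES - 1 (e.g. { 3, 0, 6, 1, 7, 2, 5, 4 });
+ * each group walks down its current column before moving to the next one.
+ */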
+static void
+init_sample_table(void)
+{
+	int col, i, new_idx;
+	u8 rnd[MCS_GROUP_RATES];
+
+	memset(sample_table, 0xff, sizeof(sample_table));
+	for (col = 0; col < SAMPLE_COLUMNS; col++) {
+		for (i = 0; i < MCS_GROUP_RATES; i++) {
+			get_random_bytes(rnd, sizeof(rnd));
+			new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
+
+			while (sample_table[col][new_idx] != 0xff)
+				new_idx = (new_idx + 1) % MCS_GROUP_RATES;
+
+			sample_table[col][new_idx] = i;
+		}
+	}
+}
+
+int __init
+rc80211_minstrel_ht_init(void)
+{
+	init_sample_table();
+	return ieee80211_rate_control_register(&mac80211_minstrel_ht);
+}
+
+void
+rc80211_minstrel_ht_exit(void)
+{
+	ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
+}
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
new file mode 100644
index 0000000..462d2b2
--- /dev/null
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RC_MINSTREL_HT_H
+#define __RC_MINSTREL_HT_H
+
+/*
+ * The number of streams can be changed to 2 to reduce code
+ * size and memory footprint.
+ */
+#define MINSTREL_MAX_STREAMS	3
+#define MINSTREL_STREAM_GROUPS	4
+
+/* scaled fraction values */
+#define MINSTREL_SCALE	16
+#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / (div))
+#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)
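+
+/*
+ * Worked example: these are 16 bit fixed point values, so
+ * MINSTREL_FRAC(3, 4) = (3 << 16) / 4 = 49152 represents 0.75 and
+ * MINSTREL_TRUNC(MINSTREL_FRAC(5, 2)) = 163840 >> 16 = 2 recovers the
+ * integer part of 2.5.
+ */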
+
+#define MCS_GROUP_RATES	8
+
+struct mcs_group {
+	u32 flags;
+	unsigned int streams;
+	unsigned int duration[MCS_GROUP_RATES];
+};
+
+extern const struct mcs_group minstrel_mcs_groups[];
+
+struct minstrel_rate_stats {
+	/* current / last sampling period attempts/success counters */
+	unsigned int attempts, last_attempts;
+	unsigned int success, last_success;
+
+	/* total attempts/success counters */
+	u64 att_hist, succ_hist;
+
+	/* current throughput */
+	unsigned int cur_tp;
+
+	/* packet delivery probabilities */
+	unsigned int cur_prob, probability;
+
+	/* maximum retry counts */
+	unsigned int retry_count;
+	unsigned int retry_count_rtscts;
+
+	bool retry_updated;
+	u8 sample_skipped;
+};
+
+struct minstrel_mcs_group_data {
+	u8 index;
+	u8 column;
+
+	/* bitfield of supported MCS rates of this group */
+	u8 supported;
+
+	/* selected primary rates */
+	unsigned int max_tp_rate;
+	unsigned int max_tp_rate2;
+	unsigned int max_prob_rate;
+
+	/* MCS rate statistics */
+	struct minstrel_rate_stats rates[MCS_GROUP_RATES];
+};
+
+struct minstrel_ht_sta {
+	/* ampdu length (average, per sampling interval) */
+	unsigned int ampdu_len;
+	unsigned int ampdu_packets;
+
+	/* ampdu length (EWMA) */
+	unsigned int avg_ampdu_len;
+
+	/* best throughput rate */
+	unsigned int max_tp_rate;
+
+	/* second best throughput rate */
+	unsigned int max_tp_rate2;
+
+	/* best probability rate */
+	unsigned int max_prob_rate;
+
+	/* time of last status update */
+	unsigned long stats_update;
+
+	/* overhead time in usec for each frame */
+	unsigned int overhead;
+	unsigned int overhead_rtscts;
+
+	unsigned int total_packets;
+	unsigned int sample_packets;
+
+	/* tx flags to add for frames for this sta */
+	u32 tx_flags;
+
+	u8 sample_wait;
+	u8 sample_tries;
+	u8 sample_count;
+	u8 sample_slow;
+
+	/* current MCS group to be sampled */
+	u8 sample_group;
+
+	/* MCS rate group info and statistics */
+	struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS];
+};
+
+struct minstrel_ht_sta_priv {
+	union {
+		struct minstrel_ht_sta ht;
+		struct minstrel_sta_info legacy;
+	};
+#ifdef CONFIG_MAC80211_DEBUGFS
+	struct dentry *dbg_stats;
+#endif
+	void *ratelist;
+	void *sample_table;
+	bool is_ht;
+};
+
+void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
+void minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta);
+
+#endif
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
new file mode 100644
index 0000000..4a5a4b3
--- /dev/null
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/debugfs.h>
+#include <linux/ieee80211.h>
+#include <net/mac80211.h>
+#include "rc80211_minstrel.h"
+#include "rc80211_minstrel_ht.h"
+
+static int
+minstrel_ht_stats_open(struct inode *inode, struct file *file)
+{
+	struct minstrel_ht_sta_priv *msp = inode->i_private;
+	struct minstrel_ht_sta *mi = &msp->ht;
+	struct minstrel_debugfs_info *ms;
+	unsigned int i, j, tp, prob, eprob;
+	char *p;
+	int ret;
+
+	if (!msp->is_ht) {
+		inode->i_private = &msp->legacy;
+		ret = minstrel_stats_open(inode, file);
+		inode->i_private = msp;
+		return ret;
+	}
+
+	ms = kmalloc(sizeof(*ms) + 8192, GFP_KERNEL);
+	if (!ms)
+		return -ENOMEM;
+
+	file->private_data = ms;
+	p = ms->buf;
+	p += sprintf(p, "type      rate     throughput  ewma prob   this prob  "
+			"this succ/attempt   success    attempts\n");
+	for (i = 0; i < MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS; i++) {
+		char htmode = '2';
+		char gimode = 'L';
+
+		if (!mi->groups[i].supported)
+			continue;
+
+		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			htmode = '4';
+		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI)
+			gimode = 'S';
+
+		for (j = 0; j < MCS_GROUP_RATES; j++) {
+			struct minstrel_rate_stats *mr = &mi->groups[i].rates[j];
+			int idx = i * MCS_GROUP_RATES + j;
+
+			if (!(mi->groups[i].supported & BIT(j)))
+				continue;
+
+			p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);
+
+			*(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
+			*(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
+			*(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
+			p += sprintf(p, "MCS%-2u", (minstrel_mcs_groups[i].streams - 1) *
+					MCS_GROUP_RATES + j);
+
+			tp = mr->cur_tp / 10;
+			prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
+			eprob = MINSTREL_TRUNC(mr->probability * 1000);
+
+			p += sprintf(p, "  %6u.%1u   %6u.%1u   %6u.%1u        "
+					"%3u(%3u)   %8llu    %8llu\n",
+					tp / 10, tp % 10,
+					eprob / 10, eprob % 10,
+					prob / 10, prob % 10,
+					mr->last_success,
+					mr->last_attempts,
+					(unsigned long long)mr->succ_hist,
+					(unsigned long long)mr->att_hist);
+		}
+	}
+	p += sprintf(p, "\nTotal packet count:     ideal %d      "
+			"lookaround %d\n",
+			max(0, (int) mi->total_packets - (int) mi->sample_packets),
+			mi->sample_packets);
+	p += sprintf(p, "Average A-MPDU length: %d.%d\n",
+		MINSTREL_TRUNC(mi->avg_ampdu_len),
+		MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
+	ms->len = p - ms->buf;
+
+	return 0;
+}
+
+static const struct file_operations minstrel_ht_stat_fops = {
+	.owner = THIS_MODULE,
+	.open = minstrel_ht_stats_open,
+	.read = minstrel_stats_read,
+	.release = minstrel_stats_release,
+};
+
+void
+minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
+{
+	struct minstrel_ht_sta_priv *msp = priv_sta;
+
+	msp->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, msp,
+			&minstrel_ht_stat_fops);
+}
+
+void
+minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta)
+{
+	struct minstrel_ht_sta_priv *msp = priv_sta;
+
+	debugfs_remove(msp->dbg_stats);
+}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index be9abc2..fa0f37e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -293,7 +293,7 @@
 			skb2 = skb_clone(skb, GFP_ATOMIC);
 			if (skb2) {
 				skb2->dev = prev_dev;
-				netif_rx(skb2);
+				netif_receive_skb(skb2);
 			}
 		}
 
@@ -304,7 +304,7 @@
 
 	if (prev_dev) {
 		skb->dev = prev_dev;
-		netif_rx(skb);
+		netif_receive_skb(skb);
 	} else
 		dev_kfree_skb(skb);
 
@@ -719,16 +719,13 @@
 
 	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
 
-	spin_lock(&sta->lock);
-
-	if (!sta->ampdu_mlme.tid_active_rx[tid])
-		goto dont_reorder_unlock;
-
-	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+	if (!tid_agg_rx)
+		goto dont_reorder;
 
 	/* qos null data frames are excluded */
 	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
-		goto dont_reorder_unlock;
+		goto dont_reorder;
 
 	/* new, potentially un-ordered, ampdu frame - process it */
 
@@ -740,20 +737,22 @@
 	/* if this mpdu is fragmented - terminate rx aggregation session */
 	sc = le16_to_cpu(hdr->seq_ctrl);
 	if (sc & IEEE80211_SCTL_FRAG) {
-		spin_unlock(&sta->lock);
-		__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
-					       WLAN_REASON_QSTA_REQUIRE_SETUP);
-		dev_kfree_skb(skb);
+		skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+		skb_queue_tail(&rx->sdata->skb_queue, skb);
+		ieee80211_queue_work(&local->hw, &rx->sdata->work);
 		return;
 	}
 
-	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) {
-		spin_unlock(&sta->lock);
+	/*
+	 * No locking needed -- we will only ever process one
+	 * RX packet at a time, and thus own tid_agg_rx. All
+	 * other code manipulating it needs to (and does) make
+	 * sure that we cannot get to it any more before doing
+	 * anything with it.
+	 */
+	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
 		return;
-	}
 
- dont_reorder_unlock:
-	spin_unlock(&sta->lock);
  dont_reorder:
 	__skb_queue_tail(frames, skb);
 }
@@ -825,6 +824,7 @@
 	ieee80211_rx_result result = RX_DROP_UNUSABLE;
 	struct ieee80211_key *stakey = NULL;
 	int mmie_keyidx = -1;
+	__le16 fc;
 
 	/*
 	 * Key selection 101
@@ -866,13 +866,15 @@
 	if (rx->sta)
 		stakey = rcu_dereference(rx->sta->key);
 
-	if (!ieee80211_has_protected(hdr->frame_control))
+	fc = hdr->frame_control;
+
+	if (!ieee80211_has_protected(fc))
 		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
 
 	if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
 		rx->key = stakey;
 		/* Skip decryption if the frame is not protected. */
-		if (!ieee80211_has_protected(hdr->frame_control))
+		if (!ieee80211_has_protected(fc))
 			return RX_CONTINUE;
 	} else if (mmie_keyidx >= 0) {
 		/* Broadcast/multicast robust management frame / BIP */
@@ -884,7 +886,7 @@
 		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
 			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
 		rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
-	} else if (!ieee80211_has_protected(hdr->frame_control)) {
+	} else if (!ieee80211_has_protected(fc)) {
 		/*
 		 * The frame was not protected, so skip decryption. However, we
 		 * need to set rx->key if there is a key that could have been
@@ -892,7 +894,7 @@
 		 * have been expected.
 		 */
 		struct ieee80211_key *key = NULL;
-		if (ieee80211_is_mgmt(hdr->frame_control) &&
+		if (ieee80211_is_mgmt(fc) &&
 		    is_multicast_ether_addr(hdr->addr1) &&
 		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
 			rx->key = key;
@@ -914,7 +916,7 @@
 		    (status->flag & RX_FLAG_IV_STRIPPED))
 			return RX_CONTINUE;
 
-		hdrlen = ieee80211_hdrlen(hdr->frame_control);
+		hdrlen = ieee80211_hdrlen(fc);
 
 		if (rx->skb->len < 8 + hdrlen)
 			return RX_DROP_UNUSABLE; /* TODO: count this? */
@@ -947,19 +949,17 @@
 
 	if (skb_linearize(rx->skb))
 		return RX_DROP_UNUSABLE;
-
-	hdr = (struct ieee80211_hdr *)rx->skb->data;
-
-	/* Check for weak IVs if possible */
-	if (rx->sta && rx->key->conf.alg == ALG_WEP &&
-	    ieee80211_is_data(hdr->frame_control) &&
-	    (!(status->flag & RX_FLAG_IV_STRIPPED) ||
-	     !(status->flag & RX_FLAG_DECRYPTED)) &&
-	    ieee80211_wep_is_weak_iv(rx->skb, rx->key))
-		rx->sta->wep_weak_iv_count++;
+	/* the hdr variable is invalid now! */
 
 	switch (rx->key->conf.alg) {
 	case ALG_WEP:
+		/* Check for weak IVs if possible */
+		if (rx->sta && ieee80211_is_data(fc) &&
+		    (!(status->flag & RX_FLAG_IV_STRIPPED) ||
+		     !(status->flag & RX_FLAG_DECRYPTED)) &&
+		    ieee80211_wep_is_weak_iv(rx->skb, rx->key))
+			rx->sta->wep_weak_iv_count++;
+
 		result = ieee80211_crypto_wep_decrypt(rx);
 		break;
 	case ALG_TKIP:
@@ -1267,11 +1267,13 @@
 						 rx->queue, &(rx->skb));
 		if (rx->key && rx->key->conf.alg == ALG_CCMP &&
 		    ieee80211_has_protected(fc)) {
+			int queue = ieee80211_is_mgmt(fc) ?
+				NUM_RX_DATA_QUEUES : rx->queue;
 			/* Store CCMP PN so that we can verify that the next
 			 * fragment has a sequential PN value. */
 			entry->ccmp = 1;
 			memcpy(entry->last_pn,
-			       rx->key->u.ccmp.rx_pn[rx->queue],
+			       rx->key->u.ccmp.rx_pn[queue],
 			       CCMP_PN_LEN);
 		}
 		return RX_QUEUED;
@@ -1291,6 +1293,7 @@
 	if (entry->ccmp) {
 		int i;
 		u8 pn[CCMP_PN_LEN], *rpn;
+		int queue;
 		if (!rx->key || rx->key->conf.alg != ALG_CCMP)
 			return RX_DROP_UNUSABLE;
 		memcpy(pn, entry->last_pn, CCMP_PN_LEN);
@@ -1299,7 +1302,9 @@
 			if (pn[i])
 				break;
 		}
-		rpn = rx->key->u.ccmp.rx_pn[rx->queue];
+		queue = ieee80211_is_mgmt(fc) ?
+			NUM_RX_DATA_QUEUES : rx->queue;
+		rpn = rx->key->u.ccmp.rx_pn[queue];
 		if (memcmp(pn, rpn, CCMP_PN_LEN))
 			return RX_DROP_UNUSABLE;
 		memcpy(entry->last_pn, pn, CCMP_PN_LEN);
@@ -1573,7 +1578,7 @@
 			/* deliver to local stack */
 			skb->protocol = eth_type_trans(skb, dev);
 			memset(skb->cb, 0, sizeof(skb->cb));
-			netif_rx(skb);
+			netif_receive_skb(skb);
 		}
 	}
 
@@ -1829,13 +1834,11 @@
 				  &bar_data, sizeof(bar_data)))
 			return RX_DROP_MONITOR;
 
-		spin_lock(&rx->sta->lock);
 		tid = le16_to_cpu(bar_data.control) >> 12;
-		if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
-			spin_unlock(&rx->sta->lock);
+
+		tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
+		if (!tid_agg_rx)
 			return RX_DROP_MONITOR;
-		}
-		tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
 
 		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
 
@@ -1848,11 +1851,15 @@
 		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
 						 frames);
 		kfree_skb(skb);
-		spin_unlock(&rx->sta->lock);
 		return RX_QUEUED;
 	}
 
-	return RX_CONTINUE;
+	/*
+	 * After this point, we only want management frames,
+	 * so we can drop all remaining control frames to
+	 * cooked monitor interfaces.
+	 */
+	return RX_DROP_MONITOR;
 }
 
 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
@@ -1944,30 +1951,27 @@
 		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
 			break;
 
-		if (sdata->vif.type == NL80211_IFTYPE_STATION)
-			return ieee80211_sta_rx_mgmt(sdata, rx->skb);
-
 		switch (mgmt->u.action.u.addba_req.action_code) {
 		case WLAN_ACTION_ADDBA_REQ:
 			if (len < (IEEE80211_MIN_ACTION_SIZE +
 				   sizeof(mgmt->u.action.u.addba_req)))
-				return RX_DROP_MONITOR;
-			ieee80211_process_addba_request(local, rx->sta, mgmt, len);
-			goto handled;
+				goto invalid;
+			break;
 		case WLAN_ACTION_ADDBA_RESP:
 			if (len < (IEEE80211_MIN_ACTION_SIZE +
 				   sizeof(mgmt->u.action.u.addba_resp)))
-				break;
-			ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
-			goto handled;
+				goto invalid;
+			break;
 		case WLAN_ACTION_DELBA:
 			if (len < (IEEE80211_MIN_ACTION_SIZE +
 				   sizeof(mgmt->u.action.u.delba)))
-				break;
-			ieee80211_process_delba(sdata, rx->sta, mgmt, len);
-			goto handled;
+				goto invalid;
+			break;
+		default:
+			goto invalid;
 		}
-		break;
+
+		goto queue;
 	case WLAN_CATEGORY_SPECTRUM_MGMT:
 		if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
 			break;
@@ -1997,7 +2001,7 @@
 			if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
 				break;
 
-			return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+			goto queue;
 		}
 		break;
 	case WLAN_CATEGORY_SA_QUERY:
@@ -2015,11 +2019,12 @@
 		break;
 	case WLAN_CATEGORY_MESH_PLINK:
 	case WLAN_CATEGORY_MESH_PATH_SEL:
-		if (ieee80211_vif_is_mesh(&sdata->vif))
-			return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
-		break;
+		if (!ieee80211_vif_is_mesh(&sdata->vif))
+			break;
+		goto queue;
 	}
 
+ invalid:
 	/*
 	 * For AP mode, hostapd is responsible for handling any action
 	 * frames that we didn't handle, including returning unknown
@@ -2039,8 +2044,7 @@
 	 */
 	status = IEEE80211_SKB_RXCB(rx->skb);
 
-	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-	    cfg80211_rx_action(rx->sdata->dev, status->freq,
+	if (cfg80211_rx_action(rx->sdata->dev, status->freq,
 			       rx->skb->data, rx->skb->len,
 			       GFP_ATOMIC))
 		goto handled;
@@ -2052,11 +2056,11 @@
 	nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
 			       GFP_ATOMIC);
 	if (nskb) {
-		struct ieee80211_mgmt *mgmt = (void *)nskb->data;
+		struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
 
-		mgmt->u.action.category |= 0x80;
-		memcpy(mgmt->da, mgmt->sa, ETH_ALEN);
-		memcpy(mgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
+		nmgmt->u.action.category |= 0x80;
+		memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
+		memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
 
 		memset(nskb->cb, 0, sizeof(nskb->cb));
 
@@ -2068,6 +2072,14 @@
 		rx->sta->rx_packets++;
 	dev_kfree_skb(rx->skb);
 	return RX_QUEUED;
+
+ queue:
+	rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+	skb_queue_tail(&sdata->skb_queue, rx->skb);
+	ieee80211_queue_work(&local->hw, &sdata->work);
+	if (rx->sta)
+		rx->sta->rx_packets++;
+	return RX_QUEUED;
 }
 
 static ieee80211_rx_result debug_noinline
@@ -2075,10 +2087,15 @@
 {
 	struct ieee80211_sub_if_data *sdata = rx->sdata;
 	ieee80211_rx_result rxs;
+	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
+	__le16 stype;
 
 	if (!(rx->flags & IEEE80211_RX_RA_MATCH))
 		return RX_DROP_MONITOR;
 
+	if (rx->skb->len < 24)
+		return RX_DROP_MONITOR;
+
 	if (ieee80211_drop_unencrypted_mgmt(rx))
 		return RX_DROP_UNUSABLE;
 
@@ -2086,16 +2103,42 @@
 	if (rxs != RX_CONTINUE)
 		return rxs;
 
-	if (ieee80211_vif_is_mesh(&sdata->vif))
-		return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
+	stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
 
-	if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
-		return ieee80211_ibss_rx_mgmt(sdata, rx->skb);
+	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+	    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+	    sdata->vif.type != NL80211_IFTYPE_STATION)
+		return RX_DROP_MONITOR;
 
-	if (sdata->vif.type == NL80211_IFTYPE_STATION)
-		return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+	switch (stype) {
+	case cpu_to_le16(IEEE80211_STYPE_BEACON):
+	case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+		/* process for all: mesh, mlme, ibss */
+		break;
+	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+	case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+		/* process only for station */
+		if (sdata->vif.type != NL80211_IFTYPE_STATION)
+			return RX_DROP_MONITOR;
+		break;
+	case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+	case cpu_to_le16(IEEE80211_STYPE_AUTH):
+		/* process only for ibss */
+		if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+			return RX_DROP_MONITOR;
+		break;
+	default:
+		return RX_DROP_MONITOR;
+	}
 
-	return RX_DROP_MONITOR;
+	/* queue up frame and kick off work to process it */
+	rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+	skb_queue_tail(&sdata->skb_queue, rx->skb);
+	ieee80211_queue_work(&rx->local->hw, &sdata->work);
+	if (rx->sta)
+		rx->sta->rx_packets++;
+
+	return RX_QUEUED;
 }
 
 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
@@ -2151,7 +2194,7 @@
 		u8 rate_or_pad;
 		__le16 chan_freq;
 		__le16 chan_flags;
-	} __attribute__ ((packed)) *rthdr;
+	} __packed *rthdr;
 	struct sk_buff *skb = rx->skb, *skb2;
 	struct net_device *prev_dev = NULL;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -2201,7 +2244,7 @@
 			skb2 = skb_clone(skb, GFP_ATOMIC);
 			if (skb2) {
 				skb2->dev = prev_dev;
-				netif_rx(skb2);
+				netif_receive_skb(skb2);
 			}
 		}
 
@@ -2212,7 +2255,7 @@
 
 	if (prev_dev) {
 		skb->dev = prev_dev;
-		netif_rx(skb);
+		netif_receive_skb(skb);
 		skb = NULL;
 	} else
 		goto out_free_skb;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index e1b0be7..439c98d 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -286,6 +286,8 @@
 	local->scanning = 0;
 	local->scan_channel = NULL;
 
+	drv_sw_scan_complete(local);
+
 	/* we only have to protect scan_req and hw/sw scan */
 	mutex_unlock(&local->scan_mtx);
 
@@ -295,8 +297,6 @@
 
 	ieee80211_configure_filter(local);
 
-	drv_sw_scan_complete(local);
-
 	ieee80211_offchannel_return(local, true);
 
  done:
@@ -734,7 +734,7 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	int ret = -EBUSY;
-	enum nl80211_band band;
+	enum ieee80211_band band;
 
 	mutex_lock(&local->scan_mtx);
 
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ba9360a..67656cb 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -235,6 +235,8 @@
 	spin_lock_init(&sta->lock);
 	spin_lock_init(&sta->flaglock);
 	INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
+	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
+	mutex_init(&sta->ampdu_mlme.mtx);
 
 	memcpy(sta->sta.addr, addr, ETH_ALEN);
 	sta->local = local;
@@ -246,14 +248,12 @@
 	}
 
 	for (i = 0; i < STA_TID_NUM; i++) {
-		/* timer_to_tid must be initialized with identity mapping to
-		 * enable session_timer's data differentiation. refer to
-		 * sta_rx_agg_session_timer_expired for useage */
+		/*
+		 * timer_to_tid must be initialized with identity mapping
+		 * to enable session_timer's data differentiation. See
+		 * sta_rx_agg_session_timer_expired for usage.
+		 */
 		sta->timer_to_tid[i] = i;
-		/* tx */
-		sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
-		sta->ampdu_mlme.tid_tx[i] = NULL;
-		sta->ampdu_mlme.addba_req_num[i] = 0;
 	}
 	skb_queue_head_init(&sta->ps_tx_buf);
 	skb_queue_head_init(&sta->tx_filtered);
@@ -648,14 +648,6 @@
 
 	if (sta->key) {
 		ieee80211_key_free(sta->key);
-		/*
-		 * We have only unlinked the key, and actually destroying it
-		 * may mean it is removed from hardware which requires that
-		 * the key->sta pointer is still valid, so flush the key todo
-		 * list here.
-		 */
-		ieee80211_key_todo();
-
 		WARN_ON(sta->key);
 	}
 
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index df9d455..54262e7 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -42,9 +42,6 @@
  *	be in the queues
  * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
  *	station in power-save mode, reply when the driver unblocks.
- * @WLAN_STA_DISASSOC: Disassociation in progress.
- *	This is used to reject TX BA session requests when disassociation
- *	is in progress.
  */
 enum ieee80211_sta_info_flags {
 	WLAN_STA_AUTH		= 1<<0,
@@ -60,38 +57,44 @@
 	WLAN_STA_BLOCK_BA	= 1<<11,
 	WLAN_STA_PS_DRIVER	= 1<<12,
 	WLAN_STA_PSPOLL		= 1<<13,
-	WLAN_STA_DISASSOC       = 1<<14,
 };
 
 #define STA_TID_NUM 16
 #define ADDBA_RESP_INTERVAL HZ
-#define HT_AGG_MAX_RETRIES		(0x3)
+#define HT_AGG_MAX_RETRIES		0x3
 
-#define HT_AGG_STATE_INITIATOR_SHIFT	(4)
-
-#define HT_ADDBA_REQUESTED_MSK		BIT(0)
-#define HT_ADDBA_DRV_READY_MSK		BIT(1)
-#define HT_ADDBA_RECEIVED_MSK		BIT(2)
-#define HT_AGG_STATE_REQ_STOP_BA_MSK	BIT(3)
-#define HT_AGG_STATE_INITIATOR_MSK      BIT(HT_AGG_STATE_INITIATOR_SHIFT)
-#define HT_AGG_STATE_IDLE		(0x0)
-#define HT_AGG_STATE_OPERATIONAL	(HT_ADDBA_REQUESTED_MSK |	\
-					 HT_ADDBA_DRV_READY_MSK |	\
-					 HT_ADDBA_RECEIVED_MSK)
+#define HT_AGG_STATE_DRV_READY		0
+#define HT_AGG_STATE_RESPONSE_RECEIVED	1
+#define HT_AGG_STATE_OPERATIONAL	2
+#define HT_AGG_STATE_STOPPING		3
+#define HT_AGG_STATE_WANT_START		4
+#define HT_AGG_STATE_WANT_STOP		5
 
 /**
  * struct tid_ampdu_tx - TID aggregation information (Tx).
  *
+ * @rcu_head: rcu head for freeing structure
  * @addba_resp_timer: timer for peer's response to addba request
  * @pending: pending frames queue -- use sta's spinlock to protect
- * @ssn: Starting Sequence Number expected to be aggregated.
  * @dialog_token: dialog token for aggregation session
+ * @state: session state (see above)
+ * @stop_initiator: initiator of a session stop
+ *
+ * This structure is protected by RCU and the per-station
+ * spinlock. Assignments to the array holding it must hold
+ * the spinlock; only the TX path can access it under RCU
+ * lock-free if, and only if, the state has the flag
+ * %HT_AGG_STATE_OPERATIONAL set. Otherwise, the TX path
+ * must also acquire the spinlock and re-check the state,
+ * see comments in the tx code touching it.
  */
 struct tid_ampdu_tx {
+	struct rcu_head rcu_head;
 	struct timer_list addba_resp_timer;
 	struct sk_buff_head pending;
-	u16 ssn;
+	unsigned long state;
 	u8 dialog_token;
+	u8 stop_initiator;
 };
 
 /**
@@ -106,8 +109,18 @@
  * @buf_size: buffer size for incoming A-MPDUs
  * @timeout: reset timer value (in TUs).
  * @dialog_token: dialog token for aggregation session
+ * @rcu_head: RCU head used for freeing this struct
+ *
+ * This structure is protected by RCU and the per-station
+ * spinlock. Assignments to the array holding it must hold
+ * the spinlock, only the RX path can access it under RCU
+ * lock-free. The RX path, since it is single-threaded,
+ * can even modify the structure without locking since the
+ * only other modifications to it are done when the struct
+ * can not yet or no longer be found by the RX path.
  */
 struct tid_ampdu_rx {
+	struct rcu_head rcu_head;
 	struct sk_buff **reorder_buf;
 	unsigned long *reorder_time;
 	struct timer_list session_timer;
@@ -120,6 +133,32 @@
 };
 
 /**
+ * struct sta_ampdu_mlme - STA aggregation information.
+ *
+ * @tid_rx: aggregation info for Rx per TID -- RCU protected
+ * @tid_tx: aggregation info for Tx per TID
+ * @addba_req_num: number of times addBA request has been sent.
+ * @dialog_token_allocator: dialog token enumerator for each new session;
+ * @work: work struct for starting/stopping aggregation
+ * @tid_rx_timer_expired: bitmap indicating on which TIDs the
+ *	RX timer expired until the work for it runs
+ * @mtx: mutex to protect all TX data (except non-NULL assignments
+ *	to tid_tx[idx], which are protected by the sta spinlock)
+ */
+struct sta_ampdu_mlme {
+	struct mutex mtx;
+	/* rx */
+	struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
+	unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)];
+	/* tx */
+	struct work_struct work;
+	struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
+	u8 addba_req_num[STA_TID_NUM];
+	u8 dialog_token_allocator;
+};
+
+
+/**
  * enum plink_state - state of a mesh peer link finite state machine
  *
  * @PLINK_LISTEN: initial state, considered the implicit state of non existant
@@ -143,28 +182,6 @@
 };
 
 /**
- * struct sta_ampdu_mlme - STA aggregation information.
- *
- * @tid_active_rx: TID's state in Rx session state machine.
- * @tid_rx: aggregation info for Rx per TID
- * @tid_state_tx: TID's state in Tx session state machine.
- * @tid_tx: aggregation info for Tx per TID
- * @addba_req_num: number of times addBA request has been sent.
- * @dialog_token_allocator: dialog token enumerator for each new session;
- */
-struct sta_ampdu_mlme {
-	/* rx */
-	bool tid_active_rx[STA_TID_NUM];
-	struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
-	/* tx */
-	u8 tid_state_tx[STA_TID_NUM];
-	struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
-	u8 addba_req_num[STA_TID_NUM];
-	u8 dialog_token_allocator;
-};
-
-
-/**
  * struct sta_info - STA information
  *
  * This structure collects information about a station that
@@ -410,20 +427,20 @@
 {
 }
 
-#define for_each_sta_info(local, _addr, sta, nxt) 			\
+#define for_each_sta_info(local, _addr, _sta, nxt) 			\
 	for (	/* initialise loop */					\
-		sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
-		nxt = sta ? rcu_dereference(sta->hnext) : NULL;		\
+		_sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
+		nxt = _sta ? rcu_dereference(_sta->hnext) : NULL;	\
 		/* typecheck */						\
-		for_each_sta_info_type_check(local, (_addr), sta, nxt),	\
+		for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
 		/* continue condition */				\
-		sta;							\
+		_sta;							\
 		/* advance loop */					\
-		sta = nxt,						\
-		nxt = sta ? rcu_dereference(sta->hnext) : NULL		\
+		_sta = nxt,						\
+		nxt = _sta ? rcu_dereference(_sta->hnext) : NULL	\
 	     )								\
 	/* compare address and run code only if it matches */		\
-	if (memcmp(sta->sta.addr, (_addr), ETH_ALEN) == 0)
+	if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0)
 
 /*
  * Get STA info by index, BROKEN!
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 94613af..10caec5 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -47,7 +47,7 @@
 	/*
 	 * This skb 'survived' a round-trip through the driver, and
 	 * hopefully the driver didn't mangle it too badly. However,
-	 * we can definitely not rely on the the control information
+	 * we can definitely not rely on the control information
 	 * being correct. Clear it so we don't get junk there, and
 	 * indicate that it needs new processing, but must not be
 	 * modified/encrypted again.
@@ -377,7 +377,7 @@
 				skb2 = skb_clone(skb, GFP_ATOMIC);
 				if (skb2) {
 					skb2->dev = prev_dev;
-					netif_rx(skb2);
+					netif_receive_skb(skb2);
 				}
 			}
 
@@ -386,7 +386,7 @@
 	}
 	if (prev_dev) {
 		skb->dev = prev_dev;
-		netif_rx(skb);
+		netif_receive_skb(skb);
 		skb = NULL;
 	}
 	rcu_read_unlock();
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 680bcb7..698d471 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1092,6 +1092,59 @@
 	return true;
 }
 
+static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
+				  struct sk_buff *skb,
+				  struct ieee80211_tx_info *info,
+				  struct tid_ampdu_tx *tid_tx,
+				  int tid)
+{
+	bool queued = false;
+
+	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+		info->flags |= IEEE80211_TX_CTL_AMPDU;
+	} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
+		/*
+		 * nothing -- this aggregation session is being started
+		 * but that might still fail with the driver
+		 */
+	} else {
+		spin_lock(&tx->sta->lock);
+		/*
+		 * Need to re-check now, because we may get here
+		 *
+		 *  1) in the window during which the setup is actually
+		 *     already done, but not marked yet because not all
+		 *     packets are spliced over to the driver pending
+		 *     queue yet -- if this happened we acquire the lock
+		 *     either before or after the splice happens, but
+		 *     need to recheck which of these cases happened.
+		 *
+		 *  2) during session teardown, if the OPERATIONAL bit
+		 *     was cleared due to the teardown but the pointer
+		 *     hasn't been assigned NULL yet (or we loaded it
+		 *     before it was assigned) -- in this case it may
+		 *     now be NULL which means we should just let the
+		 *     packet pass through because splicing the frames
+		 *     back is already done.
+		 */
+		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
+
+		if (!tid_tx) {
+			/* do nothing, let packet pass through */
+		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+			info->flags |= IEEE80211_TX_CTL_AMPDU;
+		} else {
+			queued = true;
+			info->control.vif = &tx->sdata->vif;
+			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+			__skb_queue_tail(&tid_tx->pending, skb);
+		}
+		spin_unlock(&tx->sta->lock);
+	}
+
+	return queued;
+}
+
 /*
  * initialises @tx
  */
@@ -1104,8 +1157,7 @@
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	int hdrlen, tid;
-	u8 *qc, *state;
-	bool queued = false;
+	u8 *qc;
 
 	memset(tx, 0, sizeof(*tx));
 	tx->skb = skb;
@@ -1157,35 +1209,16 @@
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 
-		spin_lock(&tx->sta->lock);
-		/*
-		 * XXX: This spinlock could be fairly expensive, but see the
-		 *	comment in agg-tx.c:ieee80211_agg_tx_operational().
-		 *	One way to solve this would be to do something RCU-like
-		 *	for managing the tid_tx struct and using atomic bitops
-		 *	for the actual state -- by introducing an actual
-		 *	'operational' bit that would be possible. It would
-		 *	require changing ieee80211_agg_tx_operational() to
-		 *	set that bit, and changing the way tid_tx is managed
-		 *	everywhere, including races between that bit and
-		 *	tid_tx going away (tid_tx being added can be easily
-		 *	committed to memory before the 'operational' bit).
-		 */
-		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
-		state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
-		if (*state == HT_AGG_STATE_OPERATIONAL) {
-			info->flags |= IEEE80211_TX_CTL_AMPDU;
-		} else if (*state != HT_AGG_STATE_IDLE) {
-			/* in progress */
-			queued = true;
-			info->control.vif = &sdata->vif;
-			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-			__skb_queue_tail(&tid_tx->pending, skb);
-		}
-		spin_unlock(&tx->sta->lock);
+		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+		if (tid_tx) {
+			bool queued;
 
-		if (unlikely(queued))
-			return TX_QUEUED;
+			queued = ieee80211_tx_prep_agg(tx, skb, info,
+						       tid_tx, tid);
+
+			if (unlikely(queued))
+				return TX_QUEUED;
+		}
 	}
 
 	if (is_multicast_ether_addr(hdr->addr1)) {
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 5b79d55..a54cf14 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1138,18 +1138,6 @@
 	}
 	mutex_unlock(&local->sta_mtx);
 
-	/* Clear Suspend state so that ADDBA requests can be processed */
-
-	rcu_read_lock();
-
-	if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
-		list_for_each_entry_rcu(sta, &local->sta_list, list) {
-			clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
-		}
-	}
-
-	rcu_read_unlock();
-
 	/* setup RTS threshold */
 	drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
 
@@ -1202,13 +1190,26 @@
 		}
 	}
 
-	rcu_read_lock();
+	/*
+	 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
+	 * sessions can be established after a resume.
+	 *
+	 * Also tear down aggregation sessions since reconfiguring
+	 * them in a hardware restart scenario is not easily done
+	 * right now, and the hardware will have lost information
+	 * about the sessions, but we and the AP still think they
+	 * are active. This is really a workaround though.
+	 */
 	if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
-		list_for_each_entry_rcu(sta, &local->sta_list, list) {
+		mutex_lock(&local->sta_mtx);
+
+		list_for_each_entry(sta, &local->sta_list, list) {
 			ieee80211_sta_tear_down_BA_sessions(sta);
+			clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
 		}
+
+		mutex_unlock(&local->sta_mtx);
 	}
-	rcu_read_unlock();
 
 	/* add back keys */
 	list_for_each_entry(sdata, &local->interfaces, list)
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index b025dc7..c22a71c 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -840,7 +840,7 @@
 
 	/*
 	 * ieee80211_queue_work() should have picked up most cases,
-	 * here we'll pick the the rest.
+	 * here we'll pick the rest.
 	 */
 	if (WARN(local->suspended, "work scheduled while going to suspend\n"))
 		return;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 0adbcc9..a14e677 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -436,6 +436,7 @@
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	u8 pn[CCMP_PN_LEN];
 	int data_len;
+	int queue;
 
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
@@ -453,7 +454,10 @@
 
 	ccmp_hdr2pn(pn, skb->data + hdrlen);
 
-	if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) {
+	queue = ieee80211_is_mgmt(hdr->frame_control) ?
+		NUM_RX_DATA_QUEUES : rx->queue;
+
+	if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) {
 		key->u.ccmp.replays++;
 		return RX_DROP_UNUSABLE;
 	}
@@ -470,7 +474,7 @@
 			return RX_DROP_UNUSABLE;
 	}
 
-	memcpy(key->u.ccmp.rx_pn[rx->queue], pn, CCMP_PN_LEN);
+	memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN);
 
 	/* Remove CCMP header and MIC */
 	skb_trim(skb, skb->len - CCMP_MIC_LEN);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8593a77..aa2f106 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -40,27 +40,6 @@
 
 if NF_CONNTRACK
 
-config NF_CT_ACCT
-	bool "Connection tracking flow accounting"
-	depends on NETFILTER_ADVANCED
-	help
-	  If this option is enabled, the connection tracking code will
-	  keep per-flow packet and byte counters.
-
-	  Those counters can be used for flow-based accounting or the
-	  `connbytes' match.
-
-	  Please note that currently this option only sets a default state.
-	  You may change it at boot time with nf_conntrack.acct=0/1 kernel
-	  parameter or by loading the nf_conntrack module with acct=0/1.
-
-	  You may also disable/enable it on a running system with:
-	   sysctl net.netfilter.nf_conntrack_acct=0/1
-
-	  This option will be removed in 2.6.29.
-
-	  If unsure, say `N'.
-
 config NF_CONNTRACK_MARK
 	bool  'Connection mark tracking support'
 	depends on NETFILTER_ADVANCED
@@ -424,6 +403,18 @@
 	since you can easily create immortal packets that loop
 	forever on the network.
 
+config NETFILTER_XT_TARGET_IDLETIMER
+	tristate  "IDLETIMER target support"
+	depends on NETFILTER_ADVANCED
+	help
+
+	  This option adds the `IDLETIMER' target.  Each matching packet
+	  resets the timer associated with label specified when the rule is
+	  resets the timer associated with the label specified when the rule is
+	  The remaining time for expiration can be read via sysfs.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
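As a usage illustration (not part of the patch): once a rule has been installed with the userspace IDLETIMER target extension under some label -- "uplink" below is purely an example -- the remaining idle time can be read from the sysfs attribute this target creates; the /sys/class/xt_idletimer/timers/<label> path follows the class and device names in the xt_IDLETIMER.c code added later in this patch.

	/* read_idletimer.c -- print seconds left for an IDLETIMER label (sketch) */
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		char path[256];
		unsigned int secs;
		FILE *f;

		/* the label is whatever was given when the rule was added */
		snprintf(path, sizeof(path), "/sys/class/xt_idletimer/timers/%s",
			 argc > 1 ? argv[1] : "uplink");

		f = fopen(path, "r");
		if (!f) {
			perror(path);
			return EXIT_FAILURE;
		}
		if (fscanf(f, "%u", &secs) == 1)
			printf("%u second(s) until idle\n", secs);
		fclose(f);
		return EXIT_SUCCESS;
	}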
 config NETFILTER_XT_TARGET_LED
 	tristate '"LED" target support'
 	depends on LEDS_CLASS && LEDS_TRIGGERS
@@ -503,7 +494,7 @@
 	  To compile it as a module, choose M here.  If unsure, say N.
 
 config NETFILTER_XT_TARGET_TEE
-	tristate '"TEE" - packet cloning to alternate destiantion'
+	tristate '"TEE" - packet cloning to alternate destination'
 	depends on NETFILTER_ADVANCED
 	depends on (IPV6 || IPV6=n)
 	depends on !NF_CONNTRACK || NF_CONNTRACK
@@ -618,7 +609,6 @@
 	tristate  '"connbytes" per-connection counter match support'
 	depends on NF_CONNTRACK
 	depends on NETFILTER_ADVANCED
-	select NF_CT_ACCT
 	help
 	  This option adds a `connbytes' match, which allows you to match the
 	  number of bytes and/or packets for each direction within a connection.
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 14e3a8f..e28420a 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -61,6 +61,7 @@
 obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
 
 # matches
 obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index ff04e9e..654544e 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -158,6 +158,9 @@
 	unsigned hash;
 	int ret;
 
+	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+		return 0;
+
 	/* Hash by protocol, client address and port */
 	hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
 
@@ -359,8 +362,9 @@
  */
 void ip_vs_conn_put(struct ip_vs_conn *cp)
 {
-	/* reset it expire in its timeout */
-	mod_timer(&cp->timer, jiffies+cp->timeout);
+	unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
+		0 : cp->timeout;
+	mod_timer(&cp->timer, jiffies+t);
 
 	__ip_vs_conn_put(cp);
 }
@@ -653,7 +657,7 @@
 	/*
 	 *	unhash it if it is hashed in the conn table
 	 */
-	if (!ip_vs_conn_unhash(cp))
+	if (!ip_vs_conn_unhash(cp) && !(cp->flags & IP_VS_CONN_F_ONE_PACKET))
 		goto expire_later;
 
 	/*
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1cd6e3f..50907d8 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -194,6 +194,7 @@
 	struct ip_vs_dest *dest;
 	struct ip_vs_conn *ct;
 	__be16  dport;			/* destination port to forward */
+	__be16  flags;
 	union nf_inet_addr snet;	/* source network of the client,
 					   after masking */
 
@@ -340,6 +341,10 @@
 		dport = ports[1];
 	}
 
+	flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
+		 && iph.protocol == IPPROTO_UDP)?
+		IP_VS_CONN_F_ONE_PACKET : 0;
+
 	/*
 	 *    Create a new connection according to the template
 	 */
@@ -347,7 +352,7 @@
 			    &iph.saddr, ports[0],
 			    &iph.daddr, ports[1],
 			    &dest->addr, dport,
-			    0,
+			    flags,
 			    dest);
 	if (cp == NULL) {
 		ip_vs_conn_put(ct);
@@ -377,7 +382,7 @@
 	struct ip_vs_conn *cp = NULL;
 	struct ip_vs_iphdr iph;
 	struct ip_vs_dest *dest;
-	__be16 _ports[2], *pptr;
+	__be16 _ports[2], *pptr, flags;
 
 	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
 	pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
@@ -407,6 +412,10 @@
 		return NULL;
 	}
 
+	flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
+		 && iph.protocol == IPPROTO_UDP)?
+		IP_VS_CONN_F_ONE_PACKET : 0;
+
 	/*
 	 *    Create a connection entry.
 	 */
@@ -414,7 +423,7 @@
 			    &iph.saddr, pptr[0],
 			    &iph.daddr, pptr[1],
 			    &dest->addr, dest->port ? dest->port : pptr[1],
-			    0,
+			    flags,
 			    dest);
 	if (cp == NULL)
 		return NULL;
@@ -464,6 +473,9 @@
 	if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
 		int ret, cs;
 		struct ip_vs_conn *cp;
+		__u16 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
+				iph.protocol == IPPROTO_UDP)?
+				IP_VS_CONN_F_ONE_PACKET : 0;
 		union nf_inet_addr daddr =  { .all = { 0, 0, 0, 0 } };
 
 		ip_vs_service_put(svc);
@@ -474,7 +486,7 @@
 				    &iph.saddr, pptr[0],
 				    &iph.daddr, pptr[1],
 				    &daddr, 0,
-				    IP_VS_CONN_F_BYPASS,
+				    IP_VS_CONN_F_BYPASS | flags,
 				    NULL);
 		if (cp == NULL)
 			return NF_DROP;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 36dc1d8..0f0c079 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1864,14 +1864,16 @@
 					   svc->scheduler->name);
 			else
 #endif
-				seq_printf(seq, "%s  %08X:%04X %s ",
+				seq_printf(seq, "%s  %08X:%04X %s %s ",
 					   ip_vs_proto_name(svc->protocol),
 					   ntohl(svc->addr.ip),
 					   ntohs(svc->port),
-					   svc->scheduler->name);
+					   svc->scheduler->name,
+					   (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
 		} else {
-			seq_printf(seq, "FWM  %08X %s ",
-				   svc->fwmark, svc->scheduler->name);
+			seq_printf(seq, "FWM  %08X %s %s",
+				   svc->fwmark, svc->scheduler->name,
+				   (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
 		}
 
 		if (svc->flags & IP_VS_SVC_F_PERSISTENT)
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 93c15a1..02b078e 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -90,10 +90,10 @@
 					     &dest->addr.ip);
 				return NULL;
 			}
-			__ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst));
+			__ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst));
 			IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
 				  &dest->addr.ip,
-				  atomic_read(&rt->u.dst.__refcnt), rtos);
+				  atomic_read(&rt->dst.__refcnt), rtos);
 		}
 		spin_unlock(&dest->dst_lock);
 	} else {
@@ -148,10 +148,10 @@
 					     &dest->addr.in6);
 				return NULL;
 			}
-			__ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst));
+			__ip_vs_dst_set(dest, 0, dst_clone(&rt->dst));
 			IP_VS_DBG(10, "new dst %pI6, refcnt=%d\n",
 				  &dest->addr.in6,
-				  atomic_read(&rt->u.dst.__refcnt));
+				  atomic_read(&rt->dst.__refcnt));
 		}
 		spin_unlock(&dest->dst_lock);
 	} else {
@@ -198,7 +198,7 @@
 	(skb)->ipvs_property = 1;			\
 	skb_forward_csum(skb);				\
 	NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,	\
-		(rt)->u.dst.dev, dst_output);		\
+		(rt)->dst.dev, dst_output);		\
 } while (0)
 
 
@@ -245,7 +245,7 @@
 	}
 
 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
 		ip_rt_put(rt);
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
@@ -265,7 +265,7 @@
 
 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -309,9 +309,9 @@
 	}
 
 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if (skb->len > mtu) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error;
@@ -323,13 +323,13 @@
 	 */
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(skb == NULL)) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		return NF_STOLEN;
 	}
 
 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -376,7 +376,7 @@
 		goto tx_error_icmp;
 
 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
 		ip_rt_put(rt);
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
@@ -388,12 +388,12 @@
 	if (!skb_make_writable(skb, sizeof(struct iphdr)))
 		goto tx_error_put;
 
-	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+	if (skb_cow(skb, rt->dst.dev->hard_header_len))
 		goto tx_error_put;
 
 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/* mangle the packet */
 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
@@ -452,9 +452,9 @@
 		goto tx_error_icmp;
 
 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if (skb->len > mtu) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		IP_VS_DBG_RL_PKT(0, pp, skb, 0,
 				 "ip_vs_nat_xmit_v6(): frag needed for");
@@ -465,12 +465,12 @@
 	if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
 		goto tx_error_put;
 
-	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+	if (skb_cow(skb, rt->dst.dev->hard_header_len))
 		goto tx_error_put;
 
 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/* mangle the packet */
 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
@@ -498,7 +498,7 @@
 	kfree_skb(skb);
 	return NF_STOLEN;
 tx_error_put:
-	dst_release(&rt->u.dst);
+	dst_release(&rt->dst);
 	goto tx_error;
 }
 #endif
@@ -549,9 +549,9 @@
 	if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos))))
 		goto tx_error_icmp;
 
-	tdev = rt->u.dst.dev;
+	tdev = rt->dst.dev;
 
-	mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
+	mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
 	if (mtu < 68) {
 		ip_rt_put(rt);
 		IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
@@ -601,7 +601,7 @@
 
 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/*
 	 *	Push down and install the IPIP header.
@@ -615,7 +615,7 @@
 	iph->daddr		=	rt->rt_dst;
 	iph->saddr		=	rt->rt_src;
 	iph->ttl		=	old_iph->ttl;
-	ip_select_ident(iph, &rt->u.dst, NULL);
+	ip_select_ident(iph, &rt->dst, NULL);
 
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -660,12 +660,12 @@
 	if (!rt)
 		goto tx_error_icmp;
 
-	tdev = rt->u.dst.dev;
+	tdev = rt->dst.dev;
 
-	mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr);
+	mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
 	/* TODO IPv6: do we need this check in IPv6? */
 	if (mtu < 1280) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__);
 		goto tx_error;
 	}
@@ -674,7 +674,7 @@
 
 	if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error;
 	}
@@ -689,7 +689,7 @@
 		struct sk_buff *new_skb =
 			skb_realloc_headroom(skb, max_headroom);
 		if (!new_skb) {
-			dst_release(&rt->u.dst);
+			dst_release(&rt->dst);
 			kfree_skb(skb);
 			IP_VS_ERR_RL("%s(): no memory\n", __func__);
 			return NF_STOLEN;
@@ -707,7 +707,7 @@
 
 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/*
 	 *	Push down and install the IPIP header.
@@ -760,7 +760,7 @@
 		goto tx_error_icmp;
 
 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
 		ip_rt_put(rt);
@@ -780,7 +780,7 @@
 
 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -813,10 +813,10 @@
 		goto tx_error_icmp;
 
 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if (skb->len > mtu) {
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error;
 	}
@@ -827,13 +827,13 @@
 	 */
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(skb == NULL)) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		return NF_STOLEN;
 	}
 
 	/* drop old route */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
@@ -888,7 +888,7 @@
 		goto tx_error_icmp;
 
 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
 		ip_rt_put(rt);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
@@ -900,12 +900,12 @@
 	if (!skb_make_writable(skb, offset))
 		goto tx_error_put;
 
-	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+	if (skb_cow(skb, rt->dst.dev->hard_header_len))
 		goto tx_error_put;
 
 	/* drop the old route when skb is not shared */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	ip_vs_nat_icmp(skb, pp, cp, 0);
 
@@ -963,9 +963,9 @@
 		goto tx_error_icmp;
 
 	/* MTU checking */
-	mtu = dst_mtu(&rt->u.dst);
+	mtu = dst_mtu(&rt->dst);
 	if (skb->len > mtu) {
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error;
@@ -975,12 +975,12 @@
 	if (!skb_make_writable(skb, offset))
 		goto tx_error_put;
 
-	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+	if (skb_cow(skb, rt->dst.dev->hard_header_len))
 		goto tx_error_put;
 
 	/* drop the old route when skb is not shared */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	ip_vs_nat_icmp_v6(skb, pp, cp, 0);
 
@@ -1001,7 +1001,7 @@
 	LeaveFunction(10);
 	return rc;
 tx_error_put:
-	dst_release(&rt->u.dst);
+	dst_release(&rt->dst);
 	goto tx_error;
 }
 #endif
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index ab81b38..5178c69 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -17,13 +17,7 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 
-#ifdef CONFIG_NF_CT_ACCT
-#define NF_CT_ACCT_DEFAULT 1
-#else
-#define NF_CT_ACCT_DEFAULT 0
-#endif
-
-static int nf_ct_acct __read_mostly = NF_CT_ACCT_DEFAULT;
+static int nf_ct_acct __read_mostly;
 
 module_param_named(acct, nf_ct_acct, bool, 0644);
 MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting.");
@@ -114,12 +108,6 @@
 	net->ct.sysctl_acct = nf_ct_acct;
 
 	if (net_eq(net, &init_net)) {
-#ifdef CONFIG_NF_CT_ACCT
-	printk(KERN_WARNING "CONFIG_NF_CT_ACCT is deprecated and will be removed soon. Please use\n");
-		printk(KERN_WARNING "nf_conntrack.acct=1 kernel parameter, acct=1 nf_conntrack module option or\n");
-		printk(KERN_WARNING "sysctl net.netfilter.nf_conntrack_acct=1 to enable it.\n");
-#endif
-
 		ret = nf_ct_extend_register(&acct_extend);
 		if (ret < 0) {
 			printk(KERN_ERR "nf_conntrack_acct: Unable to register extension\n");
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index eeeb8bc..16b41b4 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -62,8 +62,8 @@
 unsigned int nf_conntrack_max __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_max);
 
-struct nf_conn nf_conntrack_untracked __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
+DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
 static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;
@@ -619,9 +619,7 @@
 	ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
 	/* Don't set timer yet: wait for confirmation */
 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
-#ifdef CONFIG_NET_NS
-	ct->ct_net = net;
-#endif
+	write_pnet(&ct->ct_net, net);
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	if (zone) {
 		struct nf_conntrack_zone *nf_ct_zone;
@@ -1183,10 +1181,21 @@
 	spin_unlock_bh(&nf_conntrack_lock);
 }
 
+static int untrack_refs(void)
+{
+	int cnt = 0, cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
+
+		cnt += atomic_read(&ct->ct_general.use) - 1;
+	}
+	return cnt;
+}
+
 static void nf_conntrack_cleanup_init_net(void)
 {
-	/* wait until all references to nf_conntrack_untracked are dropped */
-	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+	while (untrack_refs() > 0)
 		schedule();
 
 	nf_conntrack_helper_fini();
@@ -1321,10 +1330,19 @@
 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
 		  &nf_conntrack_htable_size, 0600);
 
+void nf_ct_untracked_status_or(unsigned long bits)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(nf_conntrack_untracked, cpu).status |= bits;
+}
+EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
+
 static int nf_conntrack_init_init_net(void)
 {
 	int max_factor = 8;
-	int ret;
+	int ret, cpu;
 
 	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
 	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
@@ -1363,13 +1381,13 @@
 		goto err_extend;
 #endif
 	/* Set up fake conntrack: to never be deleted, not in any hashes */
-#ifdef CONFIG_NET_NS
-	nf_conntrack_untracked.ct_net = &init_net;
-#endif
-	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+	for_each_possible_cpu(cpu) {
+		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
+		write_pnet(&ct->ct_net, &init_net);
+		atomic_set(&ct->ct_general.use, 1);
+	}
 	/*  - and look it like as a confirmed connection */
-	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
-
+	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
 	return 0;
 
 #ifdef CONFIG_NF_CONNTRACK_ZONES
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 6eaee7c..b969025 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -734,11 +734,11 @@
 		if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) {
 			if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
 				if (rt1->rt_gateway == rt2->rt_gateway &&
-				    rt1->u.dst.dev  == rt2->u.dst.dev)
+				    rt1->dst.dev  == rt2->dst.dev)
 					ret = 1;
-				dst_release(&rt2->u.dst);
+				dst_release(&rt2->dst);
 			}
-			dst_release(&rt1->u.dst);
+			dst_release(&rt1->dst);
 		}
 		break;
 	}
@@ -753,11 +753,11 @@
 			if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
 				if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
 					    sizeof(rt1->rt6i_gateway)) &&
-				    rt1->u.dst.dev == rt2->u.dst.dev)
+				    rt1->dst.dev == rt2->dst.dev)
 					ret = 1;
-				dst_release(&rt2->u.dst);
+				dst_release(&rt2->dst);
 			}
-			dst_release(&rt1->u.dst);
+			dst_release(&rt1->dst);
 		}
 		break;
 	}
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index 497b222..aadde01 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -61,7 +61,7 @@
 		goto out;
 
 	rcu_read_lock();
-	in_dev = __in_dev_get_rcu(rt->u.dst.dev);
+	in_dev = __in_dev_get_rcu(rt->dst.dev);
 	if (in_dev != NULL) {
 		for_primary_ifa(in_dev) {
 			if (ifa->ifa_broadcast == iph->daddr) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c42ff6a..5bae1cd 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -480,7 +480,7 @@
 	int err;
 
 	/* ignore our fake conntrack entry */
-	if (ct == &nf_conntrack_untracked)
+	if (nf_ct_is_untracked(ct))
 		return 0;
 
 	if (events & (1 << IPCT_DESTROY)) {
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 9dd8cd4..802dbff 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -736,27 +736,19 @@
 	return res;
 }
 
-#define	TH_FIN	0x01
-#define	TH_SYN	0x02
-#define	TH_RST	0x04
-#define	TH_PUSH	0x08
-#define	TH_ACK	0x10
-#define	TH_URG	0x20
-#define	TH_ECE	0x40
-#define	TH_CWR	0x80
-
 /* table of valid flag combinations - PUSH, ECE and CWR are always valid */
-static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
+static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
+				 TCPHDR_URG) + 1] =
 {
-	[TH_SYN]			= 1,
-	[TH_SYN|TH_URG]			= 1,
-	[TH_SYN|TH_ACK]			= 1,
-	[TH_RST]			= 1,
-	[TH_RST|TH_ACK]			= 1,
-	[TH_FIN|TH_ACK]			= 1,
-	[TH_FIN|TH_ACK|TH_URG]		= 1,
-	[TH_ACK]			= 1,
-	[TH_ACK|TH_URG]			= 1,
+	[TCPHDR_SYN]				= 1,
+	[TCPHDR_SYN|TCPHDR_URG]			= 1,
+	[TCPHDR_SYN|TCPHDR_ACK]			= 1,
+	[TCPHDR_RST]				= 1,
+	[TCPHDR_RST|TCPHDR_ACK]			= 1,
+	[TCPHDR_FIN|TCPHDR_ACK]			= 1,
+	[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]	= 1,
+	[TCPHDR_ACK]				= 1,
+	[TCPHDR_ACK|TCPHDR_URG]			= 1,
 };
 
 /* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c.  */
@@ -803,7 +795,7 @@
 	}
 
 	/* Check TCP flags. */
-	tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR|TH_PUSH));
+	tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
 	if (!tcp_valid_flags[tcpflags]) {
 		if (LOG_INVALID(net, IPPROTO_TCP))
 			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fc9a211..6a1572b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -66,9 +66,10 @@
 	u_int16_t group_num;		/* number of this queue */
 	u_int16_t flags;
 	u_int8_t copy_mode;
+	struct rcu_head rcu;
 };
 
-static DEFINE_RWLOCK(instances_lock);
+static DEFINE_SPINLOCK(instances_lock);
 static atomic_t global_seq;
 
 #define INSTANCE_BUCKETS	16
@@ -88,7 +89,7 @@
 	struct nfulnl_instance *inst;
 
 	head = &instance_table[instance_hashfn(group_num)];
-	hlist_for_each_entry(inst, pos, head, hlist) {
+	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
 		if (inst->group_num == group_num)
 			return inst;
 	}
@@ -106,22 +107,26 @@
 {
 	struct nfulnl_instance *inst;
 
-	read_lock_bh(&instances_lock);
+	rcu_read_lock_bh();
 	inst = __instance_lookup(group_num);
-	if (inst)
-		instance_get(inst);
-	read_unlock_bh(&instances_lock);
+	if (inst && !atomic_inc_not_zero(&inst->use))
+		inst = NULL;
+	rcu_read_unlock_bh();
 
 	return inst;
 }
 
+static void nfulnl_instance_free_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct nfulnl_instance, rcu));
+	module_put(THIS_MODULE);
+}
+
 static void
 instance_put(struct nfulnl_instance *inst)
 {
-	if (inst && atomic_dec_and_test(&inst->use)) {
-		kfree(inst);
-		module_put(THIS_MODULE);
-	}
+	if (inst && atomic_dec_and_test(&inst->use))
+		call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
 }
 
 static void nfulnl_timer(unsigned long data);
@@ -132,7 +137,7 @@
 	struct nfulnl_instance *inst;
 	int err;
 
-	write_lock_bh(&instances_lock);
+	spin_lock_bh(&instances_lock);
 	if (__instance_lookup(group_num)) {
 		err = -EEXIST;
 		goto out_unlock;
@@ -166,32 +171,37 @@
 	inst->copy_mode 	= NFULNL_COPY_PACKET;
 	inst->copy_range 	= NFULNL_COPY_RANGE_MAX;
 
-	hlist_add_head(&inst->hlist,
+	hlist_add_head_rcu(&inst->hlist,
 		       &instance_table[instance_hashfn(group_num)]);
 
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 
 	return inst;
 
 out_unlock:
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 	return ERR_PTR(err);
 }
 
 static void __nfulnl_flush(struct nfulnl_instance *inst);
 
+/* called with BH disabled */
 static void
 __instance_destroy(struct nfulnl_instance *inst)
 {
 	/* first pull it out of the global list */
-	hlist_del(&inst->hlist);
+	hlist_del_rcu(&inst->hlist);
 
 	/* then flush all pending packets from skb */
 
-	spin_lock_bh(&inst->lock);
+	spin_lock(&inst->lock);
+
+	/* lockless readers won't be able to use us */
+	inst->copy_mode = NFULNL_COPY_DISABLED;
+
 	if (inst->skb)
 		__nfulnl_flush(inst);
-	spin_unlock_bh(&inst->lock);
+	spin_unlock(&inst->lock);
 
 	/* and finally put the refcount */
 	instance_put(inst);
@@ -200,9 +210,9 @@
 static inline void
 instance_destroy(struct nfulnl_instance *inst)
 {
-	write_lock_bh(&instances_lock);
+	spin_lock_bh(&instances_lock);
 	__instance_destroy(inst);
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 }
 
 static int
@@ -403,8 +413,9 @@
 			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
 				     htonl(indev->ifindex));
 			/* this is the bridge group "brX" */
+			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
 			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
-				     htonl(indev->br_port->br->dev->ifindex));
+				     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
 		} else {
 			/* Case 2: indev is bridge group, we need to look for
 			 * physical device (when called from ipv4) */
@@ -430,8 +441,9 @@
 			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
 				     htonl(outdev->ifindex));
 			/* this is the bridge group "brX" */
+			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
 			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
-				     htonl(outdev->br_port->br->dev->ifindex));
+				     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
 		} else {
 			/* Case 2: indev is a bridge group, we need to look
 			 * for physical device (when called from ipv4) */
@@ -619,6 +631,7 @@
 		size += nla_total_size(data_len);
 		break;
 
+	case NFULNL_COPY_DISABLED:
 	default:
 		goto unlock_and_release;
 	}
@@ -672,7 +685,7 @@
 		int i;
 
 		/* destroy all instances for this pid */
-		write_lock_bh(&instances_lock);
+		spin_lock_bh(&instances_lock);
 		for  (i = 0; i < INSTANCE_BUCKETS; i++) {
 			struct hlist_node *tmp, *t2;
 			struct nfulnl_instance *inst;
@@ -684,7 +697,7 @@
 					__instance_destroy(inst);
 			}
 		}
-		write_unlock_bh(&instances_lock);
+		spin_unlock_bh(&instances_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -861,19 +874,19 @@
 
 	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
 		if (!hlist_empty(&instance_table[st->bucket]))
-			return instance_table[st->bucket].first;
+			return rcu_dereference_bh(instance_table[st->bucket].first);
 	}
 	return NULL;
 }
 
 static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
 {
-	h = h->next;
+	h = rcu_dereference_bh(h->next);
 	while (!h) {
 		if (++st->bucket >= INSTANCE_BUCKETS)
 			return NULL;
 
-		h = instance_table[st->bucket].first;
+		h = rcu_dereference_bh(instance_table[st->bucket].first);
 	}
 	return h;
 }
@@ -890,9 +903,9 @@
 }
 
 static void *seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(instances_lock)
+	__acquires(rcu_bh)
 {
-	read_lock_bh(&instances_lock);
+	rcu_read_lock_bh();
 	return get_idx(seq->private, *pos);
 }
 
@@ -903,9 +916,9 @@
 }
 
 static void seq_stop(struct seq_file *s, void *v)
-	__releases(instances_lock)
+	__releases(rcu_bh)
 {
-	read_unlock_bh(&instances_lock);
+	rcu_read_unlock_bh();
 }
 
 static int seq_show(struct seq_file *s, void *v)
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 12e1ab3..68e67d1 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -46,17 +46,19 @@
 	int peer_pid;
 	unsigned int queue_maxlen;
 	unsigned int copy_range;
-	unsigned int queue_total;
 	unsigned int queue_dropped;
 	unsigned int queue_user_dropped;
 
-	unsigned int id_sequence;		/* 'sequence' of pkt ids */
 
 	u_int16_t queue_num;			/* number of this queue */
 	u_int8_t copy_mode;
-
-	spinlock_t lock;
-
+/*
+ * The following fields are dirtied for each queued packet;
+ * keep them in the same cache line if possible.
+ */
+	spinlock_t	lock;
+	unsigned int	queue_total;
+	atomic_t	id_sequence;		/* 'sequence' of pkt ids */
 	struct list_head queue_list;		/* packets in queue */
 };
 
@@ -238,32 +240,24 @@
 
 	outdev = entry->outdev;
 
-	spin_lock_bh(&queue->lock);
-
-	switch ((enum nfqnl_config_mode)queue->copy_mode) {
+	switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
 	case NFQNL_COPY_META:
 	case NFQNL_COPY_NONE:
 		break;
 
 	case NFQNL_COPY_PACKET:
 		if (entskb->ip_summed == CHECKSUM_PARTIAL &&
-		    skb_checksum_help(entskb)) {
-			spin_unlock_bh(&queue->lock);
+		    skb_checksum_help(entskb))
 			return NULL;
-		}
-		if (queue->copy_range == 0
-		    || queue->copy_range > entskb->len)
+
+		data_len = ACCESS_ONCE(queue->copy_range);
+		if (data_len == 0 || data_len > entskb->len)
 			data_len = entskb->len;
-		else
-			data_len = queue->copy_range;
 
 		size += nla_total_size(data_len);
 		break;
 	}
 
-	entry->id = queue->id_sequence++;
-
-	spin_unlock_bh(&queue->lock);
 
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb)
@@ -278,6 +272,7 @@
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = htons(queue->queue_num);
 
+	entry->id = atomic_inc_return(&queue->id_sequence);
 	pmsg.packet_id 		= htonl(entry->id);
 	pmsg.hw_protocol	= entskb->protocol;
 	pmsg.hook		= entry->hook;
@@ -296,8 +291,9 @@
 			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
 				     htonl(indev->ifindex));
 			/* this is the bridge group "brX" */
+			/* rcu_read_lock()ed by __nf_queue */
 			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
-				     htonl(indev->br_port->br->dev->ifindex));
+				     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
 		} else {
 			/* Case 2: indev is bridge group, we need to look for
 			 * physical device (when called from ipv4) */
@@ -321,8 +317,9 @@
 			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
 				     htonl(outdev->ifindex));
 			/* this is the bridge group "brX" */
+			/* rcu_read_lock()ed by __nf_queue */
 			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
-				     htonl(outdev->br_port->br->dev->ifindex));
+				     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
 		} else {
 			/* Case 2: outdev is bridge group, we need to look for
 			 * physical output device (when called from ipv4) */
@@ -866,7 +863,7 @@
 			  inst->peer_pid, inst->queue_total,
 			  inst->copy_mode, inst->copy_range,
 			  inst->queue_dropped, inst->queue_user_dropped,
-			  inst->id_sequence, 1);
+			  atomic_read(&inst->id_sequence), 1);
 }
 
 static const struct seq_operations nfqnl_seq_ops = {
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 562bf32..0cb6053 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -67,7 +67,7 @@
 		return -EINVAL;
 
 	if (info->flags & XT_CT_NOTRACK) {
-		ct = &nf_conntrack_untracked;
+		ct = nf_ct_untracked_get();
 		atomic_inc(&ct->ct_general.use);
 		goto out;
 	}
@@ -132,7 +132,7 @@
 	struct nf_conn *ct = info->ct;
 	struct nf_conn_help *help;
 
-	if (ct != &nf_conntrack_untracked) {
+	if (!nf_ct_is_untracked(ct)) {
 		help = nfct_help(ct);
 		if (help)
 			module_put(help->helper->me);
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
new file mode 100644
index 0000000..be1f22e
--- /dev/null
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -0,0 +1,315 @@
+/*
+ * linux/net/netfilter/xt_IDLETIMER.c
+ *
+ * Netfilter module to trigger a timer when a packet matches.
+ * After the timer expires, a kevent will be sent.
+ *
+ * Copyright (C) 2004, 2010 Nokia Corporation
+ * Written by Timo Teras <ext-timo.teras@nokia.com>
+ *
+ * Converted to x_tables and reworked for upstream inclusion
+ * by Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_IDLETIMER.h>
+#include <linux/kdev_t.h>
+#include <linux/kobject.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+
+struct idletimer_tg_attr {
+	struct attribute attr;
+	ssize_t	(*show)(struct kobject *kobj,
+			struct attribute *attr, char *buf);
+};
+
+struct idletimer_tg {
+	struct list_head entry;
+	struct timer_list timer;
+	struct work_struct work;
+
+	struct kobject *kobj;
+	struct idletimer_tg_attr attr;
+
+	unsigned int refcnt;
+};
+
+static LIST_HEAD(idletimer_tg_list);
+static DEFINE_MUTEX(list_mutex);
+
+static struct kobject *idletimer_tg_kobj;
+
+static
+struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
+{
+	struct idletimer_tg *entry;
+
+	BUG_ON(!label);
+
+	list_for_each_entry(entry, &idletimer_tg_list, entry) {
+		if (!strcmp(label, entry->attr.attr.name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
+				 char *buf)
+{
+	struct idletimer_tg *timer;
+	unsigned long expires = 0;
+
+	mutex_lock(&list_mutex);
+
+	timer =	__idletimer_tg_find_by_label(attr->name);
+	if (timer)
+		expires = timer->timer.expires;
+
+	mutex_unlock(&list_mutex);
+
+	if (time_after(expires, jiffies))
+		return sprintf(buf, "%u\n",
+			       jiffies_to_msecs(expires - jiffies) / 1000);
+
+	return sprintf(buf, "0\n");
+}
+
+static void idletimer_tg_work(struct work_struct *work)
+{
+	struct idletimer_tg *timer = container_of(work, struct idletimer_tg,
+						  work);
+
+	sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+}
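sysfs_notify() above is what wakes userspace pollers on the label's attribute when the timer fires; below is a hedged sketch of a process waiting for that notification, using the same assumed /sys/class/xt_idletimer/timers/uplink path and example label as above:

	/* wait_idle.c -- block until the IDLETIMER label expires (sketch) */
	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct pollfd pfd;
		char buf[32];
		ssize_t n;

		pfd.fd = open("/sys/class/xt_idletimer/timers/uplink", O_RDONLY);
		if (pfd.fd < 0) {
			perror("open");
			return 1;
		}
		/* a sysfs attribute must be read once before poll() is useful */
		n = read(pfd.fd, buf, sizeof(buf));
		(void)n;

		pfd.events = POLLPRI | POLLERR;
		if (poll(&pfd, 1, -1) > 0) {
			n = pread(pfd.fd, buf, sizeof(buf) - 1, 0);
			if (n > 0) {
				buf[n] = '\0';
				printf("idle notification, value now: %s", buf);
			}
		}
		close(pfd.fd);
		return 0;
	}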
+
+static void idletimer_tg_expired(unsigned long data)
+{
+	struct idletimer_tg *timer = (struct idletimer_tg *) data;
+
+	pr_debug("timer %s expired\n", timer->attr.attr.name);
+
+	schedule_work(&timer->work);
+}
+
+static int idletimer_tg_create(struct idletimer_tg_info *info)
+{
+	int ret;
+
+	info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+	if (!info->timer) {
+		pr_debug("couldn't alloc timer\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
+	if (!info->timer->attr.attr.name) {
+		pr_debug("couldn't alloc attribute name\n");
+		ret = -ENOMEM;
+		goto out_free_timer;
+	}
+	info->timer->attr.attr.mode = S_IRUGO;
+	info->timer->attr.show = idletimer_tg_show;
+
+	ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
+	if (ret < 0) {
+		pr_debug("couldn't add file to sysfs");
+		goto out_free_attr;
+	}
+
+	list_add(&info->timer->entry, &idletimer_tg_list);
+
+	setup_timer(&info->timer->timer, idletimer_tg_expired,
+		    (unsigned long) info->timer);
+	info->timer->refcnt = 1;
+
+	mod_timer(&info->timer->timer,
+		  msecs_to_jiffies(info->timeout * 1000) + jiffies);
+
+	INIT_WORK(&info->timer->work, idletimer_tg_work);
+
+	return 0;
+
+out_free_attr:
+	kfree(info->timer->attr.attr.name);
+out_free_timer:
+	kfree(info->timer);
+out:
+	return ret;
+}
+
+/*
+ * The actual xt_tables plugin.
+ */
+static unsigned int idletimer_tg_target(struct sk_buff *skb,
+					 const struct xt_action_param *par)
+{
+	const struct idletimer_tg_info *info = par->targinfo;
+
+	pr_debug("resetting timer %s, timeout period %u\n",
+		 info->label, info->timeout);
+
+	BUG_ON(!info->timer);
+
+	mod_timer(&info->timer->timer,
+		  msecs_to_jiffies(info->timeout * 1000) + jiffies);
+
+	return XT_CONTINUE;
+}
+
+static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
+{
+	struct idletimer_tg_info *info = par->targinfo;
+	int ret;
+
+	pr_debug("checkentry targinfo %s\n", info->label);
+
+	if (info->timeout == 0) {
+		pr_debug("timeout value is zero\n");
+		return -EINVAL;
+	}
+
+	if (info->label[0] == '\0' ||
+	    strnlen(info->label,
+		    MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
+		pr_debug("label is empty or not nul-terminated\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&list_mutex);
+
+	info->timer = __idletimer_tg_find_by_label(info->label);
+	if (info->timer) {
+		info->timer->refcnt++;
+		mod_timer(&info->timer->timer,
+			  msecs_to_jiffies(info->timeout * 1000) + jiffies);
+
+		pr_debug("increased refcnt of timer %s to %u\n",
+			 info->label, info->timer->refcnt);
+	} else {
+		ret = idletimer_tg_create(info);
+		if (ret < 0) {
+			pr_debug("failed to create timer\n");
+			mutex_unlock(&list_mutex);
+			return ret;
+		}
+	}
+
+	mutex_unlock(&list_mutex);
+	return 0;
+}
+
+static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
+{
+	const struct idletimer_tg_info *info = par->targinfo;
+
+	pr_debug("destroy targinfo %s\n", info->label);
+
+	mutex_lock(&list_mutex);
+
+	if (--info->timer->refcnt == 0) {
+		pr_debug("deleting timer %s\n", info->label);
+
+		list_del(&info->timer->entry);
+		del_timer_sync(&info->timer->timer);
+		sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+		kfree(info->timer->attr.attr.name);
+		kfree(info->timer);
+	} else {
+		pr_debug("decreased refcnt of timer %s to %u\n",
+			 info->label, info->timer->refcnt);
+	}
+
+	mutex_unlock(&list_mutex);
+}
+
+static struct xt_target idletimer_tg __read_mostly = {
+	.name		= "IDLETIMER",
+	.family		= NFPROTO_UNSPEC,
+	.target		= idletimer_tg_target,
+	.targetsize     = sizeof(struct idletimer_tg_info),
+	.checkentry	= idletimer_tg_checkentry,
+	.destroy        = idletimer_tg_destroy,
+	.me		= THIS_MODULE,
+};
+
+static struct class *idletimer_tg_class;
+
+static struct device *idletimer_tg_device;
+
+static int __init idletimer_tg_init(void)
+{
+	int err;
+
+	idletimer_tg_class = class_create(THIS_MODULE, "xt_idletimer");
+	err = PTR_ERR(idletimer_tg_class);
+	if (IS_ERR(idletimer_tg_class)) {
+		pr_debug("couldn't register device class\n");
+		goto out;
+	}
+
+	idletimer_tg_device = device_create(idletimer_tg_class, NULL,
+					    MKDEV(0, 0), NULL, "timers");
+	err = PTR_ERR(idletimer_tg_device);
+	if (IS_ERR(idletimer_tg_device)) {
+		pr_debug("couldn't register system device\n");
+		goto out_class;
+	}
+
+	idletimer_tg_kobj = &idletimer_tg_device->kobj;
+
+	err =  xt_register_target(&idletimer_tg);
+	if (err < 0) {
+		pr_debug("couldn't register xt target\n");
+		goto out_dev;
+	}
+
+	return 0;
+out_dev:
+	device_destroy(idletimer_tg_class, MKDEV(0, 0));
+out_class:
+	class_destroy(idletimer_tg_class);
+out:
+	return err;
+}
+
+static void __exit idletimer_tg_exit(void)
+{
+	xt_unregister_target(&idletimer_tg);
+
+	device_destroy(idletimer_tg_class, MKDEV(0, 0));
+	class_destroy(idletimer_tg_class);
+}
+
+module_init(idletimer_tg_init);
+module_exit(idletimer_tg_exit);
+
+MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_DESCRIPTION("Xtables: idle time monitor");
+MODULE_LICENSE("GPL v2");
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
index 512b912..9d78218 100644
--- a/net/netfilter/xt_NOTRACK.c
+++ b/net/netfilter/xt_NOTRACK.c
@@ -23,7 +23,7 @@
 	   If there is a real ct entry correspondig to this packet,
 	   it'll hang aroun till timing out. We don't deal with it
 	   for performance reasons. JK */
-	skb->nfct = &nf_conntrack_untracked.ct_general;
+	skb->nfct = &nf_ct_untracked_get()->ct_general;
 	skb->nfctinfo = IP_CT_NEW;
 	nf_conntrack_get(skb->nfct);
 
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 69c01e1..de079abd 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -60,13 +60,22 @@
 }
 EXPORT_SYMBOL_GPL(xt_rateest_lookup);
 
+static void xt_rateest_free_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct xt_rateest, rcu));
+}
+
 void xt_rateest_put(struct xt_rateest *est)
 {
 	mutex_lock(&xt_rateest_mutex);
 	if (--est->refcnt == 0) {
 		hlist_del(&est->list);
 		gen_kill_estimator(&est->bstats, &est->rstats);
-		kfree(est);
+		/*
+		 * gen_estimator est_timer() might access est->lock or bstats,
+		 * wait an RCU grace period before freeing 'est'
+		 */
+		call_rcu(&est->rcu, xt_rateest_free_rcu);
 	}
 	mutex_unlock(&xt_rateest_mutex);
 }
@@ -179,6 +188,7 @@
 static void __exit xt_rateest_tg_fini(void)
 {
 	xt_unregister_target(&xt_rateest_tg_reg);
+	rcu_barrier(); /* Wait for completion of call_rcu()'s (xt_rateest_free_rcu) */
 }
 
 
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 62ec021..eb81c38 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -165,8 +165,8 @@
 	rcu_read_unlock();
 
 	if (rt != NULL) {
-		mtu = dst_mtu(&rt->u.dst);
-		dst_release(&rt->u.dst);
+		mtu = dst_mtu(&rt->dst);
+		dst_release(&rt->dst);
 	}
 	return mtu;
 }
@@ -220,15 +220,13 @@
 }
 #endif
 
-#define TH_SYN 0x02
-
 /* Must specify -p tcp --syn */
 static inline bool find_syn_match(const struct xt_entry_match *m)
 {
 	const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
 
 	if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
-	    tcpinfo->flg_cmp & TH_SYN &&
+	    tcpinfo->flg_cmp & TCPHDR_SYN &&
 	    !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
 		return true;
 
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 859d9fd..22a2d42 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -77,8 +77,8 @@
 		return false;
 
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
-	skb->dev      = rt->u.dst.dev;
+	skb_dst_set(skb, &rt->dst);
+	skb->dev      = rt->dst.dev;
 	skb->protocol = htons(ETH_P_IP);
 	return true;
 }
@@ -104,7 +104,7 @@
 #ifdef WITH_CONNTRACK
 	/* Avoid counting cloned packets towards the original connection. */
 	nf_conntrack_put(skb->nfct);
-	skb->nfct     = &nf_conntrack_untracked.ct_general;
+	skb->nfct     = &nf_ct_untracked_get()->ct_general;
 	skb->nfctinfo = IP_CT_NEW;
 	nf_conntrack_get(skb->nfct);
 #endif
@@ -177,7 +177,7 @@
 
 #ifdef WITH_CONNTRACK
 	nf_conntrack_put(skb->nfct);
-	skb->nfct     = &nf_conntrack_untracked.ct_general;
+	skb->nfct     = &nf_ct_untracked_get()->ct_general;
 	skb->nfctinfo = IP_CT_NEW;
 	nf_conntrack_get(skb->nfct);
 #endif
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 30b95a1..f4af1bf 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -120,7 +120,7 @@
 	if (ct == NULL)
 		return false;
 
-	if (ct == &nf_conntrack_untracked)
+	if (nf_ct_is_untracked(ct))
 		return false;
 
 	if (ct->master)
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 7351783..5b13850 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -112,6 +112,16 @@
 	if (ret < 0)
 		pr_info("cannot load conntrack support for proto=%u\n",
 			par->family);
+
+	/*
+	 * This filter cannot function correctly unless connection tracking
+	 * accounting is enabled, so complain in the hope that someone notices.
+	 */
+	if (!nf_ct_acct_enabled(par->net)) {
+		pr_warning("Forcing CT accounting to be enabled\n");
+		nf_ct_set_acct(par->net, true);
+	}
+
 	return ret;
 }
 
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 39681f1..e536710 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -123,11 +123,12 @@
 
 	ct = nf_ct_get(skb, &ctinfo);
 
-	if (ct == &nf_conntrack_untracked)
-		statebit = XT_CONNTRACK_STATE_UNTRACKED;
-	else if (ct != NULL)
-		statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
-	else
+	if (ct) {
+		if (nf_ct_is_untracked(ct))
+			statebit = XT_CONNTRACK_STATE_UNTRACKED;
+		else
+			statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
+	} else
 		statebit = XT_CONNTRACK_STATE_INVALID;
 
 	if (info->match_flags & XT_CONNTRACK_STATE) {
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index c04fcf3..ef36a56 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -3,6 +3,7 @@
 #include <linux/skbuff.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/sctp/sctp.h>
 #include <linux/sctp.h>
 
 #include <linux/netfilter/x_tables.h>
@@ -67,7 +68,7 @@
 			 ++i, offset, sch->type, htons(sch->length),
 			 sch->flags);
 #endif
-		offset += (ntohs(sch->length) + 3) & ~3;
+		offset += WORD_ROUND(ntohs(sch->length));
 
 		pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);
 
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 3d54c23..1ca8990 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -127,7 +127,7 @@
 	 * reply packet of an established SNAT-ted connection. */
 
 	ct = nf_ct_get(skb, &ctinfo);
-	if (ct && (ct != &nf_conntrack_untracked) &&
+	if (ct && !nf_ct_is_untracked(ct) &&
 	    ((iph->protocol != IPPROTO_ICMP &&
 	      ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) ||
 	     (iph->protocol == IPPROTO_ICMP &&
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c
index e12e053..a507922 100644
--- a/net/netfilter/xt_state.c
+++ b/net/netfilter/xt_state.c
@@ -26,14 +26,16 @@
 	const struct xt_state_info *sinfo = par->matchinfo;
 	enum ip_conntrack_info ctinfo;
 	unsigned int statebit;
+	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
-	if (nf_ct_is_untracked(skb))
-		statebit = XT_STATE_UNTRACKED;
-	else if (!nf_ct_get(skb, &ctinfo))
+	if (!ct)
 		statebit = XT_STATE_INVALID;
-	else
-		statebit = XT_STATE_BIT(ctinfo);
-
+	else {
+		if (nf_ct_is_untracked(ct))
+			statebit = XT_STATE_UNTRACKED;
+		else
+			statebit = XT_STATE_BIT(ctinfo);
+	}
 	return (sinfo->statemask & statebit);
 }
 
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 96e62b8..42ecb71 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -18,8 +18,8 @@
 #include <linux/netfilter/x_tables.h>
 
 struct xt_statistic_priv {
-	uint32_t count;
-};
+	atomic_t count;
+} ____cacheline_aligned_in_smp;
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -27,13 +27,12 @@
 MODULE_ALIAS("ipt_statistic");
 MODULE_ALIAS("ip6t_statistic");
 
-static DEFINE_SPINLOCK(nth_lock);
-
 static bool
 statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_statistic_info *info = par->matchinfo;
 	bool ret = info->flags & XT_STATISTIC_INVERT;
+	int nval, oval;
 
 	switch (info->mode) {
 	case XT_STATISTIC_MODE_RANDOM:
@@ -41,12 +40,12 @@
 			ret = !ret;
 		break;
 	case XT_STATISTIC_MODE_NTH:
-		spin_lock_bh(&nth_lock);
-		if (info->master->count++ == info->u.nth.every) {
-			info->master->count = 0;
+		do {
+			oval = atomic_read(&info->master->count);
+			nval = (oval == info->u.nth.every) ? 0 : oval + 1;
+		} while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
+		if (nval == 0)
 			ret = !ret;
-		}
-		spin_unlock_bh(&nth_lock);
 		break;
 	}
 
@@ -64,7 +63,7 @@
 	info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
 	if (info->master == NULL)
 		return -ENOMEM;
-	info->master->count = info->u.nth.count;
+	atomic_set(&info->master->count, info->u.nth.count);
 
 	return 0;
 }
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a2eb965..7aeaa83 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1323,19 +1323,23 @@
 	if (msg->msg_flags&MSG_OOB)
 		return -EOPNOTSUPP;
 
-	if (NULL == siocb->scm)
+	if (NULL == siocb->scm) {
 		siocb->scm = &scm;
+		memset(&scm, 0, sizeof(scm));
+	}
 	err = scm_send(sock, msg, siocb->scm);
 	if (err < 0)
 		return err;
 
 	if (msg->msg_namelen) {
+		err = -EINVAL;
 		if (addr->nl_family != AF_NETLINK)
-			return -EINVAL;
+			goto out;
 		dst_pid = addr->nl_pid;
 		dst_group = ffs(addr->nl_groups);
+		err = -EPERM;
 		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
-			return -EPERM;
+			goto out;
 	} else {
 		dst_pid = nlk->dst_pid;
 		dst_group = nlk->dst_group;
@@ -1387,6 +1391,7 @@
 	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
 
 out:
+	scm_destroy(siocb->scm);
 	return err;
 }
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2078a27..9a17f28 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -83,6 +83,7 @@
 #include <linux/if_vlan.h>
 #include <linux/virtio_net.h>
 #include <linux/errqueue.h>
+#include <linux/net_tstamp.h>
 
 #ifdef CONFIG_INET
 #include <net/inet_common.h>
@@ -202,6 +203,7 @@
 	unsigned int		tp_hdrlen;
 	unsigned int		tp_reserve;
 	unsigned int		tp_loss:1;
+	unsigned int		tp_tstamp;
 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
 };
 
@@ -656,6 +658,7 @@
 	struct sk_buff *copy_skb = NULL;
 	struct timeval tv;
 	struct timespec ts;
+	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 
 	if (skb->pkt_type == PACKET_LOOPBACK)
 		goto drop;
@@ -737,7 +740,13 @@
 		h.h1->tp_snaplen = snaplen;
 		h.h1->tp_mac = macoff;
 		h.h1->tp_net = netoff;
-		if (skb->tstamp.tv64)
+		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
+				&& shhwtstamps->syststamp.tv64)
+			tv = ktime_to_timeval(shhwtstamps->syststamp);
+		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
+				&& shhwtstamps->hwtstamp.tv64)
+			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
+		else if (skb->tstamp.tv64)
 			tv = ktime_to_timeval(skb->tstamp);
 		else
 			do_gettimeofday(&tv);
@@ -750,7 +759,13 @@
 		h.h2->tp_snaplen = snaplen;
 		h.h2->tp_mac = macoff;
 		h.h2->tp_net = netoff;
-		if (skb->tstamp.tv64)
+		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
+				&& shhwtstamps->syststamp.tv64)
+			ts = ktime_to_timespec(shhwtstamps->syststamp);
+		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
+				&& shhwtstamps->hwtstamp.tv64)
+			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
+		else if (skb->tstamp.tv64)
 			ts = ktime_to_timespec(skb->tstamp);
 		else
 			getnstimeofday(&ts);
@@ -2027,6 +2042,18 @@
 		po->has_vnet_hdr = !!val;
 		return 0;
 	}
+	case PACKET_TIMESTAMP:
+	{
+		int val;
+
+		if (optlen != sizeof(val))
+			return -EINVAL;
+		if (copy_from_user(&val, optval, sizeof(val)))
+			return -EFAULT;
+
+		po->tp_tstamp = val;
+		return 0;
+	}
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -2119,6 +2146,12 @@
 		val = po->tp_loss;
 		data = &val;
 		break;
+	case PACKET_TIMESTAMP:
+		if (len > sizeof(int))
+			len = sizeof(int);
+		val = po->tp_tstamp;
+		data = &val;
+		break;
 	default:
 		return -ENOPROTOOPT;
 	}
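
The af_packet hunks above add a PACKET_TIMESTAMP socket option so a ring-buffer user can prefer hardware timestamps over the software skb timestamp. A minimal userspace sketch follows; it is illustrative only (error handling trimmed, CAP_NET_RAW required) and assumes uapi headers that already carry the PACKET_TIMESTAMP define this series introduces.

	#include <sys/socket.h>
	#include <arpa/inet.h>
	#include <linux/if_ether.h>
	#include <linux/if_packet.h>
	#include <linux/net_tstamp.h>

	int main(void)
	{
		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		int req = SOF_TIMESTAMPING_RAW_HARDWARE;

		/* Ask for raw NIC timestamps in the RX ring frame headers,
		 * falling back to the usual skb timestamp when absent. */
		setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
		return 0;
	}
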
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index c33da65..b18e48f 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -162,6 +162,14 @@
 	return err;
 }
 
+static void phonet_device_rcu_free(struct rcu_head *head)
+{
+	struct phonet_device *pnd;
+
+	pnd = container_of(head, struct phonet_device, rcu);
+	kfree(pnd);
+}
+
 int phonet_address_del(struct net_device *dev, u8 addr)
 {
 	struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
@@ -179,10 +187,9 @@
 		pnd = NULL;
 	mutex_unlock(&pndevs->lock);
 
-	if (pnd) {
-		synchronize_rcu();
-		kfree(pnd);
-	}
+	if (pnd)
+		call_rcu(&pnd->rcu, phonet_device_rcu_free);
+
 	return err;
 }
 
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index f0f85b0..9f1729b 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -64,8 +64,8 @@
 		return;
 	}
 
-	peer->if_mtu = dst_mtu(&rt->u.dst);
-	dst_release(&rt->u.dst);
+	peer->if_mtu = dst_mtu(&rt->dst);
+	dst_release(&rt->dst);
 
 	_leave(" [if_mtu %u]", peer->if_mtu);
 }
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 972378f..23b25f8 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -26,6 +26,11 @@
 #include <net/act_api.h>
 #include <net/netlink.h>
 
+static void tcf_common_free_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct tcf_common, tcfc_rcu));
+}
+
 void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
 {
 	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
@@ -38,7 +43,11 @@
 			write_unlock_bh(hinfo->lock);
 			gen_kill_estimator(&p->tcfc_bstats,
 					   &p->tcfc_rate_est);
-			kfree(p);
+			/*
+			 * gen_estimator est_timer() might access p->tcfc_lock
+			 * or bstats; wait an RCU grace period before freeing p
+			 */
+			call_rcu(&p->tcfc_rcu, tcf_common_free_rcu);
 			return;
 		}
 	}
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index c0b6863..a16b017 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -160,6 +160,8 @@
 
 	spin_lock(&m->tcf_lock);
 	m->tcf_tm.lastuse = jiffies;
+	m->tcf_bstats.bytes += qdisc_pkt_len(skb);
+	m->tcf_bstats.packets++;
 
 	dev = m->tcfm_dev;
 	if (!(dev->flags & IFF_UP)) {
@@ -169,13 +171,11 @@
 		goto out;
 	}
 
-	skb2 = skb_act_clone(skb, GFP_ATOMIC);
+	at = G_TC_AT(skb->tc_verd);
+	skb2 = skb_act_clone(skb, GFP_ATOMIC, m->tcf_action);
 	if (skb2 == NULL)
 		goto out;
 
-	m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
-	m->tcf_bstats.packets++;
-	at = G_TC_AT(skb->tc_verd);
 	if (!(at & AT_EGRESS)) {
 		if (m->tcfm_ok_push)
 			skb_push(skb2, skb2->dev->hard_header_len);
@@ -185,16 +185,14 @@
 	if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
 		skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);
 
-	skb2->dev = dev;
 	skb2->skb_iif = skb->dev->ifindex;
+	skb2->dev = dev;
 	dev_queue_xmit(skb2);
 	err = 0;
 
 out:
 	if (err) {
 		m->tcf_qstats.overlimits++;
-		m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-		m->tcf_bstats.packets++;
 		/* should we be asking for packet to be dropped?
 		 * may make sense for redirect case only
 		 */
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 5709494..0be49a4 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -265,40 +265,29 @@
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_nat *p = a->priv;
-	struct tc_nat *opt;
+	struct tc_nat opt;
 	struct tcf_t t;
-	int s;
 
-	s = sizeof(*opt);
+	opt.old_addr = p->old_addr;
+	opt.new_addr = p->new_addr;
+	opt.mask = p->mask;
+	opt.flags = p->flags;
 
-	/* netlink spinlocks held above us - must use ATOMIC */
-	opt = kzalloc(s, GFP_ATOMIC);
-	if (unlikely(!opt))
-		return -ENOBUFS;
+	opt.index = p->tcf_index;
+	opt.action = p->tcf_action;
+	opt.refcnt = p->tcf_refcnt - ref;
+	opt.bindcnt = p->tcf_bindcnt - bind;
 
-	opt->old_addr = p->old_addr;
-	opt->new_addr = p->new_addr;
-	opt->mask = p->mask;
-	opt->flags = p->flags;
-
-	opt->index = p->tcf_index;
-	opt->action = p->tcf_action;
-	opt->refcnt = p->tcf_refcnt - ref;
-	opt->bindcnt = p->tcf_bindcnt - bind;
-
-	NLA_PUT(skb, TCA_NAT_PARMS, s, opt);
+	NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
 	NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);
 
-	kfree(opt);
-
 	return skb->len;
 
 nla_put_failure:
 	nlmsg_trim(skb, b);
-	kfree(opt);
 	return -1;
 }
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 50e3d94..a0593c9 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -127,8 +127,7 @@
 	int i, munged = 0;
 	unsigned int off;
 
-	if (!(skb->tc_verd & TC_OK2MUNGE)) {
-		/* should we set skb->cloned? */
+	if (skb_cloned(skb)) {
 		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
 			return p->tcf_action;
 		}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 654f73d..537a487 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -97,6 +97,11 @@
 	goto done;
 }
 
+static void tcf_police_free_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct tcf_police, tcf_rcu));
+}
+
 static void tcf_police_destroy(struct tcf_police *p)
 {
 	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
@@ -113,7 +118,11 @@
 				qdisc_put_rtab(p->tcfp_R_tab);
 			if (p->tcfp_P_tab)
 				qdisc_put_rtab(p->tcfp_P_tab);
-			kfree(p);
+			/*
+			 * gen_estimator est_timer() might access p->tcf_lock
+			 * or bstats; wait an RCU grace period before freeing p
+			 */
+			call_rcu(&p->tcf_rcu, tcf_police_free_rcu);
 			return;
 		}
 	}
@@ -397,6 +406,7 @@
 police_cleanup_module(void)
 {
 	tcf_unregister_action(&act_police_ops);
+	rcu_barrier(); /* Wait for outstanding call_rcu() callbacks (tcf_police_free_rcu) */
 }
 
 module_init(police_init_module);
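
The phonet, act_api and act_police hunks above all switch from synchronize_rcu()/kfree() to deferring the free behind call_rcu(), and act_police adds an rcu_barrier() at module unload so no callback is still queued when the module text disappears. A minimal sketch of that pattern, with hypothetical struct and function names not taken from these files:

	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/rcupdate.h>

	struct foo {
		int data;
		struct rcu_head rcu;
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_release(struct foo *f)
	{
		/* Readers may still hold an RCU reference; free after a grace period. */
		call_rcu(&f->rcu, foo_free_rcu);
	}

	static void __exit foo_exit(void)
	{
		/* Ensure all queued foo_free_rcu() callbacks have run before unload. */
		rcu_barrier();
	}
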
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index a63029e..d20fcd2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -205,7 +205,7 @@
 		}
 	}
 
-	clear_bit(__QDISC_STATE_RUNNING, &q->state);
+	qdisc_run_end(q);
 }
 
 unsigned long dev_trans_start(struct net_device *dev)
@@ -327,6 +327,24 @@
 }
 EXPORT_SYMBOL(netif_carrier_off);
 
+/**
+ * netif_notify_peers - notify network peers about existence of @dev
+ * @dev: network device
+ *
+ * Generate traffic such that interested network peers are aware of
+ * @dev, such as by generating a gratuitous ARP. This may be used when
+ * a device wants to inform the rest of the network about some sort of
+ * reconfiguration such as a failover event or virtual machine
+ * migration.
+ */
+void netif_notify_peers(struct net_device *dev)
+{
+	rtnl_lock();
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+	rtnl_unlock();
+}
+EXPORT_SYMBOL(netif_notify_peers);
+
 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
    under all circumstances. It is difficult to invent anything faster or
    cheaper.
@@ -543,6 +561,7 @@
 
 	INIT_LIST_HEAD(&sch->list);
 	skb_queue_head_init(&sch->q);
+	spin_lock_init(&sch->busylock);
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
@@ -779,7 +798,7 @@
 
 		spin_lock_bh(root_lock);
 
-		val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+		val = (qdisc_is_running(q) ||
 		       test_bit(__QDISC_STATE_SCHED, &q->state));
 
 		spin_unlock_bh(root_lock);
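
netif_notify_peers(), added in the sch_generic.c hunk above, takes the rtnl lock itself, so a driver calls it from process context after an event that may have invalidated its peers' ARP/ND state. An illustrative fragment (hypothetical driver function, assuming the declaration this series adds to linux/netdevice.h):

	#include <linux/netdevice.h>

	static int foo_resume_after_migration(struct net_device *dev)
	{
		/* ... re-establish the backend link ... */

		/* Tell the neighbourhood (e.g. via gratuitous ARP) that traffic
		 * for dev may now arrive on a different path. */
		netif_notify_peers(dev);
		return 0;
	}
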
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0b52b8d..4be8d04 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1550,7 +1550,6 @@
 };
 
 static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
-	.next		=	NULL,
 	.cl_ops		=	&htb_class_ops,
 	.id		=	"htb",
 	.priv_size	=	sizeof(struct htb_sched),
@@ -1561,7 +1560,6 @@
 	.init		=	htb_init,
 	.reset		=	htb_reset,
 	.destroy	=	htb_destroy,
-	.change		=	NULL /* htb_change */,
 	.dump		=	htb_dump,
 	.owner		=	THIS_MODULE,
 };
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1827498..c0e162a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -490,7 +490,7 @@
 			  __func__, &fl.fl4_dst, &fl.fl4_src);
 
 	if (!ip_route_output_key(&init_net, &rt, &fl)) {
-		dst = &rt->u.dst;
+		dst = &rt->dst;
 	}
 
 	/* If there is no association or if a source address is passed, no
@@ -534,7 +534,7 @@
 			fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
 			fl.fl_ip_sport = laddr->a.v4.sin_port;
 			if (!ip_route_output_key(&init_net, &rt, &fl)) {
-				dst = &rt->u.dst;
+				dst = &rt->dst;
 				goto out_unlock;
 			}
 		}
@@ -1002,7 +1002,8 @@
 static inline int init_sctp_mibs(void)
 {
 	return snmp_mib_init((void __percpu **)sctp_statistics,
-			     sizeof(struct sctp_mib));
+			     sizeof(struct sctp_mib),
+			     __alignof__(struct sctp_mib));
 }
 
 static inline void cleanup_sctp_mibs(void)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index bd2a50b..246f929 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1817,7 +1817,7 @@
 struct __sctp_missing {
 	__be32 num_missing;
 	__be16 type;
-}  __attribute__((packed));
+}  __packed;
 
 /*
  * Report a missing mandatory parameter.
diff --git a/net/socket.c b/net/socket.c
index 367d547..acfa173 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -124,7 +124,7 @@
 static ssize_t sock_sendpage(struct file *file, struct page *page,
 			     int offset, size_t size, loff_t *ppos, int more);
 static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
-			        struct pipe_inode_info *pipe, size_t len,
+				struct pipe_inode_info *pipe, size_t len,
 				unsigned int flags);
 
 /*
@@ -162,7 +162,7 @@
  *	Statistics counters of the socket lists
  */
 
-static DEFINE_PER_CPU(int, sockets_in_use) = 0;
+static DEFINE_PER_CPU(int, sockets_in_use);
 
 /*
  * Support routines.
@@ -309,9 +309,9 @@
 }
 
 static const struct super_operations sockfs_ops = {
-	.alloc_inode =	sock_alloc_inode,
-	.destroy_inode =sock_destroy_inode,
-	.statfs =	simple_statfs,
+	.alloc_inode	= sock_alloc_inode,
+	.destroy_inode	= sock_destroy_inode,
+	.statfs		= simple_statfs,
 };
 
 static int sockfs_get_sb(struct file_system_type *fs_type,
@@ -411,6 +411,7 @@
 
 	return fd;
 }
+EXPORT_SYMBOL(sock_map_fd);
 
 static struct socket *sock_from_file(struct file *file, int *err)
 {
@@ -422,7 +423,7 @@
 }
 
 /**
- *	sockfd_lookup	- 	Go from a file number to its socket slot
+ *	sockfd_lookup - Go from a file number to its socket slot
  *	@fd: file handle
  *	@err: pointer to an error code return
  *
@@ -450,6 +451,7 @@
 		fput(file);
 	return sock;
 }
+EXPORT_SYMBOL(sockfd_lookup);
 
 static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
 {
@@ -540,6 +542,7 @@
 	}
 	sock->file = NULL;
 }
+EXPORT_SYMBOL(sock_release);
 
 int sock_tx_timestamp(struct msghdr *msg, struct sock *sk,
 		      union skb_shared_tx *shtx)
@@ -586,6 +589,7 @@
 		ret = wait_on_sync_kiocb(&iocb);
 	return ret;
 }
+EXPORT_SYMBOL(sock_sendmsg);
 
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
 		   struct kvec *vec, size_t num, size_t size)
@@ -604,6 +608,7 @@
 	set_fs(oldfs);
 	return result;
 }
+EXPORT_SYMBOL(kernel_sendmsg);
 
 static int ktime2ts(ktime_t kt, struct timespec *ts)
 {
@@ -664,7 +669,6 @@
 		put_cmsg(msg, SOL_SOCKET,
 			 SCM_TIMESTAMPING, sizeof(ts), &ts);
 }
-
 EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
 
 inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -720,6 +724,7 @@
 		ret = wait_on_sync_kiocb(&iocb);
 	return ret;
 }
+EXPORT_SYMBOL(sock_recvmsg);
 
 static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
 			      size_t size, int flags)
@@ -752,6 +757,7 @@
 	set_fs(oldfs);
 	return result;
 }
+EXPORT_SYMBOL(kernel_recvmsg);
 
 static void sock_aio_dtor(struct kiocb *iocb)
 {
@@ -774,7 +780,7 @@
 }
 
 static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
-			        struct pipe_inode_info *pipe, size_t len,
+				struct pipe_inode_info *pipe, size_t len,
 				unsigned int flags)
 {
 	struct socket *sock = file->private_data;
@@ -887,7 +893,7 @@
  */
 
 static DEFINE_MUTEX(br_ioctl_mutex);
-static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL;
+static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg);
 
 void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
 {
@@ -895,7 +901,6 @@
 	br_ioctl_hook = hook;
 	mutex_unlock(&br_ioctl_mutex);
 }
-
 EXPORT_SYMBOL(brioctl_set);
 
 static DEFINE_MUTEX(vlan_ioctl_mutex);
@@ -907,7 +912,6 @@
 	vlan_ioctl_hook = hook;
 	mutex_unlock(&vlan_ioctl_mutex);
 }
-
 EXPORT_SYMBOL(vlan_ioctl_set);
 
 static DEFINE_MUTEX(dlci_ioctl_mutex);
@@ -919,7 +923,6 @@
 	dlci_ioctl_hook = hook;
 	mutex_unlock(&dlci_ioctl_mutex);
 }
-
 EXPORT_SYMBOL(dlci_ioctl_set);
 
 static long sock_do_ioctl(struct net *net, struct socket *sock,
@@ -1047,6 +1050,7 @@
 	sock = NULL;
 	goto out;
 }
+EXPORT_SYMBOL(sock_create_lite);
 
 /* No kernel lock held - perfect */
 static unsigned int sock_poll(struct file *file, poll_table *wait)
@@ -1147,6 +1151,7 @@
 	rcu_read_unlock();
 	return 0;
 }
+EXPORT_SYMBOL(sock_wake_async);
 
 static int __sock_create(struct net *net, int family, int type, int protocol,
 			 struct socket **res, int kern)
@@ -1265,11 +1270,13 @@
 {
 	return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
 }
+EXPORT_SYMBOL(sock_create);
 
 int sock_create_kern(int family, int type, int protocol, struct socket **res)
 {
 	return __sock_create(&init_net, family, type, protocol, res, 1);
 }
+EXPORT_SYMBOL(sock_create_kern);
 
 SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
 {
@@ -1474,7 +1481,8 @@
 		goto out;
 
 	err = -ENFILE;
-	if (!(newsock = sock_alloc()))
+	newsock = sock_alloc();
+	if (!newsock)
 		goto out_put;
 
 	newsock->type = sock->type;
@@ -1861,8 +1869,7 @@
 	if (MSG_CMSG_COMPAT & flags) {
 		if (get_compat_msghdr(&msg_sys, msg_compat))
 			return -EFAULT;
-	}
-	else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
+	} else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
 		return -EFAULT;
 
 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
@@ -1964,8 +1971,7 @@
 	if (MSG_CMSG_COMPAT & flags) {
 		if (get_compat_msghdr(msg_sys, msg_compat))
 			return -EFAULT;
-	}
-	else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
+	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
 		return -EFAULT;
 
 	err = -EMSGSIZE;
@@ -2191,10 +2197,10 @@
 /* Argument list sizes for sys_socketcall */
 #define AL(x) ((x) * sizeof(unsigned long))
 static const unsigned char nargs[20] = {
-	AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
-	AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
-	AL(6),AL(2),AL(5),AL(5),AL(3),AL(3),
-	AL(4),AL(5)
+	AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
+	AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
+	AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
+	AL(4), AL(5)
 };
 
 #undef AL
@@ -2340,6 +2346,7 @@
 	printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
 	return err;
 }
+EXPORT_SYMBOL(sock_register);
 
 /**
  *	sock_unregister - remove a protocol handler
@@ -2366,6 +2373,7 @@
 
 	printk(KERN_INFO "NET: Unregistered protocol family %d\n", family);
 }
+EXPORT_SYMBOL(sock_unregister);
 
 static int __init sock_init(void)
 {
@@ -2490,13 +2498,13 @@
 		ifc.ifc_req = NULL;
 		uifc = compat_alloc_user_space(sizeof(struct ifconf));
 	} else {
-		size_t len =((ifc32.ifc_len / sizeof (struct compat_ifreq)) + 1) *
-			sizeof (struct ifreq);
+		size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) *
+			sizeof(struct ifreq);
 		uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
 		ifc.ifc_len = len;
 		ifr = ifc.ifc_req = (void __user *)(uifc + 1);
 		ifr32 = compat_ptr(ifc32.ifcbuf);
-		for (i = 0; i < ifc32.ifc_len; i += sizeof (struct compat_ifreq)) {
+		for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) {
 			if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq)))
 				return -EFAULT;
 			ifr++;
@@ -2516,9 +2524,9 @@
 	ifr = ifc.ifc_req;
 	ifr32 = compat_ptr(ifc32.ifcbuf);
 	for (i = 0, j = 0;
-             i + sizeof (struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len;
-	     i += sizeof (struct compat_ifreq), j += sizeof (struct ifreq)) {
-		if (copy_in_user(ifr32, ifr, sizeof (struct compat_ifreq)))
+	     i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len;
+	     i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) {
+		if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq)))
 			return -EFAULT;
 		ifr32++;
 		ifr++;
@@ -2567,7 +2575,7 @@
 	compat_uptr_t uptr32;
 	struct ifreq __user *uifr;
 
-	uifr = compat_alloc_user_space(sizeof (*uifr));
+	uifr = compat_alloc_user_space(sizeof(*uifr));
 	if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
 		return -EFAULT;
 
@@ -2601,9 +2609,9 @@
 			return -EFAULT;
 
 		old_fs = get_fs();
-		set_fs (KERNEL_DS);
+		set_fs(KERNEL_DS);
 		err = dev_ioctl(net, cmd, &kifr);
-		set_fs (old_fs);
+		set_fs(old_fs);
 
 		return err;
 	case SIOCBONDSLAVEINFOQUERY:
@@ -2710,9 +2718,9 @@
 		return -EFAULT;
 
 	old_fs = get_fs();
-	set_fs (KERNEL_DS);
+	set_fs(KERNEL_DS);
 	err = dev_ioctl(net, cmd, (void __user *)&ifr);
-	set_fs (old_fs);
+	set_fs(old_fs);
 
 	if (cmd == SIOCGIFMAP && !err) {
 		err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
@@ -2734,7 +2742,7 @@
 	compat_uptr_t uptr32;
 	struct ifreq __user *uifr;
 
-	uifr = compat_alloc_user_space(sizeof (*uifr));
+	uifr = compat_alloc_user_space(sizeof(*uifr));
 	if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
 		return -EFAULT;
 
@@ -2750,20 +2758,20 @@
 }
 
 struct rtentry32 {
-	u32   		rt_pad1;
+	u32		rt_pad1;
 	struct sockaddr rt_dst;         /* target address               */
 	struct sockaddr rt_gateway;     /* gateway addr (RTF_GATEWAY)   */
 	struct sockaddr rt_genmask;     /* target network mask (IP)     */
-	unsigned short  rt_flags;
-	short           rt_pad2;
-	u32   		rt_pad3;
-	unsigned char   rt_tos;
-	unsigned char   rt_class;
-	short           rt_pad4;
-	short           rt_metric;      /* +1 for binary compatibility! */
+	unsigned short	rt_flags;
+	short		rt_pad2;
+	u32		rt_pad3;
+	unsigned char	rt_tos;
+	unsigned char	rt_class;
+	short		rt_pad4;
+	short		rt_metric;      /* +1 for binary compatibility! */
 	/* char * */ u32 rt_dev;        /* forcing the device at add    */
-	u32   		rt_mtu;         /* per route MTU/Window         */
-	u32   		rt_window;      /* Window clamping              */
+	u32		rt_mtu;         /* per route MTU/Window         */
+	u32		rt_window;      /* Window clamping              */
 	unsigned short  rt_irtt;        /* Initial RTT                  */
 };
 
@@ -2793,29 +2801,29 @@
 
 	if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */
 		struct in6_rtmsg32 __user *ur6 = argp;
-		ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst),
+		ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst),
 			3 * sizeof(struct in6_addr));
-		ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type));
-		ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
-		ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
-		ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric));
-		ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info));
-		ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags));
-		ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
+		ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type));
+		ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
+		ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
+		ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric));
+		ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info));
+		ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags));
+		ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
 
 		r = (void *) &r6;
 	} else { /* ipv4 */
 		struct rtentry32 __user *ur4 = argp;
-		ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst),
+		ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst),
 					3 * sizeof(struct sockaddr));
-		ret |= __get_user (r4.rt_flags, &(ur4->rt_flags));
-		ret |= __get_user (r4.rt_metric, &(ur4->rt_metric));
-		ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu));
-		ret |= __get_user (r4.rt_window, &(ur4->rt_window));
-		ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt));
-		ret |= __get_user (rtdev, &(ur4->rt_dev));
+		ret |= __get_user(r4.rt_flags, &(ur4->rt_flags));
+		ret |= __get_user(r4.rt_metric, &(ur4->rt_metric));
+		ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu));
+		ret |= __get_user(r4.rt_window, &(ur4->rt_window));
+		ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt));
+		ret |= __get_user(rtdev, &(ur4->rt_dev));
 		if (rtdev) {
-			ret |= copy_from_user (devname, compat_ptr(rtdev), 15);
+			ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
 			r4.rt_dev = devname; devname[15] = 0;
 		} else
 			r4.rt_dev = NULL;
@@ -2828,9 +2836,9 @@
 		goto out;
 	}
 
-	set_fs (KERNEL_DS);
+	set_fs(KERNEL_DS);
 	ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
-	set_fs (old_fs);
+	set_fs(old_fs);
 
 out:
 	return ret;
@@ -2993,11 +3001,13 @@
 {
 	return sock->ops->bind(sock, addr, addrlen);
 }
+EXPORT_SYMBOL(kernel_bind);
 
 int kernel_listen(struct socket *sock, int backlog)
 {
 	return sock->ops->listen(sock, backlog);
 }
+EXPORT_SYMBOL(kernel_listen);
 
 int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
 {
@@ -3022,24 +3032,28 @@
 done:
 	return err;
 }
+EXPORT_SYMBOL(kernel_accept);
 
 int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
 		   int flags)
 {
 	return sock->ops->connect(sock, addr, addrlen, flags);
 }
+EXPORT_SYMBOL(kernel_connect);
 
 int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
 			 int *addrlen)
 {
 	return sock->ops->getname(sock, addr, addrlen, 0);
 }
+EXPORT_SYMBOL(kernel_getsockname);
 
 int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
 			 int *addrlen)
 {
 	return sock->ops->getname(sock, addr, addrlen, 1);
 }
+EXPORT_SYMBOL(kernel_getpeername);
 
 int kernel_getsockopt(struct socket *sock, int level, int optname,
 			char *optval, int *optlen)
@@ -3056,6 +3070,7 @@
 	set_fs(oldfs);
 	return err;
 }
+EXPORT_SYMBOL(kernel_getsockopt);
 
 int kernel_setsockopt(struct socket *sock, int level, int optname,
 			char *optval, unsigned int optlen)
@@ -3072,6 +3087,7 @@
 	set_fs(oldfs);
 	return err;
 }
+EXPORT_SYMBOL(kernel_setsockopt);
 
 int kernel_sendpage(struct socket *sock, struct page *page, int offset,
 		    size_t size, int flags)
@@ -3083,6 +3099,7 @@
 
 	return sock_no_sendpage(sock, page, offset, size, flags);
 }
+EXPORT_SYMBOL(kernel_sendpage);
 
 int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
 {
@@ -3095,33 +3112,10 @@
 
 	return err;
 }
+EXPORT_SYMBOL(kernel_sock_ioctl);
 
 int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
 {
 	return sock->ops->shutdown(sock, how);
 }
-
-EXPORT_SYMBOL(sock_create);
-EXPORT_SYMBOL(sock_create_kern);
-EXPORT_SYMBOL(sock_create_lite);
-EXPORT_SYMBOL(sock_map_fd);
-EXPORT_SYMBOL(sock_recvmsg);
-EXPORT_SYMBOL(sock_register);
-EXPORT_SYMBOL(sock_release);
-EXPORT_SYMBOL(sock_sendmsg);
-EXPORT_SYMBOL(sock_unregister);
-EXPORT_SYMBOL(sock_wake_async);
-EXPORT_SYMBOL(sockfd_lookup);
-EXPORT_SYMBOL(kernel_sendmsg);
-EXPORT_SYMBOL(kernel_recvmsg);
-EXPORT_SYMBOL(kernel_bind);
-EXPORT_SYMBOL(kernel_listen);
-EXPORT_SYMBOL(kernel_accept);
-EXPORT_SYMBOL(kernel_connect);
-EXPORT_SYMBOL(kernel_getsockname);
-EXPORT_SYMBOL(kernel_getpeername);
-EXPORT_SYMBOL(kernel_getsockopt);
-EXPORT_SYMBOL(kernel_setsockopt);
-EXPORT_SYMBOL(kernel_sendpage);
-EXPORT_SYMBOL(kernel_sock_ioctl);
 EXPORT_SYMBOL(kernel_sock_shutdown);
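
With the EXPORT_SYMBOL() declarations now placed next to their functions, the kernel_* socket helpers remain available to modules exactly as before. A minimal in-kernel usage sketch (hypothetical demo_send helper, error handling trimmed):

	#include <linux/net.h>
	#include <linux/in.h>
	#include <linux/socket.h>
	#include <linux/uio.h>

	static int demo_send(const char *buf, size_t len, struct sockaddr_in *dst)
	{
		struct socket *sock;
		struct msghdr msg = { .msg_name = dst, .msg_namelen = sizeof(*dst) };
		struct kvec vec = { .iov_base = (void *)buf, .iov_len = len };
		int err;

		err = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
		if (err < 0)
			return err;

		/* kernel_sendmsg() fills in msg_iov/msg_iovlen from the kvec. */
		err = kernel_sendmsg(sock, &msg, &vec, 1, len);
		sock_release(sock);
		return err;
	}
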
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index fef2cc5..75ba48b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -282,7 +282,7 @@
 	return s;
 }
 
-static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
+static struct sock *unix_find_socket_byinode(struct inode *i)
 {
 	struct sock *s;
 	struct hlist_node *node;
@@ -292,9 +292,6 @@
 		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
 		struct dentry *dentry = unix_sk(s)->dentry;
 
-		if (!net_eq(sock_net(s), net))
-			continue;
-
 		if (dentry && dentry->d_inode == i) {
 			sock_hold(s);
 			goto found;
@@ -450,11 +447,31 @@
 	return 0;
 }
 
+static void init_peercred(struct sock *sk)
+{
+	put_pid(sk->sk_peer_pid);
+	if (sk->sk_peer_cred)
+		put_cred(sk->sk_peer_cred);
+	sk->sk_peer_pid  = get_pid(task_tgid(current));
+	sk->sk_peer_cred = get_current_cred();
+}
+
+static void copy_peercred(struct sock *sk, struct sock *peersk)
+{
+	put_pid(sk->sk_peer_pid);
+	if (sk->sk_peer_cred)
+		put_cred(sk->sk_peer_cred);
+	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
+	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+}
+
 static int unix_listen(struct socket *sock, int backlog)
 {
 	int err;
 	struct sock *sk = sock->sk;
 	struct unix_sock *u = unix_sk(sk);
+	struct pid *old_pid = NULL;
+	const struct cred *old_cred = NULL;
 
 	err = -EOPNOTSUPP;
 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
@@ -470,12 +487,14 @@
 	sk->sk_max_ack_backlog	= backlog;
 	sk->sk_state		= TCP_LISTEN;
 	/* set credentials so connect can copy them */
-	sk->sk_peercred.pid	= task_tgid_vnr(current);
-	current_euid_egid(&sk->sk_peercred.uid, &sk->sk_peercred.gid);
+	init_peercred(sk);
 	err = 0;
 
 out_unlock:
 	unix_state_unlock(sk);
+	put_pid(old_pid);
+	if (old_cred)
+		put_cred(old_cred);
 out:
 	return err;
 }
@@ -736,7 +755,7 @@
 		err = -ECONNREFUSED;
 		if (!S_ISSOCK(inode->i_mode))
 			goto put_fail;
-		u = unix_find_socket_byinode(net, inode);
+		u = unix_find_socket_byinode(inode);
 		if (!u)
 			goto put_fail;
 
@@ -1140,8 +1159,7 @@
 	unix_peer(newsk)	= sk;
 	newsk->sk_state		= TCP_ESTABLISHED;
 	newsk->sk_type		= sk->sk_type;
-	newsk->sk_peercred.pid	= task_tgid_vnr(current);
-	current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
+	init_peercred(newsk);
 	newu = unix_sk(newsk);
 	newsk->sk_wq		= &newu->peer_wq;
 	otheru = unix_sk(other);
@@ -1157,7 +1175,7 @@
 	}
 
 	/* Set credentials */
-	sk->sk_peercred = other->sk_peercred;
+	copy_peercred(sk, other);
 
 	sock->state	= SS_CONNECTED;
 	sk->sk_state	= TCP_ESTABLISHED;
@@ -1199,10 +1217,8 @@
 	sock_hold(skb);
 	unix_peer(ska) = skb;
 	unix_peer(skb) = ska;
-	ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
-	current_euid_egid(&skb->sk_peercred.uid, &skb->sk_peercred.gid);
-	ska->sk_peercred.uid = skb->sk_peercred.uid;
-	ska->sk_peercred.gid = skb->sk_peercred.gid;
+	init_peercred(ska);
+	init_peercred(skb);
 
 	if (ska->sk_type != SOCK_DGRAM) {
 		ska->sk_state = TCP_ESTABLISHED;
@@ -1297,18 +1313,20 @@
 	int i;
 
 	scm->fp = UNIXCB(skb).fp;
-	skb->destructor = sock_wfree;
 	UNIXCB(skb).fp = NULL;
 
 	for (i = scm->fp->count-1; i >= 0; i--)
 		unix_notinflight(scm->fp->fp[i]);
 }
 
-static void unix_destruct_fds(struct sk_buff *skb)
+static void unix_destruct_scm(struct sk_buff *skb)
 {
 	struct scm_cookie scm;
 	memset(&scm, 0, sizeof(scm));
-	unix_detach_fds(&scm, skb);
+	scm.pid  = UNIXCB(skb).pid;
+	scm.cred = UNIXCB(skb).cred;
+	if (UNIXCB(skb).fp)
+		unix_detach_fds(&scm, skb);
 
 	/* Alas, it calls VFS */
 	/* So fscking what? fput() had been SMP-safe since the last Summer */
@@ -1331,10 +1349,22 @@
 
 	for (i = scm->fp->count-1; i >= 0; i--)
 		unix_inflight(scm->fp->fp[i]);
-	skb->destructor = unix_destruct_fds;
 	return 0;
 }
 
+static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
+{
+	int err = 0;
+	UNIXCB(skb).pid  = get_pid(scm->pid);
+	UNIXCB(skb).cred = get_cred(scm->cred);
+	UNIXCB(skb).fp = NULL;
+	if (scm->fp && send_fds)
+		err = unix_attach_fds(scm, skb);
+
+	skb->destructor = unix_destruct_scm;
+	return err;
+}
+
 /*
  *	Send AF_UNIX data.
  */
@@ -1391,12 +1421,9 @@
 	if (skb == NULL)
 		goto out;
 
-	memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
-	if (siocb->scm->fp) {
-		err = unix_attach_fds(siocb->scm, skb);
-		if (err)
-			goto out_free;
-	}
+	err = unix_scm_to_skb(siocb->scm, skb, true);
+	if (err)
+		goto out_free;
 	unix_get_secdata(siocb->scm, skb);
 
 	skb_reset_transport_header(skb);
@@ -1566,16 +1593,14 @@
 		 */
 		size = min_t(int, size, skb_tailroom(skb));
 
-		memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
+
 		/* Only send the fds in the first buffer */
-		if (siocb->scm->fp && !fds_sent) {
-			err = unix_attach_fds(siocb->scm, skb);
-			if (err) {
-				kfree_skb(skb);
-				goto out_err;
-			}
-			fds_sent = true;
+		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
+		if (err) {
+			kfree_skb(skb);
+			goto out_err;
 		}
+		fds_sent = true;
 
 		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
 		if (err) {
@@ -1692,7 +1717,7 @@
 		siocb->scm = &tmp_scm;
 		memset(&tmp_scm, 0, sizeof(tmp_scm));
 	}
-	siocb->scm->creds = *UNIXCREDS(skb);
+	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
 	unix_set_secdata(siocb->scm, skb);
 
 	if (!(flags & MSG_PEEK)) {
@@ -1841,14 +1866,14 @@
 
 		if (check_creds) {
 			/* Never glue messages from different writers */
-			if (memcmp(UNIXCREDS(skb), &siocb->scm->creds,
-				   sizeof(siocb->scm->creds)) != 0) {
+			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
+			    (UNIXCB(skb).cred != siocb->scm->cred)) {
 				skb_queue_head(&sk->sk_receive_queue, skb);
 				break;
 			}
 		} else {
 			/* Copy credentials */
-			siocb->scm->creds = *UNIXCREDS(skb);
+			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
 			check_creds = 1;
 		}
 
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index b01a6f6..d0c92dd 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -35,8 +35,9 @@
 		if (!ht_cap->ht_supported)
 			return NULL;
 
-		if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
-		    ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
+		if (channel_type != NL80211_CHAN_HT20 &&
+		    (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
+		    ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT))
 			return NULL;
 	}
 
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 37d0e0a..47fcfd0 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -894,7 +894,7 @@
 }
 subsys_initcall(cfg80211_init);
 
-static void cfg80211_exit(void)
+static void __exit cfg80211_exit(void)
 {
 	debugfs_remove(ieee80211_debugfs_dir);
 	nl80211_exit();
diff --git a/net/wireless/core.h b/net/wireless/core.h
index ae930ac..63d57ae 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -339,6 +339,7 @@
 			 struct net_device *dev,
 			 struct ieee80211_channel *chan,
 			 enum nl80211_channel_type channel_type,
+			 bool channel_type_valid,
 			 const u8 *buf, size_t len, u64 *cookie);
 
 /* SME */
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 48ead6f..9f95354 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -827,6 +827,7 @@
 			 struct net_device *dev,
 			 struct ieee80211_channel *chan,
 			 enum nl80211_channel_type channel_type,
+			 bool channel_type_valid,
 			 const u8 *buf, size_t len, u64 *cookie)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -845,8 +846,9 @@
 		if (!wdev->current_bss ||
 		    memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
 			   ETH_ALEN) != 0 ||
-		    memcmp(wdev->current_bss->pub.bssid, mgmt->da,
-			   ETH_ALEN) != 0)
+		    (wdev->iftype == NL80211_IFTYPE_STATION &&
+		     memcmp(wdev->current_bss->pub.bssid, mgmt->da,
+			    ETH_ALEN) != 0))
 			return -ENOTCONN;
 	}
 
@@ -855,7 +857,7 @@
 
 	/* Transmit the Action frame as requested by user space */
 	return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type,
-				 buf, len, cookie);
+				 channel_type_valid, buf, len, cookie);
 }
 
 bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index db71150..85285b4 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -153,6 +153,9 @@
 	[NL80211_ATTR_CQM] = { .type = NLA_NESTED, },
 	[NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG },
 	[NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 },
+
+	[NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 },
+	[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 },
 };
 
 /* policy for the attributes */
@@ -869,6 +872,34 @@
 			goto bad_res;
 	}
 
+	if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) {
+		enum nl80211_tx_power_setting type;
+		int idx, mbm = 0;
+
+		if (!rdev->ops->set_tx_power) {
+			result = -EOPNOTSUPP;
+			goto bad_res;
+		}
+
+		idx = NL80211_ATTR_WIPHY_TX_POWER_SETTING;
+		type = nla_get_u32(info->attrs[idx]);
+
+		if (!info->attrs[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] &&
+		    (type != NL80211_TX_POWER_AUTOMATIC)) {
+			result = -EINVAL;
+			goto bad_res;
+		}
+
+		if (type != NL80211_TX_POWER_AUTOMATIC) {
+			idx = NL80211_ATTR_WIPHY_TX_POWER_LEVEL;
+			mbm = nla_get_u32(info->attrs[idx]);
+		}
+
+		result = rdev->ops->set_tx_power(&rdev->wiphy, type, mbm);
+		if (result)
+			goto bad_res;
+	}
+
 	changed = 0;
 
 	if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) {
@@ -1107,7 +1138,7 @@
 			       enum nl80211_iftype iftype)
 {
 	if (!use_4addr) {
-		if (netdev && netdev->br_port)
+		if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT))
 			return -EBUSY;
 		return 0;
 	}
@@ -3955,6 +3986,55 @@
 		}
 	}
 
+	if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) {
+		u8 *rates =
+			nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
+		int n_rates =
+			nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
+		struct ieee80211_supported_band *sband =
+			wiphy->bands[ibss.channel->band];
+		int i, j;
+
+		if (n_rates == 0) {
+			err = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0; i < n_rates; i++) {
+			int rate = (rates[i] & 0x7f) * 5;
+			bool found = false;
+
+			for (j = 0; j < sband->n_bitrates; j++) {
+				if (sband->bitrates[j].bitrate == rate) {
+					found = true;
+					ibss.basic_rates |= BIT(j);
+					break;
+				}
+			}
+			if (!found) {
+				err = -EINVAL;
+				goto out;
+			}
+		}
+	} else {
+		/*
+		 * If no rates were explicitly configured,
+		 * use the mandatory rate set for 11b or
+		 * 11a for maximum compatibility.
+		 */
+		struct ieee80211_supported_band *sband =
+			wiphy->bands[ibss.channel->band];
+		int j;
+		u32 flag = ibss.channel->band == IEEE80211_BAND_5GHZ ?
+			IEEE80211_RATE_MANDATORY_A :
+			IEEE80211_RATE_MANDATORY_B;
+
+		for (j = 0; j < sband->n_bitrates; j++) {
+			if (sband->bitrates[j].flags & flag)
+				ibss.basic_rates |= BIT(j);
+		}
+	}
+
 	err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
 
 out:
@@ -4653,7 +4733,8 @@
 	if (err)
 		goto unlock_rtnl;
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -4681,6 +4762,7 @@
 	struct net_device *dev;
 	struct ieee80211_channel *chan;
 	enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+	bool channel_type_valid = false;
 	u32 freq;
 	int err;
 	void *hdr;
@@ -4702,7 +4784,8 @@
 		goto out;
 	}
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -4722,6 +4805,7 @@
 			err = -EINVAL;
 			goto out;
 		}
+		channel_type_valid = true;
 	}
 
 	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -4745,6 +4829,7 @@
 		goto free_msg;
 	}
 	err = cfg80211_mlme_action(rdev, dev, chan, channel_type,
+				   channel_type_valid,
 				   nla_data(info->attrs[NL80211_ATTR_FRAME]),
 				   nla_len(info->attrs[NL80211_ATTR_FRAME]),
 				   &cookie);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 8f0d97d..1ac2bdd 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -80,7 +80,7 @@
  *     - country_ie_regdomain
  *     - last_request
  */
-DEFINE_MUTEX(reg_mutex);
+static DEFINE_MUTEX(reg_mutex);
 #define assert_reg_lock() WARN_ON(!mutex_is_locked(&reg_mutex))
 
 /* Used to queue up regulatory hints */
@@ -2630,7 +2630,7 @@
 	mutex_unlock(&reg_mutex);
 }
 
-int regulatory_init(void)
+int __init regulatory_init(void)
 {
 	int err = 0;
 
@@ -2676,7 +2676,7 @@
 	return 0;
 }
 
-void regulatory_exit(void)
+void /* __init_or_exit */ regulatory_exit(void)
 {
 	struct regulatory_request *reg_request, *tmp;
 	struct reg_beacon *reg_beacon, *btmp;
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index b26224a..c4695d0 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -10,7 +10,7 @@
 
 void reg_device_remove(struct wiphy *wiphy);
 
-int regulatory_init(void);
+int __init regulatory_init(void);
 void regulatory_exit(void);
 
 int set_regdom(const struct ieee80211_regdomain *rd);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 3416373..0c8a1e8 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -770,8 +770,8 @@
 		return -EOPNOTSUPP;
 
 	/* if it's part of a bridge, reject changing type to station/ibss */
-	if (dev->br_port && (ntype == NL80211_IFTYPE_ADHOC ||
-			     ntype == NL80211_IFTYPE_STATION))
+	if ((dev->priv_flags & IFF_BRIDGE_PORT) &&
+	    (ntype == NL80211_IFTYPE_ADHOC || ntype == NL80211_IFTYPE_STATION))
 		return -EBUSY;
 
 	if (ntype != otype) {
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 9634299..1ff1e9f 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -829,7 +829,7 @@
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
-	enum tx_power_setting type;
+	enum nl80211_tx_power_setting type;
 	int dbm = 0;
 
 	if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
@@ -852,7 +852,7 @@
 			if (data->txpower.value < 0)
 				return -EINVAL;
 			dbm = data->txpower.value;
-			type = TX_POWER_FIXED;
+			type = NL80211_TX_POWER_FIXED;
 			/* TODO: do regulatory check! */
 		} else {
 			/*
@@ -860,10 +860,10 @@
 			 * passed in from userland.
 			 */
 			if (data->txpower.value < 0) {
-				type = TX_POWER_AUTOMATIC;
+				type = NL80211_TX_POWER_AUTOMATIC;
 			} else {
 				dbm = data->txpower.value;
-				type = TX_POWER_LIMITED;
+				type = NL80211_TX_POWER_LIMITED;
 			}
 		}
 	} else {
@@ -872,7 +872,7 @@
 		return 0;
 	}
 
-	return rdev->ops->set_tx_power(wdev->wiphy, type, dbm);
+	return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm));
 }
 EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower);
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index af1c173..037d956 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2481,7 +2481,8 @@
 	int rv;
 
 	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
-			  sizeof(struct linux_xfrm_mib)) < 0)
+			  sizeof(struct linux_xfrm_mib),
+			  __alignof__(struct linux_xfrm_mib)) < 0)
 		return -ENOMEM;
 	rv = xfrm_proc_init(net);
 	if (rv < 0)