Merge master.kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb
* master.kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb: (33 commits)
V4L/DVB (3965): Fix CONFIG_VIDEO_VIVI=y build bug
V4L/DVB (3964): Bt8xx/bttv-cards.c: fix off-by-one errors
V4L/DVB (3914): Vivi build fix
V4L/DVB (3912): Sparc32 vivi fix
V4L/DVB (3832): Get_dvb_firmware: download nxt2002 firmware from new driver location
V4L/DVB (3829): Fix frequency values in the ranges structures of the LG TDVS H06xF tuners
V4L/DVB (3826): Saa7134: Missing 'break' in Terratec Cinergy 400 TV initialization
V4L/DVB (3825): Remove broken 'fast firmware load' from cx25840.
V4L/DVB (3819): Cxusb-bluebird: bug-fix: power down corrupts frontend
V4L/DVB (3813): Add support for TCL M2523_5N_E tuner.
V4L/DVB (3804): Tweak bandselect setup fox cx24123
V4L/DVB (3803): Various correctness fixes to tuning.
V4L/DVB (3797): Always wait for diseqc queue to become ready before transmitting a diseqc message
V4L/DVB (3796): Add several debug messages to cx24123 code
V4L/DVB (3795): Fix for CX24123 & low symbol rates
V4L/DVB (3792): Kbuild: DVB_BT8XX must select DVB_ZL10353
V4L/DVB (3790): Use after free in drivers/media/video/em28xx/em28xx-video.c
V4L/DVB (3788): Fix compilation with V4L1_COMPAT
V4L/DVB (3782): Removed uneeded stuff from pwc Makefile
V4L/DVB (3775): Add VIVI Kconfig stuff
...
diff --git a/CREDITS b/CREDITS
index 6f50be3..9bf714a 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3241,14 +3241,9 @@
S: Beaverton, Oregon 97005
S: USA
-N: Marcelo W. Tosatti
-E: marcelo.tosatti@cyclades.com
-D: Miscellaneous kernel hacker
+N: Marcelo Tosatti
+E: marcelo@kvack.org
D: v2.4 kernel maintainer
-D: Current pc300/cyclades maintainer
-S: Cyclades Corporation
-S: Av Cristovao Colombo, 462. Floresta.
-S: Porto Alegre
S: Brazil
N: Stefan Traby
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 3c406ac..b369a8c 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -1721,11 +1721,6 @@
These devices support the same API as the generic SCSI
devices.
- 97 block Packet writing for CD/DVD devices
- 0 = /dev/pktcdvd0 First packet-writing module
- 1 = /dev/pktcdvd1 Second packet-writing module
- ...
-
98 char Control and Measurement Device (comedi)
0 = /dev/comedi0 First comedi device
1 = /dev/comedi1 Second comedi device
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 421bcff..43ab119 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -57,6 +57,15 @@
---------------------------
+What: sbp2: module parameter "force_inquiry_hack"
+When: July 2006
+Why: Superseded by parameter "workarounds". Both parameters are meant to be
+ used ad-hoc and for single devices only, i.e. not in modprobe.conf,
+ therefore the impact of this feature replacement should be low.
+Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+---------------------------
+
What: Video4Linux API 1 ioctls and video_decoder.h from Video devices.
When: July 2006
Why: V4L1 AP1 was replaced by V4L2 API. during migration from 2.4 to 2.6
diff --git a/Documentation/firmware_class/README b/Documentation/firmware_class/README
index 43e836c..e9cc8bb 100644
--- a/Documentation/firmware_class/README
+++ b/Documentation/firmware_class/README
@@ -105,20 +105,3 @@
on the setup, so I think that the choice on what firmware to make
persistent should be left to userspace.
- - Why register_firmware()+__init can be useful:
- - For boot devices needing firmware.
- - To make the transition easier:
- The firmware can be declared __init and register_firmware()
- called on module_init. Then the firmware is warranted to be
- there even if "firmware hotplug userspace" is not there yet or
- it doesn't yet provide the needed firmware.
- Once the firmware is widely available in userspace, it can be
- removed from the kernel. Or made optional (CONFIG_.*_FIRMWARE).
-
- In either case, if firmware hotplug support is there, it can move the
- firmware out of kernel memory into the real filesystem for later
- usage.
-
- Note: If persistence is implemented on top of initramfs,
- register_firmware() may not be appropriate.
-
diff --git a/Documentation/firmware_class/firmware_sample_driver.c b/Documentation/firmware_class/firmware_sample_driver.c
index ad3edab..87feccd 100644
--- a/Documentation/firmware_class/firmware_sample_driver.c
+++ b/Documentation/firmware_class/firmware_sample_driver.c
@@ -5,8 +5,6 @@
*
* Sample code on how to use request_firmware() from drivers.
*
- * Note that register_firmware() is currently useless.
- *
*/
#include <linux/module.h>
@@ -17,11 +15,6 @@
#include "linux/firmware.h"
-#define WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE
-#ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE
-char __init inkernel_firmware[] = "let's say that this is firmware\n";
-#endif
-
static struct device ghost_device = {
.bus_id = "ghost0",
};
@@ -104,10 +97,6 @@
static int sample_init(void)
{
-#ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE
- register_firmware("sample_driver_fw", inkernel_firmware,
- sizeof(inkernel_firmware));
-#endif
device_initialize(&ghost_device);
/* since there is no real hardware insertion I just call the
* sample probe functions here */
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 92f0056..c61d8b8 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1031,7 +1031,7 @@
LOCKS VS MEMORY ACCESSES
------------------------
-Consider the following: the system has a pair of spinlocks (N) and (Q), and
+Consider the following: the system has a pair of spinlocks (M) and (Q), and
three CPUs; then should the following sequence of events occur:
CPU 1 CPU 2
@@ -1678,7 +1678,7 @@
smp_wmb();
<A:modify v=2> <C:busy>
<C:queue v=2>
- p = &b; q = p;
+ p = &v; q = p;
<D:request p>
<B:modify p=&v> <D:commit p=&v>
<D:read p>
diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx
new file mode 100644
index 0000000..9c45f3d
--- /dev/null
+++ b/Documentation/spi/pxa2xx
@@ -0,0 +1,234 @@
+PXA2xx SPI on SSP driver HOWTO
+===================================================
+This is a mini howto on the pxa2xx_spi driver. The driver turns a PXA2xx
+synchronous serial port into a SPI master controller
+(see Documentation/spi/spi-summary). The driver has the following features:
+
+- Support for any PXA2xx SSP
+- SSP PIO and SSP DMA data transfers.
+- External and Internal (SSPFRM) chip selects.
+- Per slave device (chip) configuration.
+- Full suspend, freeze, resume support.
+
+The driver is built around a "spi_message" fifo serviced by a workqueue and a
+tasklet. The workqueue, "pump_messages", drives the message fifo and the tasklet
+(pump_transfer) is responsible for queuing SPI transactions and setting up and
+launching the dma/interrupt driven transfers.
+
+Declaring PXA2xx Master Controllers
+-----------------------------------
+Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
+"platform device". The master configuration is passed to the driver via a table
+found in include/asm-arm/arch-pxa/pxa2xx_spi.h:
+
+struct pxa2xx_spi_master {
+ enum pxa_ssp_type ssp_type;
+ u32 clock_enable;
+ u16 num_chipselect;
+ u8 enable_dma;
+};
+
+The "pxa2xx_spi_master.ssp_type" field must have a value between 1 and 3 and
+informs the driver which features a particular SSP supports.
+
+The "pxa2xx_spi_master.clock_enable" field is used to enable/disable the
+corresponding SSP peripheral block in the "Clock Enable Register (CKEN"). See
+the "PXA2xx Developer Manual" section "Clocks and Power Management".
+
+The "pxa2xx_spi_master.num_chipselect" field is used to determine the number of
+slave device (chips) attached to this SPI master.
+
+The "pxa2xx_spi_master.enable_dma" field informs the driver that SSP DMA should
+be used. This caused the driver to acquire two DMA channels: rx_channel and
+tx_channel. The rx_channel has a higher DMA service priority the tx_channel.
+See the "PXA2xx Developer Manual" section "DMA Controller".
+
+NSSP MASTER SAMPLE
+------------------
+Below is a sample configuration using the PXA255 NSSP.
+
+static struct resource pxa_spi_nssp_resources[] = {
+ [0] = {
+ .start = __PREG(SSCR0_P(2)), /* Start address of NSSP */
+ .end = __PREG(SSCR0_P(2)) + 0x2c, /* Range of registers */
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_NSSP, /* NSSP IRQ */
+ .end = IRQ_NSSP,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct pxa2xx_spi_master pxa_nssp_master_info = {
+ .ssp_type = PXA25x_NSSP, /* Type of SSP */
+ .clock_enable = CKEN9_NSSP, /* NSSP Peripheral clock */
+ .num_chipselect = 1, /* Matches the number of chips attached to NSSP */
+ .enable_dma = 1, /* Enables NSSP DMA */
+};
+
+static struct platform_device pxa_spi_nssp = {
+ .name = "pxa2xx-spi", /* MUST BE THIS VALUE, so device match driver */
+ .id = 2, /* Bus number, MUST MATCH SSP number 1..n */
+ .resource = pxa_spi_nssp_resources,
+ .num_resources = ARRAY_SIZE(pxa_spi_nssp_resources),
+ .dev = {
+ .platform_data = &pxa_nssp_master_info, /* Passed to driver */
+ },
+};
+
+static struct platform_device *devices[] __initdata = {
+ &pxa_spi_nssp,
+};
+
+static void __init board_init(void)
+{
+ (void)platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+Declaring Slave Devices
+-----------------------
+Typically each SPI slave (chip) is defined in the arch/.../mach-*/board-*.c
+using the "spi_board_info" structure found in "linux/spi/spi.h". See
+"Documentation/spi/spi_summary" for additional information.
+
+Each slave device attached to the PXA must provide slave specific configuration
+information via the structure "pxa2xx_spi_chip" found in
+"include/asm-arm/arch-pxa/pxa2xx_spi.h". The pxa2xx_spi master controller driver
+will use this configuration whenever the driver communicates with the slave
+device.
+
+struct pxa2xx_spi_chip {
+ u8 tx_threshold;
+ u8 rx_threshold;
+ u8 dma_burst_size;
+ u32 timeout_microsecs;
+ u8 enable_loopback;
+ void (*cs_control)(u32 command);
+};
+
+The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are
+used to configure the SSP hardware fifo. These fields are critical to the
+performance of pxa2xx_spi driver and misconfiguration will result in rx
+fifo overruns (especially in PIO mode transfers). Good default values are
+
+ .tx_threshold = 12,
+ .rx_threshold = 4,
+
+The "pxa2xx_spi_chip.dma_burst_size" field is used to configure PXA2xx DMA
+engine and is related the "spi_device.bits_per_word" field. Read and understand
+the PXA2xx "Developer Manual" sections on the DMA controller and SSP Controllers
+to determine the correct value. An SSP configured for byte-wide transfers would
+use a value of 8.
+
+The "pxa2xx_spi_chip.timeout_microsecs" fields is used to efficiently handle
+trailing bytes in the SSP receiver fifo. The correct value for this field is
+dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific
+slave device. Please note the the PXA2xx SSP 1 does not support trailing byte
+timeouts and must busy-wait any trailing bytes.
+
+The "pxa2xx_spi_chip.enable_loopback" field is used to place the SSP porting
+into internal loopback mode. In this mode the SSP controller internally
+connects the SSPTX pin the the SSPRX pin. This is useful for initial setup
+testing.
+
+The "pxa2xx_spi_chip.cs_control" field is used to point to a board specific
+function for asserting/deasserting a slave device chip select. If the field is
+NULL, the pxa2xx_spi master controller driver assumes that the SSP port is
+configured to use SSPFRM instead.
+
+NSSP SLAVE SAMPLE
+-----------------
+The pxa2xx_spi_chip structure is passed to the pxa2xx_spi driver in the
+"spi_board_info.controller_data" field. Below is a sample configuration using
+the PXA255 NSSP.
+
+/* Chip Select control for the CS8415A SPI slave device */
+static void cs8415a_cs_control(u32 command)
+{
+ if (command & PXA2XX_CS_ASSERT)
+ GPCR(2) = GPIO_bit(2);
+ else
+ GPSR(2) = GPIO_bit(2);
+}
+
+/* Chip Select control for the CS8405A SPI slave device */
+static void cs8405a_cs_control(u32 command)
+{
+ if (command & PXA2XX_CS_ASSERT)
+ GPCR(3) = GPIO_bit(3);
+ else
+ GPSR(3) = GPIO_bit(3);
+}
+
+static struct pxa2xx_spi_chip cs8415a_chip_info = {
+ .tx_threshold = 12, /* SSP hardware FIFO threshold */
+ .rx_threshold = 4, /* SSP hardware FIFO threshold */
+ .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
+ .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */
+ .cs_control = cs8415a_cs_control, /* Use external chip select */
+};
+
+static struct pxa2xx_spi_chip cs8405a_chip_info = {
+ .tx_threshold = 12, /* SSP hardware FIFO threshold */
+ .rx_threshold = 4, /* SSP hardware FIFO threshold */
+ .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
+ .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */
+ .cs_control = cs8405a_cs_control, /* Use external chip select */
+};
+
+static struct spi_board_info streetracer_spi_board_info[] __initdata = {
+ {
+ .modalias = "cs8415a", /* Name of spi_driver for this device */
+ .max_speed_hz = 3686400, /* Run SSP as fast as possible */
+ .bus_num = 2, /* Framework bus number */
+ .chip_select = 0, /* Framework chip select */
+ .platform_data = NULL, /* No spi_driver specific config */
+ .controller_data = &cs8415a_chip_info, /* Master chip config */
+ .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
+ },
+ {
+ .modalias = "cs8405a", /* Name of spi_driver for this device */
+ .max_speed_hz = 3686400, /* Run SSP as fast as possible */
+ .bus_num = 2, /* Framework bus number */
+ .chip_select = 1, /* Framework chip select */
+ .controller_data = &cs8405a_chip_info, /* Master chip config */
+ .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
+ },
+};
+
+static void __init streetracer_init(void)
+{
+ spi_register_board_info(streetracer_spi_board_info,
+ ARRAY_SIZE(streetracer_spi_board_info));
+}
+
+
+DMA and PIO I/O Support
+-----------------------
+The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
+transfers. The driver defaults to PIO mode and DMA transfers must be enabled by
+setting the "enable_dma" flag in the "pxa2xx_spi_master" structure and
+ensuring that the "pxa2xx_spi_chip.dma_burst_size" field is non-zero. The DMA
+mode supports both coherent and stream based DMA mappings.
+
+The following logic is used to determine the type of I/O to be used on
+a per "spi_transfer" basis:
+
+if !enable_dma or dma_burst_size == 0 then
+ always use PIO transfers
+
+if spi_message.is_dma_mapped and rx_dma_buf != 0 and tx_dma_buf != 0 then
+ use coherent DMA mode
+
+if rx_buf and tx_buf are aligned on an 8 byte boundary then
+ use streaming DMA mode
+
+otherwise
+ use PIO transfer
+
+THANKS TO
+---------
+
+David Brownell and others for mentoring the development of this driver.
+
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index a5ffba3..068732d3 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -414,7 +414,33 @@
The driver will initialize the fields of that spi_master, including the
bus number (maybe the same as the platform device ID) and three methods
used to interact with the SPI core and SPI protocol drivers. It will
-also initialize its own internal state.
+also initialize its own internal state. (See below about bus numbering
+and those methods.)
+
+After you initialize the spi_master, then use spi_register_master() to
+publish it to the rest of the system. At that time, device nodes for
+the controller and any predeclared spi devices will be made available,
+and the driver model core will take care of binding them to drivers.
+
+If you need to remove your SPI controller driver, spi_unregister_master()
+will reverse the effect of spi_register_master().
+
+
+BUS NUMBERING
+
+Bus numbering is important, since that's how Linux identifies a given
+SPI bus (shared SCK, MOSI, MISO). Valid bus numbers start at zero. On
+SOC systems, the bus numbers should match the numbers defined by the chip
+manufacturer. For example, hardware controller SPI2 would be bus number 2,
+and spi_board_info for devices connected to it would use that number.
+
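+As a quick illustration of this (the chip name and clock rate below are
+made-up values, not taken from this patch), a board file for devices hanging
+off that controller would simply reuse the same number in its spi_board_info
+entries:
+
+static struct spi_board_info my_board_info[] __initdata = {
+ {
+ .modalias = "my_chip", /* hypothetical protocol driver name */
+ .max_speed_hz = 1000000, /* example rate only */
+ .bus_num = 2, /* matches hardware controller SPI2 */
+ .chip_select = 0,
+ },
+};
+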
+If you don't have such a hardware-assigned bus number, and for some reason
+you can't just assign them, then provide a negative bus number. That will
+then be replaced by a dynamically assigned number. You'd then need to treat
+this as a non-static configuration (see above).
+
+
+SPI MASTER METHODS
master->setup(struct spi_device *spi)
This sets up the device clock rate, SPI mode, and word sizes.
@@ -431,6 +457,9 @@
state it dynamically associates with that device. If you do that,
be sure to provide the cleanup() method to free that state.
+
+SPI MESSAGE QUEUE
+
The bulk of the driver will be managing the I/O queue fed by transfer().
That queue could be purely conceptual. For example, a driver used only
@@ -440,6 +469,9 @@
often DMA (especially if the root filesystem is in SPI flash), and
execution contexts like IRQ handlers, tasklets, or workqueues (such
as keventd). Your driver can be as fancy, or as simple, as you need.
+Such a transfer() method would normally just add the message to a
+queue, and then start some asynchronous transfer engine (unless it's
+already running).
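+
+A bare-bones sketch of such a transfer() method might look like this. The
+"my_controller" structure with its lock, message list, workqueue and
+pump_messages work item is hypothetical driver state, shown only to make the
+queue-and-kick idea concrete; it assumes <linux/spi/spi.h> and the usual
+driver boilerplate:
+
+static int my_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+        struct my_controller *ctlr = spi_master_get_devdata(spi->master);
+        unsigned long flags;
+
+        spin_lock_irqsave(&ctlr->lock, flags);
+        msg->status = -EINPROGRESS;
+        list_add_tail(&msg->queue, &ctlr->queue);
+        /* kick the asynchronous engine; harmless if it is already running */
+        queue_work(ctlr->workqueue, &ctlr->pump_messages);
+        spin_unlock_irqrestore(&ctlr->lock, flags);
+
+        return 0;
+}
+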
THANKS TO
diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt
index c5beb54..21ed511 100644
--- a/Documentation/watchdog/watchdog-api.txt
+++ b/Documentation/watchdog/watchdog-api.txt
@@ -36,6 +36,9 @@
some data to the device. So a very simple watchdog daemon would look
like this:
+#include <stdlib.h>
+#include <fcntl.h>
+
int main(int argc, const char *argv[]) {
int fd=open("/dev/watchdog",O_WRONLY);
if (fd==-1) {
diff --git a/MAINTAINERS b/MAINTAINERS
index 5e33558..bd10b2a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -40,11 +40,20 @@
PLEASE document known bugs. If it doesn't work for everything
or does something very odd once a month document it.
+ PLEASE remember that submissions must be made under the terms
+ of the OSDL certificate of contribution
+ (http://www.osdl.org/newsroom/press_releases/2004/2004_05_24_dco.html)
+ and should include a Signed-off-by: line.
+
6. Make sure you have the right to send any changes you make. If you
do changes at work you may find your employer owns the patch
not you.
-7. Happy hacking.
+7. When sending security related changes or reports to a maintainer
+ please Cc: security@kernel.org, especially if the maintainer
+ does not respond.
+
+8. Happy hacking.
-----------------------------------
@@ -969,7 +978,7 @@
EXT3 FILE SYSTEM
P: Stephen Tweedie, Andrew Morton
M: sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com
-L: ext3-users@redhat.com
+L: ext2-devel@lists.sourceforge.net
S: Maintained
F71805F HARDWARE MONITORING DRIVER
@@ -1530,12 +1539,28 @@
T: git kernel.org:/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
S: Supported
+JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
+P: Stephen Tweedie, Andrew Morton
+M: sct@redhat.com, akpm@osdl.org
+L: ext2-devel@lists.sourceforge.net
+S: Maintained
+
KCONFIG
P: Roman Zippel
M: zippel@linux-m68k.org
L: kbuild-devel@lists.sourceforge.net
S: Maintained
+KDUMP
+P: Vivek Goyal
+M: vgoyal@in.ibm.com
+P: Haren Myneni
+M: hbabu@us.ibm.com
+L: fastboot@lists.osdl.org
+L: linux-kernel@vger.kernel.org
+W: http://lse.sourceforge.net/kdump/
+S: Maintained
+
KERNEL AUTOMOUNTER (AUTOFS)
P: H. Peter Anvin
M: hpa@zytor.com
@@ -1603,6 +1628,11 @@
L: linux-scsi@vger.kernel.org
S: Maintained
+LED SUBSYSTEM
+P: Richard Purdie
+M: rpurdie@rpsys.net
+S: Maintained
+
LEGO USB Tower driver
P: Juergen Stuber
M: starblue@users.sourceforge.net
@@ -1662,7 +1692,7 @@
LINUX FOR POWERPC EMBEDDED PPC8XX
P: Marcelo Tosatti
-M: marcelo.tosatti@cyclades.com
+M: marcelo@kvack.org
W: http://www.penguinppc.org/
L: linuxppc-embedded@ozlabs.org
S: Maintained
@@ -2513,6 +2543,12 @@
L: alsa-devel@alsa-project.org
S: Maintained
+SPI SUBSYSTEM
+P: David Brownell
+M: dbrownell@users.sourceforge.net
+L: spi-devel-general@lists.sourceforge.net
+S: Maintained
+
TPM DEVICE DRIVER
P: Kylene Hall
M: kjhall@us.ibm.com
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 45fdf4a..396efba 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -99,6 +99,8 @@
DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name));
DEFINE(MACHINFO_PHYSIO, offsetof(struct machine_desc, phys_io));
DEFINE(MACHINFO_PGOFFIO, offsetof(struct machine_desc, io_pg_offst));
+ BLANK();
+ DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list));
DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush));
DEFINE(PROCINFO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mmu_flags));
return 0;
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
index 0353276..0a3e9ad 100644
--- a/arch/arm/kernel/dma-isa.c
+++ b/arch/arm/kernel/dma-isa.c
@@ -143,12 +143,23 @@
.residue = isa_get_dma_residue,
};
-static struct resource dma_resources[] = {
- { "dma1", 0x0000, 0x000f },
- { "dma low page", 0x0080, 0x008f },
- { "dma2", 0x00c0, 0x00df },
- { "dma high page", 0x0480, 0x048f }
-};
+static struct resource dma_resources[] = { {
+ .name = "dma1",
+ .start = 0x0000,
+ .end = 0x000f
+}, {
+ .name = "dma low page",
+ .start = 0x0080,
+ .end = 0x008f
+}, {
+ .name = "dma2",
+ .start = 0x00c0,
+ .end = 0x00df
+}, {
+ .name = "dma high page",
+ .start = 0x0480,
+ .end = 0x048f
+} };
void __init isa_init_dma(dma_t *dma)
{
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1a1539e..7df6e1a 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -311,7 +311,7 @@
struct thread_info_list *th = &get_cpu_var(thread_info_list);
if (th->nr < EXTRA_TASK_STRUCT) {
unsigned long *p = (unsigned long *)thread;
- p[0] = th->head;
+ p[0] = (unsigned long)th->head;
th->head = p;
th->nr += 1;
put_cpu_var(thread_info_list);
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 3bdc8c6..16153c8 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -122,7 +122,7 @@
#define reg r5
#define stack r6
-.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr}
+.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, r8, lr}
mov stack, r0
mov instr, r1
mov reg, #9
@@ -145,7 +145,7 @@
adrne r0, .Lcr
blne printk
mov r0, stack
- LOADREGS(fd, sp!, {instr, reg, stack, r7, pc})
+ LOADREGS(fd, sp!, {instr, reg, stack, r7, r8, pc})
.Lfp: .asciz " r%d = %08X%c"
.Lcr: .asciz "\n"
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index ec9a1cd..58eef66 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -189,12 +189,12 @@
moveq pc, lr
@ Division by 0:
- str lr, [sp, #-4]!
+ str lr, [sp, #-8]!
bl __div0
@ as wrong as it could be...
mov yl, #0
mov yh, #0
mov xh, #0
- ldr pc, [sp], #4
+ ldr pc, [sp], #8
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 98356f8..02e188d 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -95,7 +95,10 @@
for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
set_irq_chip(irq, &mainstone_irq_chip);
set_irq_handler(irq, do_level_IRQ);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
+ else
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
set_irq_flags(MAINSTONE_IRQ(8), 0);
set_irq_flags(MAINSTONE_IRQ(12), 0);
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index d4a586e..693fb1e 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -137,8 +137,11 @@
static void __init gic_init_irq(void)
{
#ifdef CONFIG_REALVIEW_MPCORE
+ unsigned int pldctrl;
writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
- writel(0x008003c0, __io_address(REALVIEW_SYS_BASE) + 0xd8);
+ pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + 0xd8);
+ pldctrl |= 0x00800000; /* New irq mode */
+ writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + 0xd8);
writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));
#endif
gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE));
diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S
index 832fb86..73de2ea 100644
--- a/arch/arm/mach-s3c2410/sleep.S
+++ b/arch/arm/mach-s3c2410/sleep.S
@@ -59,8 +59,7 @@
mrc p15, 0, r5, c13, c0, 0 @ PID
mrc p15, 0, r6, c3, c0, 0 @ Domain ID
mrc p15, 0, r7, c2, c0, 0 @ translation table base address
- mrc p15, 0, r8, c2, c0, 0 @ auxiliary control register
- mrc p15, 0, r9, c1, c0, 0 @ control register
+ mrc p15, 0, r8, c1, c0, 0 @ control register
stmia r0, { r4 - r13 }
@@ -165,7 +164,6 @@
mcr p15, 0, r5, c13, c0, 0 @ PID
mcr p15, 0, r6, c3, c0, 0 @ Domain ID
mcr p15, 0, r7, c2, c0, 0 @ translation table base
- mcr p15, 0, r8, c1, c1, 0 @ auxilliary control
#ifdef CONFIG_DEBUG_RESUME
mov r3, #'R'
@@ -173,7 +171,7 @@
#endif
ldr r2, =resume_with_mmu
- mcr p15, 0, r9, c1, c0, 0 @ turn on MMU, etc
+ mcr p15, 0, r8, c1, c0, 0 @ turn on MMU, etc
nop @ second-to-last before mmu
mov pc, r2 @ go back to virtual address
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 25e0ca3e..c1f7180 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -141,7 +141,7 @@
return NULL;
addr = (unsigned long)area->addr;
if (remap_area_pages(addr, pfn, size, flags)) {
- vfree((void *)addr);
+ vunmap((void *)addr);
return NULL;
}
return (void __iomem *) (offset + (char *)addr);
@@ -173,7 +173,7 @@
void __iounmap(void __iomem *addr)
{
- vfree((void *) (PAGE_MASK & (unsigned long) addr));
+ vunmap((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(__iounmap);
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index c6fe99e..8dfa305 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -758,10 +758,10 @@
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
---help---
- Say Y here to experiment with turning CPUs off and on. CPUs
- can be controlled through /sys/devices/system/cpu.
+ Say Y here to experiment with turning CPUs off and on, and to
+ enable suspend on SMP systems. CPUs can be controlled through
+ /sys/devices/system/cpu.
- Say N.
endmenu
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 40e5aba..daee695 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -1066,6 +1066,14 @@
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
},
},
+ {
+ .callback = disable_acpi_pci,
+ .ident = "HP xw9300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"),
+ },
+ },
{}
};
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 013b85d..3d4b2f3 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -1341,6 +1341,14 @@
connect_bsp_APIC();
+ /*
+ * Hack: In case of kdump, after a crash, kernel might be booting
+ * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
+ * might be zero if read from MP tables. Get it from LAPIC.
+ */
+#ifdef CONFIG_CRASH_DUMP
+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+#endif
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
setup_local_APIC();
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index d77e89a..846e163 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1320,6 +1320,8 @@
probe_roms();
for (i = 0; i < e820.nr_map; i++) {
struct resource *res;
+ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+ continue;
res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
switch (e820.map[i].type) {
case E820_RAM: res->name = "System RAM"; break;
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 2d22f57..0e49836 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -130,9 +130,8 @@
print_symbol("%s", addr);
printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;
-
if (printed)
- printk(" ");
+ printk(" ");
else
printk("\n");
@@ -212,7 +211,6 @@
}
stack = esp;
- printk(log_lvl);
for(i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(stack))
break;
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index ae6534a..3df1371 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -651,7 +651,7 @@
* Specifically, in the case of x86, we will always add
* memory to the highmem for now.
*/
-#ifdef CONFIG_HOTPLUG_MEMORY
+#ifdef CONFIG_MEMORY_HOTPLUG
#ifndef CONFIG_NEED_MULTIPLE_NODES
int add_memory(u64 start, u64 size)
{
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index 1a2076c..ec0fd3c 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -332,10 +332,11 @@
{
__u8 cpu_model = boot_cpu_data.x86_model;
- if (cpu_model > 0xd)
+ if (cpu_model == 14)
+ *cpu_type = "i386/core";
+ else if (cpu_model > 0xd)
return 0;
-
- if (cpu_model == 9) {
+ else if (cpu_model == 9) {
*cpu_type = "i386/p6_mobile";
} else if (cpu_model > 5) {
*cpu_type = "i386/piii";
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index f6a8853..9ea3539 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -134,7 +134,7 @@
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_NUMA=y
-CONFIG_NODES_SHIFT=8
+CONFIG_NODES_SHIFT=10
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
@@ -1159,7 +1159,7 @@
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
CONFIG_DEBUG_PREEMPT=y
-CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7956eb9..d58c1c5 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -416,7 +416,7 @@
ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte;
- move_irq(irq);
+ move_native_irq(irq);
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
iosapic_eoi(rte->addr, vec);
}
@@ -458,7 +458,7 @@
{
irq_desc_t *idesc = irq_descp(irq);
- move_irq(irq);
+ move_native_irq(irq);
/*
* Once we have recorded IRQ_PENDING already, we can mask the
* interrupt for real. This prevents IRQ storms from unhandled
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 5ce908e..9c72ea3 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -101,7 +101,6 @@
if (irq < NR_IRQS) {
irq_affinity[irq] = mask;
- set_irq_info(irq, mask);
irq_redir[irq] = (char) (redir & 0xff);
}
}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 078fb55..2d80653 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1636,7 +1636,7 @@
compat, sizeof(compat)-1);
if (len <= 0)
return PLATFORM_GENERIC;
- if (strncmp(compat, RELOC("chrp"), 4))
+ if (strcmp(compat, RELOC("chrp")))
return PLATFORM_GENERIC;
/* Default to pSeries. We need to know if we are running LPAR */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 5eb55ef..5f79f01 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -255,7 +255,7 @@
{
/* Manually leave the kernel version on the panel. */
ppc_md.progress("Linux ppc64\n", 0);
- ppc_md.progress(system_utsname.version, 0);
+ ppc_md.progress(system_utsname.release, 0);
return 0;
}
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index ef5b9c4..4d53b27 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1650,3 +1650,11 @@
llgfr %r4,%r4 # size_t
llgfr %r5,%r5 # unsigned int
jg sys_tee
+
+ .globl compat_sys_vmsplice_wrapper
+compat_sys_vmsplice_wrapper:
+ lgfr %r2,%r2 # int
+ llgtr %r3,%r3 # compat_iovec *
+ llgfr %r4,%r4 # unsigned int
+ llgfr %r5,%r5 # unsigned int
+ jg compat_sys_vmsplice
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index fc2c076..93be1d5 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -317,3 +317,4 @@
SYSCALL(sys_splice,sys_splice,sys_splice_wrapper)
SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper)
SYSCALL(sys_tee,sys_tee,sys_tee_wrapper)
+SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index fea043b..ce19ad4 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -249,18 +249,19 @@
unsigned long flags;
unsigned long seq, next;
__u64 timer, todval;
+ int cpu = smp_processor_id();
if (sysctl_hz_timer != 0)
return;
- cpu_set(smp_processor_id(), nohz_cpu_mask);
+ cpu_set(cpu, nohz_cpu_mask);
/*
* Leave the clock comparator set up for the next timer
* tick if either rcu or a softirq is pending.
*/
- if (rcu_pending(smp_processor_id()) || local_softirq_pending()) {
- cpu_clear(smp_processor_id(), nohz_cpu_mask);
+ if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
+ cpu_clear(cpu, nohz_cpu_mask);
return;
}
@@ -271,7 +272,7 @@
next = next_timer_interrupt();
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
- timer = (__u64)(next - jiffies) + jiffies_64;
+ timer = (__u64) next - (__u64) jiffies + jiffies_64;
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
todval = -1ULL;
/* Be careful about overflows. */
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 1eaa5da..fa1d19c 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -514,13 +514,13 @@
*tos = orig_rip + (*tos - copy_rip);
break;
case 0xff:
- if ((*insn & 0x30) == 0x10) {
+ if ((insn[1] & 0x30) == 0x10) {
/* call absolute, indirect */
/* Fix return addr; rip is correct. */
next_rip = regs->rip;
*tos = orig_rip + (*tos - copy_rip);
- } else if (((*insn & 0x31) == 0x20) || /* jmp near, absolute indirect */
- ((*insn & 0x31) == 0x21)) { /* jmp far, absolute indirect */
+ } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
+ ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
/* rip is correct. */
next_rip = regs->rip;
}
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 44adcc2..1f6ecc6 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -12,9 +12,10 @@
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
if (hwdev && bus + size > *hwdev->dma_mask) {
- printk(KERN_ERR
- "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
- name, (long long)bus, size, (long long)*hwdev->dma_mask);
+ if (*hwdev->dma_mask >= 0xffffffffULL)
+ printk(KERN_ERR
+ "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
+ name, (long long)bus, size, (long long)*hwdev->dma_mask);
return 0;
}
return 1;
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 6b87268..cea335e 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -102,6 +102,8 @@
{
if (regs->eflags & X86_EFLAGS_IF)
local_irq_disable();
+ /* Make sure to not schedule here because we could be running
+ on an exception stack. */
preempt_enable_no_resched();
}
@@ -483,8 +485,6 @@
{
struct task_struct *tsk = current;
- conditional_sti(regs);
-
tsk->thread.error_code = error_code;
tsk->thread.trap_no = trapnr;
@@ -521,6 +521,7 @@
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
== NOTIFY_STOP) \
return; \
+ conditional_sti(regs); \
do_trap(trapnr, signr, str, regs, error_code, NULL); \
}
@@ -535,6 +536,7 @@
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
== NOTIFY_STOP) \
return; \
+ conditional_sti(regs); \
do_trap(trapnr, signr, str, regs, error_code, &info); \
}
@@ -548,7 +550,17 @@
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+
+/* Runs on IST stack */
+asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+{
+ if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+ 12, SIGBUS) == NOTIFY_STOP)
+ return;
+ preempt_conditional_sti(regs);
+ do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+ preempt_conditional_cli(regs);
+}
asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
{
@@ -682,8 +694,9 @@
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
return;
}
+ preempt_conditional_sti(regs);
do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
- return;
+ preempt_conditional_cli(regs);
}
/* Help handler running on IST stack to switch back to user stack
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 15ae9fc..e151353 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -34,7 +34,10 @@
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
static int found_add_area __initdata;
-int hotadd_percent __initdata = 10;
+int hotadd_percent __initdata = 0;
+#ifndef RESERVE_HOTADD
+#define hotadd_percent 0 /* Ignore all settings */
+#endif
static u8 pxm2node[256] = { [0 ... 255] = 0xff };
/* Too small nodes confuse the VM badly. Usually they result
@@ -103,6 +106,7 @@
int i;
printk(KERN_ERR "SRAT: SRAT not used.\n");
acpi_numa = -1;
+ found_add_area = 0;
for (i = 0; i < MAX_LOCAL_APIC; i++)
apicid_to_node[i] = NUMA_NO_NODE;
for (i = 0; i < MAX_NUMNODES; i++)
@@ -154,7 +158,8 @@
int pxm, node;
if (srat_disabled())
return;
- if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat();
+ if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+ bad_srat();
return;
}
if (pa->flags.enabled == 0)
@@ -191,15 +196,17 @@
allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
allowed = (allowed / 100) * hotadd_percent;
if (allocated + mem > allowed) {
+ unsigned long range;
/* Give them at least part of their hotadd memory upto hotadd_percent
It would be better to spread the limit out
over multiple hotplug areas, but that is too complicated
right now */
if (allocated >= allowed)
return 0;
- pages = (allowed - allocated + mem) / sizeof(struct page);
+ range = allowed - allocated;
+ pages = (range / PAGE_SIZE);
mem = pages * sizeof(struct page);
- nd->end = nd->start + pages*PAGE_SIZE;
+ nd->end = nd->start + range;
}
/* Not completely fool proof, but a good sanity check */
addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4723182..0c99ae6 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -86,18 +86,9 @@
static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store);
static void fw_class_dev_release(struct class_device *class_dev);
-int firmware_class_uevent(struct class_device *dev, char **envp,
- int num_envp, char *buffer, int buffer_size);
-static struct class firmware_class = {
- .name = "firmware",
- .uevent = firmware_class_uevent,
- .release = fw_class_dev_release,
-};
-
-int
-firmware_class_uevent(struct class_device *class_dev, char **envp,
- int num_envp, char *buffer, int buffer_size)
+static int firmware_class_uevent(struct class_device *class_dev, char **envp,
+ int num_envp, char *buffer, int buffer_size)
{
struct firmware_priv *fw_priv = class_get_devdata(class_dev);
int i = 0, len = 0;
@@ -116,6 +107,12 @@
return 0;
}
+static struct class firmware_class = {
+ .name = "firmware",
+ .uevent = firmware_class_uevent,
+ .release = fw_class_dev_release,
+};
+
static ssize_t
firmware_loading_show(struct class_device *class_dev, char *buf)
{
@@ -493,25 +490,6 @@
}
}
-/**
- * register_firmware: - provide a firmware image for later usage
- * @name: name of firmware image file
- * @data: buffer pointer for the firmware image
- * @size: size of the data buffer area
- *
- * Make sure that @data will be available by requesting firmware @name.
- *
- * Note: This will not be possible until some kind of persistence
- * is available.
- **/
-void
-register_firmware(const char *name, const u8 *data, size_t size)
-{
- /* This is meaningless without firmware caching, so until we
- * decide if firmware caching is reasonable just leave it as a
- * noop */
-}
-
/* Async support */
struct firmware_work {
struct work_struct work;
@@ -630,4 +608,3 @@
EXPORT_SYMBOL(release_firmware);
EXPORT_SYMBOL(request_firmware);
EXPORT_SYMBOL(request_firmware_nowait);
-EXPORT_SYMBOL(register_firmware);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 4022966..78d928f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -291,7 +291,7 @@
config RIO
tristate "Specialix RIO system support"
- depends on SERIAL_NONSTANDARD && !64BIT
+ depends on SERIAL_NONSTANDARD
help
This is a driver for the Specialix RIO, a smart serial card which
drives an outboard box that can support up to 128 ports. Product
diff --git a/drivers/char/rio/host.h b/drivers/char/rio/host.h
index 3ec73d1..179cdbe 100644
--- a/drivers/char/rio/host.h
+++ b/drivers/char/rio/host.h
@@ -33,12 +33,6 @@
#ifndef __rio_host_h__
#define __rio_host_h__
-#ifdef SCCS_LABELS
-#ifndef lint
-static char *_host_h_sccs_ = "@(#)host.h 1.2";
-#endif
-#endif
-
/*
** the host structure - one per host card in the system.
*/
@@ -77,9 +71,6 @@
#define RC_STARTUP 1
#define RC_RUNNING 2
#define RC_STUFFED 3
-#define RC_SOMETHING 4
-#define RC_SOMETHING_NEW 5
-#define RC_SOMETHING_ELSE 6
#define RC_READY 7
#define RUN_STATE 7
/*
diff --git a/drivers/char/rio/rioboot.c b/drivers/char/rio/rioboot.c
index acda932..290143a 100644
--- a/drivers/char/rio/rioboot.c
+++ b/drivers/char/rio/rioboot.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/serial.h>
+#include <linux/vmalloc.h>
#include <asm/semaphore.h>
#include <linux/generic_serial.h>
#include <linux/errno.h>
diff --git a/drivers/char/rio/rioctrl.c b/drivers/char/rio/rioctrl.c
index d31aba6..75b2557 100644
--- a/drivers/char/rio/rioctrl.c
+++ b/drivers/char/rio/rioctrl.c
@@ -1394,14 +1394,17 @@
return RIO_FAIL;
}
- if (((int) ((char) PortP->InUse) == -1) || !(CmdBlkP = RIOGetCmdBlk())) {
- rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block for command %d on port %d\n", Cmd, PortP->PortNum);
+ if ((PortP->InUse == (typeof(PortP->InUse))-1) ||
+ !(CmdBlkP = RIOGetCmdBlk())) {
+ rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block "
+ "for command %d on port %d\n", Cmd, PortP->PortNum);
return RIO_FAIL;
}
- rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n", CmdBlkP, PortP->InUse);
+ rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n",
+ CmdBlkP, PortP->InUse);
- PktCmdP = (struct PktCmd_M *) &CmdBlkP->Packet.data[0];
+ PktCmdP = (struct PktCmd_M *)&CmdBlkP->Packet.data[0];
CmdBlkP->Packet.src_unit = 0;
if (PortP->SecondBlock)
@@ -1425,38 +1428,46 @@
switch (Cmd) {
case MEMDUMP:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p (addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p "
+ "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
PktCmdP->SubCommand = MEMDUMP;
PktCmdP->SubAddr = SubCmd.Addr;
break;
case FCLOSE:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", CmdBlkP);
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n",
+ CmdBlkP);
break;
case READ_REGISTER:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) "
+ "command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
PktCmdP->SubCommand = READ_REGISTER;
PktCmdP->SubAddr = SubCmd.Addr;
break;
case RESUME:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", CmdBlkP);
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n",
+ CmdBlkP);
break;
case RFLUSH:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", CmdBlkP);
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n",
+ CmdBlkP);
CmdBlkP->PostFuncP = RIORFlushEnable;
break;
case SUSPEND:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", CmdBlkP);
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n",
+ CmdBlkP);
break;
case MGET:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", CmdBlkP);
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n",
+ CmdBlkP);
break;
case MSET:
case MBIC:
case MBIS:
CmdBlkP->Packet.data[4] = (char) PortP->ModemLines;
- rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command blk %p\n", CmdBlkP);
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command "
+ "blk %p\n", CmdBlkP);
break;
case WFLUSH:
@@ -1465,12 +1476,14 @@
** allowed then we should not bother sending any more to the
** RTA.
*/
- if ((int) ((char) PortP->WflushFlag) == (int) -1) {
- rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, WflushFlag about to wrap!");
+ if (PortP->WflushFlag == (typeof(PortP->WflushFlag))-1) {
+ rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, "
+ "WflushFlag about to wrap!");
RIOFreeCmdBlk(CmdBlkP);
return (RIO_FAIL);
} else {
- rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command blk %p\n", CmdBlkP);
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command "
+ "blk %p\n", CmdBlkP);
CmdBlkP->PostFuncP = RIOWFlushMark;
}
break;
diff --git a/drivers/char/rio/rioioctl.h b/drivers/char/rio/rioioctl.h
index 14b83fa..e8af5b3 100644
--- a/drivers/char/rio/rioioctl.h
+++ b/drivers/char/rio/rioioctl.h
@@ -33,10 +33,6 @@
#ifndef __rioioctl_h__
#define __rioioctl_h__
-#ifdef SCCS_LABELS
-static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2";
-#endif
-
/*
** RIO device driver - user ioctls and associated structures.
*/
@@ -44,55 +40,13 @@
struct portStats {
int port;
int gather;
- ulong txchars;
- ulong rxchars;
- ulong opens;
- ulong closes;
- ulong ioctls;
+ unsigned long txchars;
+ unsigned long rxchars;
+ unsigned long opens;
+ unsigned long closes;
+ unsigned long ioctls;
};
-
-#define rIOC ('r'<<8)
-#define TCRIOSTATE (rIOC | 1)
-#define TCRIOXPON (rIOC | 2)
-#define TCRIOXPOFF (rIOC | 3)
-#define TCRIOXPCPS (rIOC | 4)
-#define TCRIOXPRINT (rIOC | 5)
-#define TCRIOIXANYON (rIOC | 6)
-#define TCRIOIXANYOFF (rIOC | 7)
-#define TCRIOIXONON (rIOC | 8)
-#define TCRIOIXONOFF (rIOC | 9)
-#define TCRIOMBIS (rIOC | 10)
-#define TCRIOMBIC (rIOC | 11)
-#define TCRIOTRIAD (rIOC | 12)
-#define TCRIOTSTATE (rIOC | 13)
-
-/*
-** 15.10.1998 ARG - ESIL 0761 part fix
-** Add RIO ioctls for manipulating RTS and CTS flow control, (as LynxOS
-** appears to not support hardware flow control).
-*/
-#define TCRIOCTSFLOWEN (rIOC | 14) /* enable CTS flow control */
-#define TCRIOCTSFLOWDIS (rIOC | 15) /* disable CTS flow control */
-#define TCRIORTSFLOWEN (rIOC | 16) /* enable RTS flow control */
-#define TCRIORTSFLOWDIS (rIOC | 17) /* disable RTS flow control */
-
-/*
-** 09.12.1998 ARG - ESIL 0776 part fix
-** Definition for 'RIOC' also appears in daemon.h, so we'd better do a
-** #ifndef here first.
-** 'RIO_QUICK_CHECK' also #define'd here as this ioctl is now
-** allowed to be used by customers.
-**
-** 05.02.1999 ARG -
-** This is what I've decied to do with ioctls etc., which are intended to be
-** invoked from users applications :
-** Anything that needs to be defined here will be removed from daemon.h, that
-** way it won't end up having to be defined/maintained in two places. The only
-** consequence of this is that this file should now be #include'd by daemon.h
-**
-** 'stats' ioctls now #define'd here as they are to be used by customers.
-*/
#define RIOC ('R'<<8)|('i'<<16)|('o'<<24)
#define RIO_QUICK_CHECK (RIOC | 105)
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 1efde3b..fe00c7d 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -22,7 +22,7 @@
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface"
- depends on TCG_TPM
+ depends on TCG_TPM && PNPACPI
---help---
If you have a TPM security chip that is compliant with the
TCG TIS 1.2 TPM specification say Yes and it will be accessible
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 54a4c80..050ced2 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -140,7 +140,7 @@
extern struct dentry ** tpm_bios_log_setup(char *);
extern void tpm_bios_log_teardown(struct dentry **);
#else
-static inline struct dentry* tpm_bios_log_setup(char *name)
+static inline struct dentry ** tpm_bios_log_setup(char *name)
{
return NULL;
}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index b9cae9a..f621168 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -55,7 +55,7 @@
};
enum tis_defaults {
- TIS_MEM_BASE = 0xFED4000,
+ TIS_MEM_BASE = 0xFED40000,
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
TIS_LONG_TIMEOUT = 2000, /* 2 sec */
diff --git a/drivers/char/watchdog/i8xx_tco.c b/drivers/char/watchdog/i8xx_tco.c
index a13395e..fa2ba9e 100644
--- a/drivers/char/watchdog/i8xx_tco.c
+++ b/drivers/char/watchdog/i8xx_tco.c
@@ -33,11 +33,6 @@
* 82801E (C-ICH) : document number 273599-001, 273645-002,
* 82801EB (ICH5) : document number 252516-001, 252517-003,
* 82801ER (ICH5R) : document number 252516-001, 252517-003,
- * 82801FB (ICH6) : document number 301473-002, 301474-007,
- * 82801FR (ICH6R) : document number 301473-002, 301474-007,
- * 82801FBM (ICH6-M) : document number 301473-002, 301474-007,
- * 82801FW (ICH6W) : document number 301473-001, 301474-007,
- * 82801FRW (ICH6RW) : document number 301473-001, 301474-007
*
* 20000710 Nils Faerber
* Initial Version 0.01
@@ -66,6 +61,10 @@
* 20050807 Wim Van Sebroeck <wim@iguana.be>
* 0.08 Make sure that the watchdog is only "armed" when started.
* (Kernel Bug 4251)
+ * 20060416 Wim Van Sebroeck <wim@iguana.be>
+ * 0.09 Remove support for the ICH6, ICH6R, ICH6-M, ICH6W and ICH6RW and
+ * ICH7 chipsets. (See Kernel Bug 6031 - other code will support these
+ * chipsets)
*/
/*
@@ -90,7 +89,7 @@
#include "i8xx_tco.h"
/* Module and version information */
-#define TCO_VERSION "0.08"
+#define TCO_VERSION "0.09"
#define TCO_MODULE_NAME "i8xx TCO timer"
#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
#define PFX TCO_MODULE_NAME ": "
@@ -391,11 +390,6 @@
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }, /* End of list */
};
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index 9dc5473..1ea04e9 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -423,6 +423,12 @@
if (tmr_atboot && started == 0) {
printk(KERN_INFO PFX "Starting Watchdog Timer\n");
s3c2410wdt_start();
+ } else if (!tmr_atboot) {
+ /* if we're not enabling the watchdog, then ensure it is
+ * disabled if it has been left running from the bootloader
+ * or other source */
+
+ s3c2410wdt_stop();
}
return 0;
diff --git a/drivers/char/watchdog/sc1200wdt.c b/drivers/char/watchdog/sc1200wdt.c
index 515ce75..20b88f9 100644
--- a/drivers/char/watchdog/sc1200wdt.c
+++ b/drivers/char/watchdog/sc1200wdt.c
@@ -377,7 +377,7 @@
{
int ret;
- printk(banner);
+ printk("%s\n", banner);
spin_lock_init(&sc1200wdt_lock);
sema_init(&open_sem, 1);
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 4961f1e..602797a 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -392,6 +392,7 @@
PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 78e30f8..ffca8b6 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -553,6 +553,8 @@
if (irq != NULL)
*irq = pmac_ide[ix].irq;
+
+ hw->dev = &pmac_ide[ix].mdev->ofdev.dev;
}
#define PMAC_IDE_REG(x) ((void __iomem *)(IDE_DATA_REG+(x)))
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 1922287..11f1377 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -553,7 +553,7 @@
* register content.
* To actually enable physical responses is the job of our interrupt
* handler which programs the physical request filter. */
- reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
+ reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);
DBGMSG("physUpperBoundOffset=%08x",
reg_read(ohci, OHCI1394_PhyUpperBound));
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f420660..8a23fb5 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -42,6 +42,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
+#include <linux/stringify.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
@@ -117,7 +118,8 @@
*/
static int max_sectors = SBP2_MAX_SECTORS;
module_param(max_sectors, int, 0444);
-MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)");
+MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
+ __stringify(SBP2_MAX_SECTORS) ")");
/*
* Exclusive login to sbp2 device? In most cases, the sbp2 driver should
@@ -135,18 +137,45 @@
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
/*
- * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
- * if your sbp2 device is not properly handling the SCSI inquiry command.
- * This hack makes the inquiry look more like a typical MS Windows inquiry
- * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
+ * If any of the following workarounds is required for your device to work,
+ * please submit the kernel messages logged by sbp2 to the linux1394-devel
+ * mailing list.
*
- * If force_inquiry_hack=1 is required for your device to work,
- * please submit the logged sbp2_firmware_revision value of this device to
- * the linux1394-devel mailing list.
+ * - 128kB max transfer
+ * Limit transfer size. Necessary for some old bridges.
+ *
+ * - 36 byte inquiry
+ * When scsi_mod probes the device, let the inquiry command look like that
+ * from MS Windows.
+ *
+ * - skip mode page 8
+ * Suppress sending of mode_sense for mode page 8 if the device pretends to
+ * support the SCSI Primary Block commands instead of Reduced Block Commands.
+ *
+ * - fix capacity
+ * Tell sd_mod to correct the last sector number reported by read_capacity.
+ * Avoids access beyond actual disk limits on devices with an off-by-one bug.
+ * Don't use this with devices which don't have this bug.
+ *
+ * - override internal blacklist
+ * Instead of adding to the built-in blacklist, use only the workarounds
+ * specified in the module load parameter.
+ * Useful if a blacklist entry interfered with a non-broken device.
*/
+static int sbp2_default_workarounds;
+module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
+MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
+ ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
+ ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
+ ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
+ ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
+ ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
+ ", or a combination)");
+
+/* legacy parameter */
static int force_inquiry_hack;
module_param(force_inquiry_hack, int, 0644);
-MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
+MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'");
/*
* Export information about protocols/devices supported by this driver.
@@ -266,14 +295,55 @@
};
/*
- * List of device firmwares that require the inquiry hack.
- * Yields a few false positives but did not break other devices so far.
+ * List of devices with known bugs.
+ *
+ * The firmware_revision field, masked with 0xffff00, is the best indicator
+ * for the type of bridge chip of a device. It yields a few false positives,
+ * but so far this has not broken correctly behaving devices.
*/
-static u32 sbp2_broken_inquiry_list[] = {
- 0x00002800, /* Stefan Richter <stefanr@s5r6.in-berlin.de> */
- /* DViCO Momobay CX-1 */
- 0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */
- /* QPS Fire DVDBurner */
+static const struct {
+ u32 firmware_revision;
+ u32 model_id;
+ unsigned workarounds;
+} sbp2_workarounds_table[] = {
+ /* TSB42AA9 */ {
+ .firmware_revision = 0x002800,
+ .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
+ SBP2_WORKAROUND_MODE_SENSE_8,
+ },
+ /* Initio bridges, actually only needed for some older ones */ {
+ .firmware_revision = 0x000200,
+ .workarounds = SBP2_WORKAROUND_INQUIRY_36,
+ },
+ /* Symbios bridge */ {
+ .firmware_revision = 0xa0b800,
+ .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
+ },
+ /*
+ * Note about the following Apple iPod blacklist entries:
+ *
+ * There are iPods (2nd gen, 3rd gen) with model_id==0. Since our
+ * matching logic treats 0 as a wildcard, we cannot match this ID
+ * without rewriting the matching routine. Fortunately these iPods
+ * do not feature the read_capacity bug according to one report.
+ * Read_capacity behaviour as well as model_id could change due to
+ * Apple-supplied firmware updates though.
+ */
+ /* iPod 4th generation */ {
+ .firmware_revision = 0x0a2700,
+ .model_id = 0x000021,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ },
+ /* iPod mini */ {
+ .firmware_revision = 0x0a2700,
+ .model_id = 0x000023,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ },
+ /* iPod Photo */ {
+ .firmware_revision = 0x0a2700,
+ .model_id = 0x00007e,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ }
};
/**************************************
@@ -765,11 +835,16 @@
/* Register the status FIFO address range. We could use the same FIFO
* for targets at different nodes. However we need different FIFOs per
- * target in order to support multi-unit devices. */
+ * target in order to support multi-unit devices.
+	 * The FIFO is located outside the local host controller's physical range
+ * but, if possible, within the posted write area. Status writes will
+ * then be performed as unified transactions. This slightly reduces
+ * bandwidth usage, and some Prolific based devices seem to require it.
+ */
scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
&sbp2_highlevel, ud->ne->host, &sbp2_ops,
sizeof(struct sbp2_status_block), sizeof(quadlet_t),
- ~0ULL, ~0ULL);
+ 0x010000000000ULL, CSR1212_ALL_SPACE_END);
if (!scsi_id->status_fifo_addr) {
SBP2_ERR("failed to allocate status FIFO address range");
goto failed_alloc;
@@ -1450,7 +1525,8 @@
struct csr1212_dentry *dentry;
u64 management_agent_addr;
u32 command_set_spec_id, command_set, unit_characteristics,
- firmware_revision, workarounds;
+ firmware_revision;
+ unsigned workarounds;
int i;
SBP2_DEBUG_ENTER();
@@ -1506,12 +1582,8 @@
case SBP2_FIRMWARE_REVISION_KEY:
/* Firmware revision */
firmware_revision = kv->value.immediate;
- if (force_inquiry_hack)
- SBP2_INFO("sbp2_firmware_revision = %x",
- (unsigned int)firmware_revision);
- else
- SBP2_DEBUG("sbp2_firmware_revision = %x",
- (unsigned int)firmware_revision);
+ SBP2_DEBUG("sbp2_firmware_revision = %x",
+ (unsigned int)firmware_revision);
break;
default:
@@ -1519,41 +1591,44 @@
}
}
- /* This is the start of our broken device checking. We try to hack
- * around oddities and known defects. */
- workarounds = 0x0;
-
- /* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a
- * bridge with 128KB max transfer size limitation. For sanity, we
- * only voice this when the current max_sectors setting
- * exceeds the 128k limit. By default, that is not the case.
- *
- * It would be really nice if we could detect this before the scsi
- * host gets initialized. That way we can down-force the
- * max_sectors to account for it. That is not currently
- * possible. */
- if ((firmware_revision & 0xffff00) ==
- SBP2_128KB_BROKEN_FIRMWARE &&
- (max_sectors * 512) > (128*1024)) {
- SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
- NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
- SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!",
- max_sectors);
- workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
+ workarounds = sbp2_default_workarounds;
+ if (force_inquiry_hack) {
+ SBP2_WARN("force_inquiry_hack is deprecated. "
+ "Use parameter 'workarounds' instead.");
+ workarounds |= SBP2_WORKAROUND_INQUIRY_36;
}
- /* Check for a blacklisted set of devices that require us to force
- * a 36 byte host inquiry. This can be overriden as a module param
- * (to force all hosts). */
- for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) {
- if ((firmware_revision & 0xffff00) ==
- sbp2_broken_inquiry_list[i]) {
- SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
- NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
- workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
- break; /* No need to continue. */
+ if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
+ for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
+ if (sbp2_workarounds_table[i].firmware_revision &&
+ sbp2_workarounds_table[i].firmware_revision !=
+ (firmware_revision & 0xffff00))
+ continue;
+ if (sbp2_workarounds_table[i].model_id &&
+ sbp2_workarounds_table[i].model_id != ud->model_id)
+ continue;
+ workarounds |= sbp2_workarounds_table[i].workarounds;
+ break;
}
- }
+
+ if (workarounds)
+ SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
+ "(firmware_revision 0x%06x, vendor_id 0x%06x,"
+ " model_id 0x%06x)",
+ NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
+ workarounds, firmware_revision,
+ ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
+ ud->model_id);
+
+ /* We would need one SCSI host template for each target to adjust
+ * max_sectors on the fly, therefore warn only. */
+ if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+ (max_sectors * 512) > (128 * 1024))
+ SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
+ "max transfer size. WARNING: Current max_sectors "
+ "setting is larger than 128KB (%d sectors)",
+ NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
+ max_sectors);
/* If this is a logical unit directory entry, process the parent
* to get the values. */
@@ -2447,19 +2522,25 @@
scsi_id->sdev = sdev;
- if (force_inquiry_hack ||
- scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) {
+ if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
- sdev->skip_ms_page_8 = 1;
- }
return 0;
}
static int sbp2scsi_slave_configure(struct scsi_device *sdev)
{
+ struct scsi_id_instance_data *scsi_id =
+ (struct scsi_id_instance_data *)sdev->host->hostdata[0];
+
blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
+
+ if (sdev->type == TYPE_DISK &&
+ scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
+ sdev->skip_ms_page_8 = 1;
+ if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
+ sdev->fix_capacity = 1;
return 0;
}
@@ -2603,7 +2684,9 @@
scsi_driver_template.cmd_per_lun = 1;
}
- /* Set max sectors (module load option). Default is 255 sectors. */
+ if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+ (max_sectors * 512) > (128 * 1024))
+ max_sectors = 128 * 1024 / 512;
scsi_driver_template.max_sectors = max_sectors;
/* Register our high level driver with 1394 stack */
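
A side note on the table-driven matching above: it reduces to masking the device's
firmware_revision with 0xffff00, treating a zero firmware_revision or model_id in an
entry as a wildcard, and OR-ing in the first matching entry's flags unless the user set
SBP2_WORKAROUND_OVERRIDE. A minimal standalone sketch of that logic (flag values and
table rows taken from the hunks above; the lookup function, its arguments and the test
value are illustrative only, not driver code):

    #include <stdio.h>

    #define SBP2_WORKAROUND_128K_MAX_TRANS  0x1
    #define SBP2_WORKAROUND_INQUIRY_36      0x2
    #define SBP2_WORKAROUND_MODE_SENSE_8    0x4
    #define SBP2_WORKAROUND_FIX_CAPACITY    0x8
    #define SBP2_WORKAROUND_OVERRIDE        0x100

    struct workaround_entry {
            unsigned int firmware_revision; /* compared after masking with 0xffff00; 0 = wildcard */
            unsigned int model_id;          /* 0 = wildcard */
            unsigned int workarounds;
    };

    static const struct workaround_entry table[] = {
            { 0x002800, 0,        SBP2_WORKAROUND_INQUIRY_36 | SBP2_WORKAROUND_MODE_SENSE_8 },
            { 0x000200, 0,        SBP2_WORKAROUND_INQUIRY_36 },
            { 0xa0b800, 0,        SBP2_WORKAROUND_128K_MAX_TRANS },
            { 0x0a2700, 0x000021, SBP2_WORKAROUND_FIX_CAPACITY },  /* iPod 4th generation */
    };

    static unsigned int lookup_workarounds(unsigned int defaults,
                                           unsigned int firmware_revision,
                                           unsigned int model_id)
    {
            unsigned int w = defaults;
            size_t i;

            if (w & SBP2_WORKAROUND_OVERRIDE)
                    return w;       /* user asked to ignore the built-in table */

            for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                    if (table[i].firmware_revision &&
                        table[i].firmware_revision != (firmware_revision & 0xffff00))
                            continue;
                    if (table[i].model_id && table[i].model_id != model_id)
                            continue;
                    w |= table[i].workarounds;
                    break;          /* first match wins, as in the driver */
            }
            return w;
    }

    int main(void)
    {
            /* A TSB42AA9-style firmware_revision picks up both inquiry workarounds. */
            printf("0x%x\n", lookup_workarounds(0, 0x002834, 0x1234));
            return 0;
    }
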
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index e2d357a..f4ccc9d 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -227,11 +227,6 @@
#define SBP2_SW_VERSION_ENTRY 0x00010483
/*
- * Other misc defines
- */
-#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
-
-/*
* SCSI specific stuff
*/
@@ -239,6 +234,13 @@
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
#define SBP2_MAX_CMDS 8 /* This should be safe */
+/* Flags for detected oddities and brokenness */
+#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
+#define SBP2_WORKAROUND_INQUIRY_36 0x2
+#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
+#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
+#define SBP2_WORKAROUND_OVERRIDE 0x100
+
/* This is the two dma types we use for cmd_dma below */
enum cmd_dma_types {
CMD_DMA_NONE,
@@ -268,10 +270,6 @@
};
-/* A list of flags for detected oddities and brokeness. */
-#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1
-#define SBP2_BREAKAGE_INQUIRY_HACK 0x2
-
struct sbp2scsi_host_info;
/*
@@ -345,7 +343,7 @@
struct Scsi_Host *scsi_host;
/* Device specific workarounds/brokeness */
- u32 workarounds;
+ unsigned workarounds;
};
/* Sbp2 host data structure (one per IEEE1394 host) */
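
For reference, the __stringify() used in the MODULE_PARM_DESC changes above works by two
levels of macro expansion, so the argument is expanded to its value before being
stringized, and the resulting literals are glued together by normal string
concatenation. A tiny standalone sketch in the same spirit as linux/stringify.h (the
printf harness is only for illustration):

    #include <stdio.h>

    /* Expand first, then stringize. */
    #define __stringify_1(x)        #x
    #define __stringify(x)          __stringify_1(x)

    #define SBP2_MAX_SECTORS                255
    #define SBP2_WORKAROUND_128K_MAX_TRANS  0x1

    int main(void)
    {
            /* Adjacent literals concatenate, so the description is built at compile time. */
            const char *desc  = "Change max sectors per I/O supported (default = "
                                __stringify(SBP2_MAX_SECTORS) ")";
            const char *flags = "128kB max transfer = "
                                __stringify(SBP2_WORKAROUND_128K_MAX_TRANS);

            printf("%s\n%s\n", desc, flags);    /* "... (default = 255)" and "... = 0x1" */
            return 0;
    }
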
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index 36a32c3..efe147d 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -211,8 +211,10 @@
*/
work = kmalloc(sizeof *work, GFP_KERNEL);
- if (!work)
+ if (!work) {
+ mmput(mm);
return;
+ }
INIT_WORK(&work->work, ib_umem_account, work);
work->mm = mm;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 1985b5d..798e13e 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -182,7 +182,7 @@
u8 status;
};
-static int fw_cmd_doorbell = 1;
+static int fw_cmd_doorbell = 0;
module_param(fw_cmd_doorbell, int, 0644);
MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
"(and supported by FW)");
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 19765f6..07c13be 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1727,23 +1727,7 @@
ind = qp->rq.next_ind;
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
- nreq = 0;
-
- doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
- doorbell[1] = cpu_to_be32(qp->qpn << 8);
-
- wmb();
-
- mthca_write64(doorbell,
- dev->kar + MTHCA_RECEIVE_DOORBELL,
- MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-
- qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
- size0 = 0;
- }
-
+ for (nreq = 0; wr; wr = wr->next) {
if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
mthca_err(dev, "RQ %06x full (%u head, %u tail,"
" %d max, %d nreq)\n", qp->qpn,
@@ -1797,6 +1781,23 @@
++ind;
if (unlikely(ind >= qp->rq.max))
ind -= qp->rq.max;
+
+ ++nreq;
+ if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+ nreq = 0;
+
+ doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+ doorbell[1] = cpu_to_be32(qp->qpn << 8);
+
+ wmb();
+
+ mthca_write64(doorbell,
+ dev->kar + MTHCA_RECEIVE_DOORBELL,
+ MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+ qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+ size0 = 0;
+ }
}
out:
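
The reordering in the receive-posting loop above boils down to: write the WQE first,
then bump the per-doorbell counter, and ring the doorbell only once a full batch has
been posted. A standalone sketch of that counting pattern (the batch size constant and
ring_doorbell() are stand-ins; the real code also tracks size0 and the ring head):

    #include <stdio.h>

    #define MAX_WQES_PER_DB 256     /* stands in for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

    static void ring_doorbell(int first_index, int count)
    {
            printf("doorbell: %d WQEs starting at index %d\n", count, first_index);
    }

    int main(void)
    {
            int nreq = 0, first = 0;
            int i;

            for (i = 0; i < 1000; i++) {            /* pretend to post 1000 receives */
                    /* ... WQE i would be written to the queue here ... */

                    ++nreq;                         /* count it only after it is posted */
                    if (nreq == MAX_WQES_PER_DB) {  /* ring once per completed batch */
                            ring_doorbell(first, nreq);
                            first = i + 1;
                            nreq = 0;
                    }
            }
            if (nreq)                               /* leftovers get a final doorbell */
                    ring_doorbell(first, nreq);
            return 0;
    }
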
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index c32ce43..9cbdffa 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -340,7 +340,10 @@
/* XXX should send SRP_I_LOGOUT request */
init_completion(&target->done);
- ib_send_cm_dreq(target->cm_id, NULL, 0);
+ if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+ printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
+ return;
+ }
wait_for_completion(&target->done);
}
@@ -351,7 +354,6 @@
spin_lock_irq(target->scsi_host->host_lock);
if (target->state != SRP_TARGET_DEAD) {
spin_unlock_irq(target->scsi_host->host_lock);
- scsi_host_put(target->scsi_host);
return;
}
target->state = SRP_TARGET_REMOVED;
@@ -365,8 +367,6 @@
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
scsi_host_put(target->scsi_host);
- /* And another put to really free the target port... */
- scsi_host_put(target->scsi_host);
}
static int srp_connect_target(struct srp_target_port *target)
@@ -1241,7 +1241,7 @@
list_for_each_entry_safe(req, tmp, &target->req_queue, list)
if (req->scmnd->device == scmnd->device) {
req->scmnd->result = DID_RESET << 16;
- scmnd->scsi_done(scmnd);
+ req->scmnd->scsi_done(req->scmnd);
srp_remove_req(target, req);
}
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 9b493f0..173c899 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1499,7 +1499,6 @@
printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
return major_ret;
}
- capi_major = major_ret;
capi_class = class_create(THIS_MODULE, "capi");
if (IS_ERR(capi_class)) {
unregister_chrdev(capi_major, "capi20");
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index bfb73fd..d86ab68 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -710,8 +710,8 @@
retval = -ENODEV; //FIXME
/* See if the device offered us matches what we can accept */
- if ((le16_to_cpu(udev->descriptor.idVendor != USB_M105_VENDOR_ID)) ||
- (le16_to_cpu(udev->descriptor.idProduct != USB_M105_PRODUCT_ID)))
+ if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) ||
+ (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID))
return -ENODEV;
/* this starts to become ascii art... */
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 3585fb1..2ac9024 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -2880,7 +2880,7 @@
p[0]++;
i = 0;
while (*p[0] && (strchr("0123456789,-*[]?;", *p[0])) &&
- (i < ISDN_LMSNLEN))
+ (i < ISDN_LMSNLEN - 1))
m->lmsn[i++] = *p[0]++;
m->lmsn[i] = '\0';
break;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 3f5b647..6265062 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -4,8 +4,11 @@
config NEW_LEDS
bool "LED Support"
help
- Say Y to enable Linux LED support. This is not related to standard
- keyboard LEDs which are controlled via the input system.
+	  Say Y to enable Linux LED support. This allows control of supported
+	  LEDs from userspace and, optionally, by kernel events (triggers).
+
+ This is not related to standard keyboard LEDs which are controlled
+ via the input system.
config LEDS_CLASS
tristate "LED Class Support"
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index b0b5d05..c75d0ef 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -19,6 +19,7 @@
#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/err.h>
+#include <linux/ctype.h>
#include <linux/leds.h>
#include "leds.h"
@@ -43,9 +44,13 @@
ssize_t ret = -EINVAL;
char *after;
unsigned long state = simple_strtoul(buf, &after, 10);
+ size_t count = after - buf;
- if (after - buf > 0) {
- ret = after - buf;
+ if (*after && isspace(*after))
+ count++;
+
+ if (count == size) {
+ ret = count;
led_set_brightness(led_cdev, state);
}
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index f484b5d6..fbf141e 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/timer.h>
+#include <linux/ctype.h>
#include <linux/leds.h>
#include "leds.h"
@@ -69,11 +70,15 @@
int ret = -EINVAL;
char *after;
unsigned long state = simple_strtoul(buf, &after, 10);
+ size_t count = after - buf;
- if (after - buf > 0) {
+ if (*after && isspace(*after))
+ count++;
+
+ if (count == size) {
timer_data->delay_on = state;
mod_timer(&timer_data->timer, jiffies + 1);
- ret = after - buf;
+ ret = count;
}
return ret;
@@ -97,11 +102,15 @@
int ret = -EINVAL;
char *after;
unsigned long state = simple_strtoul(buf, &after, 10);
+ size_t count = after - buf;
- if (after - buf > 0) {
+ if (*after && isspace(*after))
+ count++;
+
+ if (count == size) {
timer_data->delay_off = state;
mod_timer(&timer_data->timer, jiffies + 1);
- ret = after - buf;
+ ret = count;
}
return ret;
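
Both LED attribute stores above now accept input only when simple_strtoul() consumed the
whole buffer, tolerating a single trailing whitespace character such as the newline that
echo appends. A userspace-flavoured sketch of the same check with strtoul() (the
function name and test harness are made up for illustration):

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Store the value only if 'buf' (of length 'size') is a number optionally
     * followed by a single whitespace character. */
    static int parse_strict_ulong(const char *buf, size_t size, unsigned long *out)
    {
            char *after;
            unsigned long val = strtoul(buf, &after, 10);
            size_t count = after - buf;

            if (*after && isspace((unsigned char)*after))
                    count++;                /* allow the trailing '\n' from echo */

            if (count != size)
                    return -1;              /* trailing garbage such as "10abc" */

            *out = val;
            return 0;
    }

    int main(void)
    {
            unsigned long v;
            const char *good = "128\n";
            const char *bad  = "128abc\n";

            printf("%d\n", parse_strict_ulong(good, strlen(good), &v));    /* 0 */
            printf("%d\n", parse_strict_ulong(bad,  strlen(bad),  &v));    /* -1 */
            return 0;
    }
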
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d7316b8..3ca3cfb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2252,7 +2252,7 @@
} else {
if (cmd_match(page, "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
- else if (cmd_match(page, "repair"))
+ else if (!cmd_match(page, "repair"))
return -EINVAL;
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 914d62b..5dc4bee 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -310,7 +310,7 @@
}
else
data->bytes_xfered =
- (data->blocks * (1 << data->blksz_bits)) -
+ (data->blocks * data->blksz) -
host->pio.len;
}
@@ -575,7 +575,7 @@
au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
{
- int datalen = data->blocks * (1 << data->blksz_bits);
+ int datalen = data->blocks * data->blksz;
if (dma != 0)
host->flags |= HOST_F_DMA;
@@ -596,7 +596,7 @@
if (host->dma.len == 0)
return MMC_ERR_TIMEOUT;
- au_writel((1 << data->blksz_bits) - 1, HOST_BLKSIZE(host));
+ au_writel(data->blksz - 1, HOST_BLKSIZE(host));
if (host->flags & HOST_F_DMA) {
int i;
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 79358e22..a4eb1d0 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -218,8 +218,10 @@
if(!loops)
return 0;
- dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
- loops, where, *pstat, stat_mask);
+	/* The busy-wait is expected for clocks below 8 MHz due to SDHC hardware flaws */
+ if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000))
+ dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
+ loops, where, *pstat, stat_mask);
return loops;
}
@@ -333,6 +335,9 @@
WARN_ON(host->cmd != NULL);
host->cmd = cmd;
+	/* Ensure that the clock is stopped, else command programming and start fail */
+ imxmci_stop_clock(host);
+
if (cmd->flags & MMC_RSP_BUSY)
cmdat |= CMD_DAT_CONT_BUSY;
@@ -553,7 +558,7 @@
int trans_done = 0;
unsigned int stat = *pstat;
- if(host->actual_bus_width == MMC_BUS_WIDTH_4)
+ if(host->actual_bus_width != MMC_BUS_WIDTH_4)
burst_len = 16;
else
burst_len = 64;
@@ -591,8 +596,7 @@
stat = MMC_STATUS;
/* Flush extra bytes from FIFO */
- while(flush_len >= 2){
- flush_len -= 2;
+ while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){
i = MMC_BUFFER_ACCESS;
stat = MMC_STATUS;
stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
@@ -746,10 +750,6 @@
data_dir_mask = STATUS_DATA_TRANS_DONE;
}
- imxmci_busy_wait_for_status(host, &stat,
- data_dir_mask,
- 50, "imxmci_tasklet_fnc data");
-
if(stat & data_dir_mask) {
clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
imxmci_data_done(host, stat);
@@ -865,7 +865,11 @@
imxmci_stop_clock(host);
MMC_CLK_RATE = (prescaler<<3) | clk;
- imxmci_start_clock(host);
+ /*
+	 * As I understand it, the clock should not be started here, because doing so
+	 * would start the SDHC sequencer and send the last or a random command to the card
+ */
+ /*imxmci_start_clock(host);*/
dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE);
} else {
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 1ca2c8b..6201f30 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -951,6 +951,7 @@
data.timeout_ns = card->csd.tacc_ns * 10;
data.timeout_clks = card->csd.tacc_clks * 10;
data.blksz_bits = 3;
+ data.blksz = 1 << 3;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 06bd1f4..e39cc05 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -175,6 +175,7 @@
brq.data.timeout_ns = card->csd.tacc_ns * 10;
brq.data.timeout_clks = card->csd.tacc_clks * 10;
brq.data.blksz_bits = md->block_bits;
+ brq.data.blksz = 1 << md->block_bits;
brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
brq.stop.opcode = MMC_STOP_TRANSMISSION;
brq.stop.arg = 0;
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index f97b472..b49368f 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -119,7 +119,7 @@
nob = 0xffff;
writel(nob, host->base + MMC_NOB);
- writel(1 << data->blksz_bits, host->base + MMC_BLKLEN);
+ writel(data->blksz, host->base + MMC_BLKLEN);
clks = (unsigned long long)data->timeout_ns * CLOCKRATE;
do_div(clks, 1000000000UL);
@@ -283,7 +283,7 @@
* data blocks as being in error.
*/
if (data->error == MMC_ERR_NONE)
- data->bytes_xfered = data->blocks << data->blksz_bits;
+ data->bytes_xfered = data->blocks * data->blksz;
else
data->bytes_xfered = 0;
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 39b3d97..8167332 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -662,14 +662,14 @@
unsigned long dmaflags;
DBGF("blksz %04x blks %04x flags %08x\n",
- 1 << data->blksz_bits, data->blocks, data->flags);
+ data->blksz, data->blocks, data->flags);
DBGF("tsac %d ms nsac %d clk\n",
data->timeout_ns / 1000000, data->timeout_clks);
/*
* Calculate size.
*/
- host->size = data->blocks << data->blksz_bits;
+ host->size = data->blocks * data->blksz;
/*
* Check timeout values for overflow.
@@ -696,12 +696,12 @@
* Two bytes are needed for each data line.
*/
if (host->bus_width == MMC_BUS_WIDTH_1) {
- blksize = (1 << data->blksz_bits) + 2;
+ blksize = data->blksz + 2;
wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
} else if (host->bus_width == MMC_BUS_WIDTH_4) {
- blksize = (1 << data->blksz_bits) + 2 * 4;
+ blksize = data->blksz + 2 * 4;
wbsd_write_index(host, WBSD_IDX_PBSMSB,
((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 3d30668..d8233e0 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -650,9 +650,11 @@
/* Hardware bug work-around, the chip is unable to do PCI DMA
to/from anything above 1GB :-( */
- if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+ if (dma_mapping_error(mapping) ||
+ mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
/* Sigh... */
- pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+ if (!dma_mapping_error(mapping))
+ pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
if (skb == NULL)
@@ -660,8 +662,10 @@
mapping = pci_map_single(bp->pdev, skb->data,
RX_PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
- if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
- pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(mapping) ||
+ mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+ if (!dma_mapping_error(mapping))
+ pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
return -ENOMEM;
}
@@ -967,9 +971,10 @@
}
mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
- if (mapping + len > B44_DMA_MASK) {
+ if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
/* Chip can't handle DMA to/from >1GB, use bounce buffer */
- pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
+ if (!dma_mapping_error(mapping))
+ pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
GFP_ATOMIC|GFP_DMA);
@@ -978,8 +983,9 @@
mapping = pci_map_single(bp->pdev, bounce_skb->data,
len, PCI_DMA_TODEVICE);
- if (mapping + len > B44_DMA_MASK) {
- pci_unmap_single(bp->pdev, mapping,
+ if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
+ if (!dma_mapping_error(mapping))
+ pci_unmap_single(bp->pdev, mapping,
len, PCI_DMA_TODEVICE);
dev_kfree_skb_any(bounce_skb);
goto err_out;
@@ -1203,7 +1209,8 @@
DMA_TABLE_BYTES,
DMA_BIDIRECTIONAL);
- if (rx_ring_dma + size > B44_DMA_MASK) {
+ if (dma_mapping_error(rx_ring_dma) ||
+ rx_ring_dma + size > B44_DMA_MASK) {
kfree(rx_ring);
goto out_err;
}
@@ -1229,7 +1236,8 @@
DMA_TABLE_BYTES,
DMA_TO_DEVICE);
- if (tx_ring_dma + size > B44_DMA_MASK) {
+ if (dma_mapping_error(tx_ring_dma) ||
+ tx_ring_dma + size > B44_DMA_MASK) {
kfree(tx_ring);
goto out_err;
}
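
The b44 hunks above all apply one pattern: check dma_mapping_error() first, then the
chip's 1 GB DMA reach, and only unmap when the mapping actually succeeded before falling
back to a GFP_DMA bounce buffer. A compile-able sketch of that decision with stand-in
types and an invented error cookie (the real helpers come from the DMA API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;

    #define B44_DMA_MASK    0x3fffffffULL           /* the chip can only reach the low 1 GB */
    #define BAD_DMA_ADDR    ((dma_addr_t)~0ULL)     /* stand-in error cookie */

    static bool dma_mapping_error(dma_addr_t addr)
    {
            return addr == BAD_DMA_ADDR;
    }

    /* True when the buffer may be handed to the chip as mapped; on false the
     * caller unmaps (only if the mapping succeeded) and uses a bounce buffer. */
    static bool mapping_usable(dma_addr_t mapping, size_t len)
    {
            if (dma_mapping_error(mapping))
                    return false;                   /* nothing to unmap in this case */
            if (mapping + len > B44_DMA_MASK)
                    return false;                   /* mapped, but out of the chip's reach */
            return true;
    }

    int main(void)
    {
            printf("%d\n", mapping_usable(0x10000000ULL, 1536));    /* 1: fine */
            printf("%d\n", mapping_usable(0x7fffff00ULL, 1536));    /* 0: above 1 GB */
            printf("%d\n", mapping_usable(BAD_DMA_ADDR, 1536));     /* 0: mapping failed */
            return 0;
    }
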
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 1ddefd2..038447fb 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -53,6 +53,7 @@
#define DRV_VERSION "v1.17b"
#define DRV_RELDATE "2006/03/10"
#include "dl2k.h"
+#include <linux/dma-mapping.h>
static char version[] __devinitdata =
KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index f7235c9..705e122 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2891,78 +2891,6 @@
goto out_drain;
}
- if (np->msi_flags & NV_MSI_X_CAPABLE) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
- np->msi_x_entry[i].entry = i;
- }
- if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
- np->msi_flags |= NV_MSI_X_ENABLED;
- if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
- /* Request irq for rx handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_drain;
- }
- /* Request irq for tx handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_drain;
- }
- /* Request irq for link and timer handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_drain;
- }
-
- /* map interrupts to their respective vector */
- writel(0, base + NvRegMSIXMap0);
- writel(0, base + NvRegMSIXMap1);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
- } else {
- /* Request irq for all interrupts */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_drain;
- }
-
- /* map interrupts to vector 0 */
- writel(0, base + NvRegMSIXMap0);
- writel(0, base + NvRegMSIXMap1);
- }
- }
- }
- if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
- if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
- np->msi_flags |= NV_MSI_ENABLED;
- if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
- pci_disable_msi(np->pci_dev);
- np->msi_flags &= ~NV_MSI_ENABLED;
- goto out_drain;
- }
-
- /* map interrupts to vector 0 */
- writel(0, base + NvRegMSIMap0);
- writel(0, base + NvRegMSIMap1);
- /* enable msi vector 0 */
- writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
- }
- }
- if (ret != 0) {
- if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
- goto out_drain;
- }
-
/* ask for interrupts */
nv_enable_hw_interrupts(dev, np->irqmask);
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index 6f7dce8..b67f586 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -149,6 +149,8 @@
int status;
dev = nds[i];
+ if (dev == NULL)
+ continue;
status = pm3386_is_link_up(i);
if (status && !netif_carrier_ok(dev)) {
@@ -191,6 +193,7 @@
static int __init enp2611_init_module(void)
{
+ int ports;
int i;
if (!machine_is_enp2611())
@@ -199,7 +202,8 @@
caleb_reset();
pm3386_reset();
- for (i = 0; i < 3; i++) {
+ ports = pm3386_port_count();
+ for (i = 0; i < ports; i++) {
nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
if (nds[i] == NULL) {
while (--i >= 0)
@@ -215,9 +219,10 @@
ixp2400_msf_init(&enp2611_msf_parameters);
- if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) {
- for (i = 0; i < 3; i++)
- free_netdev(nds[i]);
+ if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
+ for (i = 0; i < ports; i++)
+ if (nds[i])
+ free_netdev(nds[i]);
return -EINVAL;
}
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c
index 5c7ab75..5224651 100644
--- a/drivers/net/ixp2000/pm3386.c
+++ b/drivers/net/ixp2000/pm3386.c
@@ -86,40 +86,53 @@
pm3386_reg_write(port >> 1, reg, value);
}
+int pm3386_secondary_present(void)
+{
+ return pm3386_reg_read(1, 0) == 0x3386;
+}
void pm3386_reset(void)
{
u8 mac[3][6];
+ int secondary;
+
+ secondary = pm3386_secondary_present();
/* Save programmed MAC addresses. */
pm3386_get_mac(0, mac[0]);
pm3386_get_mac(1, mac[1]);
- pm3386_get_mac(2, mac[2]);
+ if (secondary)
+ pm3386_get_mac(2, mac[2]);
/* Assert analog and digital reset. */
pm3386_reg_write(0, 0x002, 0x0060);
- pm3386_reg_write(1, 0x002, 0x0060);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0060);
mdelay(1);
/* Deassert analog reset. */
pm3386_reg_write(0, 0x002, 0x0062);
- pm3386_reg_write(1, 0x002, 0x0062);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0062);
mdelay(10);
/* Deassert digital reset. */
pm3386_reg_write(0, 0x002, 0x0063);
- pm3386_reg_write(1, 0x002, 0x0063);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0063);
mdelay(10);
/* Restore programmed MAC addresses. */
pm3386_set_mac(0, mac[0]);
pm3386_set_mac(1, mac[1]);
- pm3386_set_mac(2, mac[2]);
+ if (secondary)
+ pm3386_set_mac(2, mac[2]);
/* Disable carrier on all ports. */
pm3386_set_carrier(0, 0);
pm3386_set_carrier(1, 0);
- pm3386_set_carrier(2, 0);
+ if (secondary)
+ pm3386_set_carrier(2, 0);
}
static u16 swaph(u16 x)
@@ -127,6 +140,11 @@
return ((x << 8) | (x >> 8)) & 0xffff;
}
+int pm3386_port_count(void)
+{
+ return 2 + pm3386_secondary_present();
+}
+
void pm3386_init_port(int port)
{
int pm = port >> 1;
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h
index fe92bb0..cc4183d 100644
--- a/drivers/net/ixp2000/pm3386.h
+++ b/drivers/net/ixp2000/pm3386.h
@@ -13,6 +13,7 @@
#define __PM3386_H
void pm3386_reset(void);
+int pm3386_port_count(void);
void pm3386_init_port(int port);
void pm3386_get_mac(int port, u8 *mac);
void pm3386_set_mac(int port, u8 *mac);
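
The enp2611/pm3386 changes above size the port array at run time: if the secondary
PM3386 answers with its device ID at register 0 it is fitted and the board has three
ports, otherwise just two. A trivial sketch with a faked register read (reg_read() and
its return values are invented for illustration):

    #include <stdio.h>

    /* Stand-in for pm3386_reg_read(); pretend only the primary chip is fitted. */
    static unsigned int reg_read(int chip, unsigned int reg)
    {
            return (chip == 0 && reg == 0) ? 0x3386 : 0xffff;
    }

    static int secondary_present(void)
    {
            return reg_read(1, 0) == 0x3386;        /* readable device ID => chip fitted */
    }

    static int port_count(void)
    {
            return 2 + secondary_present();         /* the primary always drives two ports */
    }

    int main(void)
    {
            printf("%d ports\n", port_count());     /* 2 on this faked board */
            return 0;
    }
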
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 448a094..2ea66ac 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1691,17 +1691,6 @@
memset(ei_local->mcfilter, 0xFF, 8);
}
- /*
- * DP8390 manuals don't specify any magic sequence for altering
- * the multicast regs on an already running card. To be safe, we
- * ensure multicast mode is off prior to loading up the new hash
- * table. If this proves to be not enough, we can always resort
- * to stopping the NIC, loading the table and then restarting.
- */
-
- if (netif_running(dev))
- outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
-
outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
for(i = 0; i < 8; i++)
{
@@ -1715,6 +1704,8 @@
outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
else
outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
}
/*
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index a70c2b0..5ca5a1b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -78,8 +78,7 @@
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
@@ -402,7 +401,7 @@
int err;
if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
- p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
+ p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE)
return -EINVAL;
skge->rx_ring.count = p->rx_pending;
@@ -2717,8 +2716,7 @@
if (control & BMU_OWN)
break;
- skb = skge_rx_get(skge, e, control, rd->status,
- le16_to_cpu(rd->csum2));
+ skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
if (likely(skb)) {
dev->last_rx = jiffies;
netif_receive_skb(skb);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ffd267f..60779eb 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.3"
+#define DRV_VERSION "1.4"
#define PFX DRV_NAME " "
/*
@@ -105,6 +105,7 @@
static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
@@ -235,6 +236,7 @@
}
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
sky2_pci_write32(hw, PCI_DEV_REG3, 0);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
reg1 &= P_ASPM_CONTROL_MSK;
@@ -306,7 +308,7 @@
u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
if (sky2->autoneg == AUTONEG_ENABLE &&
- (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
+ !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -1020,7 +1022,25 @@
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
u32 ramsize, rxspace, imask;
- int err = -ENOMEM;
+ int cap, err = -ENOMEM;
+ struct net_device *otherdev = hw->dev[sky2->port^1];
+
+ /*
+	 * On dual port PCI-X cards, there is a problem where status
+ * can be received out of order due to split transactions
+ */
+ if (otherdev && netif_running(otherdev) &&
+ (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+ struct sky2_port *osky2 = netdev_priv(otherdev);
+ u16 cmd;
+
+ cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
+ cmd &= ~PCI_X_CMD_MAX_SPLIT;
+ sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+
+ sky2->rx_csum = 0;
+ osky2->rx_csum = 0;
+ }
if (netif_msg_ifup(sky2))
printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
@@ -1899,6 +1919,12 @@
}
}
+/* Is status ring empty or is there more to do? */
+static inline int sky2_more_work(const struct sky2_hw *hw)
+{
+ return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
+}
+
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do)
{
@@ -2171,19 +2197,19 @@
if (status & Y2_IS_CHK_TXA2)
sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
- if (status & Y2_IS_STAT_BMU)
- sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-
work_done = sky2_status_intr(hw, work_limit);
*budget -= work_done;
dev0->quota -= work_done;
- if (work_done >= work_limit)
+ if (status & Y2_IS_STAT_BMU)
+ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
+ if (sky2_more_work(hw))
return 1;
netif_rx_complete(dev0);
- status = sky2_read32(hw, B0_Y2_SP_LISR);
+ sky2_read32(hw, B0_Y2_SP_LISR);
return 0;
}
@@ -3067,12 +3093,7 @@
sky2->duplex = -1;
sky2->speed = -1;
sky2->advertising = sky2_supported_modes(hw);
-
- /* Receive checksum disabled for Yukon XL
- * because of observed problems with incorrect
- * values when multiple packets are received in one interrupt
- */
- sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
+ sky2->rx_csum = 1;
spin_lock_init(&sky2->phy_lock);
sky2->tx_pending = TX_DEF_PENDING;
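
The new sky2_more_work() test above keeps the NAPI poll routine going until the driver's
status-ring index has caught up with the hardware put index, rather than keying off
work_done alone. The comparison itself is a one-liner; a sketch with hypothetical names:

    #include <stdio.h>

    /* Work remains while the software consumer index trails the hardware
     * producer ("put") index; both wrap inside the same ring size. */
    static inline int status_ring_more_work(unsigned int sw_idx, unsigned int hw_put_idx)
    {
            return sw_idx != hw_put_idx;
    }

    int main(void)
    {
            printf("%d\n", status_ring_more_work(10, 10));  /* 0: ring drained, complete poll */
            printf("%d\n", status_ring_more_work(10, 13));  /* 1: keep polling */
            return 0;
    }
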
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 8012994..8a0bc55 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -214,6 +214,8 @@
enum {
Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */
Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */
+ Y2_HW_WOL_ON = 1<<15,/* HW WOL On (Yukon-EC Ultra A1 only) */
+	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */
Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */
Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 2bd9592..e1b33a2 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -7653,21 +7653,23 @@
cmd->supported |= (SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
- if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
+ if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
cmd->supported |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_MII);
- else
+ cmd->port = PORT_TP;
+ } else {
cmd->supported |= SUPPORTED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ }
cmd->advertising = tp->link_config.advertising;
if (netif_running(dev)) {
cmd->speed = tp->link_config.active_speed;
cmd->duplex = tp->link_config.active_duplex;
}
- cmd->port = 0;
cmd->phy_address = PHY_ADDR;
cmd->transceiver = 0;
cmd->autoneg = tp->link_config.autoneg;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index ba05ded..136a70c 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -850,7 +850,7 @@
break;
skb->dev = dev; /* Mark as being used by this device. */
np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
- skb->len,PCI_DMA_FROMDEVICE);
+ np->rx_buf_sz,PCI_DMA_FROMDEVICE);
np->rx_ring[i].buffer1 = np->rx_addr[i];
np->rx_ring[i].status = DescOwn;
@@ -1316,7 +1316,7 @@
skb->dev = dev; /* Mark as being used by this device. */
np->rx_addr[entry] = pci_map_single(np->pci_dev,
skb->data,
- skb->len, PCI_DMA_FROMDEVICE);
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
np->rx_ring[entry].buffer1 = np->rx_addr[entry];
}
wmb();
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index a6dc53b..fdc2103 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -491,8 +491,6 @@
u8 tx_thresh, rx_thresh;
struct mii_if_info mii_if;
- struct work_struct tx_timeout_task;
- struct work_struct check_media_task;
void __iomem *base;
};
@@ -500,8 +498,6 @@
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
-static void rhine_tx_timeout_task(struct net_device *dev);
-static void rhine_check_media_task(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
@@ -856,12 +852,6 @@
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
- INIT_WORK(&rp->tx_timeout_task,
- (void (*)(void *))rhine_tx_timeout_task, dev);
-
- INIT_WORK(&rp->check_media_task,
- (void (*)(void *))rhine_check_media_task, dev);
-
/* dev->name not defined before register_netdev()! */
rc = register_netdev(dev);
if (rc)
@@ -1108,11 +1098,6 @@
netif_carrier_ok(mii->dev));
}
-static void rhine_check_media_task(struct net_device *dev)
-{
- rhine_check_media(dev, 0);
-}
-
static void init_registers(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1166,8 +1151,8 @@
if (quirks & rqRhineI) {
iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
- /* Do not call from ISR! */
- msleep(1);
+ /* Can be called from ISR. Evil. */
+ mdelay(1);
/* 0x80 must be set immediately before turning it off */
iowrite8(0x80, ioaddr + MIICmd);
@@ -1257,16 +1242,6 @@
static void rhine_tx_timeout(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
-
- /*
- * Move bulk of work outside of interrupt context
- */
- schedule_work(&rp->tx_timeout_task);
-}
-
-static void rhine_tx_timeout_task(struct net_device *dev)
-{
- struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
@@ -1677,7 +1652,7 @@
spin_lock(&rp->lock);
if (intr_status & IntrLinkChange)
- schedule_work(&rp->check_media_task);
+ rhine_check_media(dev, 0);
if (intr_status & IntrStatsMax) {
rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
@@ -1927,9 +1902,6 @@
spin_unlock_irq(&rp->lock);
free_irq(rp->pdev->irq, dev);
-
- flush_scheduled_work();
-
free_rbufs(dev);
free_tbufs(dev);
free_ring(dev);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index e2982a8..7ed18ca 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3271,6 +3271,9 @@
bcm43xx_sysfs_register(bcm);
//FIXME: check for bcm43xx_sysfs_register failure. This function is a bit messy regarding unwinding, though...
+ /*FIXME: This should be handled by softmac instead. */
+ schedule_work(&bcm->softmac->associnfo.work);
+
assert(err == 0);
out:
return err;
@@ -3946,9 +3949,6 @@
netif_device_attach(net_dev);
- /*FIXME: This should be handled by softmac instead. */
- schedule_work(&bcm->softmac->associnfo.work);
-
dprintk(KERN_INFO PFX "Device resumed.\n");
return 0;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 6917c6c..c2ecae5 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -33,13 +33,10 @@
acpi_status status;
struct acpi_object_list input;
union acpi_object in_params[4];
- struct acpi_buffer output;
- union acpi_object out_obj;
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *out_obj;
u32 osc_dw0;
- /* Setting up output buffer */
- output.length = sizeof(out_obj) + 3*sizeof(u32);
- output.pointer = &out_obj;
/* Setting up input parameters */
input.count = 4;
@@ -61,12 +58,15 @@
"Evaluate _OSC Set fails. Status = 0x%04x\n", status);
return status;
}
- if (out_obj.type != ACPI_TYPE_BUFFER) {
+ out_obj = output.pointer;
+
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
printk(KERN_DEBUG
"Evaluate _OSC returns wrong type\n");
- return AE_TYPE;
+ status = AE_TYPE;
+ goto query_osc_out;
}
- osc_dw0 = *((u32 *) out_obj.buffer.pointer);
+ osc_dw0 = *((u32 *) out_obj->buffer.pointer);
if (osc_dw0) {
if (osc_dw0 & OSC_REQUEST_ERROR)
printk(KERN_DEBUG "_OSC request fails\n");
@@ -76,15 +76,21 @@
printk(KERN_DEBUG "_OSC invalid revision\n");
if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) {
/* Update Global Control Set */
- global_ctrlsets = *((u32 *)(out_obj.buffer.pointer+8));
- return AE_OK;
+ global_ctrlsets = *((u32 *)(out_obj->buffer.pointer+8));
+ status = AE_OK;
+ goto query_osc_out;
}
- return AE_ERROR;
+ status = AE_ERROR;
+ goto query_osc_out;
}
/* Update Global Control Set */
- global_ctrlsets = *((u32 *)(out_obj.buffer.pointer + 8));
- return AE_OK;
+ global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8));
+ status = AE_OK;
+
+query_osc_out:
+ kfree(output.pointer);
+ return status;
}
@@ -96,14 +102,10 @@
acpi_status status;
struct acpi_object_list input;
union acpi_object in_params[4];
- struct acpi_buffer output;
- union acpi_object out_obj;
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *out_obj;
u32 osc_dw0;
- /* Setting up output buffer */
- output.length = sizeof(out_obj) + 3*sizeof(u32);
- output.pointer = &out_obj;
-
/* Setting up input parameters */
input.count = 4;
input.pointer = in_params;
@@ -124,12 +126,14 @@
"Evaluate _OSC Set fails. Status = 0x%04x\n", status);
return status;
}
- if (out_obj.type != ACPI_TYPE_BUFFER) {
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
printk(KERN_DEBUG
"Evaluate _OSC returns wrong type\n");
- return AE_TYPE;
+ status = AE_TYPE;
+ goto run_osc_out;
}
- osc_dw0 = *((u32 *) out_obj.buffer.pointer);
+ osc_dw0 = *((u32 *) out_obj->buffer.pointer);
if (osc_dw0) {
if (osc_dw0 & OSC_REQUEST_ERROR)
printk(KERN_DEBUG "_OSC request fails\n");
@@ -139,11 +143,17 @@
printk(KERN_DEBUG "_OSC invalid revision\n");
if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) {
printk(KERN_DEBUG "_OSC FW not grant req. control\n");
- return AE_SUPPORT;
+ status = AE_SUPPORT;
+ goto run_osc_out;
}
- return AE_ERROR;
+ status = AE_ERROR;
+ goto run_osc_out;
}
- return AE_OK;
+ status = AE_OK;
+
+run_osc_out:
+ kfree(output.pointer);
+ return status;
}
/**
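
The _OSC rework above switches to ACPI_ALLOCATE_BUFFER, so the interpreter allocates
output.pointer and the caller must free it on every exit path, hence the single goto
label. A standalone sketch of that cleanup shape using malloc/free (struct and function
names are stand-ins, not the ACPI API):

    #include <stdlib.h>
    #include <string.h>

    struct out_buffer {
            size_t length;
            void *pointer;          /* callee-allocated, caller-freed */
    };

    /* Stand-in for a call that allocates its own output buffer. */
    static int run_method(struct out_buffer *out)
    {
            out->length = 16;
            out->pointer = calloc(1, out->length);
            return out->pointer ? 0 : -1;
    }

    static int query_something(unsigned int *result)
    {
            struct out_buffer output = { 0, NULL };
            int status = run_method(&output);

            if (status)
                    return status;          /* nothing was allocated on failure */

            if (output.length < sizeof(unsigned int)) {
                    status = -1;            /* wrong shape, but the buffer is still ours to free */
                    goto out;
            }
            memcpy(result, output.pointer, sizeof(unsigned int));
            status = 0;

    out:
            free(output.pointer);           /* single exit path for the allocated buffer */
            return status;
    }

    int main(void)
    {
            unsigned int v;
            return query_something(&v);
    }
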
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 19e2b17..d378478 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -634,6 +634,9 @@
* non-x86 architectures (yes Via exists on PPC among other places),
* we must mask the PCI_INTERRUPT_LINE value versus 0xf to get
* interrupts delivered properly.
+ *
+ * Some of the on-chip devices are actually '586 devices' so they are
+ * listed here.
*/
static void quirk_via_irq(struct pci_dev *dev)
{
@@ -648,6 +651,10 @@
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
}
}
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_via_irq);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, quirk_via_irq);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, quirk_via_irq);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq);
@@ -895,6 +902,7 @@
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge);
+#ifndef CONFIG_ACPI_SLEEP
/*
* On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
* is not activated. The myth is that Asus said that they do not want the
@@ -906,8 +914,12 @@
* bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
* becomes necessary to do this tweak in two steps -- I've chosen the Host
* bridge as trigger.
+ *
+ * Actually, leaving it unhidden and not redoing the quirk over suspend2ram
+ * will cause thermal management to break down, causing the machine to
+ * overheat.
*/
-static int __initdata asus_hides_smbus = 0;
+static int __initdata asus_hides_smbus;
static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
@@ -1050,6 +1062,8 @@
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6 );
+#endif
+
/*
* SiS 96x south bridge: BIOS typically hides SMBus device...
*/
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index c53db7c..738b1ef 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -426,7 +426,7 @@
if (!warning_printed) {
printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl "
- "usage.\n");
+ "usage from process: %s.\n", current->comm);
printk(KERN_INFO "pcmcia: This interface will soon be removed from "
"the kernel; please expect breakage unless you upgrade "
"to new tools.\n");
@@ -601,8 +601,12 @@
ret = CS_BAD_ARGS;
else {
struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function);
- ret = pccard_get_configuration_info(s, p_dev, &buf->config);
- pcmcia_put_dev(p_dev);
+ if (p_dev == NULL)
+ ret = CS_BAD_ARGS;
+ else {
+ ret = pccard_get_configuration_info(s, p_dev, &buf->config);
+ pcmcia_put_dev(p_dev);
+ }
}
break;
case DS_GET_FIRST_TUPLE:
@@ -632,8 +636,12 @@
ret = CS_BAD_ARGS;
else {
struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function);
- ret = pccard_get_status(s, p_dev, &buf->status);
- pcmcia_put_dev(p_dev);
+ if (p_dev == NULL)
+ ret = CS_BAD_ARGS;
+ else {
+ ret = pccard_get_status(s, p_dev, &buf->status);
+ pcmcia_put_dev(p_dev);
+ }
}
break;
case DS_VALIDATE_CIS:
@@ -665,9 +673,10 @@
if (!(buf->conf_reg.Function &&
(buf->conf_reg.Function >= s->functions))) {
struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function);
- if (p_dev)
+ if (p_dev) {
ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg);
- pcmcia_put_dev(p_dev);
+ pcmcia_put_dev(p_dev);
+ }
}
break;
case DS_GET_FIRST_REGION:
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 16d1ea7..247ab83 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -589,7 +589,7 @@
return 0;
}
-static u_int __init pd6729_isa_scan(void)
+static u_int __devinit pd6729_isa_scan(void)
{
u_int mask0, mask = 0;
int i;
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 6c9ad92..2011567 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -141,13 +141,13 @@
/* try the driver's ioctl interface */
if (ops->ioctl) {
err = ops->ioctl(class_dev->dev, cmd, arg);
- if (err != -EINVAL)
+ if (err != -ENOIOCTLCMD)
return err;
}
/* if the driver does not provide the ioctl interface
* or if that particular ioctl was not implemented
- * (-EINVAL), we will try to emulate here.
+ * (-ENOIOCTLCMD), we will try to emulate here.
*/
switch (cmd) {
@@ -233,7 +233,7 @@
break;
default:
- err = -EINVAL;
+ err = -ENOTTY;
break;
}
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 2bc8aad..a997529 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -247,7 +247,7 @@
rtc_freq = arg;
return 0;
}
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index e1f7e8e..e1fa5fe 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -71,7 +71,7 @@
return 0;
default:
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
}
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 4d49fd5..277596c 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -270,7 +270,7 @@
epoch = arg;
break;
default:
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
return 0;
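
The RTC changes above settle on a convention: a driver's ioctl hook returns -ENOIOCTLCMD
for commands it does not implement, the class core then tries to emulate those, and
anything still unhandled is reported to userspace as -ENOTTY. A sketch of that two-level
dispatch (the command numbers and the ENOIOCTLCMD value are spelled out locally; only
ENOTTY comes from errno.h):

    #include <errno.h>
    #include <stdio.h>

    #define ENOIOCTLCMD 515         /* kernel-internal "not implemented here" code */

    enum { CMD_READ_TIME = 1, CMD_SET_ALARM = 2, CMD_VENDOR_TWEAK = 3 };

    /* The driver handles only what the hardware supports directly. */
    static int driver_ioctl(unsigned int cmd)
    {
            switch (cmd) {
            case CMD_READ_TIME:
                    return 0;
            default:
                    return -ENOIOCTLCMD;    /* let the core try to emulate */
            }
    }

    /* Core-level dispatch: emulate what it can, reject the rest with ENOTTY. */
    static int core_ioctl(unsigned int cmd)
    {
            int err = driver_ioctl(cmd);

            if (err != -ENOIOCTLCMD)
                    return err;             /* the driver handled it (or really failed) */

            switch (cmd) {
            case CMD_SET_ALARM:
                    return 0;               /* emulated in the core */
            default:
                    return -ENOTTY;         /* unknown ioctl */
            }
    }

    int main(void)
    {
            printf("%d %d %d\n", core_ioctl(CMD_READ_TIME),
                   core_ioctl(CMD_SET_ALARM), core_ioctl(CMD_VENDOR_TWEAK));
            return 0;
    }
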
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 5d6b7a5..e65da92 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1348,7 +1348,7 @@
index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa)
- channel->ccws;
if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) ||
- (irb->scsw.cstat | SCHN_STAT_PCI))
+ (irb->scsw.cstat & SCHN_STAT_PCI))
/* Bloody io subsystem tells us lies about cpa... */
index = (index - 1) & (LCS_NUM_BUFFS - 1);
while (channel->io_idx != index) {
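
The lcs.c one-liner above is the classic flag-test typo: with a nonzero constant,
(cstat | SCHN_STAT_PCI) is always nonzero, so the old condition fired regardless of the
status word, while & tests the intended bit. A two-line demonstration (the constant's
value here is arbitrary; only its being nonzero matters):

    #include <stdio.h>

    #define SCHN_STAT_PCI 0x80      /* illustrative value only */

    int main(void)
    {
            unsigned int cstat = 0x00;      /* PCI bit not set in the status word */

            printf("%d\n", (cstat | SCHN_STAT_PCI) != 0);   /* 1: always true */
            printf("%d\n", (cstat & SCHN_STAT_PCI) != 0);   /* 0: actually tests the bit */
            return 0;
    }
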
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index bd14720..823dfa7 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -864,6 +864,9 @@
/**
* ata_port_queue_task - Queue port_task
* @ap: The ata_port to queue port_task for
+ * @fn: workqueue function to be scheduled
+ * @data: data value to pass to workqueue function
+ * @delay: delay time for workqueue function
*
* Schedule @fn(@data) for execution after @delay jiffies using
* port_task. There is one port_task per port and it's the
@@ -2739,6 +2742,8 @@
* ata_dev_init_params - Issue INIT DEV PARAMS command
* @ap: Port associated with device @dev
* @dev: Device to which command will be sent
+ * @heads: Number of heads (taskfile parameter)
+ * @sectors: Number of sectors (taskfile parameter)
*
* LOCKING:
* Kernel thread context (may sleep)
@@ -4302,6 +4307,7 @@
* ata_device_suspend - prepare a device for suspend
* @ap: port the device is connected to
* @dev: the device to suspend
+ * @state: target power management state
*
* Flush the cache on the drive, if appropriate, then issue a
* standbynow command.
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index d5fdcb9..9b8bca1 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
#include <asm/io.h>
#define DRV_NAME "sata_mv"
-#define DRV_VERSION "0.6"
+#define DRV_VERSION "0.7"
enum {
/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -50,6 +50,12 @@
MV_PCI_REG_BASE = 0,
MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
+ MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
+ MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
+ MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
+ MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
+ MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
+
MV_SATAHC0_REG_BASE = 0x20000,
MV_FLASH_CTL = 0x1046c,
MV_GPIO_PORT_CTL = 0x104f0,
@@ -302,9 +308,6 @@
dma_addr_t crpb_dma;
struct mv_sg *sg_tbl;
dma_addr_t sg_tbl_dma;
-
- unsigned req_producer; /* cp of req_in_ptr */
- unsigned rsp_consumer; /* cp of rsp_out_ptr */
u32 pp_flags;
};
@@ -937,8 +940,6 @@
writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
- pp->req_producer = pp->rsp_consumer = 0;
-
/* Don't turn on EDMA here...do it before DMA commands only. Else
* we'll be unable to send non-data, PIO, etc due to restricted access
* to shadow regs.
@@ -1022,16 +1023,16 @@
}
}
-static inline unsigned mv_inc_q_index(unsigned *index)
+static inline unsigned mv_inc_q_index(unsigned index)
{
- *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
- return *index;
+ return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
{
- *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
+ u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
(last ? CRQB_CMD_LAST : 0);
+ *cmdw = cpu_to_le16(tmp);
}
/**
@@ -1053,15 +1054,11 @@
u16 *cw;
struct ata_taskfile *tf;
u16 flags = 0;
+ unsigned in_index;
if (ATA_PROT_DMA != qc->tf.protocol)
return;
- /* the req producer index should be the same as we remember it */
- WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
- EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
- pp->req_producer);
-
/* Fill in command request block
*/
if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@@ -1069,13 +1066,17 @@
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
- pp->crqb[pp->req_producer].sg_addr =
- cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
- pp->crqb[pp->req_producer].sg_addr_hi =
- cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
- pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
+ /* get current queue index from hardware */
+ in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
+ >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
- cw = &pp->crqb[pp->req_producer].ata_cmd[0];
+ pp->crqb[in_index].sg_addr =
+ cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
+ pp->crqb[in_index].sg_addr_hi =
+ cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+ pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
+
+ cw = &pp->crqb[in_index].ata_cmd[0];
tf = &qc->tf;
/* Sadly, the CRQB cannot accomodate all registers--there are
@@ -1144,16 +1145,12 @@
struct mv_port_priv *pp = ap->private_data;
struct mv_crqb_iie *crqb;
struct ata_taskfile *tf;
+ unsigned in_index;
u32 flags = 0;
if (ATA_PROT_DMA != qc->tf.protocol)
return;
- /* the req producer index should be the same as we remember it */
- WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
- EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
- pp->req_producer);
-
/* Fill in Gen IIE command request block
*/
if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@@ -1162,7 +1159,11 @@
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
- crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
+ /* get current queue index from hardware */
+ in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
+ >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+
+ crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
crqb->flags = cpu_to_le32(flags);
@@ -1210,6 +1211,7 @@
{
void __iomem *port_mmio = mv_ap_base(qc->ap);
struct mv_port_priv *pp = qc->ap->private_data;
+ unsigned in_index;
u32 in_ptr;
if (ATA_PROT_DMA != qc->tf.protocol) {
@@ -1221,23 +1223,20 @@
return ata_qc_issue_prot(qc);
}
- in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+ in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+ in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
- /* the req producer index should be the same as we remember it */
- WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
- pp->req_producer);
/* until we do queuing, the queue should be empty at this point */
- WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
- ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
- EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
+ WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
+ >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
- mv_inc_q_index(&pp->req_producer); /* now incr producer index */
+ in_index = mv_inc_q_index(in_index); /* now incr producer index */
mv_start_dma(port_mmio, pp);
/* and write the request in pointer to kick the EDMA to life */
in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
- in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
+ in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
return 0;
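
The hunks above drop the cached pp->req_producer/pp->rsp_consumer fields and instead re-read the queue index from the hardware pointer register each time. A minimal sketch of the resulting read/mask/increment/write-back pattern, using the register offsets and helpers from this driver (the standalone helper name itself is hypothetical):

	/* Illustrative only: the producer-index handling mv_qc_issue() now
	 * performs, pulled out into a hypothetical helper.  Constants and
	 * accessors are the ones used in the hunks above.
	 */
	static unsigned mv_advance_req_in_ptr(void __iomem *port_mmio)
	{
		u32 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
		unsigned in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT)
					& MV_MAX_Q_DEPTH_MASK;

		in_index = mv_inc_q_index(in_index);	/* wrap within queue depth */

		/* keep the base-address bits, replace only the index field */
		in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
		in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
		writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

		return in_index;
	}
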
@@ -1260,28 +1259,26 @@
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
+ unsigned out_index;
u32 out_ptr;
u8 ata_status;
- out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+ out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+ out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
- /* the response consumer index should be the same as we remember it */
- WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
- pp->rsp_consumer);
-
- ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT;
+ ata_status = le16_to_cpu(pp->crpb[out_index].flags)
+ >> CRPB_FLAG_STATUS_SHIFT;
/* increment our consumer index... */
- pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
+ out_index = mv_inc_q_index(out_index);
/* and, until we do NCQ, there should only be 1 CRPB waiting */
- WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
- EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
- pp->rsp_consumer);
+ WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
+ >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
/* write out our inc'd consumer index so EDMA knows we're caught up */
out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
- out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
+ out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
/* Return ATA status register for completed CRPB */
@@ -1291,6 +1288,7 @@
/**
* mv_err_intr - Handle error interrupts on the port
* @ap: ATA channel to manipulate
+ * @reset_allowed: bool: 0 == don't trigger a reset from here
*
* In most cases, just clear the interrupt and move on. However,
* some cases require an eDMA reset, which is done right before
@@ -1301,7 +1299,7 @@
* LOCKING:
* Inherited from caller.
*/
-static void mv_err_intr(struct ata_port *ap)
+static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 edma_err_cause, serr = 0;
@@ -1323,9 +1321,8 @@
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
/* check for fatal here and recover if needed */
- if (EDMA_ERR_FATAL & edma_err_cause) {
+ if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
mv_stop_and_reset(ap);
- }
}
/**
@@ -1374,12 +1371,12 @@
struct ata_port *ap = host_set->ports[port];
struct mv_port_priv *pp = ap->private_data;
- hard_port = port & MV_PORT_MASK; /* range 0-3 */
+ hard_port = mv_hardport_from_port(port); /* range 0..3 */
handled = 0; /* ensure ata_status is set if handled++ */
/* Note that DEV_IRQ might happen spuriously during EDMA,
- * and should be ignored in such cases. We could mask it,
- * but it's pretty rare and may not be worth the overhead.
+ * and should be ignored in such cases.
+ * The cause of this is still under investigation.
*/
if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
/* EDMA: check for response queue interrupt */
@@ -1393,6 +1390,11 @@
ata_status = readb((void __iomem *)
ap->ioaddr.status_addr);
handled = 1;
+ /* ignore spurious intr if drive still BUSY */
+ if (ata_status & ATA_BUSY) {
+ ata_status = 0;
+ handled = 0;
+ }
}
}
@@ -1406,7 +1408,7 @@
shift++; /* skip bit 8 in the HC Main IRQ reg */
}
if ((PORT0_ERR << shift) & relevant) {
- mv_err_intr(ap);
+ mv_err_intr(ap, 1);
err_mask |= AC_ERR_OTHER;
handled = 1;
}
@@ -1448,6 +1450,7 @@
struct ata_host_set *host_set = dev_instance;
unsigned int hc, handled = 0, n_hcs;
void __iomem *mmio = host_set->mmio_base;
+ struct mv_host_priv *hpriv;
u32 irq_stat;
irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
@@ -1469,6 +1472,17 @@
handled++;
}
}
+
+ hpriv = host_set->private_data;
+ if (IS_60XX(hpriv)) {
+ /* deal with the interrupt coalescing bits */
+ if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
+ writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
+ writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
+ writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
+ }
+ }
+
if (PCI_ERR & irq_stat) {
printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
readl(mmio + PCI_IRQ_CAUSE_OFS));
@@ -1867,7 +1881,8 @@
if (IS_60XX(hpriv)) {
u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
- ifctl |= (1 << 12) | (1 << 7);
+ ifctl |= (1 << 7); /* enable gen2i speed */
+ ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
}
@@ -2031,11 +2046,14 @@
ap->host_set->mmio_base, ap, qc, qc->scsicmd,
&qc->scsicmd->cmnd);
- mv_err_intr(ap);
+ mv_err_intr(ap, 0);
mv_stop_and_reset(ap);
- qc->err_mask |= AC_ERR_TIMEOUT;
- ata_eh_qc_complete(qc);
+ WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
+ if (qc->flags & ATA_QCFLAG_ACTIVE) {
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ ata_eh_qc_complete(qc);
+ }
}
/**
@@ -2229,7 +2247,8 @@
void __iomem *port_mmio = mv_port_base(mmio, port);
u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
- ifctl |= (1 << 12);
+ ifctl |= (1 << 7); /* enable gen2i speed */
+ ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
}
@@ -2330,6 +2349,7 @@
if (rc) {
return rc;
}
+ pci_set_master(pdev);
rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index d40e7c8..56cb490 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4054,7 +4054,7 @@
}
sdev_printk(KERN_WARNING, SDp,
- "Attached scsi tape %s", tape_name(tpnt));
+ "Attached scsi tape %s\n", tape_name(tpnt));
printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n",
tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
queue_dma_alignment(SDp->request_queue) + 1);
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index aeb8153..17839e7 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -1907,9 +1907,12 @@
static void uart_change_pm(struct uart_state *state, int pm_state)
{
struct uart_port *port = state->port;
- if (port->ops->pm)
- port->ops->pm(port, pm_state, state->pm_state);
- state->pm_state = pm_state;
+
+ if (state->pm_state != pm_state) {
+ if (port->ops->pm)
+ port->ops->pm(port, pm_state, state->pm_state);
+ state->pm_state = pm_state;
+ }
}
int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 7a75fae..23334c8 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -75,11 +75,45 @@
inexpensive battery powered microcontroller evaluation board.
This same cable can be used to flash new firmware.
+config SPI_MPC83xx
+ tristate "Freescale MPC83xx SPI controller"
+ depends on SPI_MASTER && PPC_83xx && EXPERIMENTAL
+ select SPI_BITBANG
+ help
+ This enables using the Freescale MPC83xx SPI controller in master
+ mode.
+
+ Note that this driver supports only the dedicated SPI controller on the
+ MPC83xx family of PowerPC processors. The MPC83xx uses a simple set of
+ shift registers for data (as opposed to the CPM-based descriptor model).
+
+config SPI_PXA2XX
+ tristate "PXA2xx SSP SPI master"
+ depends on SPI_MASTER && ARCH_PXA && EXPERIMENTAL
+ help
+ This enables using a PXA2xx SSP port as an SPI master controller.
+ The driver can be configured to use any SSP port, and additional
+ documentation can be found at Documentation/spi/pxa2xx.
+
+config SPI_S3C24XX_GPIO
+ tristate "Samsung S3C24XX series SPI by GPIO"
+ depends on SPI_MASTER && ARCH_S3C2410 && SPI_BITBANG && EXPERIMENTAL
+ help
+ SPI driver for Samsung S3C24XX series ARM SoCs that uses
+ GPIO lines to provide the SPI bus. This can be used where
+ the built-in hardware cannot provide the required transfer mode,
+ or where the board uses pins that are not connected to the
+ hardware SPI controller.
#
# Add new SPI master controllers in alphabetical order above this line
#
+config SPI_S3C24XX
+ tristate "Samsung S3C24XX series SPI"
+ depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL
+ help
+ SPI driver for Samsung S3C24XX series ARM SoCs
+
#
# There are lots of SPI device types, with sensors and memory
# being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index c2c87e8..8f4cb67 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -13,6 +13,10 @@
# SPI master controller drivers (bus)
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
+obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
+obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o
+obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
+obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
# ... add above this line ...
# SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
new file mode 100644
index 0000000..29aec77
--- /dev/null
+++ b/drivers/spi/pxa2xx_spi.c
@@ -0,0 +1,1486 @@
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/hardware.h>
+#include <asm/delay.h>
+#include <asm/dma.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/pxa-regs.h>
+#include <asm/arch/pxa2xx_spi.h>
+
+MODULE_AUTHOR("Stephen Street");
+MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
+MODULE_LICENSE("GPL");
+
+#define MAX_BUSES 3
+
+#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
+#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
+#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
+
+#define DEFINE_SSP_REG(reg, off) \
+static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
+static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
+
+DEFINE_SSP_REG(SSCR0, 0x00)
+DEFINE_SSP_REG(SSCR1, 0x04)
+DEFINE_SSP_REG(SSSR, 0x08)
+DEFINE_SSP_REG(SSITR, 0x0c)
+DEFINE_SSP_REG(SSDR, 0x10)
+DEFINE_SSP_REG(SSTO, 0x28)
+DEFINE_SSP_REG(SSPSP, 0x2c)
+
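Each DEFINE_SSP_REG() line above generates a pair of trivial register accessors; for reference, the SSSR invocation expands to roughly the following (purely an illustrative expansion of the macro shown above):

	/* What DEFINE_SSP_REG(SSSR, 0x08) expands to */
	static inline u32 read_SSSR(void *p)          { return __raw_readl(p + (0x08)); }
	static inline void write_SSSR(u32 v, void *p) { __raw_writel(v, p + (0x08)); }
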
+#define START_STATE ((void*)0)
+#define RUNNING_STATE ((void*)1)
+#define DONE_STATE ((void*)2)
+#define ERROR_STATE ((void*)-1)
+
+#define QUEUE_RUNNING 0
+#define QUEUE_STOPPED 1
+
+struct driver_data {
+ /* Driver model hookup */
+ struct platform_device *pdev;
+
+ /* SPI framework hookup */
+ enum pxa_ssp_type ssp_type;
+ struct spi_master *master;
+
+ /* PXA hookup */
+ struct pxa2xx_spi_master *master_info;
+
+ /* DMA setup stuff */
+ int rx_channel;
+ int tx_channel;
+ u32 *null_dma_buf;
+
+ /* SSP register addresses */
+ void *ioaddr;
+ u32 ssdr_physical;
+
+ /* SSP masks */
+ u32 dma_cr1;
+ u32 int_cr1;
+ u32 clear_sr;
+ u32 mask_sr;
+
+ /* Driver message queue */
+ struct workqueue_struct *workqueue;
+ struct work_struct pump_messages;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ int run;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ struct spi_message* cur_msg;
+ struct spi_transfer* cur_transfer;
+ struct chip_data *cur_chip;
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ int dma_mapped;
+ dma_addr_t rx_dma;
+ dma_addr_t tx_dma;
+ size_t rx_map_len;
+ size_t tx_map_len;
+ u8 n_bytes;
+ u32 dma_width;
+ int cs_change;
+ void (*write)(struct driver_data *drv_data);
+ void (*read)(struct driver_data *drv_data);
+ irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
+ void (*cs_control)(u32 command);
+};
+
+struct chip_data {
+ u32 cr0;
+ u32 cr1;
+ u32 to;
+ u32 psp;
+ u32 timeout;
+ u8 n_bytes;
+ u32 dma_width;
+ u32 dma_burst_size;
+ u32 threshold;
+ u32 dma_threshold;
+ u8 enable_dma;
+ u8 bits_per_word;
+ u32 speed_hz;
+ void (*write)(struct driver_data *drv_data);
+ void (*read)(struct driver_data *drv_data);
+ void (*cs_control)(u32 command);
+};
+
+static void pump_messages(void *data);
+
+static int flush(struct driver_data *drv_data)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ void *reg = drv_data->ioaddr;
+
+ do {
+ while (read_SSSR(reg) & SSSR_RNE) {
+ read_SSDR(reg);
+ }
+ } while ((read_SSSR(reg) & SSSR_BSY) && limit--);
+ write_SSSR(SSSR_ROR, reg);
+
+ return limit;
+}
+
+static void restore_state(struct driver_data *drv_data)
+{
+ void *reg = drv_data->ioaddr;
+
+ /* Clear status and disable clock */
+ write_SSSR(drv_data->clear_sr, reg);
+ write_SSCR0(drv_data->cur_chip->cr0 & ~SSCR0_SSE, reg);
+
+ /* Load the registers */
+ write_SSCR1(drv_data->cur_chip->cr1, reg);
+ write_SSCR0(drv_data->cur_chip->cr0, reg);
+ if (drv_data->ssp_type != PXA25x_SSP) {
+ write_SSTO(0, reg);
+ write_SSPSP(drv_data->cur_chip->psp, reg);
+ }
+}
+
+static void null_cs_control(u32 command)
+{
+}
+
+static void null_writer(struct driver_data *drv_data)
+{
+ void *reg = drv_data->ioaddr;
+ u8 n_bytes = drv_data->n_bytes;
+
+ while ((read_SSSR(reg) & SSSR_TNF)
+ && (drv_data->tx < drv_data->tx_end)) {
+ write_SSDR(0, reg);
+ drv_data->tx += n_bytes;
+ }
+}
+
+static void null_reader(struct driver_data *drv_data)
+{
+ void *reg = drv_data->ioaddr;
+ u8 n_bytes = drv_data->n_bytes;
+
+ while ((read_SSSR(reg) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ read_SSDR(reg);
+ drv_data->rx += n_bytes;
+ }
+}
+
+static void u8_writer(struct driver_data *drv_data)
+{
+ void *reg = drv_data->ioaddr;
+
+ while ((read_SSSR(reg) & SSSR_TNF)
+ && (drv_data->tx < drv_data->tx_end)) {
+ write_SSDR(*(u8 *)(drv_data->tx), reg);
+ ++drv_data->tx;
+ }
+}
+
+static void u8_reader(struct driver_data *drv_data)
+{
+ void *reg = drv_data->ioaddr;
+
+ while ((read_SSSR(reg) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ *(u8 *)(drv_data->rx) = read_SSDR(reg);
+ ++drv_data->rx;
+ }
+}
+
+static void u16_writer(struct driver_data *drv_data)
+{
+ void *reg = drv_data->ioaddr;
+
+ while ((read_SSSR(reg) & SSSR_TNF)
+ && (drv_data->tx < drv_data->tx_end)) {
+ write_SSDR(*(u16 *)(drv_data->tx), reg);
+ drv_data->tx += 2;
+ }
+}
+
+static void u16_reader(struct driver_data *drv_data)
+{
+ void *reg = drv_data->ioaddr;
+
+ while ((read_SSSR(reg) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ *(u16 *)(drv_data->rx) = read_SSDR(reg);
+ drv_data->rx += 2;
+ }
+}
+static void u32_writer(struct driver_data *drv_data)
+{
+ void *reg = drv_data->ioaddr;
+
+ while ((read_SSSR(reg) & SSSR_TNF)
+ && (drv_data->tx < drv_data->tx_end)) {
+ write_SSDR(*(u32 *)(drv_data->tx), reg);
+ drv_data->tx += 4;
+ }
+}
+
+static void u32_reader(struct driver_data *drv_data)
+{
+ void *reg = drv_data->ioaddr;
+
+ while ((read_SSSR(reg) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ *(u32 *)(drv_data->rx) = read_SSDR(reg);
+ drv_data->rx += 4;
+ }
+}
+
+static void *next_transfer(struct driver_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct spi_transfer *trans = drv_data->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ drv_data->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer,
+ transfer_list);
+ return RUNNING_STATE;
+ } else
+ return DONE_STATE;
+}
+
+static int map_dma_buffers(struct driver_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct device *dev = &msg->spi->dev;
+
+ if (!drv_data->cur_chip->enable_dma)
+ return 0;
+
+ if (msg->is_dma_mapped)
+ return drv_data->rx_dma && drv_data->tx_dma;
+
+ if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
+ return 0;
+
+ /* Modify setup if rx buffer is null */
+ if (drv_data->rx == NULL) {
+ *drv_data->null_dma_buf = 0;
+ drv_data->rx = drv_data->null_dma_buf;
+ drv_data->rx_map_len = 4;
+ } else
+ drv_data->rx_map_len = drv_data->len;
+
+
+ /* Modify setup if tx buffer is null */
+ if (drv_data->tx == NULL) {
+ *drv_data->null_dma_buf = 0;
+ drv_data->tx = drv_data->null_dma_buf;
+ drv_data->tx_map_len = 4;
+ } else
+ drv_data->tx_map_len = drv_data->len;
+
+ /* Stream map the rx buffer */
+ drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
+ drv_data->rx_map_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(drv_data->rx_dma))
+ return 0;
+
+ /* Stream map the tx buffer */
+ drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
+ drv_data->tx_map_len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(drv_data->tx_dma)) {
+ dma_unmap_single(dev, drv_data->rx_dma,
+ drv_data->rx_map_len, DMA_FROM_DEVICE);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void unmap_dma_buffers(struct driver_data *drv_data)
+{
+ struct device *dev;
+
+ if (!drv_data->dma_mapped)
+ return;
+
+ if (!drv_data->cur_msg->is_dma_mapped) {
+ dev = &drv_data->cur_msg->spi->dev;
+ dma_unmap_single(dev, drv_data->rx_dma,
+ drv_data->rx_map_len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, drv_data->tx_dma,
+ drv_data->tx_map_len, DMA_TO_DEVICE);
+ }
+
+ drv_data->dma_mapped = 0;
+}
+
+/* caller already set message->status; dma and pio irqs are blocked */
+static void giveback(struct driver_data *drv_data)
+{
+ struct spi_transfer* last_transfer;
+ unsigned long flags;
+ struct spi_message *msg;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+ msg = drv_data->cur_msg;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ last_transfer = list_entry(msg->transfers.prev,
+ struct spi_transfer,
+ transfer_list);
+
+ if (!last_transfer->cs_change)
+ drv_data->cs_control(PXA2XX_CS_DEASSERT);
+
+ msg->state = NULL;
+ if (msg->complete)
+ msg->complete(msg->context);
+}
+
+static int wait_ssp_rx_stall(void *ioaddr)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ while ((read_SSSR(ioaddr) & SSSR_BSY) && limit--)
+ cpu_relax();
+
+ return limit;
+}
+
+static int wait_dma_channel_stop(int channel)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ while (!(DCSR(channel) & DCSR_STOPSTATE) && limit--)
+ cpu_relax();
+
+ return limit;
+}
+
+static void dma_handler(int channel, void *data, struct pt_regs *regs)
+{
+ struct driver_data *drv_data = data;
+ struct spi_message *msg = drv_data->cur_msg;
+ void *reg = drv_data->ioaddr;
+ u32 irq_status = DCSR(channel) & DMA_INT_MASK;
+ u32 trailing_sssr = 0;
+
+ if (irq_status & DCSR_BUSERR) {
+
+ /* Disable interrupts, clear status and reset DMA */
+ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ if (drv_data->ssp_type != PXA25x_SSP)
+ write_SSTO(0, reg);
+ write_SSSR(drv_data->clear_sr, reg);
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+
+ if (flush(drv_data) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_handler: flush fail\n");
+
+ unmap_dma_buffers(drv_data);
+
+ if (channel == drv_data->tx_channel)
+ dev_err(&drv_data->pdev->dev,
+ "dma_handler: bad bus address on "
+ "tx channel %d, source %x target = %x\n",
+ channel, DSADR(channel), DTADR(channel));
+ else
+ dev_err(&drv_data->pdev->dev,
+ "dma_handler: bad bus address on "
+ "rx channel %d, source %x target = %x\n",
+ channel, DSADR(channel), DTADR(channel));
+
+ msg->state = ERROR_STATE;
+ tasklet_schedule(&drv_data->pump_transfers);
+ }
+
+ /* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
+ if ((drv_data->ssp_type == PXA25x_SSP)
+ && (channel == drv_data->tx_channel)
+ && (irq_status & DCSR_ENDINTR)) {
+
+ /* Wait for rx to stall */
+ if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_handler: ssp rx stall failed\n");
+
+ /* Clear and disable interrupts on SSP and DMA channels */
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ write_SSSR(drv_data->clear_sr, reg);
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+ if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_handler: dma rx channel stop failed\n");
+
+ unmap_dma_buffers(drv_data);
+
+ /* Calculate the number of trailing bytes and read them */
+ trailing_sssr = read_SSSR(reg);
+ if ((trailing_sssr & 0xf008) != 0xf000) {
+ drv_data->rx = drv_data->rx_end -
+ (((trailing_sssr >> 12) & 0x0f) + 1);
+ drv_data->read(drv_data);
+ }
+ msg->actual_length += drv_data->len;
+
+ /* Release chip select if requested, transfer delays are
+ * handled in pump_transfers */
+ if (drv_data->cs_change)
+ drv_data->cs_control(PXA2XX_CS_DEASSERT);
+
+ /* Move to next transfer */
+ msg->state = next_transfer(drv_data);
+
+ /* Schedule transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+ }
+}
+
+static irqreturn_t dma_transfer(struct driver_data *drv_data)
+{
+ u32 irq_status;
+ u32 trailing_sssr = 0;
+ struct spi_message *msg = drv_data->cur_msg;
+ void *reg = drv_data->ioaddr;
+
+ irq_status = read_SSSR(reg) & drv_data->mask_sr;
+ if (irq_status & SSSR_ROR) {
+ /* Clear and disable interrupts on SSP and DMA channels */
+ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ if (drv_data->ssp_type != PXA25x_SSP)
+ write_SSTO(0, reg);
+ write_SSSR(drv_data->clear_sr, reg);
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+ unmap_dma_buffers(drv_data);
+
+ if (flush(drv_data) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_transfer: flush fail\n");
+
+ dev_warn(&drv_data->pdev->dev, "dma_transfer: fifo overrun\n");
+
+ drv_data->cur_msg->state = ERROR_STATE;
+ tasklet_schedule(&drv_data->pump_transfers);
+
+ return IRQ_HANDLED;
+ }
+
+ /* Check for false positive timeout */
+ if ((irq_status & SSSR_TINT) && DCSR(drv_data->tx_channel) & DCSR_RUN) {
+ write_SSSR(SSSR_TINT, reg);
+ return IRQ_HANDLED;
+ }
+
+ if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
+
+ /* Clear and disable interrupts on SSP and DMA channels */
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ if (drv_data->ssp_type != PXA25x_SSP)
+ write_SSTO(0, reg);
+ write_SSSR(drv_data->clear_sr, reg);
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+
+ if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_transfer: dma rx channel stop failed\n");
+
+ if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_transfer: ssp rx stall failed\n");
+
+ unmap_dma_buffers(drv_data);
+
+ /* Calculate number of trailing bytes, read them */
+ trailing_sssr = read_SSSR(reg);
+ if ((trailing_sssr & 0xf008) != 0xf000) {
+ drv_data->rx = drv_data->rx_end -
+ (((trailing_sssr >> 12) & 0x0f) + 1);
+ drv_data->read(drv_data);
+ }
+ msg->actual_length += drv_data->len;
+
+ /* Release chip select if requested, transfer delays are
+ * handled in pump_transfers */
+ if (drv_data->cs_change)
+ drv_data->cs_control(PXA2XX_CS_DEASSERT);
+
+ /* Move to next transfer */
+ msg->state = next_transfer(drv_data);
+
+ /* Schedule transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+
+ return IRQ_HANDLED;
+ }
+
+ /* Oops, problem detected */
+ return IRQ_NONE;
+}
+
+static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ void *reg = drv_data->ioaddr;
+ unsigned long limit = loops_per_jiffy << 1;
+ u32 irq_status;
+ u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
+ drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
+
+ while ((irq_status = read_SSSR(reg) & irq_mask)) {
+
+ if (irq_status & SSSR_ROR) {
+
+ /* Clear and disable interrupts */
+ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
+ if (drv_data->ssp_type != PXA25x_SSP)
+ write_SSTO(0, reg);
+ write_SSSR(drv_data->clear_sr, reg);
+
+ if (flush(drv_data) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "interrupt_transfer: flush fail\n");
+
+ /* Stop the SSP */
+
+ dev_warn(&drv_data->pdev->dev,
+ "interrupt_transfer: fifo overrun\n");
+
+ msg->state = ERROR_STATE;
+ tasklet_schedule(&drv_data->pump_transfers);
+
+ return IRQ_HANDLED;
+ }
+
+ /* Look for false positive timeout */
+ if ((irq_status & SSSR_TINT)
+ && (drv_data->rx < drv_data->rx_end))
+ write_SSSR(SSSR_TINT, reg);
+
+ /* Pump data */
+ drv_data->read(drv_data);
+ drv_data->write(drv_data);
+
+ if (drv_data->tx == drv_data->tx_end) {
+ /* Disable tx interrupt */
+ write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
+ irq_mask = drv_data->mask_sr & ~SSSR_TFS;
+
+ /* PXA25x_SSP has no timeout, read trailing bytes */
+ if (drv_data->ssp_type == PXA25x_SSP) {
+ while ((read_SSSR(reg) & SSSR_BSY) && limit--)
+ drv_data->read(drv_data);
+
+ if (limit == 0)
+ dev_err(&drv_data->pdev->dev,
+ "interrupt_transfer: "
+ "trailing byte read failed\n");
+ }
+ }
+
+ if ((irq_status & SSSR_TINT)
+ || (drv_data->rx == drv_data->rx_end)) {
+
+ /* Clear timeout */
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
+ if (drv_data->ssp_type != PXA25x_SSP)
+ write_SSTO(0, reg);
+ write_SSSR(drv_data->clear_sr, reg);
+
+ /* Update total bytes transferred */
+ msg->actual_length += drv_data->len;
+
+ /* Release chip select if requested, transfer delays are
+ * handled in pump_transfers */
+ if (drv_data->cs_change)
+ drv_data->cs_control(PXA2XX_CS_DEASSERT);
+
+ /* Move to next transfer */
+ msg->state = next_transfer(drv_data);
+
+ /* Schedule transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+ }
+ }
+
+ /* We did something */
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ssp_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct driver_data *drv_data = (struct driver_data *)dev_id;
+ void *reg = drv_data->ioaddr;
+
+ if (!drv_data->cur_msg) {
+
+ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
+ if (drv_data->ssp_type != PXA25x_SSP)
+ write_SSTO(0, reg);
+ write_SSSR(drv_data->clear_sr, reg);
+
+ dev_err(&drv_data->pdev->dev, "bad message state "
+ "in interrupt handler");
+
+ /* Never fail */
+ return IRQ_HANDLED;
+ }
+
+ return drv_data->transfer_handler(drv_data);
+}
+
+static void pump_transfers(unsigned long data)
+{
+ struct driver_data *drv_data = (struct driver_data *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct chip_data *chip = NULL;
+ void *reg = drv_data->ioaddr;
+ u32 clk_div = 0;
+ u8 bits = 0;
+ u32 speed = 0;
+ u32 cr0;
+
+ /* Get current state information */
+ message = drv_data->cur_msg;
+ transfer = drv_data->cur_transfer;
+ chip = drv_data->cur_chip;
+
+ /* Handle for abort */
+ if (message->state == ERROR_STATE) {
+ message->status = -EIO;
+ giveback(drv_data);
+ return;
+ }
+
+ /* Handle end of message */
+ if (message->state == DONE_STATE) {
+ message->status = 0;
+ giveback(drv_data);
+ return;
+ }
+
+ /* Delay if requested at end of transfer*/
+ if (message->state == RUNNING_STATE) {
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer,
+ transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ }
+
+ /* Setup the transfer state based on the type of transfer */
+ if (flush(drv_data) == 0) {
+ dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
+ message->status = -EIO;
+ giveback(drv_data);
+ return;
+ }
+ drv_data->n_bytes = chip->n_bytes;
+ drv_data->dma_width = chip->dma_width;
+ drv_data->cs_control = chip->cs_control;
+ drv_data->tx = (void *)transfer->tx_buf;
+ drv_data->tx_end = drv_data->tx + transfer->len;
+ drv_data->rx = transfer->rx_buf;
+ drv_data->rx_end = drv_data->rx + transfer->len;
+ drv_data->rx_dma = transfer->rx_dma;
+ drv_data->tx_dma = transfer->tx_dma;
+ drv_data->len = transfer->len;
+ drv_data->write = drv_data->tx ? chip->write : null_writer;
+ drv_data->read = drv_data->rx ? chip->read : null_reader;
+ drv_data->cs_change = transfer->cs_change;
+
+ /* Change speed and bit per word on a per transfer */
+ if (transfer->speed_hz || transfer->bits_per_word) {
+
+ /* Disable clock */
+ write_SSCR0(chip->cr0 & ~SSCR0_SSE, reg);
+ cr0 = chip->cr0;
+ bits = chip->bits_per_word;
+ speed = chip->speed_hz;
+
+ if (transfer->speed_hz)
+ speed = transfer->speed_hz;
+
+ if (transfer->bits_per_word)
+ bits = transfer->bits_per_word;
+
+ if (reg == SSP1_VIRT)
+ clk_div = SSP1_SerClkDiv(speed);
+ else if (reg == SSP2_VIRT)
+ clk_div = SSP2_SerClkDiv(speed);
+ else if (reg == SSP3_VIRT)
+ clk_div = SSP3_SerClkDiv(speed);
+
+ if (bits <= 8) {
+ drv_data->n_bytes = 1;
+ drv_data->dma_width = DCMD_WIDTH1;
+ drv_data->read = drv_data->read != null_reader ?
+ u8_reader : null_reader;
+ drv_data->write = drv_data->write != null_writer ?
+ u8_writer : null_writer;
+ } else if (bits <= 16) {
+ drv_data->n_bytes = 2;
+ drv_data->dma_width = DCMD_WIDTH2;
+ drv_data->read = drv_data->read != null_reader ?
+ u16_reader : null_reader;
+ drv_data->write = drv_data->write != null_writer ?
+ u16_writer : null_writer;
+ } else if (bits <= 32) {
+ drv_data->n_bytes = 4;
+ drv_data->dma_width = DCMD_WIDTH4;
+ drv_data->read = drv_data->read != null_reader ?
+ u32_reader : null_reader;
+ drv_data->write = drv_data->write != null_writer ?
+ u32_writer : null_writer;
+ }
+
+ cr0 = clk_div
+ | SSCR0_Motorola
+ | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
+ | SSCR0_SSE
+ | (bits > 16 ? SSCR0_EDSS : 0);
+
+ /* Start it back up */
+ write_SSCR0(cr0, reg);
+ }
+
+ message->state = RUNNING_STATE;
+
+ /* Try to map dma buffer and do a dma transfer if successful */
+ if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) {
+
+ /* Ensure we have the correct interrupt handler */
+ drv_data->transfer_handler = dma_transfer;
+
+ /* Setup rx DMA Channel */
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+ DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
+ DTADR(drv_data->rx_channel) = drv_data->rx_dma;
+ if (drv_data->rx == drv_data->null_dma_buf)
+ /* No target address increment */
+ DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
+ | drv_data->dma_width
+ | chip->dma_burst_size
+ | drv_data->len;
+ else
+ DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
+ | DCMD_FLOWSRC
+ | drv_data->dma_width
+ | chip->dma_burst_size
+ | drv_data->len;
+
+ /* Setup tx DMA Channel */
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ DSADR(drv_data->tx_channel) = drv_data->tx_dma;
+ DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
+ if (drv_data->tx == drv_data->null_dma_buf)
+ /* No source address increment */
+ DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
+ | drv_data->dma_width
+ | chip->dma_burst_size
+ | drv_data->len;
+ else
+ DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
+ | DCMD_FLOWTRG
+ | drv_data->dma_width
+ | chip->dma_burst_size
+ | drv_data->len;
+
+ /* Enable dma end irqs on SSP to detect end of transfer */
+ if (drv_data->ssp_type == PXA25x_SSP)
+ DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
+
+ /* Fix me, need to handle cs polarity */
+ drv_data->cs_control(PXA2XX_CS_ASSERT);
+
+ /* Go baby, go */
+ write_SSSR(drv_data->clear_sr, reg);
+ DCSR(drv_data->rx_channel) |= DCSR_RUN;
+ DCSR(drv_data->tx_channel) |= DCSR_RUN;
+ if (drv_data->ssp_type != PXA25x_SSP)
+ write_SSTO(chip->timeout, reg);
+ write_SSCR1(chip->cr1
+ | chip->dma_threshold
+ | drv_data->dma_cr1,
+ reg);
+ } else {
+ /* Ensure we have the correct interrupt handler */
+ drv_data->transfer_handler = interrupt_transfer;
+
+ /* Fix me, need to handle cs polarity */
+ drv_data->cs_control(PXA2XX_CS_ASSERT);
+
+ /* Go baby, go */
+ write_SSSR(drv_data->clear_sr, reg);
+ if (drv_data->ssp_type != PXA25x_SSP)
+ write_SSTO(chip->timeout, reg);
+ write_SSCR1(chip->cr1
+ | chip->threshold
+ | drv_data->int_cr1,
+ reg);
+ }
+}
+
+static void pump_messages(void *data)
+{
+ struct driver_data *drv_data = data;
+ unsigned long flags;
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&drv_data->lock, flags);
+ if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
+ drv_data->busy = 0;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Make sure we are not already running a message */
+ if (drv_data->cur_msg) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Extract head of queue */
+ drv_data->cur_msg = list_entry(drv_data->queue.next,
+ struct spi_message, queue);
+ list_del_init(&drv_data->cur_msg->queue);
+
+ /* Initial message state */
+ drv_data->cur_msg->state = START_STATE;
+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ struct spi_transfer,
+ transfer_list);
+
+ /* Setup the SSP using the per chip configuration */
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+ restore_state(drv_data);
+
+ /* Mark as busy and launch transfers */
+ tasklet_schedule(&drv_data->pump_transfers);
+
+ drv_data->busy = 1;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+}
+
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (drv_data->run == QUEUE_STOPPED) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = START_STATE;
+
+ list_add_tail(&msg->queue, &drv_data->queue);
+
+ if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return 0;
+}
+
+static int setup(struct spi_device *spi)
+{
+ struct pxa2xx_spi_chip *chip_info = NULL;
+ struct chip_data *chip;
+ struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+ unsigned int clk_div;
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ if (drv_data->ssp_type != PXA25x_SSP
+ && (spi->bits_per_word < 4 || spi->bits_per_word > 32))
+ return -EINVAL;
+ else if (drv_data->ssp_type == PXA25x_SSP
+ && (spi->bits_per_word < 4 || spi->bits_per_word > 16))
+ return -EINVAL;
+
+ /* Only alloc (or use chip_info) on first setup */
+ chip = spi_get_ctldata(spi);
+ if (chip == NULL) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->cs_control = null_cs_control;
+ chip->enable_dma = 0;
+ chip->timeout = SSP_TIMEOUT(1000);
+ chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1);
+ chip->dma_burst_size = drv_data->master_info->enable_dma ?
+ DCMD_BURST8 : 0;
+
+ chip_info = spi->controller_data;
+ }
+
+ /* chip_info isn't always needed */
+ if (chip_info) {
+ if (chip_info->cs_control)
+ chip->cs_control = chip_info->cs_control;
+
+ chip->timeout = SSP_TIMEOUT(chip_info->timeout_microsecs);
+
+ chip->threshold = SSCR1_RxTresh(chip_info->rx_threshold)
+ | SSCR1_TxTresh(chip_info->tx_threshold);
+
+ chip->enable_dma = chip_info->dma_burst_size != 0
+ && drv_data->master_info->enable_dma;
+ chip->dma_threshold = 0;
+
+ if (chip->enable_dma) {
+ if (chip_info->dma_burst_size <= 8) {
+ chip->dma_threshold = SSCR1_RxTresh(8)
+ | SSCR1_TxTresh(8);
+ chip->dma_burst_size = DCMD_BURST8;
+ } else if (chip_info->dma_burst_size <= 16) {
+ chip->dma_threshold = SSCR1_RxTresh(16)
+ | SSCR1_TxTresh(16);
+ chip->dma_burst_size = DCMD_BURST16;
+ } else {
+ chip->dma_threshold = SSCR1_RxTresh(32)
+ | SSCR1_TxTresh(32);
+ chip->dma_burst_size = DCMD_BURST32;
+ }
+ }
+
+
+ if (chip_info->enable_loopback)
+ chip->cr1 = SSCR1_LBM;
+ }
+
+ if (drv_data->ioaddr == SSP1_VIRT)
+ clk_div = SSP1_SerClkDiv(spi->max_speed_hz);
+ else if (drv_data->ioaddr == SSP2_VIRT)
+ clk_div = SSP2_SerClkDiv(spi->max_speed_hz);
+ else if (drv_data->ioaddr == SSP3_VIRT)
+ clk_div = SSP3_SerClkDiv(spi->max_speed_hz);
+ else
+ return -ENODEV;
+ chip->speed_hz = spi->max_speed_hz;
+
+ chip->cr0 = clk_div
+ | SSCR0_Motorola
+ | SSCR0_DataSize(spi->bits_per_word > 16 ?
+ spi->bits_per_word - 16 : spi->bits_per_word)
+ | SSCR0_SSE
+ | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
+ chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) << 4)
+ | (((spi->mode & SPI_CPOL) != 0) << 3);
+
+ /* NOTE: PXA25x_SSP _could_ use external clocking ... */
+ if (drv_data->ssp_type != PXA25x_SSP)
+ dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
+ spi->bits_per_word,
+ (CLOCK_SPEED_HZ)
+ / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
+ spi->mode & 0x3);
+ else
+ dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
+ spi->bits_per_word,
+ (CLOCK_SPEED_HZ/2)
+ / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
+ spi->mode & 0x3);
+
+ if (spi->bits_per_word <= 8) {
+ chip->n_bytes = 1;
+ chip->dma_width = DCMD_WIDTH1;
+ chip->read = u8_reader;
+ chip->write = u8_writer;
+ } else if (spi->bits_per_word <= 16) {
+ chip->n_bytes = 2;
+ chip->dma_width = DCMD_WIDTH2;
+ chip->read = u16_reader;
+ chip->write = u16_writer;
+ } else if (spi->bits_per_word <= 32) {
+ chip->cr0 |= SSCR0_EDSS;
+ chip->n_bytes = 4;
+ chip->dma_width = DCMD_WIDTH4;
+ chip->read = u32_reader;
+ chip->write = u32_writer;
+ } else {
+ dev_err(&spi->dev, "invalid wordsize\n");
+ kfree(chip);
+ return -ENODEV;
+ }
+ chip->bits_per_word = spi->bits_per_word;
+
+ spi_set_ctldata(spi, chip);
+
+ return 0;
+}
+
+static void cleanup(const struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
+
+ kfree(chip);
+}
+
+static int init_queue(struct driver_data *drv_data)
+{
+ INIT_LIST_HEAD(&drv_data->queue);
+ spin_lock_init(&drv_data->lock);
+
+ drv_data->run = QUEUE_STOPPED;
+ drv_data->busy = 0;
+
+ tasklet_init(&drv_data->pump_transfers,
+ pump_transfers, (unsigned long)drv_data);
+
+ INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data);
+ drv_data->workqueue = create_singlethread_workqueue(
+ drv_data->master->cdev.dev->bus_id);
+ if (drv_data->workqueue == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int start_queue(struct driver_data *drv_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -EBUSY;
+ }
+
+ drv_data->run = QUEUE_RUNNING;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ return 0;
+}
+
+static int stop_queue(struct driver_data *drv_data)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int status = 0;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ /* This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the drv_data->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+ * friends on every SPI message. Do this instead */
+ drv_data->run = QUEUE_STOPPED;
+ while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&drv_data->lock, flags);
+ }
+
+ if (!list_empty(&drv_data->queue) || drv_data->busy)
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return status;
+}
+
+static int destroy_queue(struct driver_data *drv_data)
+{
+ int status;
+
+ status = stop_queue(drv_data);
+ if (status != 0)
+ return status;
+
+ destroy_workqueue(drv_data->workqueue);
+
+ return 0;
+}
+
+static int pxa2xx_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pxa2xx_spi_master *platform_info;
+ struct spi_master *master;
+ struct driver_data *drv_data = 0;
+ struct resource *memory_resource;
+ int irq;
+ int status = 0;
+
+ platform_info = dev->platform_data;
+
+ if (platform_info->ssp_type == SSP_UNDEFINED) {
+ dev_err(&pdev->dev, "undefined SSP\n");
+ return -ENODEV;
+ }
+
+ /* Allocate master with space for drv_data and null dma buffer */
+ master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
+ if (!master) {
+ dev_err(&pdev->dev, "can not alloc spi_master\n");
+ return -ENOMEM;
+ }
+ drv_data = spi_master_get_devdata(master);
+ drv_data->master = master;
+ drv_data->master_info = platform_info;
+ drv_data->pdev = pdev;
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->cleanup = cleanup;
+ master->setup = setup;
+ master->transfer = transfer;
+
+ drv_data->ssp_type = platform_info->ssp_type;
+ drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data +
+ sizeof(struct driver_data)), 8);
+
+ /* Setup register addresses */
+ memory_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!memory_resource) {
+ dev_err(&pdev->dev, "memory resources not defined\n");
+ status = -ENODEV;
+ goto out_error_master_alloc;
+ }
+
+ drv_data->ioaddr = (void *)io_p2v((unsigned long)(memory_resource->start));
+ drv_data->ssdr_physical = memory_resource->start + 0x00000010;
+ if (platform_info->ssp_type == PXA25x_SSP) {
+ drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
+ drv_data->dma_cr1 = 0;
+ drv_data->clear_sr = SSSR_ROR;
+ drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
+ } else {
+ drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
+ drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
+ drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
+ drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
+ }
+
+ /* Attach to IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "irq resource not defined\n");
+ status = -ENODEV;
+ goto out_error_master_alloc;
+ }
+
+ status = request_irq(irq, ssp_int, 0, dev->bus_id, drv_data);
+ if (status < 0) {
+ dev_err(&pdev->dev, "can not get IRQ\n");
+ goto out_error_master_alloc;
+ }
+
+ /* Setup DMA if requested */
+ drv_data->tx_channel = -1;
+ drv_data->rx_channel = -1;
+ if (platform_info->enable_dma) {
+
+ /* Get two DMA channels (rx and tx) */
+ drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
+ DMA_PRIO_HIGH,
+ dma_handler,
+ drv_data);
+ if (drv_data->rx_channel < 0) {
+ dev_err(dev, "problem (%d) requesting rx channel\n",
+ drv_data->rx_channel);
+ status = -ENODEV;
+ goto out_error_irq_alloc;
+ }
+ drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
+ DMA_PRIO_MEDIUM,
+ dma_handler,
+ drv_data);
+ if (drv_data->tx_channel < 0) {
+ dev_err(dev, "problem (%d) requesting tx channel\n",
+ drv_data->tx_channel);
+ status = -ENODEV;
+ goto out_error_dma_alloc;
+ }
+
+ if (drv_data->ioaddr == SSP1_VIRT) {
+ DRCMRRXSSDR = DRCMR_MAPVLD
+ | drv_data->rx_channel;
+ DRCMRTXSSDR = DRCMR_MAPVLD
+ | drv_data->tx_channel;
+ } else if (drv_data->ioaddr == SSP2_VIRT) {
+ DRCMRRXSS2DR = DRCMR_MAPVLD
+ | drv_data->rx_channel;
+ DRCMRTXSS2DR = DRCMR_MAPVLD
+ | drv_data->tx_channel;
+ } else if (drv_data->ioaddr == SSP3_VIRT) {
+ DRCMRRXSS3DR = DRCMR_MAPVLD
+ | drv_data->rx_channel;
+ DRCMRTXSS3DR = DRCMR_MAPVLD
+ | drv_data->tx_channel;
+ } else {
+ dev_err(dev, "bad SSP type\n");
+ goto out_error_dma_alloc;
+ }
+ }
+
+ /* Enable SOC clock */
+ pxa_set_cken(platform_info->clock_enable, 1);
+
+ /* Load default SSP configuration */
+ write_SSCR0(0, drv_data->ioaddr);
+ write_SSCR1(SSCR1_RxTresh(4) | SSCR1_TxTresh(12), drv_data->ioaddr);
+ write_SSCR0(SSCR0_SerClkDiv(2)
+ | SSCR0_Motorola
+ | SSCR0_DataSize(8),
+ drv_data->ioaddr);
+ if (drv_data->ssp_type != PXA25x_SSP)
+ write_SSTO(0, drv_data->ioaddr);
+ write_SSPSP(0, drv_data->ioaddr);
+
+ /* Initialize and start the queue */
+ status = init_queue(drv_data);
+ if (status != 0) {
+ dev_err(&pdev->dev, "problem initializing queue\n");
+ goto out_error_clock_enabled;
+ }
+ status = start_queue(drv_data);
+ if (status != 0) {
+ dev_err(&pdev->dev, "problem starting queue\n");
+ goto out_error_clock_enabled;
+ }
+
+ /* Register with the SPI framework */
+ platform_set_drvdata(pdev, drv_data);
+ status = spi_register_master(master);
+ if (status != 0) {
+ dev_err(&pdev->dev, "problem registering spi master\n");
+ goto out_error_queue_alloc;
+ }
+
+ return status;
+
+out_error_queue_alloc:
+ destroy_queue(drv_data);
+
+out_error_clock_enabled:
+ pxa_set_cken(platform_info->clock_enable, 0);
+
+out_error_dma_alloc:
+ if (drv_data->tx_channel != -1)
+ pxa_free_dma(drv_data->tx_channel);
+ if (drv_data->rx_channel != -1)
+ pxa_free_dma(drv_data->rx_channel);
+
+out_error_irq_alloc:
+ free_irq(irq, drv_data);
+
+out_error_master_alloc:
+ spi_master_put(master);
+ return status;
+}
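
For context, the probe above expects a platform device named "pxa2xx-spi" with one memory resource, one IRQ, and a struct pxa2xx_spi_master as platform data. A hedged sketch of the board-level registration follows; the SSP_PORT1_PHYS, IRQ_SSP1 and clock_enable values are placeholders, not taken from any header, and the field values are examples only:

	/* Hypothetical board code -- SSP_PORT1_PHYS, IRQ_SSP1 and the
	 * clock_enable bit are placeholders for the platform's real values. */
	static struct resource pxa_spi_resources[] = {
		{
			.start	= SSP_PORT1_PHYS,		/* SSP register block */
			.end	= SSP_PORT1_PHYS + 0x2c,
			.flags	= IORESOURCE_MEM,
		}, {
			.start	= IRQ_SSP1,
			.end	= IRQ_SSP1,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct pxa2xx_spi_master pxa_spi_master_info = {
		.ssp_type	= PXA25x_SSP,
		.clock_enable	= CKEN_SSP1,		/* placeholder clock-enable bit */
		.num_chipselect	= 1,
		.enable_dma	= 1,
	};

	static struct platform_device pxa_spi_device = {
		.name		= "pxa2xx-spi",
		.id		= 1,
		.resource	= pxa_spi_resources,
		.num_resources	= ARRAY_SIZE(pxa_spi_resources),
		.dev		= {
			.platform_data	= &pxa_spi_master_info,
		},
	};
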
+
+static int pxa2xx_spi_remove(struct platform_device *pdev)
+{
+ struct driver_data *drv_data = platform_get_drvdata(pdev);
+ int irq;
+ int status = 0;
+
+ if (!drv_data)
+ return 0;
+
+ /* Remove the queue */
+ status = destroy_queue(drv_data);
+ if (status != 0)
+ return status;
+
+ /* Disable the SSP at the peripheral and SOC level */
+ write_SSCR0(0, drv_data->ioaddr);
+ pxa_set_cken(drv_data->master_info->clock_enable, 0);
+
+ /* Release DMA */
+ if (drv_data->master_info->enable_dma) {
+ if (drv_data->ioaddr == SSP1_VIRT) {
+ DRCMRRXSSDR = 0;
+ DRCMRTXSSDR = 0;
+ } else if (drv_data->ioaddr == SSP2_VIRT) {
+ DRCMRRXSS2DR = 0;
+ DRCMRTXSS2DR = 0;
+ } else if (drv_data->ioaddr == SSP3_VIRT) {
+ DRCMRRXSS3DR = 0;
+ DRCMRTXSS3DR = 0;
+ }
+ pxa_free_dma(drv_data->tx_channel);
+ pxa_free_dma(drv_data->rx_channel);
+ }
+
+ /* Release IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0)
+ free_irq(irq, drv_data);
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(drv_data->master);
+
+ /* Prevent double remove */
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void pxa2xx_spi_shutdown(struct platform_device *pdev)
+{
+ int status = 0;
+
+ if ((status = pxa2xx_spi_remove(pdev)) != 0)
+ dev_err(&pdev->dev, "shutdown failed with %d\n", status);
+}
+
+#ifdef CONFIG_PM
+static int suspend_devices(struct device *dev, void *pm_message)
+{
+ pm_message_t *state = pm_message;
+
+ if (dev->power.power_state.event != state->event) {
+ dev_warn(dev, "pm state does not match request\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct driver_data *drv_data = platform_get_drvdata(pdev);
+ int status = 0;
+
+ /* Check all children for current power state */
+ if (device_for_each_child(&pdev->dev, &state, suspend_devices) != 0) {
+ dev_warn(&pdev->dev, "suspend aborted\n");
+ return -1;
+ }
+
+ status = stop_queue(drv_data);
+ if (status != 0)
+ return status;
+ write_SSCR0(0, drv_data->ioaddr);
+ pxa_set_cken(drv_data->master_info->clock_enable, 0);
+
+ return 0;
+}
+
+static int pxa2xx_spi_resume(struct platform_device *pdev)
+{
+ struct driver_data *drv_data = platform_get_drvdata(pdev);
+ int status = 0;
+
+ /* Enable the SSP clock */
+ pxa_set_cken(drv_data->master_info->clock_enable, 1);
+
+ /* Start the queue running */
+ status = start_queue(drv_data);
+ if (status != 0) {
+ dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
+ return status;
+ }
+
+ return 0;
+}
+#else
+#define pxa2xx_spi_suspend NULL
+#define pxa2xx_spi_resume NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver driver = {
+ .driver = {
+ .name = "pxa2xx-spi",
+ .bus = &platform_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = pxa2xx_spi_probe,
+ .remove = __devexit_p(pxa2xx_spi_remove),
+ .shutdown = pxa2xx_spi_shutdown,
+ .suspend = pxa2xx_spi_suspend,
+ .resume = pxa2xx_spi_resume,
+};
+
+static int __init pxa2xx_spi_init(void)
+{
+ platform_driver_register(&driver);
+
+ return 0;
+}
+module_init(pxa2xx_spi_init);
+
+static void __exit pxa2xx_spi_exit(void)
+{
+ platform_driver_unregister(&driver);
+}
+module_exit(pxa2xx_spi_exit);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 94f5e8e..1cea4a6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -338,18 +338,18 @@
* spi_alloc_master - allocate SPI master controller
* @dev: the controller, possibly using the platform_bus
* @size: how much driver-private data to preallocate; the pointer to this
- * memory is in the class_data field of the returned class_device,
+ * memory is in the class_data field of the returned class_device,
* accessible with spi_master_get_devdata().
*
* This call is used only by SPI master controller drivers, which are the
* only ones directly touching chip registers. It's how they allocate
- * an spi_master structure, prior to calling spi_add_master().
+ * an spi_master structure, prior to calling spi_register_master().
*
* This must be called from context that can sleep. It returns the SPI
* master structure on success, else NULL.
*
* The caller is responsible for assigning the bus number and initializing
- * the master's methods before calling spi_add_master(); and (after errors
+ * the master's methods before calling spi_register_master(); and (after errors
* adding the device) calling spi_master_put() to prevent a memory leak.
*/
struct spi_master * __init_or_module
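
A minimal controller-driver skeleton following the allocation/registration sequence the comment above describes; the "foo" names, private struct and ops are hypothetical, and the -1 bus number relies on the dynamic-ID convention adopted in the next hunk:

	static int foo_probe(struct platform_device *pdev)
	{
		struct spi_master *master;
		int status;

		master = spi_alloc_master(&pdev->dev, sizeof(struct foo_data));
		if (!master)
			return -ENOMEM;

		master->bus_num = pdev->id;		/* or -1 for a dynamic bus number */
		master->num_chipselect = 1;
		master->setup = foo_setup;		/* hypothetical ops */
		master->transfer = foo_transfer;

		status = spi_register_master(master);
		if (status < 0)
			spi_master_put(master);		/* undo the alloc on error */
		return status;
	}
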
@@ -395,7 +395,7 @@
int __init_or_module
spi_register_master(struct spi_master *master)
{
- static atomic_t dyn_bus_id = ATOMIC_INIT(0);
+ static atomic_t dyn_bus_id = ATOMIC_INIT((1<<16) - 1);
struct device *dev = master->cdev.dev;
int status = -ENODEV;
int dynamic = 0;
@@ -404,7 +404,7 @@
return -ENODEV;
/* convention: dynamically assigned bus IDs count down from the max */
- if (master->bus_num == 0) {
+ if (master->bus_num < 0) {
master->bus_num = atomic_dec_return(&dyn_bus_id);
dynamic = 1;
}
@@ -522,7 +522,8 @@
}
EXPORT_SYMBOL_GPL(spi_sync);
-#define SPI_BUFSIZ (SMP_CACHE_BYTES)
+/* portable code must never pass more than 32 bytes */
+#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
static u8 *buf;
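
The shared buffer above backs the small convenience helpers in this file (such as spi_write_then_read()), which is why the new comment caps portable callers at 32 bytes. A hedged usage sketch; the opcode, lengths and function name are illustrative only:

	/* Hypothetical register read through the shared-buffer helper; the
	 * 2-byte command plus a short reply stays well under the 32-byte
	 * portability limit noted above. */
	static int foo_read_id(struct spi_device *spi, u8 *id, unsigned len)
	{
		u8 cmd[2] = { 0x9f, 0x00 };	/* illustrative "read id" opcode */

		if (len > 16)
			return -EINVAL;		/* keep cmd + reply under SPI_BUFSIZ */
		return spi_write_then_read(spi, cmd, sizeof(cmd), id, len);
	}
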
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index f037e55..dd2f950 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -138,6 +138,45 @@
return t->len - count;
}
+int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct spi_bitbang_cs *cs = spi->controller_state;
+ u8 bits_per_word;
+ u32 hz;
+
+ if (t) {
+ bits_per_word = t->bits_per_word;
+ hz = t->speed_hz;
+ } else {
+ bits_per_word = 0;
+ hz = 0;
+ }
+
+ /* spi_transfer level calls that work per-word */
+ if (!bits_per_word)
+ bits_per_word = spi->bits_per_word;
+ if (bits_per_word <= 8)
+ cs->txrx_bufs = bitbang_txrx_8;
+ else if (bits_per_word <= 16)
+ cs->txrx_bufs = bitbang_txrx_16;
+ else if (bits_per_word <= 32)
+ cs->txrx_bufs = bitbang_txrx_32;
+ else
+ return -EINVAL;
+
+ /* nsecs = (clock period)/2 */
+ if (!hz)
+ hz = spi->max_speed_hz;
+ if (hz) {
+ cs->nsecs = (1000000000/2) / hz;
+ if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer);
+
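A hedged example of the per-transfer overrides this new hook honours, assuming the list-based spi_message helpers (spi_message_init()/spi_message_add_tail()) used by the queue code below; the 100 kHz figure, function name and buffers are illustrative:

	/* Send a slow command byte, then bulk data at the device's full rate */
	static int foo_send_cmd_then_data(struct spi_device *spi,
					  const u8 *cmd_buf, const u8 *data_buf,
					  size_t data_len)
	{
		struct spi_transfer cmd = {
			.tx_buf		= cmd_buf,
			.len		= 1,
			.speed_hz	= 100 * 1000,	/* slow command phase */
		};
		struct spi_transfer data = {
			.tx_buf		= data_buf,
			.len		= data_len,	/* runs at spi->max_speed_hz */
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&cmd, &m);
		spi_message_add_tail(&data, &m);
		return spi_sync(spi, &m);
	}
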
/**
* spi_bitbang_setup - default setup for per-word I/O loops
*/
@@ -145,8 +184,16 @@
{
struct spi_bitbang_cs *cs = spi->controller_state;
struct spi_bitbang *bitbang;
+ int retval;
- if (!spi->max_speed_hz)
+ bitbang = spi_master_get_devdata(spi->master);
+
+ /* REVISIT: some systems will want to support devices using lsb-first
+ * bit encodings on the wire. In pure software that would be trivial,
+ * just bitbang_txrx_le_cphaX() routines shifting the other way, and
+ * some hardware controllers also have this support.
+ */
+ if ((spi->mode & SPI_LSB_FIRST) != 0)
return -EINVAL;
if (!cs) {
@@ -155,32 +202,20 @@
return -ENOMEM;
spi->controller_state = cs;
}
- bitbang = spi_master_get_devdata(spi->master);
if (!spi->bits_per_word)
spi->bits_per_word = 8;
- /* spi_transfer level calls that work per-word */
- if (spi->bits_per_word <= 8)
- cs->txrx_bufs = bitbang_txrx_8;
- else if (spi->bits_per_word <= 16)
- cs->txrx_bufs = bitbang_txrx_16;
- else if (spi->bits_per_word <= 32)
- cs->txrx_bufs = bitbang_txrx_32;
- else
- return -EINVAL;
-
/* per-word shift register access, in hardware or bitbanging */
cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
if (!cs->txrx_word)
return -EINVAL;
- /* nsecs = (clock period)/2 */
- cs->nsecs = (1000000000/2) / (spi->max_speed_hz);
- if (cs->nsecs > MAX_UDELAY_MS * 1000)
- return -EINVAL;
+ retval = spi_bitbang_setup_transfer(spi, NULL);
+ if (retval < 0)
+ return retval;
- dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n",
+ dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
__FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA),
spi->bits_per_word, 2 * cs->nsecs);
@@ -246,6 +281,8 @@
unsigned tmp;
unsigned cs_change;
int status;
+ int (*setup_transfer)(struct spi_device *,
+ struct spi_transfer *);
m = container_of(bitbang->queue.next, struct spi_message,
queue);
@@ -262,6 +299,7 @@
tmp = 0;
cs_change = 1;
status = 0;
+ setup_transfer = NULL;
list_for_each_entry (t, &m->transfers, transfer_list) {
if (bitbang->shutdown) {
@@ -269,6 +307,20 @@
break;
}
+ /* override or restore speed and wordsize */
+ if (t->speed_hz || t->bits_per_word) {
+ setup_transfer = bitbang->setup_transfer;
+ if (!setup_transfer) {
+ status = -ENOPROTOOPT;
+ break;
+ }
+ }
+ if (setup_transfer) {
+ status = setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ }
+
/* set up default clock polarity, and activate chip;
* this implicitly updates clock and spi modes as
* previously recorded for this device via setup().
@@ -325,6 +377,10 @@
m->status = status;
m->complete(m->context);
+ /* restore speed and wordsize */
+ if (setup_transfer)
+ setup_transfer(spi, NULL);
+
/* normally deactivate chipselect ... unless no error and
* cs_change has hinted that the next message will probably
* be for this chip too.
@@ -348,6 +404,7 @@
{
struct spi_bitbang *bitbang;
unsigned long flags;
+ int status = 0;
m->actual_length = 0;
m->status = -EINPROGRESS;
@@ -357,11 +414,15 @@
return -ESHUTDOWN;
spin_lock_irqsave(&bitbang->lock, flags);
- list_add_tail(&m->queue, &bitbang->queue);
- queue_work(bitbang->workqueue, &bitbang->work);
+ if (!spi->max_speed_hz)
+ status = -ENETDOWN;
+ else {
+ list_add_tail(&m->queue, &bitbang->queue);
+ queue_work(bitbang->workqueue, &bitbang->work);
+ }
spin_unlock_irqrestore(&bitbang->lock, flags);
- return 0;
+ return status;
}
EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
@@ -406,6 +467,9 @@
bitbang->use_dma = 0;
bitbang->txrx_bufs = spi_bitbang_bufs;
if (!bitbang->master->setup) {
+ if (!bitbang->setup_transfer)
+ bitbang->setup_transfer =
+ spi_bitbang_setup_transfer;
bitbang->master->setup = spi_bitbang_setup;
bitbang->master->cleanup = spi_bitbang_cleanup;
}
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c
index ff9e5fa..a006a1e 100644
--- a/drivers/spi/spi_butterfly.c
+++ b/drivers/spi/spi_butterfly.c
@@ -321,6 +321,7 @@
* (firmware resets at45, acts as spi slave) or neither (we ignore
* both, AVR uses AT45). Here we expect firmware for the first option.
*/
+
pp->info[0].max_speed_hz = 15 * 1000 * 1000;
strcpy(pp->info[0].modalias, "mtd_dataflash");
pp->info[0].platform_data = &flash;
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
new file mode 100644
index 0000000..5d92a7e
--- /dev/null
+++ b/drivers/spi/spi_mpc83xx.c
@@ -0,0 +1,483 @@
+/*
+ * MPC83xx SPI controller driver.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+
+/* SPI Controller registers */
+struct mpc83xx_spi_reg {
+ u8 res1[0x20];
+ __be32 mode;
+ __be32 event;
+ __be32 mask;
+ __be32 command;
+ __be32 transmit;
+ __be32 receive;
+};
+
+/* SPI Controller mode register definitions */
+#define SPMODE_CI_INACTIVEHIGH (1 << 29)
+#define SPMODE_CP_BEGIN_EDGECLK (1 << 28)
+#define SPMODE_DIV16 (1 << 27)
+#define SPMODE_REV (1 << 26)
+#define SPMODE_MS (1 << 25)
+#define SPMODE_ENABLE (1 << 24)
+#define SPMODE_LEN(x) ((x) << 20)
+#define SPMODE_PM(x) ((x) << 16)
+
+/*
+ * Default for SPI Mode:
+ * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk)
+ */
+#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
+ SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))
+
+/* SPIE register values */
+#define SPIE_NE 0x00000200 /* Not empty */
+#define SPIE_NF 0x00000100 /* Not full */
+
+/* SPIM register values */
+#define SPIM_NE 0x00000200 /* Not empty */
+#define SPIM_NF 0x00000100 /* Not full */
+
+/* SPI Controller driver's private data. */
+struct mpc83xx_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ struct mpc83xx_spi_reg __iomem *base;
+
+ /* rx & tx bufs from the spi_transfer */
+ const void *tx;
+ void *rx;
+
+ /* functions to deal with different sized buffers */
+ void (*get_rx) (u32 rx_data, struct mpc83xx_spi *);
+ u32(*get_tx) (struct mpc83xx_spi *);
+
+ unsigned int count;
+ u32 irq;
+
+ unsigned nsecs; /* (clock cycle time)/2 */
+
+ u32 sysclk;
+ void (*activate_cs) (u8 cs, u8 polarity);
+ void (*deactivate_cs) (u8 cs, u8 polarity);
+};
+
+static inline void mpc83xx_spi_write_reg(__be32 __iomem * reg, u32 val)
+{
+ out_be32(reg, val);
+}
+
+static inline u32 mpc83xx_spi_read_reg(__be32 __iomem * reg)
+{
+ return in_be32(reg);
+}
+
+#define MPC83XX_SPI_RX_BUF(type) \
+void mpc83xx_spi_rx_buf_##type(u32 data, struct mpc83xx_spi *mpc83xx_spi) \
+{ \
+ type * rx = mpc83xx_spi->rx; \
+ *rx++ = (type)data; \
+ mpc83xx_spi->rx = rx; \
+}
+
+#define MPC83XX_SPI_TX_BUF(type) \
+u32 mpc83xx_spi_tx_buf_##type(struct mpc83xx_spi *mpc83xx_spi) \
+{ \
+ u32 data; \
+ const type * tx = mpc83xx_spi->tx; \
+ data = *tx++; \
+ mpc83xx_spi->tx = tx; \
+ return data; \
+}
+
+MPC83XX_SPI_RX_BUF(u8)
+MPC83XX_SPI_RX_BUF(u16)
+MPC83XX_SPI_RX_BUF(u32)
+MPC83XX_SPI_TX_BUF(u8)
+MPC83XX_SPI_TX_BUF(u16)
+MPC83XX_SPI_TX_BUF(u32)
+
+static void mpc83xx_spi_chipselect(struct spi_device *spi, int value)
+{
+ struct mpc83xx_spi *mpc83xx_spi;
+ u8 pol = spi->mode & SPI_CS_HIGH ? 1 : 0;
+
+ mpc83xx_spi = spi_master_get_devdata(spi->master);
+
+ if (value == BITBANG_CS_INACTIVE) {
+ if (mpc83xx_spi->deactivate_cs)
+ mpc83xx_spi->deactivate_cs(spi->chip_select, pol);
+ }
+
+ if (value == BITBANG_CS_ACTIVE) {
+ u32 regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode);
+ u32 len = spi->bits_per_word;
+ if (len == 32)
+ len = 0;
+ else
+ len = len - 1;
+
+ /* mask out bits we are going to set */
+ regval &= ~0x38ff0000;
+
+ if (spi->mode & SPI_CPHA)
+ regval |= SPMODE_CP_BEGIN_EDGECLK;
+ if (spi->mode & SPI_CPOL)
+ regval |= SPMODE_CI_INACTIVEHIGH;
+
+ regval |= SPMODE_LEN(len);
+
+ if ((mpc83xx_spi->sysclk / spi->max_speed_hz) >= 64) {
+ u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 64);
+ regval |= SPMODE_PM(pm) | SPMODE_DIV16;
+ } else {
+ u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 4);
+ regval |= SPMODE_PM(pm);
+ }
+
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval);
+ if (mpc83xx_spi->activate_cs)
+ mpc83xx_spi->activate_cs(spi->chip_select, pol);
+ }
+}
+
+static
+int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct mpc83xx_spi *mpc83xx_spi;
+ u32 regval;
+ u8 bits_per_word;
+ u32 hz;
+
+ mpc83xx_spi = spi_master_get_devdata(spi->master);
+
+ if (t) {
+ bits_per_word = t->bits_per_word;
+ hz = t->speed_hz;
+ } else {
+ bits_per_word = 0;
+ hz = 0;
+ }
+
+ /* spi_transfer level calls that work per-word */
+ if (!bits_per_word)
+ bits_per_word = spi->bits_per_word;
+
+ /* Make sure it's a bit width we support [4..16, 32] */
+ if ((bits_per_word < 4)
+ || ((bits_per_word > 16) && (bits_per_word != 32)))
+ return -EINVAL;
+
+ if (bits_per_word <= 8) {
+ mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
+ mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
+ } else if (bits_per_word <= 16) {
+ mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u16;
+ mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u16;
+ } else if (bits_per_word <= 32) {
+ mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u32;
+ mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u32;
+ } else
+ return -EINVAL;
+
+ /* nsecs = (clock period)/2 */
+ if (!hz)
+ hz = spi->max_speed_hz;
+ mpc83xx_spi->nsecs = (1000000000 / 2) / hz;
+ if (mpc83xx_spi->nsecs > MAX_UDELAY_MS * 1000)
+ return -EINVAL;
+
+ if (bits_per_word == 32)
+ bits_per_word = 0;
+ else
+ bits_per_word = bits_per_word - 1;
+
+ regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode);
+
+ /* Mask out the word length bits */
+ regval &= 0xff0fffff;
+ regval |= SPMODE_LEN(bits_per_word);
+
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval);
+
+ return 0;
+}
+
+static int mpc83xx_spi_setup(struct spi_device *spi)
+{
+ struct spi_bitbang *bitbang;
+ struct mpc83xx_spi *mpc83xx_spi;
+ int retval;
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ bitbang = spi_master_get_devdata(spi->master);
+ mpc83xx_spi = spi_master_get_devdata(spi->master);
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ retval = mpc83xx_spi_setup_transfer(spi, NULL);
+ if (retval < 0)
+ return retval;
+
+ dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n",
+ __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA),
+ spi->bits_per_word, 2 * mpc83xx_spi->nsecs);
+
+ /* NOTE we _need_ to call chipselect() early, ideally with adapter
+ * setup, unless the hardware defaults cooperate to avoid confusion
+ * between normal (active low) and inverted chipselects.
+ */
+
+ /* deselect chip (low or high) */
+ spin_lock(&bitbang->lock);
+ if (!bitbang->busy) {
+ bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+ ndelay(mpc83xx_spi->nsecs);
+ }
+ spin_unlock(&bitbang->lock);
+
+ return 0;
+}
+
+static int mpc83xx_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct mpc83xx_spi *mpc83xx_spi;
+ u32 word;
+
+ mpc83xx_spi = spi_master_get_devdata(spi->master);
+
+ mpc83xx_spi->tx = t->tx_buf;
+ mpc83xx_spi->rx = t->rx_buf;
+ mpc83xx_spi->count = t->len;
+ INIT_COMPLETION(mpc83xx_spi->done);
+
+ /* enable rx ints */
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, SPIM_NE);
+
+ /* transmit word */
+ word = mpc83xx_spi->get_tx(mpc83xx_spi);
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->transmit, word);
+
+ wait_for_completion(&mpc83xx_spi->done);
+
+ /* disable rx ints */
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, 0);
+
+ return t->len - mpc83xx_spi->count;
+}
+
+irqreturn_t mpc83xx_spi_irq(s32 irq, void *context_data,
+ struct pt_regs * ptregs)
+{
+ struct mpc83xx_spi *mpc83xx_spi = context_data;
+ u32 event;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* Get interrupt events(tx/rx) */
+ event = mpc83xx_spi_read_reg(&mpc83xx_spi->base->event);
+
+ /* We need to handle RX first */
+ if (event & SPIE_NE) {
+ u32 rx_data = mpc83xx_spi_read_reg(&mpc83xx_spi->base->receive);
+
+ if (mpc83xx_spi->rx)
+ mpc83xx_spi->get_rx(rx_data, mpc83xx_spi);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if ((event & SPIE_NF) == 0)
+ /* spin until TX is done */
+ while (((event =
+ mpc83xx_spi_read_reg(&mpc83xx_spi->base->event)) &
+ SPIE_NF) == 0)
+ cpu_relax();
+
+ mpc83xx_spi->count -= 1;
+ if (mpc83xx_spi->count) {
+ if (mpc83xx_spi->tx) {
+ u32 word = mpc83xx_spi->get_tx(mpc83xx_spi);
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->transmit,
+ word);
+ }
+ } else {
+ complete(&mpc83xx_spi->done);
+ }
+
+ /* Clear the events */
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->event, event);
+
+ return ret;
+}
+
+static int __init mpc83xx_spi_probe(struct platform_device *dev)
+{
+ struct spi_master *master;
+ struct mpc83xx_spi *mpc83xx_spi;
+ struct fsl_spi_platform_data *pdata;
+ struct resource *r;
+ u32 regval;
+ int ret = 0;
+
+ /* Get resources(memory, IRQ) associated with the device */
+ master = spi_alloc_master(&dev->dev, sizeof(struct mpc83xx_spi));
+
+ if (master == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ platform_set_drvdata(dev, master);
+ pdata = dev->dev.platform_data;
+
+ if (pdata == NULL) {
+ ret = -ENODEV;
+ goto free_master;
+ }
+
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto free_master;
+ }
+
+ mpc83xx_spi = spi_master_get_devdata(master);
+ mpc83xx_spi->bitbang.master = spi_master_get(master);
+ mpc83xx_spi->bitbang.chipselect = mpc83xx_spi_chipselect;
+ mpc83xx_spi->bitbang.setup_transfer = mpc83xx_spi_setup_transfer;
+ mpc83xx_spi->bitbang.txrx_bufs = mpc83xx_spi_bufs;
+ mpc83xx_spi->sysclk = pdata->sysclk;
+ mpc83xx_spi->activate_cs = pdata->activate_cs;
+ mpc83xx_spi->deactivate_cs = pdata->deactivate_cs;
+ mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
+ mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
+
+ mpc83xx_spi->bitbang.master->setup = mpc83xx_spi_setup;
+ init_completion(&mpc83xx_spi->done);
+
+ mpc83xx_spi->base = ioremap(r->start, r->end - r->start + 1);
+ if (mpc83xx_spi->base == NULL) {
+ ret = -ENOMEM;
+ goto put_master;
+ }
+
+ mpc83xx_spi->irq = platform_get_irq(dev, 0);
+
+ if (mpc83xx_spi->irq < 0) {
+ ret = -ENXIO;
+ goto unmap_io;
+ }
+
+ /* Register for SPI Interrupt */
+ ret = request_irq(mpc83xx_spi->irq, mpc83xx_spi_irq,
+ 0, "mpc83xx_spi", mpc83xx_spi);
+
+ if (ret != 0)
+ goto unmap_io;
+
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->max_chipselect;
+
+ /* SPI controller initializations */
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, 0);
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, 0);
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->command, 0);
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->event, 0xffffffff);
+
+ /* Enable SPI interface */
+ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval);
+
+ ret = spi_bitbang_start(&mpc83xx_spi->bitbang);
+
+ if (ret != 0)
+ goto free_irq;
+
+ printk(KERN_INFO
+ "%s: MPC83xx SPI Controller driver at 0x%p (irq = %d)\n",
+ dev->dev.bus_id, mpc83xx_spi->base, mpc83xx_spi->irq);
+
+ return ret;
+
+free_irq:
+ free_irq(mpc83xx_spi->irq, mpc83xx_spi);
+unmap_io:
+ iounmap(mpc83xx_spi->base);
+put_master:
+ spi_master_put(master);
+free_master:
+ kfree(master);
+err:
+ return ret;
+}
+
+static int __devexit mpc83xx_spi_remove(struct platform_device *dev)
+{
+ struct mpc83xx_spi *mpc83xx_spi;
+ struct spi_master *master;
+
+ master = platform_get_drvdata(dev);
+ mpc83xx_spi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&mpc83xx_spi->bitbang);
+ free_irq(mpc83xx_spi->irq, mpc83xx_spi);
+ iounmap(mpc83xx_spi->base);
+ spi_master_put(mpc83xx_spi->bitbang.master);
+
+ return 0;
+}
+
+static struct platform_driver mpc83xx_spi_driver = {
+ .probe = mpc83xx_spi_probe,
+ .remove = __devexit_p(mpc83xx_spi_remove),
+ .driver = {
+ .name = "mpc83xx_spi",
+ },
+};
+
+static int __init mpc83xx_spi_init(void)
+{
+ return platform_driver_register(&mpc83xx_spi_driver);
+}
+
+static void __exit mpc83xx_spi_exit(void)
+{
+ platform_driver_unregister(&mpc83xx_spi_driver);
+}
+
+module_init(mpc83xx_spi_init);
+module_exit(mpc83xx_spi_exit);
+
+MODULE_AUTHOR("Kumar Gala");
+MODULE_DESCRIPTION("Simple MPC83xx SPI Driver");
+MODULE_LICENSE("GPL");
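
The new MPC83xx driver binds to a platform device named "mpc83xx_spi" and expects struct fsl_spi_platform_data from board code; the field names below (sysclk, bus_num, max_chipselect, initial_spmode, activate_cs, deactivate_cs) are the ones the probe routine dereferences, while the register window, IRQ number and clock rate are purely illustrative assumptions, not values from this series.

static void board_cs_activate(u8 cs, u8 polarity)
{
        /* board specific: assert the chipselect line */
}

static void board_cs_deactivate(u8 cs, u8 polarity)
{
        /* board specific: release the chipselect line */
}

static struct fsl_spi_platform_data board_spi_pdata = {
        .bus_num        = 0,
        .max_chipselect = 1,
        .sysclk         = 133333333,    /* CSB clock in Hz, board dependent */
        .initial_spmode = 0,
        .activate_cs    = board_cs_activate,
        .deactivate_cs  = board_cs_deactivate,
};

static struct resource board_spi_resources[] = {
        { .start = 0xe0007000, .end = 0xe00070ff, .flags = IORESOURCE_MEM },
        { .start = 16,         .end = 16,         .flags = IORESOURCE_IRQ },
};

static struct platform_device board_spi_device = {
        .name           = "mpc83xx_spi",        /* must match the driver name */
        .id             = 0,
        .resource       = board_spi_resources,
        .num_resources  = ARRAY_SIZE(board_spi_resources),
        .dev            = {
                .platform_data  = &board_spi_pdata,
        },
};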
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
new file mode 100644
index 0000000..9de4b5a
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx.c
@@ -0,0 +1,453 @@
+/* linux/drivers/spi/spi_s3c24xx.c
+ *
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+
+//#define DEBUG
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/hardware.h>
+
+#include <asm/arch/regs-gpio.h>
+#include <asm/arch/regs-spi.h>
+#include <asm/arch/spi.h>
+
+struct s3c24xx_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *regs;
+ int irq;
+ int len;
+ int count;
+
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+
+ struct clk *clk;
+ struct resource *ioarea;
+ struct spi_master *master;
+ struct spi_device *curdev;
+ struct device *dev;
+ struct s3c2410_spi_info *pdata;
+};
+
+#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
+#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
+
+static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static void s3c24xx_spi_chipsel(struct spi_device *spi, int value)
+{
+ struct s3c24xx_spi *hw = to_hw(spi);
+ unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
+ unsigned int spcon;
+
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ if (hw->pdata->set_cs)
+ hw->pdata->set_cs(hw->pdata, value, cspol);
+ else
+ s3c2410_gpio_setpin(hw->pdata->pin_cs, cspol ^ 1);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ spcon = readb(hw->regs + S3C2410_SPCON);
+
+ if (spi->mode & SPI_CPHA)
+ spcon |= S3C2410_SPCON_CPHA_FMTB;
+ else
+ spcon &= ~S3C2410_SPCON_CPHA_FMTB;
+
+ if (spi->mode & SPI_CPOL)
+ spcon |= S3C2410_SPCON_CPOL_HIGH;
+ else
+ spcon &= ~S3C2410_SPCON_CPOL_HIGH;
+
+ spcon |= S3C2410_SPCON_ENSCK;
+
+ /* write new configuration */
+
+ writeb(spcon, hw->regs + S3C2410_SPCON);
+
+ if (hw->pdata->set_cs)
+ hw->pdata->set_cs(hw->pdata, value, cspol);
+ else
+ s3c2410_gpio_setpin(hw->pdata->pin_cs, cspol);
+
+ break;
+
+ }
+}
+
+static int s3c24xx_spi_setupxfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct s3c24xx_spi *hw = to_hw(spi);
+ unsigned int bpw;
+ unsigned int hz;
+ unsigned int div;
+
+ bpw = t ? t->bits_per_word : spi->bits_per_word;
+ hz = t ? t->speed_hz : spi->max_speed_hz;
+
+ if (bpw != 8) {
+ dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw);
+ return -EINVAL;
+ }
+
+ div = clk_get_rate(hw->clk) / hz;
+
+ /* is clk = pclk / (2 * (pre+1)), or is it
+ * clk = (pclk * 2) / ( pre + 1) */
+
+ div = (div / 2) - 1;
+
+ if (div < 0)
+ div = 1;
+
+ if (div > 255)
+ div = 255;
+
+ dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", div, hz);
+ writeb(div, hw->regs + S3C2410_SPPRE);
+
+ spin_lock(&hw->bitbang.lock);
+ if (!hw->bitbang.busy) {
+ hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
+ /* need to ndelay for 0.5 clocktick ? */
+ }
+ spin_unlock(&hw->bitbang.lock);
+
+ return 0;
+}
+
+static int s3c24xx_spi_setup(struct spi_device *spi)
+{
+ int ret;
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ if ((spi->mode & SPI_LSB_FIRST) != 0)
+ return -EINVAL;
+
+ ret = s3c24xx_spi_setupxfer(spi, NULL);
+ if (ret < 0) {
+ dev_err(&spi->dev, "setupxfer returned %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n",
+ __FUNCTION__, spi->mode, spi->bits_per_word,
+ spi->max_speed_hz);
+
+ return 0;
+}
+
+static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
+{
+ return hw->tx ? hw->tx[count] : 0xff;
+}
+
+static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct s3c24xx_spi *hw = to_hw(spi);
+
+ dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
+ t->tx_buf, t->rx_buf, t->len);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->count = 0;
+
+ /* send the first byte */
+ writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT);
+ wait_for_completion(&hw->done);
+
+ return hw->count;
+}
+
+static irqreturn_t s3c24xx_spi_irq(int irq, void *dev, struct pt_regs *regs)
+{
+ struct s3c24xx_spi *hw = dev;
+ unsigned int spsta = readb(hw->regs + S3C2410_SPSTA);
+ unsigned int count = hw->count;
+
+ if (spsta & S3C2410_SPSTA_DCOL) {
+ dev_dbg(hw->dev, "data-collision\n");
+ complete(&hw->done);
+ goto irq_done;
+ }
+
+ if (!(spsta & S3C2410_SPSTA_READY)) {
+ dev_dbg(hw->dev, "spi not ready for tx?\n");
+ complete(&hw->done);
+ goto irq_done;
+ }
+
+ hw->count++;
+
+ if (hw->rx)
+ hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
+
+ count++;
+
+ if (count < hw->len)
+ writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
+ else
+ complete(&hw->done);
+
+ irq_done:
+ return IRQ_HANDLED;
+}
+
+static int s3c24xx_spi_probe(struct platform_device *pdev)
+{
+ struct s3c24xx_spi *hw;
+ struct spi_master *master;
+ struct spi_board_info *bi;
+ struct resource *res;
+ int err = 0;
+ int i;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "No memory for spi_master\n");
+ err = -ENOMEM;
+ goto err_nomem;
+ }
+
+ hw = spi_master_get_devdata(master);
+ memset(hw, 0, sizeof(struct s3c24xx_spi));
+
+ hw->master = spi_master_get(master);
+ hw->pdata = pdev->dev.platform_data;
+ hw->dev = &pdev->dev;
+
+ if (hw->pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ err = -ENOENT;
+ goto err_no_pdata;
+ }
+
+ platform_set_drvdata(pdev, hw);
+ init_completion(&hw->done);
+
+ /* setup the state for the bitbang driver */
+
+ hw->bitbang.master = hw->master;
+ hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer;
+ hw->bitbang.chipselect = s3c24xx_spi_chipsel;
+ hw->bitbang.txrx_bufs = s3c24xx_spi_txrx;
+ hw->bitbang.master->setup = s3c24xx_spi_setup;
+
+ dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
+
+ /* find and map our resources */
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ err = -ENOENT;
+ goto err_no_iores;
+ }
+
+ hw->ioarea = request_mem_region(res->start, (res->end - res->start)+1,
+ pdev->name);
+
+ if (hw->ioarea == NULL) {
+ dev_err(&pdev->dev, "Cannot reserve region\n");
+ err = -ENXIO;
+ goto err_no_iores;
+ }
+
+ hw->regs = ioremap(res->start, (res->end - res->start)+1);
+ if (hw->regs == NULL) {
+ dev_err(&pdev->dev, "Cannot map IO\n");
+ err = -ENXIO;
+ goto err_no_iomap;
+ }
+
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ specified\n");
+ err = -ENOENT;
+ goto err_no_irq;
+ }
+
+ err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ goto err_no_irq;
+ }
+
+ hw->clk = clk_get(&pdev->dev, "spi");
+ if (IS_ERR(hw->clk)) {
+ dev_err(&pdev->dev, "No clock for device\n");
+ err = PTR_ERR(hw->clk);
+ goto err_no_clk;
+ }
+
+ /* for the moment, permanently enable the clock */
+
+ clk_enable(hw->clk);
+
+ /* program defaults into the registers */
+
+ writeb(0xff, hw->regs + S3C2410_SPPRE);
+ writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN);
+ writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON);
+
+ /* setup any gpio we can */
+
+ if (!hw->pdata->set_cs) {
+ s3c2410_gpio_setpin(hw->pdata->pin_cs, 1);
+ s3c2410_gpio_cfgpin(hw->pdata->pin_cs, S3C2410_GPIO_OUTPUT);
+ }
+
+ /* register our spi controller */
+
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register SPI master\n");
+ goto err_register;
+ }
+
+ dev_dbg(hw->dev, "shutdown=%d\n", hw->bitbang.shutdown);
+
+ /* register all the devices associated */
+
+ bi = &hw->pdata->board_info[0];
+ for (i = 0; i < hw->pdata->board_size; i++, bi++) {
+ dev_info(hw->dev, "registering %s\n", bi->modalias);
+
+ bi->controller_data = hw;
+ spi_new_device(master, bi);
+ }
+
+ return 0;
+
+ err_register:
+ clk_disable(hw->clk);
+ clk_put(hw->clk);
+
+ err_no_clk:
+ free_irq(hw->irq, hw);
+
+ err_no_irq:
+ iounmap(hw->regs);
+
+ err_no_iomap:
+ release_resource(hw->ioarea);
+ kfree(hw->ioarea);
+
+ err_no_iores:
+ err_no_pdata:
+ spi_master_put(hw->master);
+
+ err_nomem:
+ return err;
+}
+
+static int s3c24xx_spi_remove(struct platform_device *dev)
+{
+ struct s3c24xx_spi *hw = platform_get_drvdata(dev);
+
+ platform_set_drvdata(dev, NULL);
+
+ spi_unregister_master(hw->master);
+
+ clk_disable(hw->clk);
+ clk_put(hw->clk);
+
+ free_irq(hw->irq, hw);
+ iounmap(hw->regs);
+
+ release_resource(hw->ioarea);
+ kfree(hw->ioarea);
+
+ spi_master_put(hw->master);
+ return 0;
+}
+
+
+#ifdef CONFIG_PM
+
+static int s3c24xx_spi_suspend(struct platform_device *pdev, pm_message_t msg)
+{
+ struct s3c24xx_spi *hw = platform_get_drvdata(pdev);
+
+ clk_disable(hw->clk);
+ return 0;
+}
+
+static int s3c24xx_spi_resume(struct platform_device *pdev)
+{
+ struct s3c24xx_spi *hw = platform_get_drvdata(pdev);
+
+ clk_enable(hw->clk);
+ return 0;
+}
+
+#else
+#define s3c24xx_spi_suspend NULL
+#define s3c24xx_spi_resume NULL
+#endif
+
+static struct platform_driver s3c24xx_spidrv = {
+ .probe = s3c24xx_spi_probe,
+ .remove = s3c24xx_spi_remove,
+ .suspend = s3c24xx_spi_suspend,
+ .resume = s3c24xx_spi_resume,
+ .driver = {
+ .name = "s3c2410-spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init s3c24xx_spi_init(void)
+{
+ return platform_driver_register(&s3c24xx_spidrv);
+}
+
+static void __exit s3c24xx_spi_exit(void)
+{
+ platform_driver_unregister(&s3c24xx_spidrv);
+}
+
+module_init(s3c24xx_spi_init);
+module_exit(s3c24xx_spi_exit);
+
+MODULE_DESCRIPTION("S3C24XX SPI Driver");
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
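
This driver pulls struct s3c2410_spi_info from dev.platform_data and registers each spi_board_info entry itself via spi_new_device(); pin_cs, set_cs, board_info and board_size are the fields it dereferences. A hedged board-code sketch follows; the pin choice and the slave entry are assumptions, not taken from this series.

static struct spi_board_info board_spi_slaves[] = {
        {
                .modalias       = "mtd_dataflash",
                .max_speed_hz   = 10 * 1000 * 1000,
                .bus_num        = 0,
                .chip_select    = 0,
        },
};

static struct s3c2410_spi_info board_spi_info = {
        .pin_cs         = S3C2410_GPG2,         /* assumed CS pin */
        .set_cs         = NULL,                 /* let the driver toggle pin_cs */
        .board_info     = board_spi_slaves,
        .board_size     = ARRAY_SIZE(board_spi_slaves),
};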
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
new file mode 100644
index 0000000..aacdceb
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx_gpio.c
@@ -0,0 +1,188 @@
+/* linux/drivers/spi/spi_s3c24xx_gpio.c
+ *
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ *
+ * S3C24XX GPIO based SPI driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <asm/arch/regs-gpio.h>
+#include <asm/arch/spi-gpio.h>
+#include <asm/arch/hardware.h>
+
+struct s3c2410_spigpio {
+ struct spi_bitbang bitbang;
+
+ struct s3c2410_spigpio_info *info;
+ struct platform_device *dev;
+};
+
+static inline struct s3c2410_spigpio *spidev_to_sg(struct spi_device *spi)
+{
+ return spi->controller_data;
+}
+
+static inline void setsck(struct spi_device *dev, int on)
+{
+ struct s3c2410_spigpio *sg = spidev_to_sg(dev);
+ s3c2410_gpio_setpin(sg->info->pin_clk, on ? 1 : 0);
+}
+
+static inline void setmosi(struct spi_device *dev, int on)
+{
+ struct s3c2410_spigpio *sg = spidev_to_sg(dev);
+ s3c2410_gpio_setpin(sg->info->pin_mosi, on ? 1 : 0);
+}
+
+static inline u32 getmiso(struct spi_device *dev)
+{
+ struct s3c2410_spigpio *sg = spidev_to_sg(dev);
+ return s3c2410_gpio_getpin(sg->info->pin_miso) ? 1 : 0;
+}
+
+#define spidelay(x) ndelay(x)
+
+#define EXPAND_BITBANG_TXRX
+#include <linux/spi/spi_bitbang.h>
+
+
+static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+}
+
+static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits);
+}
+
+static void s3c2410_spigpio_chipselect(struct spi_device *dev, int value)
+{
+ struct s3c2410_spigpio *sg = spidev_to_sg(dev);
+
+ if (sg->info && sg->info->chip_select)
+ (sg->info->chip_select)(sg->info, value);
+}
+
+static int s3c2410_spigpio_probe(struct platform_device *dev)
+{
+ struct spi_master *master;
+ struct s3c2410_spigpio *sp;
+ int ret;
+ int i;
+
+ master = spi_alloc_master(&dev->dev, sizeof(struct s3c2410_spigpio));
+ if (master == NULL) {
+ dev_err(&dev->dev, "failed to allocate spi master\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sp = spi_master_get_devdata(master);
+
+ platform_set_drvdata(dev, sp);
+
+ /* copy in the platform data */
+ sp->info = dev->dev.platform_data;
+
+ /* setup spi bitbang adaptor */
+ sp->bitbang.master = spi_master_get(master);
+ sp->bitbang.chipselect = s3c2410_spigpio_chipselect;
+
+ sp->bitbang.txrx_word[SPI_MODE_0] = s3c2410_spigpio_txrx_mode0;
+ sp->bitbang.txrx_word[SPI_MODE_1] = s3c2410_spigpio_txrx_mode1;
+
+ /* set state of spi pins */
+ s3c2410_gpio_setpin(sp->info->pin_clk, 0);
+ s3c2410_gpio_setpin(sp->info->pin_mosi, 0);
+
+ s3c2410_gpio_cfgpin(sp->info->pin_clk, S3C2410_GPIO_OUTPUT);
+ s3c2410_gpio_cfgpin(sp->info->pin_mosi, S3C2410_GPIO_OUTPUT);
+ s3c2410_gpio_cfgpin(sp->info->pin_miso, S3C2410_GPIO_INPUT);
+
+ ret = spi_bitbang_start(&sp->bitbang);
+ if (ret)
+ goto err_no_bitbang;
+
+ /* register the chips to go with the board */
+
+ for (i = 0; i < sp->info->board_size; i++) {
+ dev_info(&dev->dev, "registering %p: %s\n",
+ &sp->info->board_info[i],
+ sp->info->board_info[i].modalias);
+
+ sp->info->board_info[i].controller_data = sp;
+ spi_new_device(master, sp->info->board_info + i);
+ }
+
+ return 0;
+
+ err_no_bitbang:
+ spi_master_put(sp->bitbang.master);
+ err:
+ return ret;
+
+}
+
+static int s3c2410_spigpio_remove(struct platform_device *dev)
+{
+ struct s3c2410_spigpio *sp = platform_get_drvdata(dev);
+
+ spi_bitbang_stop(&sp->bitbang);
+ spi_master_put(sp->bitbang.master);
+
+ return 0;
+}
+
+/* all GPIO state should be held over suspend/resume, so we should
+ * not need to do anything here
+ */
+
+#define s3c2410_spigpio_suspend NULL
+#define s3c2410_spigpio_resume NULL
+
+
+static struct platform_driver s3c2410_spigpio_drv = {
+ .probe = s3c2410_spigpio_probe,
+ .remove = s3c2410_spigpio_remove,
+ .suspend = s3c2410_spigpio_suspend,
+ .resume = s3c2410_spigpio_resume,
+ .driver = {
+ .name = "s3c24xx-spi-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init s3c2410_spigpio_init(void)
+{
+ return platform_driver_register(&s3c2410_spigpio_drv);
+}
+
+static void __exit s3c2410_spigpio_exit(void)
+{
+ platform_driver_unregister(&s3c2410_spigpio_drv);
+}
+
+module_init(s3c2410_spigpio_init);
+module_exit(s3c2410_spigpio_exit);
+
+MODULE_DESCRIPTION("S3C24XX SPI Driver");
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
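
The GPIO variant bit-bangs SPI over three pins described by struct s3c2410_spigpio_info; pin_clk, pin_mosi, pin_miso, chip_select, board_info and board_size are the fields the probe routine reads. A hedged sketch of the platform data (the pin numbers and the chipselect body are illustrative assumptions):

static void board_spigpio_chip_select(struct s3c2410_spigpio_info *info,
                                      int value)
{
        /* active-low CS on an assumed GPIO */
        s3c2410_gpio_setpin(S3C2410_GPB5, value == BITBANG_CS_ACTIVE ? 0 : 1);
}

static struct s3c2410_spigpio_info board_spigpio_info = {
        .pin_clk        = S3C2410_GPE13,
        .pin_mosi       = S3C2410_GPE12,
        .pin_miso       = S3C2410_GPE11,
        .chip_select    = board_spigpio_chip_select,
        .board_size     = 0,    /* no slaves registered in this sketch */
};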
diff --git a/drivers/usb/input/hiddev.c b/drivers/usb/input/hiddev.c
index 6dd6666..c4670e1 100644
--- a/drivers/usb/input/hiddev.c
+++ b/drivers/usb/input/hiddev.c
@@ -317,6 +317,7 @@
}
schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
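
The hiddev change matters because schedule() returns with the task back in TASK_RUNNING; without re-arming the state, later iterations of the loop test the condition and call schedule() as a runnable task, so the read loop can spin instead of sleeping. The canonical open-coded wait loop looks like this (a generic sketch; condition and retval are placeholders, not hiddev code):

        set_current_state(TASK_INTERRUPTIBLE);
        while (!condition) {
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);  /* re-arm before re-checking */
        }
        set_current_state(TASK_RUNNING);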
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 334b1db..27597c5 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -29,12 +29,15 @@
static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count)
{
- int rc = -ENXIO, power;
+ int rc = -ENXIO;
char *endp;
struct backlight_device *bd = to_backlight_device(cdev);
+ int power = simple_strtoul(buf, &endp, 0);
+ size_t size = endp - buf;
- power = simple_strtoul(buf, &endp, 0);
- if (*endp && !isspace(*endp))
+ if (*endp && isspace(*endp))
+ size++;
+ if (size != count)
return -EINVAL;
down(&bd->sem);
@@ -65,12 +68,15 @@
static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count)
{
- int rc = -ENXIO, brightness;
+ int rc = -ENXIO;
char *endp;
struct backlight_device *bd = to_backlight_device(cdev);
+ int brightness = simple_strtoul(buf, &endp, 0);
+ size_t size = endp - buf;
- brightness = simple_strtoul(buf, &endp, 0);
- if (*endp && !isspace(*endp))
+ if (*endp && isspace(*endp))
+ size++;
+ if (size != count)
return -EINVAL;
down(&bd->sem);
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 86908a6..bc8ab00 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -31,12 +31,15 @@
static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_t count)
{
- int rc, power;
+ int rc = -ENXIO;
char *endp;
struct lcd_device *ld = to_lcd_device(cdev);
+ int power = simple_strtoul(buf, &endp, 0);
+ size_t size = endp - buf;
- power = simple_strtoul(buf, &endp, 0);
- if (*endp && !isspace(*endp))
+ if (*endp && isspace(*endp))
+ size++;
+ if (size != count)
return -EINVAL;
down(&ld->sem);
@@ -44,8 +47,7 @@
pr_debug("lcd: set power to %d\n", power);
ld->props->set_power(ld, power);
rc = count;
- } else
- rc = -ENXIO;
+ }
up(&ld->sem);
return rc;
@@ -53,14 +55,12 @@
static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf)
{
- int rc;
+ int rc = -ENXIO;
struct lcd_device *ld = to_lcd_device(cdev);
down(&ld->sem);
if (likely(ld->props && ld->props->get_contrast))
rc = sprintf(buf, "%d\n", ld->props->get_contrast(ld));
- else
- rc = -ENXIO;
up(&ld->sem);
return rc;
@@ -68,12 +68,15 @@
static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, size_t count)
{
- int rc, contrast;
+ int rc = -ENXIO;
char *endp;
struct lcd_device *ld = to_lcd_device(cdev);
+ int contrast = simple_strtoul(buf, &endp, 0);
+ size_t size = endp - buf;
- contrast = simple_strtoul(buf, &endp, 0);
- if (*endp && !isspace(*endp))
+ if (*endp && isspace(*endp))
+ size++;
+ if (size != count)
return -EINVAL;
down(&ld->sem);
@@ -81,8 +84,7 @@
pr_debug("lcd: set contrast to %d\n", contrast);
ld->props->set_contrast(ld, contrast);
rc = count;
- } else
- rc = -ENXIO;
+ }
up(&ld->sem);
return rc;
@@ -90,14 +92,12 @@
static ssize_t lcd_show_max_contrast(struct class_device *cdev, char *buf)
{
- int rc;
+ int rc = -ENXIO;
struct lcd_device *ld = to_lcd_device(cdev);
down(&ld->sem);
if (likely(ld->props))
rc = sprintf(buf, "%d\n", ld->props->max_contrast);
- else
- rc = -ENXIO;
up(&ld->sem);
return rc;
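
Both the backlight and lcd store methods now require the whole sysfs buffer to be a number followed by at most one trailing whitespace character, so "5" and "5\n" are accepted while "5x\n" and "5 6" return -EINVAL. Pulled out as a generic helper, the check reads roughly like this (a hedged sketch, not a function from this patch; assumes linux/ctype.h for isspace):

static int parse_uint_strict(const char *buf, size_t count, int *value)
{
        char *endp;
        size_t size;

        *value = simple_strtoul(buf, &endp, 0);
        size = endp - buf;
        if (*endp && isspace(*endp))
                size++;                 /* allow a single trailing '\n' */
        if (size != count)
                return -EINVAL;         /* trailing or embedded junk */
        return 0;
}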
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 788297e..44aa2ff 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -76,8 +76,8 @@
*
* Experiment with v_offset to find out which works best for you.
*/
-static u32 v_offset_default __initdata; /* For 32 MiB Aper size, 8 should be the default */
-static u32 voffset __initdata = 0;
+static u32 v_offset_default __devinitdata; /* For 32 MiB Aper size, 8 should be the default */
+static u32 voffset __devinitdata;
static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor);
static int __devinit i810fb_init_pci (struct pci_dev *dev,
diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c
index 8073a73..440272a 100644
--- a/drivers/video/matrox/g450_pll.c
+++ b/drivers/video/matrox/g450_pll.c
@@ -316,14 +316,24 @@
case M_PIXEL_PLL_B:
case M_PIXEL_PLL_C:
{
- u_int8_t tmp;
+ u_int8_t tmp, xpwrctrl;
unsigned long flags;
matroxfb_DAC_lock_irqsave(flags);
+
+ xpwrctrl = matroxfb_DAC_in(PMINFO M1064_XPWRCTRL);
+ matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl & ~M1064_XPWRCTRL_PANELPDN);
+ mga_outb(M_SEQ_INDEX, M_SEQ1);
+ mga_outb(M_SEQ_DATA, mga_inb(M_SEQ_DATA) | M_SEQ1_SCROFF);
tmp = matroxfb_DAC_in(PMINFO M1064_XPIXCLKCTRL);
+ tmp |= M1064_XPIXCLKCTRL_DIS;
if (!(tmp & M1064_XPIXCLKCTRL_PLL_UP)) {
- matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp | M1064_XPIXCLKCTRL_PLL_UP);
+ tmp |= M1064_XPIXCLKCTRL_PLL_UP;
}
+ matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp);
+ matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0);
+ matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl);
+
matroxfb_DAC_unlock_irqrestore(flags);
}
{
@@ -418,6 +428,15 @@
frequency to higher - with <= lowest wins, while
with < highest one wins */
if (delta <= deltaarray[idx-1]) {
+ /* all else being equal except VCO,
+ * choose VCO not near (within 1/16th or so) VCOmin
+ * (freqs near VCOmin aren't as stable)
+ */
+ if (delta == deltaarray[idx-1]
+ && vco != g450_mnp2vco(PMINFO mnparray[idx-1])
+ && vco < (pi->vcomin * 17 / 16)) {
+ break;
+ }
mnparray[idx] = mnparray[idx-1];
deltaarray[idx] = deltaarray[idx-1];
} else {
diff --git a/drivers/video/matrox/matroxfb_DAC1064.h b/drivers/video/matrox/matroxfb_DAC1064.h
index 2e7238a..56513a5 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.h
+++ b/drivers/video/matrox/matroxfb_DAC1064.h
@@ -40,6 +40,7 @@
#define M1064_XCURCOL1RED 0x0C
#define M1064_XCURCOL1GREEN 0x0D
#define M1064_XCURCOL1BLUE 0x0E
+#define M1064_XDVICLKCTRL 0x0F
#define M1064_XCURCOL2RED 0x10
#define M1064_XCURCOL2GREEN 0x11
#define M1064_XCURCOL2BLUE 0x12
@@ -144,6 +145,7 @@
#define M1064_XVIDPLLN 0x8F
#define M1064_XPWRCTRL 0xA0
+#define M1064_XPWRCTRL_PANELPDN 0x04
#define M1064_XPANMODE 0xA2
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index 3a3e180..b717371 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -672,6 +672,8 @@
#define M_SEQ_INDEX 0x1FC4
#define M_SEQ_DATA 0x1FC5
+#define M_SEQ1 0x01
+#define M_SEQ1_SCROFF 0x20
#define M_MISC_REG_READ 0x1FCC
diff --git a/fs/9p/fcall.c b/fs/9p/fcall.c
index 71742ba1..6f26178 100644
--- a/fs/9p/fcall.c
+++ b/fs/9p/fcall.c
@@ -98,23 +98,20 @@
static void v9fs_t_clunk_cb(void *a, struct v9fs_fcall *tc,
struct v9fs_fcall *rc, int err)
{
- int fid;
+ int fid, id;
struct v9fs_session_info *v9ses;
- if (err)
- return;
-
+ id = 0;
fid = tc->params.tclunk.fid;
+ if (rc)
+ id = rc->id;
+
kfree(tc);
-
- if (!rc)
- return;
-
- v9ses = a;
- if (rc->id == RCLUNK)
- v9fs_put_idpool(fid, &v9ses->fidpool);
-
kfree(rc);
+ if (id == RCLUNK) {
+ v9ses = a;
+ v9fs_put_idpool(fid, &v9ses->fidpool);
+ }
}
/**
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 3e5b124..f4407eb 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -50,15 +50,23 @@
Wpending = 8, /* can write */
};
+enum {
+ None,
+ Flushing,
+ Flushed,
+};
+
struct v9fs_mux_poll_task;
struct v9fs_req {
+ spinlock_t lock;
int tag;
struct v9fs_fcall *tcall;
struct v9fs_fcall *rcall;
int err;
v9fs_mux_req_callback cb;
void *cba;
+ int flush;
struct list_head req_list;
};
@@ -96,8 +104,8 @@
struct v9fs_mux_rpc {
struct v9fs_mux_data *m;
- struct v9fs_req *req;
int err;
+ struct v9fs_fcall *tcall;
struct v9fs_fcall *rcall;
wait_queue_head_t wqueue;
};
@@ -524,10 +532,9 @@
static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
- int ecode, tag;
+ int ecode;
struct v9fs_str *ename;
- tag = req->tag;
if (!req->err && req->rcall->id == RERROR) {
ecode = req->rcall->params.rerror.errno;
ename = &req->rcall->params.rerror.error;
@@ -553,23 +560,6 @@
if (!req->err)
req->err = -EIO;
}
-
- if (req->err == ERREQFLUSH)
- return;
-
- if (req->cb) {
- dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
- req->tcall, req->rcall);
-
- (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
- req->cb = NULL;
- } else
- kfree(req->rcall);
-
- v9fs_mux_put_tag(m, tag);
-
- wake_up(&m->equeue);
- kfree(req);
}
/**
@@ -669,17 +659,26 @@
list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
if (rreq->tag == rcall->tag) {
req = rreq;
- req->rcall = rcall;
- list_del(&req->req_list);
- spin_unlock(&m->lock);
- process_request(m, req);
+ if (req->flush != Flushing)
+ list_del(&req->req_list);
break;
}
-
}
+ spin_unlock(&m->lock);
- if (!req) {
- spin_unlock(&m->lock);
+ if (req) {
+ req->rcall = rcall;
+ process_request(m, req);
+
+ if (req->flush != Flushing) {
+ if (req->cb)
+ (*req->cb) (req, req->cba);
+ else
+ kfree(req->rcall);
+
+ wake_up(&m->equeue);
+ }
+ } else {
if (err >= 0 && rcall->id != RFLUSH)
dprintk(DEBUG_ERROR,
"unexpected response mux %p id %d tag %d\n",
@@ -746,7 +745,6 @@
return ERR_PTR(-ENOMEM);
v9fs_set_tag(tc, n);
-
if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) {
char buf[150];
@@ -754,12 +752,14 @@
printk(KERN_NOTICE "<<< %p %s\n", m, buf);
}
+ spin_lock_init(&req->lock);
req->tag = n;
req->tcall = tc;
req->rcall = NULL;
req->err = 0;
req->cb = cb;
req->cba = cba;
+ req->flush = None;
spin_lock(&m->lock);
list_add_tail(&req->req_list, &m->unsent_req_list);
@@ -776,72 +776,108 @@
return req;
}
-static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc,
- struct v9fs_fcall *rc, int err)
+static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req)
+{
+ v9fs_mux_put_tag(m, req->tag);
+ kfree(req);
+}
+
+static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a)
{
v9fs_mux_req_callback cb;
int tag;
struct v9fs_mux_data *m;
- struct v9fs_req *req, *rptr;
+ struct v9fs_req *req, *rreq, *rptr;
m = a;
- dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
- rc, err, tc->params.tflush.oldtag);
+ dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
+ freq->tcall, freq->rcall, freq->err,
+ freq->tcall->params.tflush.oldtag);
spin_lock(&m->lock);
cb = NULL;
- tag = tc->params.tflush.oldtag;
- list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
- if (req->tag == tag) {
+ tag = freq->tcall->params.tflush.oldtag;
+ req = NULL;
+ list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+ if (rreq->tag == tag) {
+ req = rreq;
list_del(&req->req_list);
- if (req->cb) {
- cb = req->cb;
- req->cb = NULL;
- spin_unlock(&m->lock);
- (*cb) (req->cba, req->tcall, req->rcall,
- req->err);
- }
- kfree(req);
- wake_up(&m->equeue);
break;
}
}
+ spin_unlock(&m->lock);
- if (!cb)
- spin_unlock(&m->lock);
+ if (req) {
+ spin_lock(&req->lock);
+ req->flush = Flushed;
+ spin_unlock(&req->lock);
- v9fs_mux_put_tag(m, tag);
- kfree(tc);
- kfree(rc);
+ if (req->cb)
+ (*req->cb) (req, req->cba);
+ else
+ kfree(req->rcall);
+
+ wake_up(&m->equeue);
+ }
+
+ kfree(freq->tcall);
+ kfree(freq->rcall);
+ v9fs_mux_free_request(m, freq);
}
-static void
+static int
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
struct v9fs_fcall *fc;
+ struct v9fs_req *rreq, *rptr;
dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);
+ /* if a response was received for a request, do nothing */
+ spin_lock(&req->lock);
+ if (req->rcall || req->err) {
+ spin_unlock(&req->lock);
+ dprintk(DEBUG_MUX, "mux %p req %p response already received\n", m, req);
+ return 0;
+ }
+
+ req->flush = Flushing;
+ spin_unlock(&req->lock);
+
+ spin_lock(&m->lock);
+ /* if the request is not sent yet, just remove it from the list */
+ list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
+ if (rreq->tag == req->tag) {
+ dprintk(DEBUG_MUX, "mux %p req %p request is not sent yet\n", m, req);
+ list_del(&rreq->req_list);
+ req->flush = Flushed;
+ spin_unlock(&m->lock);
+ if (req->cb)
+ (*req->cb) (req, req->cba);
+ return 0;
+ }
+ }
+ spin_unlock(&m->lock);
+
+ clear_thread_flag(TIF_SIGPENDING);
fc = v9fs_create_tflush(req->tag);
v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
+ return 1;
}
static void
-v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
+v9fs_mux_rpc_cb(struct v9fs_req *req, void *a)
{
struct v9fs_mux_rpc *r;
- if (err == ERREQFLUSH) {
- kfree(rc);
- dprintk(DEBUG_MUX, "err req flush\n");
- return;
- }
-
+ dprintk(DEBUG_MUX, "req %p r %p\n", req, a);
r = a;
- dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
- tc, rc, err);
- r->rcall = rc;
- r->err = err;
+ r->rcall = req->rcall;
+ r->err = req->err;
+
+ if (req->flush!=None && !req->err)
+ r->err = -ERESTARTSYS;
+
wake_up(&r->wqueue);
}
@@ -856,12 +892,13 @@
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
struct v9fs_fcall **rc)
{
- int err;
+ int err, sigpending;
unsigned long flags;
struct v9fs_req *req;
struct v9fs_mux_rpc r;
r.err = 0;
+ r.tcall = tc;
r.rcall = NULL;
r.m = m;
init_waitqueue_head(&r.wqueue);
@@ -869,48 +906,50 @@
if (rc)
*rc = NULL;
+ sigpending = 0;
+ if (signal_pending(current)) {
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+ }
+
req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
if (IS_ERR(req)) {
err = PTR_ERR(req);
dprintk(DEBUG_MUX, "error %d\n", err);
- return PTR_ERR(req);
+ return err;
}
- r.req = req;
- dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
- req->tag, &r, req);
err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
if (r.err < 0)
err = r.err;
if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
- spin_lock(&m->lock);
- req->tcall = NULL;
- req->err = ERREQFLUSH;
- spin_unlock(&m->lock);
+ if (v9fs_mux_flush_request(m, req)) {
+ /* wait until we get response of the flush message */
+ do {
+ clear_thread_flag(TIF_SIGPENDING);
+ err = wait_event_interruptible(r.wqueue,
+ r.rcall || r.err);
+ } while (!r.rcall && !r.err && err==-ERESTARTSYS &&
+ m->trans->status==Connected && !m->err);
+ }
+ sigpending = 1;
+ }
- clear_thread_flag(TIF_SIGPENDING);
- v9fs_mux_flush_request(m, req);
+ if (sigpending) {
spin_lock_irqsave(¤t->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
}
- if (!err) {
- if (r.rcall)
- dprintk(DEBUG_MUX, "got response id %d tag %d\n",
- r.rcall->id, r.rcall->tag);
-
- if (rc)
- *rc = r.rcall;
- else
- kfree(r.rcall);
- } else {
+ if (rc)
+ *rc = r.rcall;
+ else
kfree(r.rcall);
- dprintk(DEBUG_MUX, "got error %d\n", err);
- if (err > 0)
- err = -EIO;
- }
+
+ v9fs_mux_free_request(m, req);
+ if (err > 0)
+ err = -EIO;
return err;
}
@@ -951,12 +990,15 @@
struct v9fs_req *req, *rtmp;
LIST_HEAD(cancel_list);
- dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
+ dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err);
m->err = err;
spin_lock(&m->lock);
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
}
+ list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
+ list_move(&req->req_list, &cancel_list);
+ }
spin_unlock(&m->lock);
list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
@@ -965,11 +1007,9 @@
req->err = err;
if (req->cb)
- (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
+ (*req->cb) (req, req->cba);
else
kfree(req->rcall);
-
- kfree(req);
}
wake_up(&m->equeue);
diff --git a/fs/9p/mux.h b/fs/9p/mux.h
index e90bfd3..fb10c50 100644
--- a/fs/9p/mux.h
+++ b/fs/9p/mux.h
@@ -24,6 +24,7 @@
*/
struct v9fs_mux_data;
+struct v9fs_req;
/**
* v9fs_mux_req_callback - callback function that is called when the
@@ -36,8 +37,7 @@
* @rc - response call
* @err - error code (non-zero if error occured)
*/
-typedef void (*v9fs_mux_req_callback)(void *a, struct v9fs_fcall *tc,
- struct v9fs_fcall *rc, int err);
+typedef void (*v9fs_mux_req_callback)(struct v9fs_req *req, void *a);
int v9fs_mux_global_init(void);
void v9fs_mux_global_exit(void);
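
With the new prototype, the request itself carries tcall, rcall, err and the flush state, so a callback inspects the struct v9fs_req instead of receiving those values as arguments, and it has to treat a flushed request differently from a completed one. A minimal callback under the new contract (hedged sketch; struct my_ctx and its fields are hypothetical):

static void my_req_cb(struct v9fs_req *req, void *a)
{
        struct my_ctx *ctx = a;

        if (req->flush != None && !req->err)
                ctx->err = -ERESTARTSYS;        /* request was flushed */
        else
                ctx->err = req->err;

        ctx->rcall = req->rcall;        /* callback owns rcall from here on */
        wake_up(&ctx->wq);
}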
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 083dcfc..1a8e460 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -72,11 +72,17 @@
return -ENOSPC;
}
- err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, NULL);
+ err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, &fcall);
if (err < 0) {
dprintk(DEBUG_ERROR, "rewalk didn't work\n");
- goto put_fid;
+ if (fcall && fcall->id == RWALK)
+ goto clunk_fid;
+ else {
+ v9fs_put_idpool(fid, &v9ses->fidpool);
+ goto free_fcall;
+ }
}
+ kfree(fcall);
/* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */
/* translate open mode appropriately */
@@ -109,8 +115,7 @@
clunk_fid:
v9fs_t_clunk(v9ses, fid);
-put_fid:
- v9fs_put_idpool(fid, &v9ses->fidpool);
+free_fcall:
kfree(fcall);
return err;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 133db36..2cb87ba 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -270,7 +270,10 @@
err = v9fs_t_walk(v9ses, pfid, fid, NULL, &fcall);
if (err < 0) {
PRINT_FCALL_ERROR("clone error", fcall);
- goto put_fid;
+ if (fcall && fcall->id == RWALK)
+ goto clunk_fid;
+ else
+ goto put_fid;
}
kfree(fcall);
@@ -322,6 +325,9 @@
&fcall);
if (err < 0) {
+ if (fcall && fcall->id == RWALK)
+ goto clunk_fid;
+
PRINT_FCALL_ERROR("walk error", fcall);
v9fs_put_idpool(nfid, &v9ses->fidpool);
goto error;
@@ -640,19 +646,26 @@
}
result = v9fs_t_walk(v9ses, dirfidnum, newfid,
- (char *)dentry->d_name.name, NULL);
+ (char *)dentry->d_name.name, &fcall);
+
if (result < 0) {
- v9fs_put_idpool(newfid, &v9ses->fidpool);
+ if (fcall && fcall->id == RWALK)
+ v9fs_t_clunk(v9ses, newfid);
+ else
+ v9fs_put_idpool(newfid, &v9ses->fidpool);
+
if (result == -ENOENT) {
d_add(dentry, NULL);
dprintk(DEBUG_VFS,
"Return negative dentry %p count %d\n",
dentry, atomic_read(&dentry->d_count));
+ kfree(fcall);
return NULL;
}
dprintk(DEBUG_ERROR, "walk error:%d\n", result);
goto FreeFcall;
}
+ kfree(fcall);
result = v9fs_t_stat(v9ses, newfid, &fcall);
if (result < 0) {
diff --git a/fs/Makefile b/fs/Makefile
index 83bf478..078d3d1 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -45,6 +45,7 @@
obj-$(CONFIG_PROC_FS) += proc/
obj-y += partitions/
obj-$(CONFIG_SYSFS) += sysfs/
+obj-$(CONFIG_CONFIGFS_FS) += configfs/
obj-y += devpts/
obj-$(CONFIG_PROFILING) += dcookies.o
@@ -100,5 +101,4 @@
obj-$(CONFIG_HOSTFS) += hostfs/
obj-$(CONFIG_HPPFS) += hppfs/
obj-$(CONFIG_DEBUG_FS) += debugfs/
-obj-$(CONFIG_CONFIGFS_FS) += configfs/
obj-$(CONFIG_OCFS2_FS) += ocfs2/
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 57c4903..d6603d0 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -74,8 +74,8 @@
struct autofs_wait_queue *next;
autofs_wqt_t wait_queue_token;
/* We use the following to see what we are waiting for */
- int hash;
- int len;
+ unsigned int hash;
+ unsigned int len;
char *name;
u32 dev;
u64 ino;
@@ -85,7 +85,6 @@
pid_t tgid;
/* This is for status reporting upon return */
int status;
- atomic_t notify;
atomic_t wait_ctr;
};
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 84e030c..5100f98 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -327,6 +327,7 @@
static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
int oz_mode = autofs4_oz_mode(sbi);
unsigned int lookup_type;
int status;
@@ -340,13 +341,8 @@
if (oz_mode || !lookup_type)
goto done;
- /*
- * If a request is pending wait for it.
- * If it's a mount then it won't be expired till at least
- * a liitle later and if it's an expire then we might need
- * to mount it again.
- */
- if (autofs4_ispending(dentry)) {
+ /* If an expire request is pending wait for it. */
+ if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) {
DPRINTK("waiting for active request %p name=%.*s",
dentry, dentry->d_name.len, dentry->d_name.name);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 142ab6a..ce103e7 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -189,14 +189,30 @@
return len;
}
+static struct autofs_wait_queue *
+autofs4_find_wait(struct autofs_sb_info *sbi,
+ char *name, unsigned int hash, unsigned int len)
+{
+ struct autofs_wait_queue *wq;
+
+ for (wq = sbi->queues; wq; wq = wq->next) {
+ if (wq->hash == hash &&
+ wq->len == len &&
+ wq->name && !memcmp(wq->name, name, len))
+ break;
+ }
+ return wq;
+}
+
int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
enum autofs_notify notify)
{
+ struct autofs_info *ino;
struct autofs_wait_queue *wq;
char *name;
unsigned int len = 0;
unsigned int hash = 0;
- int status;
+ int status, type;
/* In catatonic mode, we don't wait for nobody */
if (sbi->catatonic)
@@ -223,21 +239,41 @@
return -EINTR;
}
- for (wq = sbi->queues ; wq ; wq = wq->next) {
- if (wq->hash == dentry->d_name.hash &&
- wq->len == len &&
- wq->name && !memcmp(wq->name, name, len))
- break;
+ wq = autofs4_find_wait(sbi, name, hash, len);
+ ino = autofs4_dentry_ino(dentry);
+ if (!wq && ino && notify == NFY_NONE) {
+ /*
+ * Either we've beaten the pending expire to post its
+ * wait or it finished while we waited on the mutex.
+ * So we need to wait till either, the wait appears
+ * or the expire finishes.
+ */
+
+ while (ino->flags & AUTOFS_INF_EXPIRING) {
+ mutex_unlock(&sbi->wq_mutex);
+ schedule_timeout_interruptible(HZ/10);
+ if (mutex_lock_interruptible(&sbi->wq_mutex)) {
+ kfree(name);
+ return -EINTR;
+ }
+ wq = autofs4_find_wait(sbi, name, hash, len);
+ if (wq)
+ break;
+ }
+
+ /*
+ * Not ideal but the status has already gone. Of the two
+ * cases where we wait on NFY_NONE neither depends on the
+ * return status of the wait.
+ */
+ if (!wq) {
+ kfree(name);
+ mutex_unlock(&sbi->wq_mutex);
+ return 0;
+ }
}
if (!wq) {
- /* Can't wait for an expire if there's no mount */
- if (notify == NFY_NONE && !d_mountpoint(dentry)) {
- kfree(name);
- mutex_unlock(&sbi->wq_mutex);
- return -ENOENT;
- }
-
/* Create a new wait queue */
wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
if (!wq) {
@@ -263,20 +299,7 @@
wq->tgid = current->tgid;
wq->status = -EINTR; /* Status return if interrupted */
atomic_set(&wq->wait_ctr, 2);
- atomic_set(&wq->notify, 1);
mutex_unlock(&sbi->wq_mutex);
- } else {
- atomic_inc(&wq->wait_ctr);
- mutex_unlock(&sbi->wq_mutex);
- kfree(name);
- DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
- (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
- }
-
- if (notify != NFY_NONE && atomic_read(&wq->notify)) {
- int type;
-
- atomic_dec(&wq->notify);
if (sbi->version < 5) {
if (notify == NFY_MOUNT)
@@ -299,6 +322,12 @@
/* autofs4_notify_daemon() may block */
autofs4_notify_daemon(sbi, wq, type);
+ } else {
+ atomic_inc(&wq->wait_ctr);
+ mutex_unlock(&sbi->wq_mutex);
+ kfree(name);
+ DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
+ (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
}
/* wq->name is NULL if and only if the lock is already released */
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 69f44dc..b1c902e 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -428,7 +428,6 @@
loff_t fpos;
unsigned long start_code, end_code;
int ret;
- int exec_fileno;
hdr = ((struct flat_hdr *) bprm->buf); /* exec-header */
inode = bprm->file->f_dentry->d_inode;
@@ -502,21 +501,12 @@
goto err;
}
- /* check file descriptor */
- exec_fileno = get_unused_fd();
- if (exec_fileno < 0) {
- ret = -EMFILE;
- goto err;
- }
- get_file(bprm->file);
- fd_install(exec_fileno, bprm->file);
-
/* Flush all traces of the currently running executable */
if (id == 0) {
result = flush_old_exec(bprm);
if (result) {
ret = result;
- goto err_close;
+ goto err;
}
/* OK, This is the point of no return */
@@ -548,7 +538,7 @@
textpos = (unsigned long) -ENOMEM;
printk("Unable to mmap process text, errno %d\n", (int)-textpos);
ret = textpos;
- goto err_close;
+ goto err;
}
down_write(¤t->mm->mmap_sem);
@@ -564,7 +554,7 @@
(int)-datapos);
do_munmap(current->mm, textpos, text_len);
ret = realdatastart;
- goto err_close;
+ goto err;
}
datapos = realdatastart + MAX_SHARED_LIBS * sizeof(unsigned long);
@@ -587,7 +577,7 @@
do_munmap(current->mm, textpos, text_len);
do_munmap(current->mm, realdatastart, data_len + extra);
ret = result;
- goto err_close;
+ goto err;
}
reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len));
@@ -606,7 +596,7 @@
printk("Unable to allocate RAM for process text/data, errno %d\n",
(int)-textpos);
ret = textpos;
- goto err_close;
+ goto err;
}
realdatastart = textpos + ntohl(hdr->data_start);
@@ -652,7 +642,7 @@
do_munmap(current->mm, textpos, text_len + data_len + extra +
MAX_SHARED_LIBS * sizeof(unsigned long));
ret = result;
- goto err_close;
+ goto err;
}
}
@@ -717,7 +707,7 @@
addr = calc_reloc(*rp, libinfo, id, 0);
if (addr == RELOC_FAILED) {
ret = -ENOEXEC;
- goto err_close;
+ goto err;
}
*rp = addr;
}
@@ -747,7 +737,7 @@
rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1);
if (rp == (unsigned long *)RELOC_FAILED) {
ret = -ENOEXEC;
- goto err_close;
+ goto err;
}
/* Get the pointer's value. */
@@ -762,7 +752,7 @@
addr = calc_reloc(addr, libinfo, id, 0);
if (addr == RELOC_FAILED) {
ret = -ENOEXEC;
- goto err_close;
+ goto err;
}
/* Write back the relocated pointer. */
@@ -783,8 +773,6 @@
stack_len);
return 0;
-err_close:
- sys_close(exec_fileno);
err:
return ret;
}
diff --git a/fs/compat.c b/fs/compat.c
index 970888a..b1f6478 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1913,7 +1913,7 @@
}
if (sigmask) {
- if (sigsetsize |= sizeof(compat_sigset_t))
+ if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
return -EFAULT;
@@ -2030,109 +2030,115 @@
struct knfsd_fh cr32_getfs;
};
-static int compat_nfs_svc_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+static int compat_nfs_svc_trans(struct nfsctl_arg *karg,
+ struct compat_nfsctl_arg __user *arg)
{
- int err;
-
- err = access_ok(VERIFY_READ, &arg->ca32_svc, sizeof(arg->ca32_svc));
- err |= get_user(karg->ca_version, &arg->ca32_version);
- err |= __get_user(karg->ca_svc.svc_port, &arg->ca32_svc.svc32_port);
- err |= __get_user(karg->ca_svc.svc_nthreads, &arg->ca32_svc.svc32_nthreads);
- return (err) ? -EFAULT : 0;
+ if (!access_ok(VERIFY_READ, &arg->ca32_svc, sizeof(arg->ca32_svc)) ||
+ get_user(karg->ca_version, &arg->ca32_version) ||
+ __get_user(karg->ca_svc.svc_port, &arg->ca32_svc.svc32_port) ||
+ __get_user(karg->ca_svc.svc_nthreads,
+ &arg->ca32_svc.svc32_nthreads))
+ return -EFAULT;
+ return 0;
}
-static int compat_nfs_clnt_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+static int compat_nfs_clnt_trans(struct nfsctl_arg *karg,
+ struct compat_nfsctl_arg __user *arg)
{
- int err;
+ if (!access_ok(VERIFY_READ, &arg->ca32_client,
+ sizeof(arg->ca32_client)) ||
+ get_user(karg->ca_version, &arg->ca32_version) ||
+ __copy_from_user(&karg->ca_client.cl_ident[0],
+ &arg->ca32_client.cl32_ident[0],
+ NFSCLNT_IDMAX) ||
+ __get_user(karg->ca_client.cl_naddr,
+ &arg->ca32_client.cl32_naddr) ||
+ __copy_from_user(&karg->ca_client.cl_addrlist[0],
+ &arg->ca32_client.cl32_addrlist[0],
+ (sizeof(struct in_addr) * NFSCLNT_ADDRMAX)) ||
+ __get_user(karg->ca_client.cl_fhkeytype,
+ &arg->ca32_client.cl32_fhkeytype) ||
+ __get_user(karg->ca_client.cl_fhkeylen,
+ &arg->ca32_client.cl32_fhkeylen) ||
+ __copy_from_user(&karg->ca_client.cl_fhkey[0],
+ &arg->ca32_client.cl32_fhkey[0],
+ NFSCLNT_KEYMAX))
+ return -EFAULT;
- err = access_ok(VERIFY_READ, &arg->ca32_client, sizeof(arg->ca32_client));
- err |= get_user(karg->ca_version, &arg->ca32_version);
- err |= __copy_from_user(&karg->ca_client.cl_ident[0],
- &arg->ca32_client.cl32_ident[0],
- NFSCLNT_IDMAX);
- err |= __get_user(karg->ca_client.cl_naddr, &arg->ca32_client.cl32_naddr);
- err |= __copy_from_user(&karg->ca_client.cl_addrlist[0],
- &arg->ca32_client.cl32_addrlist[0],
- (sizeof(struct in_addr) * NFSCLNT_ADDRMAX));
- err |= __get_user(karg->ca_client.cl_fhkeytype,
- &arg->ca32_client.cl32_fhkeytype);
- err |= __get_user(karg->ca_client.cl_fhkeylen,
- &arg->ca32_client.cl32_fhkeylen);
- err |= __copy_from_user(&karg->ca_client.cl_fhkey[0],
- &arg->ca32_client.cl32_fhkey[0],
- NFSCLNT_KEYMAX);
-
- return (err) ? -EFAULT : 0;
+ return 0;
}
-static int compat_nfs_exp_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+static int compat_nfs_exp_trans(struct nfsctl_arg *karg,
+ struct compat_nfsctl_arg __user *arg)
{
- int err;
-
- err = access_ok(VERIFY_READ, &arg->ca32_export, sizeof(arg->ca32_export));
- err |= get_user(karg->ca_version, &arg->ca32_version);
- err |= __copy_from_user(&karg->ca_export.ex_client[0],
- &arg->ca32_export.ex32_client[0],
- NFSCLNT_IDMAX);
- err |= __copy_from_user(&karg->ca_export.ex_path[0],
- &arg->ca32_export.ex32_path[0],
- NFS_MAXPATHLEN);
- err |= __get_user(karg->ca_export.ex_dev,
- &arg->ca32_export.ex32_dev);
- err |= __get_user(karg->ca_export.ex_ino,
- &arg->ca32_export.ex32_ino);
- err |= __get_user(karg->ca_export.ex_flags,
- &arg->ca32_export.ex32_flags);
- err |= __get_user(karg->ca_export.ex_anon_uid,
- &arg->ca32_export.ex32_anon_uid);
- err |= __get_user(karg->ca_export.ex_anon_gid,
- &arg->ca32_export.ex32_anon_gid);
+ if (!access_ok(VERIFY_READ, &arg->ca32_export,
+ sizeof(arg->ca32_export)) ||
+ get_user(karg->ca_version, &arg->ca32_version) ||
+ __copy_from_user(&karg->ca_export.ex_client[0],
+ &arg->ca32_export.ex32_client[0],
+ NFSCLNT_IDMAX) ||
+ __copy_from_user(&karg->ca_export.ex_path[0],
+ &arg->ca32_export.ex32_path[0],
+ NFS_MAXPATHLEN) ||
+ __get_user(karg->ca_export.ex_dev,
+ &arg->ca32_export.ex32_dev) ||
+ __get_user(karg->ca_export.ex_ino,
+ &arg->ca32_export.ex32_ino) ||
+ __get_user(karg->ca_export.ex_flags,
+ &arg->ca32_export.ex32_flags) ||
+ __get_user(karg->ca_export.ex_anon_uid,
+ &arg->ca32_export.ex32_anon_uid) ||
+ __get_user(karg->ca_export.ex_anon_gid,
+ &arg->ca32_export.ex32_anon_gid))
+ return -EFAULT;
SET_UID(karg->ca_export.ex_anon_uid, karg->ca_export.ex_anon_uid);
SET_GID(karg->ca_export.ex_anon_gid, karg->ca_export.ex_anon_gid);
- return (err) ? -EFAULT : 0;
+ return 0;
}
-static int compat_nfs_getfd_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+static int compat_nfs_getfd_trans(struct nfsctl_arg *karg,
+ struct compat_nfsctl_arg __user *arg)
{
- int err;
+ if (!access_ok(VERIFY_READ, &arg->ca32_getfd,
+ sizeof(arg->ca32_getfd)) ||
+ get_user(karg->ca_version, &arg->ca32_version) ||
+ __copy_from_user(&karg->ca_getfd.gd_addr,
+ &arg->ca32_getfd.gd32_addr,
+ (sizeof(struct sockaddr))) ||
+ __copy_from_user(&karg->ca_getfd.gd_path,
+ &arg->ca32_getfd.gd32_path,
+ (NFS_MAXPATHLEN+1)) ||
+ __get_user(karg->ca_getfd.gd_version,
+ &arg->ca32_getfd.gd32_version))
+ return -EFAULT;
- err = access_ok(VERIFY_READ, &arg->ca32_getfd, sizeof(arg->ca32_getfd));
- err |= get_user(karg->ca_version, &arg->ca32_version);
- err |= __copy_from_user(&karg->ca_getfd.gd_addr,
- &arg->ca32_getfd.gd32_addr,
- (sizeof(struct sockaddr)));
- err |= __copy_from_user(&karg->ca_getfd.gd_path,
- &arg->ca32_getfd.gd32_path,
- (NFS_MAXPATHLEN+1));
- err |= __get_user(karg->ca_getfd.gd_version,
- &arg->ca32_getfd.gd32_version);
-
- return (err) ? -EFAULT : 0;
+ return 0;
}
-static int compat_nfs_getfs_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+static int compat_nfs_getfs_trans(struct nfsctl_arg *karg,
+ struct compat_nfsctl_arg __user *arg)
{
- int err;
+ if (!access_ok(VERIFY_READ,&arg->ca32_getfs,sizeof(arg->ca32_getfs)) ||
+ get_user(karg->ca_version, &arg->ca32_version) ||
+ __copy_from_user(&karg->ca_getfs.gd_addr,
+ &arg->ca32_getfs.gd32_addr,
+ (sizeof(struct sockaddr))) ||
+ __copy_from_user(&karg->ca_getfs.gd_path,
+ &arg->ca32_getfs.gd32_path,
+ (NFS_MAXPATHLEN+1)) ||
+ __get_user(karg->ca_getfs.gd_maxlen,
+ &arg->ca32_getfs.gd32_maxlen))
+ return -EFAULT;
- err = access_ok(VERIFY_READ, &arg->ca32_getfs, sizeof(arg->ca32_getfs));
- err |= get_user(karg->ca_version, &arg->ca32_version);
- err |= __copy_from_user(&karg->ca_getfs.gd_addr,
- &arg->ca32_getfs.gd32_addr,
- (sizeof(struct sockaddr)));
- err |= __copy_from_user(&karg->ca_getfs.gd_path,
- &arg->ca32_getfs.gd32_path,
- (NFS_MAXPATHLEN+1));
- err |= __get_user(karg->ca_getfs.gd_maxlen,
- &arg->ca32_getfs.gd32_maxlen);
-
- return (err) ? -EFAULT : 0;
+ return 0;
}
/* This really doesn't need translations, we are only passing
* back a union which contains opaque nfs file handle data.
*/
-static int compat_nfs_getfh_res_trans(union nfsctl_res *kres, union compat_nfsctl_res __user *res)
+static int compat_nfs_getfh_res_trans(union nfsctl_res *kres,
+ union compat_nfsctl_res __user *res)
{
int err;
@@ -2141,8 +2147,9 @@
return (err) ? -EFAULT : 0;
}
-asmlinkage long compat_sys_nfsservctl(int cmd, struct compat_nfsctl_arg __user *arg,
- union compat_nfsctl_res __user *res)
+asmlinkage long compat_sys_nfsservctl(int cmd,
+ struct compat_nfsctl_arg __user *arg,
+ union compat_nfsctl_res __user *res)
{
struct nfsctl_arg *karg;
union nfsctl_res *kres;
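
The compat_nfsservctl conversion above folds the old "err |= ..." accumulation into one short-circuiting || chain, so each translation helper returns -EFAULT as soon as any access_ok()/get_user()/__copy_from_user() step fails instead of OR-ing error values together. Below is a minimal user-space sketch of the same pattern; the structures and the copy_field() helper are invented for illustration and are not kernel APIs.

#include <stdio.h>
#include <string.h>

struct src { int version; char ident[16]; };
struct dst { int version; char ident[16]; };

/* Stand-in for get_user()/__copy_from_user(); returns nonzero on failure. */
static int copy_field(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

/* Each copy is one clause; the first failure short-circuits the rest. */
static int translate(struct dst *d, const struct src *s)
{
	if (copy_field(&d->version, &s->version, sizeof(d->version)) ||
	    copy_field(d->ident, s->ident, sizeof(d->ident)))
		return -1;	/* the kernel code returns -EFAULT here */
	return 0;
}

int main(void)
{
	struct src s = { 3, "client-0" };
	struct dst d;

	printf("translate() -> %d (version=%d, ident=%s)\n",
	       translate(&d, &s), d.version, d.ident);
	return 0;
}
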
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 5638c8f..5f95218 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -505,13 +505,15 @@
int i;
if (group->default_groups) {
- /* FYI, we're faking mkdir here
+ /*
+ * FYI, we're faking mkdir here
* I'm not sure we need this semaphore, as we're called
* from our parent's mkdir. That holds our parent's
* i_mutex, so afaik lookup cannot continue through our
* parent to find us, let alone mess with our tree.
* That said, taking our i_mutex is closer to mkdir
- * emulation, and shouldn't hurt. */
+ * emulation, and shouldn't hurt.
+ */
mutex_lock(&dentry->d_inode->i_mutex);
for (i = 0; group->default_groups[i]; i++) {
@@ -546,20 +548,34 @@
item->ci_group = NULL;
item->ci_parent = NULL;
+
+ /* Drop the reference for ci_entry */
config_item_put(item);
+ /* Drop the reference for ci_parent */
config_group_put(group);
}
}
static void link_obj(struct config_item *parent_item, struct config_item *item)
{
- /* Parent seems redundant with group, but it makes certain
- * traversals much nicer. */
+ /*
+ * Parent seems redundant with group, but it makes certain
+ * traversals much nicer.
+ */
item->ci_parent = parent_item;
+
+ /*
+ * We hold a reference on the parent for the child's ci_parent
+ * link.
+ */
item->ci_group = config_group_get(to_config_group(parent_item));
list_add_tail(&item->ci_entry, &item->ci_group->cg_children);
+ /*
+ * We hold a reference on the child for ci_entry on the parent's
+ * cg_children
+ */
config_item_get(item);
}
@@ -684,6 +700,10 @@
type = parent_item->ci_type;
BUG_ON(!type);
+ /*
+ * If ->drop_item() exists, it is responsible for the
+ * config_item_put().
+ */
if (type->ct_group_ops && type->ct_group_ops->drop_item)
type->ct_group_ops->drop_item(to_config_group(parent_item),
item);
@@ -694,23 +714,28 @@
static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
- int ret;
+ int ret, module_got = 0;
struct config_group *group;
struct config_item *item;
struct config_item *parent_item;
struct configfs_subsystem *subsys;
struct configfs_dirent *sd;
struct config_item_type *type;
- struct module *owner;
+ struct module *owner = NULL;
char *name;
- if (dentry->d_parent == configfs_sb->s_root)
- return -EPERM;
+ if (dentry->d_parent == configfs_sb->s_root) {
+ ret = -EPERM;
+ goto out;
+ }
sd = dentry->d_parent->d_fsdata;
- if (!(sd->s_type & CONFIGFS_USET_DIR))
- return -EPERM;
+ if (!(sd->s_type & CONFIGFS_USET_DIR)) {
+ ret = -EPERM;
+ goto out;
+ }
+ /* Get a working ref for the duration of this function */
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
subsys = to_config_group(parent_item)->cg_subsys;
@@ -719,15 +744,16 @@
if (!type || !type->ct_group_ops ||
(!type->ct_group_ops->make_group &&
!type->ct_group_ops->make_item)) {
- config_item_put(parent_item);
- return -EPERM; /* What lack-of-mkdir returns */
+ ret = -EPERM; /* Lack-of-mkdir returns -EPERM */
+ goto out_put;
}
name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
if (!name) {
- config_item_put(parent_item);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_put;
}
+
snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);
down(&subsys->su_sem);
@@ -748,40 +774,67 @@
kfree(name);
if (!item) {
- config_item_put(parent_item);
- return -ENOMEM;
+ /*
+ * If item == NULL, then link_obj() was never called.
+ * There are no extra references to clean up.
+ */
+ ret = -ENOMEM;
+ goto out_put;
}
- ret = -EINVAL;
+ /*
+ * link_obj() has been called (via link_group() for groups).
+ * From here on out, errors must clean that up.
+ */
+
type = item->ci_type;
- if (type) {
- owner = type->ct_owner;
- if (try_module_get(owner)) {
- if (group) {
- ret = configfs_attach_group(parent_item,
- item,
- dentry);
- } else {
- ret = configfs_attach_item(parent_item,
- item,
- dentry);
- }
-
- if (ret) {
- down(&subsys->su_sem);
- if (group)
- unlink_group(group);
- else
- unlink_obj(item);
- client_drop_item(parent_item, item);
- up(&subsys->su_sem);
-
- config_item_put(parent_item);
- module_put(owner);
- }
- }
+ if (!type) {
+ ret = -EINVAL;
+ goto out_unlink;
}
+ owner = type->ct_owner;
+ if (!try_module_get(owner)) {
+ ret = -EINVAL;
+ goto out_unlink;
+ }
+
+ /*
+ * I hate doing it this way, but if there is
+ * an error, module_put() probably should
+ * happen after any cleanup.
+ */
+ module_got = 1;
+
+ if (group)
+ ret = configfs_attach_group(parent_item, item, dentry);
+ else
+ ret = configfs_attach_item(parent_item, item, dentry);
+
+out_unlink:
+ if (ret) {
+ /* Tear down everything we built up */
+ down(&subsys->su_sem);
+ if (group)
+ unlink_group(group);
+ else
+ unlink_obj(item);
+ client_drop_item(parent_item, item);
+ up(&subsys->su_sem);
+
+ if (module_got)
+ module_put(owner);
+ }
+
+out_put:
+ /*
+ * link_obj()/link_group() took a reference from child->parent,
+ * so the parent is safely pinned. We can drop our working
+ * reference.
+ */
+ config_item_put(parent_item);
+
+out:
return ret;
}
@@ -801,6 +854,7 @@
if (sd->s_type & CONFIGFS_USET_DEFAULT)
return -EPERM;
+ /* Get a working ref until we have the child */
parent_item = configfs_get_config_item(dentry->d_parent);
subsys = to_config_group(parent_item)->cg_subsys;
BUG_ON(!subsys);
@@ -817,6 +871,7 @@
return ret;
}
+ /* Get a working ref for the duration of this function */
item = configfs_get_config_item(dentry);
/* Drop reference from above, item already holds one. */
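
The configfs_mkdir() rewrite above replaces nested if/else cleanup with a goto-based unwind (out_unlink / out_put / out) plus a module_got flag, so module_put() only runs when try_module_get() actually succeeded and the working reference on the parent is always dropped once. The sketch below is a user-space analogue of that shape, with malloc/free standing in for the configfs objects and a puts() standing in for module_put(); none of the names are configfs APIs.

#include <stdio.h>
#include <stdlib.h>

static int mkdir_like(int fail_attach)
{
	int ret = 0, module_got = 0;
	char *name, *item;

	name = malloc(32);		/* working reference on the parent */
	if (!name)
		return -1;		/* nothing taken yet: plain return */

	item = malloc(64);		/* the new child object */
	if (!item) {
		ret = -1;
		goto out_put;		/* only the working reference to drop */
	}

	module_got = 1;			/* stands in for try_module_get() */
	if (fail_attach)
		ret = -1;		/* the attach step failed */

	if (ret) {			/* tear down only what was built up */
		free(item);
		if (module_got)
			puts("module_put()");	/* placeholder for module_put(owner) */
	}
	/* on success, item intentionally lives on as the new object */

out_put:
	free(name);			/* always drop the working reference */
	return ret;
}

int main(void)
{
	printf("success path: %d, failure path: %d\n",
	       mkdir_like(0), mkdir_like(1));
	return 0;
}
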
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index b06b54f..4c39009 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -102,7 +102,7 @@
if (acceptable(context, result))
return result;
if (S_ISDIR(result->d_inode->i_mode)) {
- /* there is no other dentry, so fail */
+ err = -EACCES;
goto err_result;
}
diff --git a/fs/inotify.c b/fs/inotify.c
index 1f50302..732ec4b 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -848,7 +848,11 @@
inode = watch->inode;
mutex_lock(&inode->inotify_mutex);
mutex_lock(&dev->mutex);
- remove_watch_no_event(watch, dev);
+
+ /* make sure we didn't race with another list removal */
+ if (likely(idr_find(&dev->idr, watch->wd)))
+ remove_watch_no_event(watch, dev);
+
mutex_unlock(&dev->mutex);
mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(watch);
@@ -890,8 +894,7 @@
mutex_lock(&dev->mutex);
/* make sure that we did not race */
- watch = idr_find(&dev->idr, wd);
- if (likely(watch))
+ if (likely(idr_find(&dev->idr, wd) == watch))
remove_watch(watch, dev);
mutex_unlock(&dev->mutex);
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index d4d0c41..1d46677 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -438,7 +438,8 @@
if (c->mtd->point) {
err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
if (!err && retlen < tn->csize) {
- JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize);
+ JFFS2_WARNING("MTD point returned len too short: %zu "
+ "instead of %u.\n", retlen, tn->csize);
c->mtd->unpoint(c->mtd, buffer, ofs, len);
} else if (err)
JFFS2_WARNING("MTD point failed: error code %d.\n", err);
@@ -461,7 +462,8 @@
}
if (retlen != len) {
- JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len);
+ JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n",
+ ofs, retlen, len);
err = -EIO;
goto free_out;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 2c5f1f8..bf478ad 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -899,13 +899,11 @@
/*
* do loopback mount.
*/
-static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags, int mnt_flags)
+static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
struct nameidata old_nd;
struct vfsmount *mnt = NULL;
- int recurse = flags & MS_REC;
int err = mount_is_safe(nd);
-
if (err)
return err;
if (!old_name || !*old_name)
@@ -939,7 +937,6 @@
spin_unlock(&vfsmount_lock);
release_mounts(&umount_list);
}
- mnt->mnt_flags = mnt_flags;
out:
up_write(&namespace_sem);
@@ -1353,7 +1350,7 @@
retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
data_page);
else if (flags & MS_BIND)
- retval = do_loopback(&nd, dev_name, flags, mnt_flags);
+ retval = do_loopback(&nd, dev_name, flags & MS_REC);
else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
retval = do_change_type(&nd, flags);
else if (flags & MS_MOVE)
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 6aa92d0..1d65f13 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1922,11 +1922,10 @@
value = kmalloc(size, GFP_KERNEL);
if (!value)
return -ENOMEM;
- size = posix_acl_to_xattr(acl, value, size);
- if (size < 0) {
- error = size;
+ error = posix_acl_to_xattr(acl, value, size);
+ if (error < 0)
goto getout;
- }
+ size = error;
} else
size = 0;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0d858d0..47152bf 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -276,13 +276,29 @@
return ret;
}
+/* This can also be called from ocfs2_write_zero_page(), which has done
+ * its own cluster locking. */
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+ unsigned from, unsigned to)
+{
+ int ret;
+
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ ret = block_prepare_write(page, from, to, ocfs2_get_block);
+
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ return ret;
+}
+
/*
* ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
* from loopback. It must be able to perform its own locking around
* ocfs2_get_block().
*/
-int ocfs2_prepare_write(struct file *file, struct page *page,
- unsigned from, unsigned to)
+static int ocfs2_prepare_write(struct file *file, struct page *page,
+ unsigned from, unsigned to)
{
struct inode *inode = page->mapping->host;
int ret;
@@ -295,11 +311,7 @@
goto out;
}
- down_read(&OCFS2_I(inode)->ip_alloc_sem);
-
- ret = block_prepare_write(page, from, to, ocfs2_get_block);
-
- up_read(&OCFS2_I(inode)->ip_alloc_sem);
+ ret = ocfs2_prepare_write_nolock(inode, page, from, to);
ocfs2_meta_unlock(inode, 0);
out:
@@ -625,11 +637,31 @@
int ret;
mlog_entry_void();
+
+ /*
+ * We get PR data locks even for O_DIRECT. This allows
+ * concurrent O_DIRECT I/O but doesn't let O_DIRECT with
+ * extending and buffered zeroing writes race. If they did
+ * race then the buffered zeroing could be written back after
+ * the O_DIRECT I/O. It's one thing to tell people not to mix
+ * buffered and O_DIRECT writes, but expecting them to
+ * understand that file extension is also an implicit buffered
+ * write is too much. By getting the PR we force writeback of
+ * the buffered zeroing before proceeding.
+ */
+ ret = ocfs2_data_lock(inode, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ ocfs2_data_unlock(inode, 0);
+
ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
inode->i_sb->s_bdev, iov, offset,
nr_segs,
ocfs2_direct_IO_get_blocks,
ocfs2_dio_end_io);
+out:
mlog_exit(ret);
return ret;
}
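
The ocfs2_direct_IO() change above briefly takes and drops the PR data lock purely so that any racing buffered zeroing gets written back before the unlocked direct I/O is issued. Below is a user-space analogue of that take-and-drop ordering trick using a pthread rwlock; it is an illustration only, not ocfs2 code.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t data_lock = PTHREAD_RWLOCK_INITIALIZER;

static void direct_io_like(void)
{
	/* Block until any exclusive holder (the "buffered zeroing") is done... */
	pthread_rwlock_rdlock(&data_lock);
	/* ...then drop the lock again; the I/O itself runs without it. */
	pthread_rwlock_unlock(&data_lock);

	puts("issue the unlocked direct I/O here");
}

int main(void)
{
	direct_io_like();
	return 0;
}
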
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index d40456d..e88c3f0 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -22,8 +22,8 @@
#ifndef OCFS2_AOPS_H
#define OCFS2_AOPS_H
-int ocfs2_prepare_write(struct file *file, struct page *page,
- unsigned from, unsigned to);
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+ unsigned from, unsigned to);
struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
struct page *page,
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 4601fc2..1a5c690 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -569,7 +569,7 @@
ret = -ENOMEM;
ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!ctxt.new_ent) {
mlog_errno(ret);
return ret;
@@ -583,14 +583,14 @@
if (ctxt.need_left && !ctxt.left_ent) {
ctxt.left_ent =
kmem_cache_alloc(ocfs2_em_ent_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!ctxt.left_ent)
break;
}
if (ctxt.need_right && !ctxt.right_ent) {
ctxt.right_ent =
kmem_cache_alloc(ocfs2_em_ent_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!ctxt.right_ent)
break;
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 581eb45..a9559c8 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -613,7 +613,8 @@
/* Some parts of this taken from generic_cont_expand, which turned out
* to be too fragile to do exactly what we need without us having to
- * worry about recursive locking in ->commit_write(). */
+ * worry about recursive locking in ->prepare_write() and
+ * ->commit_write(). */
static int ocfs2_write_zero_page(struct inode *inode,
u64 size)
{
@@ -641,7 +642,7 @@
goto out;
}
- ret = ocfs2_prepare_write(NULL, page, offset, offset);
+ ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
@@ -695,13 +696,26 @@
return ret;
}
+/*
+ * A tail_to_skip value > 0 indicates that we're being called from
+ * ocfs2_file_aio_write(). This has the following implications:
+ *
+ * - we don't want to update i_size
+ * - di_bh will be NULL, which is fine because it's only used in the
+ * case where we want to update i_size.
+ * - ocfs2_zero_extend() will then only be filling the hole created
+ * between i_size and the start of the write.
+ */
static int ocfs2_extend_file(struct inode *inode,
struct buffer_head *di_bh,
- u64 new_i_size)
+ u64 new_i_size,
+ size_t tail_to_skip)
{
int ret = 0;
u32 clusters_to_add;
+ BUG_ON(!tail_to_skip && !di_bh);
+
/* setattr sometimes calls us like this. */
if (new_i_size == 0)
goto out;
@@ -714,27 +728,44 @@
OCFS2_I(inode)->ip_clusters;
if (clusters_to_add) {
+ /*
+ * protect the pages that ocfs2_zero_extend is going to
+	 * be pulling into the page cache. We do this before the
+ * metadata extend so that we don't get into the situation
+ * where we've extended the metadata but can't get the data
+ * lock to zero.
+ */
+ ret = ocfs2_data_lock(inode, 1);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
ret = ocfs2_extend_allocation(inode, clusters_to_add);
if (ret < 0) {
mlog_errno(ret);
- goto out;
+ goto out_unlock;
}
- ret = ocfs2_zero_extend(inode, new_i_size);
+ ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
if (ret < 0) {
mlog_errno(ret);
- goto out;
+ goto out_unlock;
}
- }
-
- /* No allocation required, we just use this helper to
- * do a trivial update of i_size. */
- ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
}
+ if (!tail_to_skip) {
+ /* We're being called from ocfs2_setattr() which wants
+ * us to update i_size */
+ ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+
+out_unlock:
+ if (clusters_to_add) /* this is the only case in which we lock */
+ ocfs2_data_unlock(inode, 1);
+
out:
return ret;
}
@@ -793,7 +824,7 @@
if (i_size_read(inode) > attr->ia_size)
status = ocfs2_truncate_file(inode, bh, attr->ia_size);
else
- status = ocfs2_extend_file(inode, bh, attr->ia_size);
+ status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
@@ -1049,21 +1080,12 @@
if (!clusters)
break;
- ret = ocfs2_extend_allocation(inode, clusters);
+ ret = ocfs2_extend_file(inode, NULL, newsize, count);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
-
- /* Fill any holes which would've been created by this
- * write. If we're O_APPEND, this will wind up
- * (correctly) being a noop. */
- ret = ocfs2_zero_extend(inode, (u64) newsize - count);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
break;
}
@@ -1146,6 +1168,22 @@
ocfs2_iocb_set_rw_locked(iocb);
}
+ /*
+ * We're fine letting folks race truncates and extending
+ * writes with read across the cluster, just like they can
+ * locally. Hence no rw_lock during read.
+ *
+ * Take and drop the meta data lock to update inode fields
+ * like i_size. This allows the checks down below
+ * generic_file_aio_read() a chance of actually working.
+ */
+ ret = ocfs2_meta_lock(inode, NULL, NULL, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto bail;
+ }
+ ocfs2_meta_unlock(inode, 0);
+
ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos);
if (ret == -EINVAL)
mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 6a610ae..eebc3cf 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -117,7 +117,7 @@
{
struct ocfs2_journal_handle *retval = NULL;
- retval = kcalloc(1, sizeof(*retval), GFP_KERNEL);
+ retval = kcalloc(1, sizeof(*retval), GFP_NOFS);
if (!retval) {
mlog(ML_ERROR, "Failed to allocate memory for journal "
"handle!\n");
@@ -870,9 +870,11 @@
if (p_blocks > CONCURRENT_JOURNAL_FILL)
p_blocks = CONCURRENT_JOURNAL_FILL;
+ /* We are reading journal data which should not
+ * be put in the uptodate cache */
status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb),
p_blkno, p_blocks, bhs, 0,
- inode);
+ NULL);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -982,7 +984,7 @@
{
struct ocfs2_la_recovery_item *item;
- item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_KERNEL);
+ item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
if (!item) {
/* Though we wish to avoid it, we are in fact safe in
* skipping local alloc cleanup as fsck.ocfs2 is more
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index 04a684d..b8a00a7 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -337,7 +337,7 @@
(unsigned long long)oi->ip_blkno,
(unsigned long long)block, expand_tree);
- new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_KERNEL);
+ new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
if (!new) {
mlog_errno(-ENOMEM);
return;
@@ -349,7 +349,7 @@
* has no way of tracking that. */
for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!tree[i]) {
mlog_errno(-ENOMEM);
goto out_free;
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index 53049a2..ee42765 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -586,7 +586,7 @@
{
struct ocfs2_net_wait_ctxt *w;
- w = kcalloc(1, sizeof(*w), GFP_KERNEL);
+ w = kcalloc(1, sizeof(*w), GFP_NOFS);
if (!w) {
mlog_errno(-ENOMEM);
goto bail;
@@ -749,7 +749,7 @@
BUG_ON(!ocfs2_is_valid_vote_request(type));
- request = kcalloc(1, sizeof(*request), GFP_KERNEL);
+ request = kcalloc(1, sizeof(*request), GFP_NOFS);
if (!request) {
mlog_errno(-ENOMEM);
} else {
@@ -1129,7 +1129,7 @@
struct ocfs2_super *osb = data;
struct ocfs2_vote_work *work;
- work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_KERNEL);
+ work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_NOFS);
if (!work) {
status = -ENOMEM;
mlog_errno(status);
diff --git a/fs/open.c b/fs/open.c
index 53ec28c..317b7c7 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -1124,7 +1124,6 @@
prevent_tail_call(ret);
return ret;
}
-EXPORT_SYMBOL_GPL(sys_openat);
#ifndef __alpha__
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 45ae7dd..7ef1f09 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -533,6 +533,7 @@
devfs_remove_disk(disk);
+ kobject_uevent(&disk->kobj, KOBJ_REMOVE);
if (disk->holder_dir)
kobject_unregister(disk->holder_dir);
if (disk->slave_dir)
@@ -545,7 +546,7 @@
kfree(disk_name);
}
put_device(disk->driverfs_dev);
+ disk->driverfs_dev = NULL;
}
- kobject_uevent(&disk->kobj, KOBJ_REMOVE);
kobject_del(&disk->kobj);
}
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index 34c7a11..70d9c5a 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -434,6 +434,11 @@
if (dentry->d_name.len > SMB_MAXNAMELEN)
goto out;
+ /* Do not allow lookup of names with backslashes in */
+ error = -EINVAL;
+ if (memchr(dentry->d_name.name, '\\', dentry->d_name.len))
+ goto out;
+
lock_kernel();
error = smb_proc_getattr(dentry, &finfo);
#ifdef SMBFS_PARANOIA
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index c71c375..c71dd27 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -339,9 +339,11 @@
/*
* On timeout or on interrupt we want to try and remove the
* request from the recvq/xmitq.
+ * First check if the request is still part of a queue. (May
+ * have been removed by some error condition)
*/
smb_lock_server(server);
- if (!(req->rq_flags & SMB_REQ_RECEIVED)) {
+ if (!list_empty(&req->rq_queue)) {
list_del_init(&req->rq_queue);
smb_rput(req);
}
diff --git a/include/asm-arm/arch-pxa/pxa2xx_spi.h b/include/asm-arm/arch-pxa/pxa2xx_spi.h
new file mode 100644
index 0000000..915590c3
--- /dev/null
+++ b/include/asm-arm/arch-pxa/pxa2xx_spi.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef PXA2XX_SPI_H_
+#define PXA2XX_SPI_H_
+
+#define PXA2XX_CS_ASSERT (0x01)
+#define PXA2XX_CS_DEASSERT (0x02)
+
+#if defined(CONFIG_PXA25x)
+#define CLOCK_SPEED_HZ 3686400
+#define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/2/(x+1))<<8)&0x0000ff00)
+#define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
+#define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
+#define SSP_TIMEOUT_SCALE (2712)
+#elif defined(CONFIG_PXA27x)
+#define CLOCK_SPEED_HZ 13000000
+#define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
+#define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
+#define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
+#define SSP_TIMEOUT_SCALE (769)
+#endif
+
+#define SSP_TIMEOUT(x) ((x*10000)/SSP_TIMEOUT_SCALE)
+#define SSP1_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(1)))))
+#define SSP2_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(2)))))
+#define SSP3_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(3)))))
+
+enum pxa_ssp_type {
+ SSP_UNDEFINED = 0,
+ PXA25x_SSP, /* pxa 210, 250, 255, 26x */
+ PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
+ PXA27x_SSP,
+};
+
+/* device.platform_data for SSP controller devices */
+struct pxa2xx_spi_master {
+ enum pxa_ssp_type ssp_type;
+ u32 clock_enable;
+ u16 num_chipselect;
+ u8 enable_dma;
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct pxa2xx_spi_chip {
+ u8 tx_threshold;
+ u8 rx_threshold;
+ u8 dma_burst_size;
+ u32 timeout_microsecs;
+ u8 enable_loopback;
+ void (*cs_control)(u32 command);
+};
+
+#endif /*PXA2XX_SPI_H_*/
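
The new pxa2xx_spi.h above only declares the platform-data structures; a hypothetical board-file fragment showing how they might be filled in follows. The include paths, field values, GPIO handling and the "example-adc" modalias are assumptions made for illustration, not part of this patch.

/* Hypothetical board support fragment, not from this patch. */
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <asm/arch/pxa2xx_spi.h>

static void board_cs_control(u32 command)
{
	if (command & PXA2XX_CS_ASSERT) {
		/* board code would drive the chipselect GPIO active here */
	}
	if (command & PXA2XX_CS_DEASSERT) {
		/* ...and drive it inactive here */
	}
}

/* Per-slave tuning, handed to the controller via spi_board_info */
static struct pxa2xx_spi_chip board_chip_info = {
	.tx_threshold      = 8,
	.rx_threshold      = 8,
	.dma_burst_size    = 8,
	.timeout_microsecs = 64,
	.cs_control        = board_cs_control,
};

/* device.platform_data for the SSP controller platform device */
static struct pxa2xx_spi_master board_ssp_master_info = {
	.ssp_type       = PXA25x_SSP,
	.clock_enable   = 0,		/* board-specific clock-enable bits */
	.num_chipselect = 1,
	.enable_dma     = 0,
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias        = "example-adc",	/* made-up slave driver name */
		.max_speed_hz    = 1000000,
		.bus_num         = 1,
		.chip_select     = 0,
		.controller_data = &board_chip_info,
	},
};
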
diff --git a/include/asm-arm/arch-s3c2410/spi-gpio.h b/include/asm-arm/arch-s3c2410/spi-gpio.h
new file mode 100644
index 0000000..258c00b
--- /dev/null
+++ b/include/asm-arm/arch-s3c2410/spi-gpio.h
@@ -0,0 +1,31 @@
+/* linux/include/asm-arm/arch-s3c2410/spi.h
+ *
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2410 - SPI Controller platform_device info
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_SPIGPIO_H
+#define __ASM_ARCH_SPIGPIO_H __FILE__
+
+struct s3c2410_spigpio_info;
+struct spi_board_info;
+
+struct s3c2410_spigpio_info {
+ unsigned long pin_clk;
+ unsigned long pin_mosi;
+ unsigned long pin_miso;
+
+ unsigned long board_size;
+ struct spi_board_info *board_info;
+
+ void (*chip_select)(struct s3c2410_spigpio_info *spi, int cs);
+};
+
+
+#endif /* __ASM_ARCH_SPIGPIO_H */
diff --git a/include/asm-arm/arch-s3c2410/spi.h b/include/asm-arm/arch-s3c2410/spi.h
new file mode 100644
index 0000000..4029a1a
--- /dev/null
+++ b/include/asm-arm/arch-s3c2410/spi.h
@@ -0,0 +1,29 @@
+/* linux/include/asm-arm/arch-s3c2410/spi.h
+ *
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2410 - SPI Controller platform_device info
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_SPI_H
+#define __ASM_ARCH_SPI_H __FILE__
+
+struct s3c2410_spi_info;
+struct spi_board_info;
+
+struct s3c2410_spi_info {
+ unsigned long pin_cs; /* simple gpio cs */
+
+ unsigned long board_size;
+ struct spi_board_info *board_info;
+
+ void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
+};
+
+
+#endif /* __ASM_ARCH_SPI_H */
diff --git a/include/asm-arm/procinfo.h b/include/asm-arm/procinfo.h
index a9c75b2..8425260 100644
--- a/include/asm-arm/procinfo.h
+++ b/include/asm-arm/procinfo.h
@@ -45,8 +45,6 @@
#endif /* __ASSEMBLY__ */
-#define PROC_INFO_SZ 48
-
#define HWCAP_SWP 1
#define HWCAP_HALF 2
#define HWCAP_THUMB 4
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 43ad4e5..406ca97 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -142,6 +142,9 @@
: "cc");
}
+/* write_can_lock - would write_trylock() succeed? */
+#define __raw_write_can_lock(x) ((x)->lock == 0x80000000)
+
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
@@ -198,4 +201,7 @@
#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
+/* read_can_lock - would read_trylock() succeed? */
+#define __raw_read_can_lock(x) ((x)->lock < 0x80000000)
+
#endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index 657d582..41c2792 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -296,8 +296,14 @@
#define __NR_pselect6 301
#define __NR_ppoll 302
#define __NR_unshare 303
+#define __NR_set_robust_list 304
+#define __NR_get_robust_list 305
+#define __NR_splice 306
+#define __NR_sync_file_range 307
+#define __NR_tee 308
+#define __NR_vmsplice 309
-#define NR_syscalls 304
+#define NR_syscalls 310
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 2d71608..33d8f20 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -19,5 +19,4 @@
void (*cont)(const struct firmware *fw, void *context));
void release_firmware(const struct firmware *fw);
-void register_firmware(const char *name, const u8 *data, size_t size);
#endif
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index a3a0e07..16fbe59 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -110,5 +110,16 @@
#define FSL_USB2_PORT0_ENABLED 0x00000001
#define FSL_USB2_PORT1_ENABLED 0x00000002
+struct fsl_spi_platform_data {
+ u32 initial_spmode; /* initial SPMODE value */
+ u16 bus_num;
+
+ /* board specific information */
+ u16 max_chipselect;
+ void (*activate_cs)(u8 cs, u8 polarity);
+ void (*deactivate_cs)(u8 cs, u8 polarity);
+ u32 sysclk;
+};
+
#endif /* _FSL_DEVICE_H_ */
#endif /* __KERNEL__ */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e1bd084..f4fc576 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -124,6 +124,7 @@
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(char *ptr, char **retptr);
+extern int core_kernel_text(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int session_of_pgrp(int pgrp);
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index bdc556d..03a14a3 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -69,6 +69,7 @@
unsigned int timeout_ns; /* data timeout (in ns, max 80ms) */
unsigned int timeout_clks; /* data timeout (in clocks) */
unsigned int blksz_bits; /* data block size */
+ unsigned int blksz; /* data block size */
unsigned int blocks; /* number of blocks */
unsigned int error; /* data error */
unsigned int flags;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b5c2112..3674035 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -22,6 +22,7 @@
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
+#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
struct free_area {
struct list_head free_list;
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5673008..970284f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -132,6 +132,7 @@
}
extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
/**
* rcu_read_lock - mark the beginning of an RCU read-side critical section.
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3af03b1..2d985d5 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -150,6 +150,7 @@
extern void kfree(const void *);
extern unsigned int ksize(const void *);
+extern int slab_is_available(void);
#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index b05f146..e928c0d 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -31,18 +31,23 @@
* @master: SPI controller used with the device.
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
+ * The spi_transfer.speed_hz can override this for each transfer.
* @chip-select: Chipselect, distinguishing chips handled by "master".
* @mode: The spi mode defines how data is clocked out and in.
* This may be changed by the device's driver.
+ * The "active low" default for chipselect mode can be overridden,
+ * as can the "MSB first" default for each word in a transfer.
* @bits_per_word: Data transfers involve one or more words; word sizes
- * like eight or 12 bits are common. In-memory wordsizes are
+ * like eight or 12 bits are common. In-memory wordsizes are
* powers of two bytes (e.g. 20 bit samples use 32 bits).
- * This may be changed by the device's driver.
+ * This may be changed by the device's driver, or left at the
+ * default (0) indicating protocol words are eight bit bytes.
+ * The spi_transfer.bits_per_word can override this for each transfer.
* @irq: Negative, or the number passed to request_irq() to receive
- * interrupts from this device.
+ * interrupts from this device.
* @controller_state: Controller's runtime state
* @controller_data: Board-specific definitions for controller, such as
- * FIFO initialization parameters; from board_info.controller_data
+ * FIFO initialization parameters; from board_info.controller_data
*
* An spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
@@ -65,6 +70,7 @@
#define SPI_MODE_2 (SPI_CPOL|0)
#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
#define SPI_CS_HIGH 0x04 /* chipselect active high? */
+#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
u8 bits_per_word;
int irq;
void *controller_state;
@@ -73,7 +79,6 @@
// likely need more hooks for more protocol options affecting how
// the controller talks to each chip, like:
- // - bit order (default is wordwise msb-first)
// - memory packing (12 bit samples into low bits, others zeroed)
// - priority
// - drop chipselect after each word
@@ -143,13 +148,13 @@
* struct spi_master - interface to SPI master controller
* @cdev: class interface to this driver
* @bus_num: board-specific (and often SOC-specific) identifier for a
- * given SPI controller.
+ * given SPI controller.
* @num_chipselect: chipselects are used to distinguish individual
- * SPI slaves, and are numbered from zero to num_chipselects.
- * each slave has a chipselect signal, but it's common that not
- * every chipselect is connected to a slave.
+ * SPI slaves, and are numbered from zero to num_chipselects.
+ * each slave has a chipselect signal, but it's common that not
+ * every chipselect is connected to a slave.
* @setup: updates the device mode and clocking records used by a
- * device's SPI controller; protocol code may call this.
+ * device's SPI controller; protocol code may call this.
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
*
@@ -167,13 +172,13 @@
struct spi_master {
struct class_device cdev;
- /* other than zero (== assign one dynamically), bus_num is fully
+ /* other than negative (== assign one dynamically), bus_num is fully
* board-specific. usually that simplifies to being SOC-specific.
- * example: one SOC has three SPI controllers, numbered 1..3,
+ * example: one SOC has three SPI controllers, numbered 0..2,
* and one board's schematics might show it using SPI-2. software
* would normally use bus_num=2 for that controller.
*/
- u16 bus_num;
+ s16 bus_num;
/* chipselects will be integral to many controllers; some others
* might use board-specific GPIOs.
@@ -268,10 +273,14 @@
* @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped
* @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped
* @len: size of rx and tx buffers (in bytes)
+ * @speed_hz: Select a speed other than the device default for this
+ * transfer. If 0 the default (from spi_device) is used.
+ * @bits_per_word: Select a bits_per_word other than the device default
+ * for this transfer. If 0 the default (from spi_device) is used.
* @cs_change: affects chipselect after this transfer completes
* @delay_usecs: microseconds to delay after this transfer before
- * (optionally) changing the chipselect status, then starting
- * the next transfer or completing this spi_message.
+ * (optionally) changing the chipselect status, then starting
+ * the next transfer or completing this spi_message.
* @transfer_list: transfers are sequenced through spi_message.transfers
*
* SPI transfers always write the same number of bytes as they read.
@@ -322,7 +331,9 @@
dma_addr_t rx_dma;
unsigned cs_change:1;
+ u8 bits_per_word;
u16 delay_usecs;
+ u32 speed_hz;
struct list_head transfer_list;
};
@@ -356,7 +367,7 @@
* and its transfers, ignore them until its completion callback.
*/
struct spi_message {
- struct list_head transfers;
+ struct list_head transfers;
struct spi_device *spi;
@@ -374,7 +385,7 @@
*/
/* completion is reported through a callback */
- void (*complete)(void *context);
+ void (*complete)(void *context);
void *context;
unsigned actual_length;
int status;
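
The spi.h documentation updates above describe the new per-transfer overrides: spi_transfer.speed_hz and spi_transfer.bits_per_word fall back to the spi_device defaults when left at 0. A hedged driver-side sketch of that usage follows; read_sample(), the command byte and the 12-bit/500 kHz values are invented for illustration, while spi_message_init(), spi_message_add_tail(), spi_sync() and the structure fields come from the SPI framework itself.

#include <linux/spi/spi.h>

/* Illustrative only: one command byte at the device defaults, then a
 * 12-bit sample read at a slower, per-transfer clock rate. */
static int read_sample(struct spi_device *spi, u16 *sample)
{
	u8 cmd = 0x80;				/* made-up command byte */
	struct spi_transfer t[2] = {
		{
			.tx_buf = &cmd,
			.len    = 1,		/* 0 overrides: device defaults apply */
		},
		{
			.rx_buf        = sample,
			.len           = 2,	/* a 12-bit word occupies 16 bits in memory */
			.bits_per_word = 12,	/* per-transfer override */
			.speed_hz      = 500000,/* per-transfer override */
		},
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);
	return spi_sync(spi, &m);
}
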
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index c961fe9..16ce178 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -30,6 +30,12 @@
struct spi_master *master;
+ /* setup_transfer() changes clock and/or wordsize to match settings
+ * for this transfer; zeroes restore defaults from spi_device.
+ */
+ int (*setup_transfer)(struct spi_device *spi,
+ struct spi_transfer *t);
+
void (*chipselect)(struct spi_device *spi, int is_on);
#define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */
#define BITBANG_CS_INACTIVE 0
@@ -51,6 +57,8 @@
extern int spi_bitbang_setup(struct spi_device *spi);
extern void spi_bitbang_cleanup(const struct spi_device *spi);
extern int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m);
+extern int spi_bitbang_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t);
/* start or stop queue processing */
extern int spi_bitbang_start(struct spi_bitbang *spi);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 5b1fdf1..f03c247 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -296,7 +296,7 @@
#define read_swap_cache_async(swp,vma,addr) NULL
#define lookup_swap_cache(swp) NULL
#define valid_swaphandles(swp, off) 0
-#define can_share_swap_page(p) 0
+#define can_share_swap_page(p) (page_mapcount(p) == 1)
#define move_to_swap_cache(p, swp) 1
#define move_from_swap_cache(p, i, m) 1
#define __delete_from_swap_cache(p) /*NOTHING*/
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index b0666d6..4901ee4 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -211,6 +211,7 @@
#define NEIGH_UPDATE_F_ADMIN 0x80000000
extern void neigh_table_init(struct neigh_table *tbl);
+extern void neigh_table_init_no_netlink(struct neigh_table *tbl);
extern int neigh_table_clear(struct neigh_table *tbl);
extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
const void *pkey,
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 34a1a09..807d6f1 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -99,6 +99,7 @@
SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */
+ SCTP_CMD_SET_SK_ERR, /* Set sk_err */
SCTP_CMD_LAST
} sctp_verb_t;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index e673b2c..aa6033c 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -461,12 +461,12 @@
* there is room for a param header too.
*/
#define sctp_walk_params(pos, chunk, member)\
-_sctp_walk_params((pos), (chunk), WORD_ROUND(ntohs((chunk)->chunk_hdr.length)), member)
+_sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
#define _sctp_walk_params(pos, chunk, end, member)\
for (pos.v = chunk->member;\
pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\
- pos.v <= (void *)chunk + end - WORD_ROUND(ntohs(pos.p->length)) &&\
+ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
pos.v += WORD_ROUND(ntohs(pos.p->length)))
@@ -477,7 +477,7 @@
for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(sctp_chunkhdr_t));\
(void *)err <= (void *)chunk_hdr + end - sizeof(sctp_errhdr_t) &&\
- (void *)err <= (void *)chunk_hdr + end - WORD_ROUND(ntohs(err->length)) &&\
+ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
diff --git a/init/do_mounts.c b/init/do_mounts.c
index adb7cad..f4b7b9d 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -310,6 +310,11 @@
panic("VFS: Unable to mount root fs on %s", b);
}
+
+ printk("No filesystem could mount root, tried: ");
+ for (p = fs_names; *p; p += strlen(p)+1)
+ printk(" %s", p);
+ printk("\n");
panic("VFS: Unable to mount root fs on %s", __bdevname(ROOT_DEV, b));
out:
putname(fs_names);
diff --git a/init/initramfs.c b/init/initramfs.c
index 679d870..f81cfa4 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -26,10 +26,12 @@
/* link hash */
+#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)
+
static __initdata struct hash {
int ino, minor, major;
struct hash *next;
- char *name;
+ char name[N_ALIGN(PATH_MAX)];
} *head[32];
static inline int hash(int major, int minor, int ino)
@@ -57,7 +59,7 @@
q->ino = ino;
q->minor = minor;
q->major = major;
- q->name = name;
+ strcpy(q->name, name);
q->next = NULL;
*p = q;
return NULL;
@@ -133,8 +135,6 @@
count -= n;
}
-#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)
-
static __initdata char *collected;
static __initdata int remains;
static __initdata char *collect;
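
The initramfs hunk above stops storing a pointer to the caller's name buffer in the link hash and instead copies the name into a fixed N_ALIGN(PATH_MAX) array. The user-space sketch below shows the aliasing hazard the copy avoids; the struct and the paths are invented for illustration.

#include <stdio.h>
#include <string.h>

struct entry { char name[64]; };

int main(void)
{
	char scratch[64] = "first/path";
	const char *stored_ptr = scratch;	/* old approach: alias the caller's buffer */

	struct entry e;
	strcpy(e.name, scratch);		/* new approach: keep an owned copy */

	strcpy(scratch, "second/path");		/* the caller reuses its buffer */

	printf("aliased pointer now sees: %s\n", stored_ptr);
	printf("copied name still is:     %s\n", e.name);
	return 0;
}
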
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 72248d1..ab81fdd 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2231,19 +2231,25 @@
* So only GFP_KERNEL allocations, if all nodes in the cpuset are
* short of memory, might require taking the callback_mutex mutex.
*
- * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages()
- * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing
- * hardwall cpusets - no allocation on a node outside the cpuset is
- * allowed (unless in interrupt, of course).
+ * The first call here from mm/page_alloc:get_page_from_freelist()
+ * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, so
+ * no allocation on a node outside the cpuset is allowed (unless in
+ * interrupt, of course).
*
- * The second loop doesn't even call here for GFP_ATOMIC requests
- * (if the __alloc_pages() local variable 'wait' is set). That check
- * and the checks below have the combined affect in the second loop of
- * the __alloc_pages() routine that:
+ * The second pass through get_page_from_freelist() doesn't even call
+ * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
+ * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
+ * in alloc_flags. That logic and the checks below have the combined
+ * effect that:
* in_interrupt - any node ok (current task context irrelevant)
* GFP_ATOMIC - any node ok
* GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok
* GFP_USER - only nodes in current tasks mems allowed ok.
+ *
+ * Rule:
+ * Don't call cpuset_zone_allowed() if you can't sleep, unless you
+ * pass in the __GFP_HARDWALL flag set in gfp_mask, which disables
+ * the code that might scan up ancestor cpusets and sleep.
**/
int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
@@ -2255,6 +2261,7 @@
if (in_interrupt())
return 1;
node = z->zone_pgdat->node_id;
+ might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
if (node_isset(node, current->mems_allowed))
return 1;
if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
diff --git a/kernel/extable.c b/kernel/extable.c
index 7501b53..7fe2628 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -40,7 +40,7 @@
return e;
}
-static int core_kernel_text(unsigned long addr)
+int core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
addr <= (unsigned long)_etext)
diff --git a/kernel/module.c b/kernel/module.c
index d24deb0..bbe0486 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -705,14 +705,14 @@
void symbol_put_addr(void *addr)
{
- unsigned long flags;
+ struct module *modaddr;
- spin_lock_irqsave(&modlist_lock, flags);
- if (!kernel_text_address((unsigned long)addr))
+ if (core_kernel_text((unsigned long)addr))
+ return;
+
+ if (!(modaddr = module_text_address((unsigned long)addr)))
BUG();
-
- module_put(module_text_address((unsigned long)addr));
- spin_unlock_irqrestore(&modlist_lock, flags);
+ module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 6d32ff2..2058f88 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -479,12 +479,31 @@
return 0;
}
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, returning 1 if so. This function is part of the
+ * RCU implementation; it is -not- an exported member of the RCU API.
+ */
int rcu_pending(int cpu)
{
return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so. This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
+
+ return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
+}
+
void rcu_check_callbacks(int cpu, int user)
{
if (user ||
diff --git a/kernel/timer.c b/kernel/timer.c
index 67eaf0f..9e49dee 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -541,6 +541,22 @@
}
spin_unlock(&base->lock);
+ /*
+ * It can happen that other CPUs service timer IRQs and increment
+ * jiffies, but we have not yet got a local timer tick to process
+ * the timer wheels. In that case, the expiry time can be before
+ * jiffies, but since the high-resolution timer here is relative to
+ * jiffies, the default expression when high-resolution timers are
+ * not active,
+ *
+ * time_before(MAX_JIFFY_OFFSET + jiffies, expires)
+ *
+ * would falsely evaluate to true. If that is the case, just
+ * return jiffies so that we can immediately fire the local timer.
+ */
+ if (time_before(expires, jiffies))
+ return jiffies;
+
if (time_before(hr_expires, expires))
return hr_expires;
diff --git a/lib/kobject.c b/lib/kobject.c
index b46350c..687ab41 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -198,14 +198,14 @@
/* be noisy on error issues */
if (error == -EEXIST)
- printk("kobject_add failed for %s with -EEXIST, "
+ pr_debug("kobject_add failed for %s with -EEXIST, "
"don't try to register things with the "
"same name in the same directory.\n",
kobject_name(kobj));
else
- printk("kobject_add failed for %s (%d)\n",
+ pr_debug("kobject_add failed for %s (%d)\n",
kobject_name(kobj), error);
- dump_stack();
+ /* dump_stack(); */
}
return error;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea77c99..253a450 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
#include <linux/mempolicy.h>
#include <asm/tlbflush.h>
+#include <asm/div64.h>
#include "internal.h"
/*
@@ -950,7 +951,7 @@
goto got_pg;
do {
- if (cpuset_zone_allowed(*z, gfp_mask))
+ if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL))
wakeup_kswapd(*z, order);
} while (*(++z));
@@ -969,7 +970,8 @@
alloc_flags |= ALLOC_HARDER;
if (gfp_mask & __GFP_HIGH)
alloc_flags |= ALLOC_HIGH;
- alloc_flags |= ALLOC_CPUSET;
+ if (wait)
+ alloc_flags |= ALLOC_CPUSET;
/*
* Go through the zonelist again. Let __GFP_HIGH and allocations
@@ -2123,14 +2125,22 @@
#ifdef CONFIG_FLAT_NODE_MEM_MAP
/* ia64 gets its own node_mem_map, before this, without bootmem */
if (!pgdat->node_mem_map) {
- unsigned long size;
+ unsigned long size, start, end;
struct page *map;
- size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+ /*
+ * The zone's endpoints aren't required to be MAX_ORDER
+ * aligned but the node_mem_map endpoints must be in order
+ * for the buddy allocator to function correctly.
+ */
+ start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
+ end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+ end = ALIGN(end, MAX_ORDER_NR_PAGES);
+ size = (end - start) * sizeof(struct page);
map = alloc_remap(pgdat->node_id, size);
if (!map)
map = alloc_bootmem_node(pgdat, size);
- pgdat->node_mem_map = map;
+ pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
}
#ifdef CONFIG_FLATMEM
/*
@@ -2566,9 +2576,11 @@
}
for_each_zone(zone) {
- unsigned long tmp;
+ u64 tmp;
+
spin_lock_irqsave(&zone->lru_lock, flags);
- tmp = (pages_min * zone->present_pages) / lowmem_pages;
+ tmp = (u64)pages_min * zone->present_pages;
+ do_div(tmp, lowmem_pages);
if (is_highmem(zone)) {
/*
* __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2595,8 +2607,8 @@
zone->pages_min = tmp;
}
- zone->pages_low = zone->pages_min + tmp / 4;
- zone->pages_high = zone->pages_min + tmp / 2;
+ zone->pages_low = zone->pages_min + (tmp >> 2);
+ zone->pages_high = zone->pages_min + (tmp >> 1);
spin_unlock_irqrestore(&zone->lru_lock, flags);
}
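
The watermark hunk above moves the per-zone calculation to a u64 intermediate and do_div(), because pages_min * present_pages can overflow a 32-bit unsigned long before the division. The user-space sketch below models that overflow with uint32_t; do_div() divides a 64-bit value in place and returns the remainder, and plain 64-bit division stands in for it here. The page counts are made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pages_min = 65536, present_pages = 262144, lowmem_pages = 262144;

	/* 32-bit product wraps before the division, as on a 32-bit kernel */
	uint32_t narrow = (pages_min * present_pages) / lowmem_pages;

	/* widen first, then divide: what the patched code does via do_div() */
	uint64_t wide = (uint64_t)pages_min * present_pages;
	wide /= lowmem_pages;

	printf("32-bit result: %u, 64-bit result: %llu\n",
	       narrow, (unsigned long long)wide);
	return 0;
}
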
diff --git a/mm/slab.c b/mm/slab.c
index c32af7e..d31a06b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -700,6 +700,14 @@
FULL
} g_cpucache_up;
+/*
+ * used by boot code to determine if it can use the slab-based allocator
+ */
+int slab_is_available(void)
+{
+ return g_cpucache_up == FULL;
+}
+
static DEFINE_PER_CPU(struct work_struct, reap_work);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
@@ -2192,11 +2200,14 @@
check_irq_on();
for_each_online_node(node) {
l3 = cachep->nodelists[node];
- if (l3) {
+ if (l3 && l3->alien)
+ drain_alien_cache(cachep, l3->alien);
+ }
+
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+ if (l3)
drain_array(cachep, l3, l3->shared, 1, node);
- if (l3->alien)
- drain_alien_cache(cachep, l3->alien);
- }
}
}
diff --git a/mm/sparse.c b/mm/sparse.c
index d7c32de..100040c 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,7 @@
unsigned long array_size = SECTIONS_PER_ROOT *
sizeof(struct mem_section);
- if (system_state == SYSTEM_RUNNING)
+ if (slab_is_available())
section = kmalloc_node(array_size, GFP_KERNEL, nid);
else
section = alloc_bootmem_node(NODE_DATA(nid), array_size);
@@ -87,11 +87,8 @@
unsigned long root_nr;
struct mem_section* root;
- for (root_nr = 0;
- root_nr < NR_MEM_SECTIONS;
- root_nr += SECTIONS_PER_ROOT) {
- root = __nr_to_section(root_nr);
-
+ for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
+ root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
if (!root)
continue;
diff --git a/net/802/tr.c b/net/802/tr.c
index afd8385..e9dc803 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -643,6 +643,5 @@
module_init(rif_init);
-EXPORT_SYMBOL(tr_source_route);
EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 1a786bf..72d8529 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -963,7 +963,7 @@
static int __init atm_clip_init(void)
{
struct proc_dir_entry *p;
- neigh_table_init(&clip_tbl);
+ neigh_table_init_no_netlink(&clip_tbl);
clip_tbl_hook = &clip_tbl;
register_atm_ioctl(&clip_ioctl_ops);
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index d159c92..466ed34 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -168,7 +168,7 @@
if (info->bitmask & EBT_LOG_NFLOG)
nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
- info->prefix);
+ "%s", info->prefix);
else
ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
info->prefix);
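
This ebt_log hunk (and the matching ipt_LOG and ip6t_LOG hunks further down) passes the rule's prefix through a "%s" format instead of using it as the format string itself. A small user-space illustration of the difference follows; the prefix string is invented.

#include <stdio.h>

int main(void)
{
	/* pretend this prefix was configured from userspace */
	const char *prefix = "surprise %s %n prefix";

	/* printf(prefix);       unsafe: '%' sequences in prefix get interpreted */
	printf("%s\n", prefix);	/* safe: prefix is treated as plain data */
	return 0;
}
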
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 4cf878e..50a8c73 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1326,8 +1326,7 @@
kfree(parms);
}
-
-void neigh_table_init(struct neigh_table *tbl)
+void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
unsigned long now = jiffies;
unsigned long phsize;
@@ -1383,10 +1382,27 @@
tbl->last_flush = now;
tbl->last_rand = now + tbl->parms.reachable_time * 20;
+}
+
+void neigh_table_init(struct neigh_table *tbl)
+{
+ struct neigh_table *tmp;
+
+ neigh_table_init_no_netlink(tbl);
write_lock(&neigh_tbl_lock);
+ for (tmp = neigh_tables; tmp; tmp = tmp->next) {
+ if (tmp->family == tbl->family)
+ break;
+ }
tbl->next = neigh_tables;
neigh_tables = tbl;
write_unlock(&neigh_tbl_lock);
+
+ if (unlikely(tmp)) {
+ printk(KERN_ERR "NEIGH: Registering multiple tables for "
+ "family %d\n", tbl->family);
+ dump_stack();
+ }
}
int neigh_table_clear(struct neigh_table *tbl)
@@ -2657,6 +2673,7 @@
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
+EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index c2d92f9..d0d1919 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -948,7 +948,7 @@
write_lock_bh(&t->lock);
private = t->private;
- if (private->number != paddc->num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
index 6c4899d..96ceaba 100644
--- a/net/ipv4/netfilter/ip_nat_proto_gre.c
+++ b/net/ipv4/netfilter/ip_nat_proto_gre.c
@@ -49,15 +49,15 @@
const union ip_conntrack_manip_proto *min,
const union ip_conntrack_manip_proto *max)
{
- u_int32_t key;
+ __be16 key;
if (maniptype == IP_NAT_MANIP_SRC)
key = tuple->src.u.gre.key;
else
key = tuple->dst.u.gre.key;
- return ntohl(key) >= ntohl(min->gre.key)
- && ntohl(key) <= ntohl(max->gre.key);
+ return ntohs(key) >= ntohs(min->gre.key)
+ && ntohs(key) <= ntohs(max->gre.key);
}
/* generate unique tuple ... */
@@ -81,14 +81,14 @@
min = 1;
range_size = 0xffff;
} else {
- min = ntohl(range->min.gre.key);
- range_size = ntohl(range->max.gre.key) - min + 1;
+ min = ntohs(range->min.gre.key);
+ range_size = ntohs(range->max.gre.key) - min + 1;
}
DEBUGP("min = %u, range_size = %u\n", min, range_size);
for (i = 0; i < range_size; i++, key++) {
- *keyptr = htonl(min + key % range_size);
+ *keyptr = htons(min + key % range_size);
if (!ip_nat_used_tuple(tuple, conntrack))
return 1;
}
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 39fd4c2..b98f7b0 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -428,7 +428,7 @@
if (loginfo->logflags & IPT_LOG_NFLOG)
nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
- loginfo->prefix);
+ "%s", loginfo->prefix);
else
ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
loginfo->prefix);
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 1438432..b847ee4 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -821,6 +821,7 @@
/* Create our proc 'status' entry. */
curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent);
if (!curr_table->status_proc) {
+ vfree(hold);
printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n");
/* Destroy the created table */
spin_lock_bh(&recent_lock);
@@ -845,7 +846,6 @@
spin_unlock_bh(&recent_lock);
vfree(curr_table->time_info);
vfree(curr_table->hash_table);
- vfree(hold);
vfree(curr_table->table);
vfree(curr_table);
return 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9f0cca4..4a538bc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1662,6 +1662,8 @@
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
+ if (IsReno(tp))
+ tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);
/* clear xmit_retrans hint */
if (tp->retransmit_skb_hint &&
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0a67303..2e72f89 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1103,7 +1103,7 @@
write_lock_bh(&t->lock);
private = t->private;
- if (private->number != paddc->num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index a96c0de..73c6300 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -439,7 +439,7 @@
if (loginfo->logflags & IP6T_LOG_NFLOG)
nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
- loginfo->prefix);
+ "%s", loginfo->prefix);
else
ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
loginfo->prefix);
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index 94dbdb8..4f6b84c 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -40,7 +40,7 @@
memset(eui64, 0, sizeof(eui64));
- if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
+ if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
if (skb->nh.ipv6h->version == 0x6) {
memcpy(eui64, eth_hdr(skb)->h_source, 3);
memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 2dbf134..811d998 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -944,9 +944,9 @@
return rc;
}
-static int ipx_map_frame_type(unsigned char type)
+static __be16 ipx_map_frame_type(unsigned char type)
{
- int rc = 0;
+ __be16 rc = 0;
switch (type) {
case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break;
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 6777444..a394c6f 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -119,7 +119,7 @@
return rc;
}
-static int ipxrtr_delete(long net)
+static int ipxrtr_delete(__u32 net)
{
struct ipx_route *r, *tmp;
int rc;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index c60273c..61cdda4 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -321,7 +321,7 @@
nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
{
spin_lock_bh(&inst->lock);
- inst->flags = ntohs(flags);
+ inst->flags = flags;
spin_unlock_bh(&inst->lock);
return 0;
@@ -902,7 +902,7 @@
if (nfula[NFULA_CFG_FLAGS-1]) {
u_int16_t flags =
*(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]);
- nfulnl_set_flags(inst, ntohl(flags));
+ nfulnl_set_flags(inst, ntohs(flags));
}
out_put:
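
Taken together, the two hunks convert the 16-bit flags exactly once, at the
point where the netlink attribute is read, and store them as-is in the
setter; the old code byte-swapped twice, and with the wrong width on the
first swap. A sketch of the convert-once-at-the-boundary rule, with
hypothetical names:

  /* Sketch: convert endianness once, where the value enters host code,
   * and keep host-order values everywhere after that. */
  #include <stdio.h>
  #include <stdint.h>
  #include <arpa/inet.h>

  struct instance { uint16_t flags; };

  static void set_flags(struct instance *inst, uint16_t flags)
  {
          inst->flags = flags;                    /* already host order */
  }

  int main(void)
  {
          uint16_t wire_flags = htons(0x0001);    /* as received */
          struct instance inst;

          set_flags(&inst, ntohs(wire_flags));    /* the single conversion */
          printf("flags = 0x%04x\n", inst.flags);
          return 0;
  }
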
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 31eb837..138ea92 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -193,8 +193,10 @@
netif_running(dev) &&
netif_carrier_ok(dev)) {
if (netif_queue_stopped(dev) &&
- (jiffies - dev->trans_start) > dev->watchdog_timeo) {
- printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
+ time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
+
+ printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
+ dev->name);
dev->tx_timeout(dev);
}
if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
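
time_after() compares the two timestamps through a signed difference, so the
watchdog test stays well defined around jiffies wraparound and does not fire
spuriously when trans_start has just been bumped by a concurrent transmit and
momentarily sits ahead of jiffies. A userspace sketch of the difference,
assuming 32-bit unsigned tick counters:

  /* Sketch of the signed, wrap-safe comparison behind time_after(),
   * contrasted with the open-coded unsigned subtraction it replaces. */
  #include <stdio.h>
  #include <stdint.h>

  #define time_after(a, b)        ((int32_t)((b) - (a)) < 0)

  int main(void)
  {
          uint32_t now   = 1000;
          uint32_t start = 1005;          /* just updated by another CPU */
          uint32_t timeo = 100;

          /* Unsigned subtraction wraps to a huge value: spurious timeout. */
          printf("open-coded: %d\n", (now - start) > timeo);

          /* Signed difference orders the values correctly: no timeout. */
          printf("time_after: %d\n", time_after(now, start + timeo));
          return 0;
  }
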
diff --git a/net/sctp/input.c b/net/sctp/input.c
index d117ebc..1662f9c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -73,6 +73,8 @@
const union sctp_addr *peer,
struct sctp_transport **pt);
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
/* Calculate the SCTP checksum of an SCTP packet. */
static inline int sctp_rcv_checksum(struct sk_buff *skb)
@@ -186,7 +188,6 @@
*/
if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
{
- sock_put(sk);
if (asoc) {
sctp_association_put(asoc);
asoc = NULL;
@@ -197,7 +198,6 @@
sk = sctp_get_ctl_sock();
ep = sctp_sk(sk)->ep;
sctp_endpoint_hold(ep);
- sock_hold(sk);
rcvr = &ep->base;
}
@@ -253,25 +253,18 @@
*/
sctp_bh_lock_sock(sk);
- /* It is possible that the association could have moved to a different
- * socket if it is peeled off. If so, update the sk.
- */
- if (sk != rcvr->sk) {
- sctp_bh_lock_sock(rcvr->sk);
- sctp_bh_unlock_sock(sk);
- sk = rcvr->sk;
- }
-
if (sock_owned_by_user(sk))
- sk_add_backlog(sk, skb);
+ sctp_add_backlog(sk, skb);
else
- sctp_backlog_rcv(sk, skb);
+ sctp_inq_push(&chunk->rcvr->inqueue, chunk);
- /* Release the sock and the sock ref we took in the lookup calls.
- * The asoc/ep ref will be released in sctp_backlog_rcv.
- */
sctp_bh_unlock_sock(sk);
- sock_put(sk);
+
+ /* Release the asoc/ep ref we took in the lookup calls. */
+ if (asoc)
+ sctp_association_put(asoc);
+ else
+ sctp_endpoint_put(ep);
return 0;
@@ -280,8 +273,7 @@
return 0;
discard_release:
- /* Release any structures we may be holding. */
- sock_put(sk);
+ /* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
@@ -290,56 +282,87 @@
goto discard_it;
}
-/* Handle second half of inbound skb processing. If the sock was busy,
- * we may have need to delay processing until later when the sock is
- * released (on the backlog). If not busy, we call this routine
- * directly from the bottom half.
+/* Process the backlog queue of the socket. Every skb on
+ * the backlog holds a ref on an association or endpoint.
+ * We hold this ref throughout the state machine to make
+ * sure that the structure we need is still around.
*/
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
- struct sctp_inq *inqueue = NULL;
+ struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
struct sctp_ep_common *rcvr = NULL;
+ int backloged = 0;
rcvr = chunk->rcvr;
- BUG_TRAP(rcvr->sk == sk);
+ /* If the rcvr is dead then the association or endpoint
+ * has been deleted and we can safely drop the chunk
+ * and refs that we are holding.
+ */
+ if (rcvr->dead) {
+ sctp_chunk_free(chunk);
+ goto done;
+ }
- if (rcvr->dead) {
- sctp_chunk_free(chunk);
- } else {
- inqueue = &chunk->rcvr->inqueue;
- sctp_inq_push(inqueue, chunk);
- }
+ if (unlikely(rcvr->sk != sk)) {
+ /* In this case, the association moved from one socket to
+ * another. We are currently sitting on the backlog of the
+ * old socket, so we need to move.
+ * However, since we are here in the process context we
+ * need to make sure that the user doesn't own
+ * the new socket when we process the packet.
+ * If the new socket is user-owned, queue the chunk to the
+ * backlog of the new socket without dropping any refs.
+ * Otherwise, we can safely push the chunk on the inqueue.
+ */
- /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */
- if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
- sctp_association_put(sctp_assoc(rcvr));
- else
- sctp_endpoint_put(sctp_ep(rcvr));
-
+ sk = rcvr->sk;
+ sctp_bh_lock_sock(sk);
+
+ if (sock_owned_by_user(sk)) {
+ sk_add_backlog(sk, skb);
+ backloged = 1;
+ } else
+ sctp_inq_push(inqueue, chunk);
+
+ sctp_bh_unlock_sock(sk);
+
+ /* If the chunk was backlogged again, don't drop refs */
+ if (backloged)
+ return 0;
+ } else {
+ sctp_inq_push(inqueue, chunk);
+ }
+
+done:
+ /* Release the refs we took in sctp_add_backlog */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_association_put(sctp_assoc(rcvr));
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_put(sctp_ep(rcvr));
+ else
+ BUG();
+
return 0;
}
-void sctp_backlog_migrate(struct sctp_association *assoc,
- struct sock *oldsk, struct sock *newsk)
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
- struct sk_buff *skb;
- struct sctp_chunk *chunk;
+ struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+ struct sctp_ep_common *rcvr = chunk->rcvr;
- skb = oldsk->sk_backlog.head;
- oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
- while (skb != NULL) {
- struct sk_buff *next = skb->next;
+ /* Hold the assoc/ep while hanging on the backlog queue.
+ * This way, we know structures we need will not disappear from us
+ */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_association_hold(sctp_assoc(rcvr));
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_hold(sctp_ep(rcvr));
+ else
+ BUG();
- chunk = SCTP_INPUT_CB(skb)->chunk;
- skb->next = NULL;
- if (&assoc->base == chunk->rcvr)
- sk_add_backlog(newsk, skb);
- else
- sk_add_backlog(oldsk, skb);
- skb = next;
- }
+ sk_add_backlog(sk, skb);
}
/* Handle icmp frag needed error. */
@@ -412,7 +435,7 @@
union sctp_addr daddr;
struct sctp_af *af;
struct sock *sk = NULL;
- struct sctp_association *asoc = NULL;
+ struct sctp_association *asoc;
struct sctp_transport *transport = NULL;
*app = NULL; *tpp = NULL;
@@ -453,7 +476,6 @@
return sk;
out:
- sock_put(sk);
if (asoc)
sctp_association_put(asoc);
return NULL;
@@ -463,7 +485,6 @@
void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
sctp_bh_unlock_sock(sk);
- sock_put(sk);
if (asoc)
sctp_association_put(asoc);
}
@@ -490,7 +511,7 @@
int type = skb->h.icmph->type;
int code = skb->h.icmph->code;
struct sock *sk;
- struct sctp_association *asoc;
+ struct sctp_association *asoc = NULL;
struct sctp_transport *transport;
struct inet_sock *inet;
char *saveip, *savesctp;
@@ -716,7 +737,6 @@
hit:
sctp_endpoint_hold(ep);
- sock_hold(epb->sk);
read_unlock(&head->lock);
return ep;
}
@@ -818,7 +838,6 @@
hit:
*pt = transport;
sctp_association_hold(asoc);
- sock_hold(epb->sk);
read_unlock(&head->lock);
return asoc;
}
@@ -846,7 +865,6 @@
struct sctp_transport *transport;
if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
- sock_put(asoc->base.sk);
sctp_association_put(asoc);
return 1;
}
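
The reworked receive path stops pinning the socket with sock_hold() and
instead takes a reference on the association or endpoint whenever an skb is
parked on a backlog (sctp_add_backlog) and drops it once the entry is
processed (sctp_backlog_rcv), so the structure a queued chunk will need
cannot vanish underneath it. A minimal sketch of that discipline, with
hypothetical types in place of the SCTP ones:

  /* Sketch: anything sitting on a deferred queue must hold a reference on
   * the object it will need later -- taken when queued, dropped when
   * processed. */
  #include <stdio.h>
  #include <stdlib.h>

  struct assoc {
          int refcnt;
          int dead;
  };

  static void assoc_hold(struct assoc *a) { a->refcnt++; }

  static void assoc_put(struct assoc *a)
  {
          if (--a->refcnt == 0)
                  free(a);
  }

  static void add_backlog(struct assoc *a)
  {
          assoc_hold(a);          /* keep it alive while queued */
  }

  static void backlog_rcv(struct assoc *a)
  {
          if (!a->dead)
                  printf("deliver chunk to live association\n");
          assoc_put(a);           /* drop the ref taken in add_backlog() */
  }

  int main(void)
  {
          struct assoc *a = malloc(sizeof(*a));

          if (!a)
                  return 1;
          a->refcnt = 1;
          a->dead = 0;

          add_backlog(a);
          backlog_rcv(a);
          assoc_put(a);           /* drop the creator's reference */
          return 0;
  }
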
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8d1dc24..c5beb2a 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -498,10 +498,6 @@
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- /* Set sk_err to ECONNRESET on a 1-1 style socket. */
- if (!sctp_style(asoc->base.sk, UDP))
- asoc->base.sk->sk_err = ECONNRESET;
-
/* SEND_FAILED sent later when cleaning up the association. */
asoc->outqueue.error = error;
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
@@ -838,6 +834,15 @@
return;
}
+/* Helper function to set sk_err on a 1-1 style socket. */
+static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
+{
+ struct sock *sk = asoc->base.sk;
+
+ if (!sctp_style(sk, UDP))
+ sk->sk_err = error;
+}
+
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
@@ -1458,6 +1463,9 @@
local_cork = 0;
asoc->peer.retran_path = t;
break;
+ case SCTP_CMD_SET_SK_ERR:
+ sctp_cmd_set_sk_err(asoc, cmd->obj.error);
+ break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8cdba51..8bc2792 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -93,7 +93,7 @@
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
- __u16 error,
+ __u16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport);
@@ -448,7 +448,7 @@
__u32 init_tag;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
- sctp_disposition_t ret;
+ __u16 error;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -480,11 +480,9 @@
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
- return SCTP_DISPOSITION_DELETE_TCB;
+ return sctp_stop_t1_and_abort(commands, SCTP_ERROR_INV_PARAM,
+ ECONNREFUSED, asoc,
+ chunk->transport);
}
/* Verify the INIT chunk before processing it. */
@@ -511,27 +509,16 @@
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
- SCTP_NULL());
- return SCTP_DISPOSITION_CONSUME;
+ error = SCTP_ERROR_INV_PARAM;
} else {
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
- SCTP_NULL());
- return SCTP_DISPOSITION_NOMEM;
+ error = SCTP_ERROR_NO_RESOURCE;
}
} else {
- ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
- commands);
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
- SCTP_NULL());
- return ret;
+ sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+ error = SCTP_ERROR_INV_PARAM;
}
+ return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
+ asoc, chunk->transport);
}
/* Tag the variable length parameters. Note that we never
@@ -886,6 +873,8 @@
struct sctp_transport *transport = (struct sctp_transport *) arg;
if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -1030,6 +1019,12 @@
commands);
hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
+ /* Make sure that the length of the parameter is what we expect */
+ if (ntohs(hbinfo->param_hdr.length) !=
+ sizeof(sctp_sender_hb_info_t)) {
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
from_addr = hbinfo->daddr;
link = sctp_assoc_lookup_paddr(asoc, &from_addr);
@@ -2126,6 +2121,8 @@
int attempts = asoc->init_err_counter + 1;
if (attempts > asoc->max_init_attempts) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_STALE_COOKIE));
return SCTP_DISPOSITION_DELETE_TCB;
@@ -2262,6 +2259,7 @@
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -2306,7 +2304,8 @@
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
- return sctp_stop_t1_and_abort(commands, error, asoc, chunk->transport);
+ return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
+ chunk->transport);
}
/*
@@ -2318,7 +2317,8 @@
void *arg,
sctp_cmd_seq_t *commands)
{
- return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, asoc,
+ return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
+ ENOPROTOOPT, asoc,
(struct sctp_transport *)arg);
}
@@ -2343,7 +2343,7 @@
* This is common code called by several sctp_sf_*_abort() functions above.
*/
static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
- __u16 error,
+ __u16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport)
{
@@ -2353,6 +2353,7 @@
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
/* CMD_INIT_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(error));
@@ -3336,6 +3337,8 @@
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3362,6 +3365,8 @@
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3714,9 +3719,13 @@
if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNREFUSED));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
} else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -4034,6 +4043,8 @@
* TCB. This is a departure from our typical NOMEM handling.
*/
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4175,6 +4186,8 @@
* TCB. This is a departure from our typical NOMEM handling.
*/
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNREFUSED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4543,6 +4556,8 @@
struct sctp_transport *transport = arg;
if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4662,6 +4677,8 @@
SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d"
" max_init_attempts: %d\n",
attempts, asoc->max_init_attempts);
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
@@ -4711,6 +4728,8 @@
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
} else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
@@ -4742,6 +4761,8 @@
SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4817,6 +4838,8 @@
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -4870,6 +4893,8 @@
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -5309,6 +5334,8 @@
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_DATA));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
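
Besides routing the abort paths through SCTP_CMD_SET_SK_ERR, the hunks above
add a length check so the heartbeat handler only overlays
sctp_sender_hb_info_t on the parameter when the declared length matches the
structure it is about to read. A hedged sketch of that validate-before-cast
rule on a generic length-prefixed parameter, with illustrative types rather
than the kernel's:

  /* Sketch: check a TLV's declared length against the structure you are
   * about to overlay on it before touching fields past the header. */
  #include <stdio.h>
  #include <stdint.h>
  #include <string.h>
  #include <arpa/inet.h>

  struct param_hdr { uint16_t type; uint16_t length; };  /* network order */
  struct hb_info   { struct param_hdr hdr; uint32_t daddr; uint64_t nonce; };

  static int parse_hb_info(const uint8_t *buf, size_t buflen)
  {
          struct hb_info info;

          if (buflen < sizeof(info))
                  return -1;                      /* truncated packet */
          memcpy(&info, buf, sizeof(info));

          if (ntohs(info.hdr.length) != sizeof(info))
                  return -1;                      /* discard, as above */

          printf("heartbeat info accepted\n");
          return 0;
  }

  int main(void)
  {
          struct hb_info good = { { htons(1), htons(sizeof(good)) }, 0, 0 };

          return parse_hb_info((const uint8_t *)&good, sizeof(good)) ? 1 : 0;
  }
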
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b6e4b89..174d4d3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1057,6 +1057,7 @@
inet_sk(sk)->dport = htons(asoc->peer.port);
af = sctp_get_af_specific(to.sa.sa_family);
af->to_sk_daddr(&to, sk);
+ sk->sk_err = 0;
timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
err = sctp_wait_for_connect(asoc, &timeo);
@@ -1228,7 +1229,7 @@
ep = sctp_sk(sk)->ep;
- /* Walk all associations on a socket, not on an endpoint. */
+ /* Walk all associations on an endpoint. */
list_for_each_safe(pos, temp, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1241,13 +1242,13 @@
if (sctp_state(asoc, CLOSED)) {
sctp_unhash_established(asoc);
sctp_association_free(asoc);
+ continue;
+ }
+ }
- } else if (sock_flag(sk, SOCK_LINGER) &&
- !sk->sk_lingertime)
- sctp_primitive_ABORT(asoc, NULL);
- else
- sctp_primitive_SHUTDOWN(asoc, NULL);
- } else
+ if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)
+ sctp_primitive_ABORT(asoc, NULL);
+ else
sctp_primitive_SHUTDOWN(asoc, NULL);
}
@@ -5317,6 +5318,7 @@
*/
sctp_release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
+ BUG_ON(sk != asoc->base.sk);
sctp_lock_sock(sk);
*timeo_p = current_timeo;
@@ -5604,12 +5606,14 @@
*/
newsp->type = type;
- spin_lock_bh(&oldsk->sk_lock.slock);
- /* Migrate the backlog from oldsk to newsk. */
- sctp_backlog_migrate(assoc, oldsk, newsk);
- /* Migrate the association to the new socket. */
+ /* Mark the new socket "in-use" by the user so that any packets
+ * that may arrive on the association after we've moved it are
+ * queued to the backlog. This prevents a potential race between
+ * backlog processing on the old socket and new-packet processing
+ * on the new socket.
+ */
+ sctp_lock_sock(newsk);
sctp_assoc_migrate(assoc, newsk);
- spin_unlock_bh(&oldsk->sk_lock.slock);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
@@ -5618,6 +5622,7 @@
newsk->sk_shutdown |= RCV_SHUTDOWN;
newsk->sk_state = SCTP_SS_ESTABLISHED;
+ sctp_release_sock(newsk);
}
/* This proto struct describes the ULP interface for SCTP. */
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 6d04504..d0f86ed 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -697,29 +697,79 @@
/* Walk through all sections */
for (i = 0; i < hdr->e_shnum; i++) {
- Elf_Rela *rela;
- Elf_Rela *start = (void *)hdr + sechdrs[i].sh_offset;
- Elf_Rela *stop = (void*)start + sechdrs[i].sh_size;
- const char *name = secstrings + sechdrs[i].sh_name +
- strlen(".rela");
+ const char *name = secstrings + sechdrs[i].sh_name;
+ const char *secname;
+ Elf_Rela r;
+ unsigned int r_sym;
/* We want to process only relocation sections and not .init */
- if (section_ref_ok(name) || (sechdrs[i].sh_type != SHT_RELA))
- continue;
-
- for (rela = start; rela < stop; rela++) {
- Elf_Rela r;
- const char *secname;
- r.r_offset = TO_NATIVE(rela->r_offset);
- r.r_info = TO_NATIVE(rela->r_info);
- r.r_addend = TO_NATIVE(rela->r_addend);
- sym = elf->symtab_start + ELF_R_SYM(r.r_info);
- /* Skip special sections */
- if (sym->st_shndx >= SHN_LORESERVE)
+ if (sechdrs[i].sh_type == SHT_RELA) {
+ Elf_Rela *rela;
+ Elf_Rela *start = (void *)hdr + sechdrs[i].sh_offset;
+ Elf_Rela *stop = (void*)start + sechdrs[i].sh_size;
+ name += strlen(".rela");
+ if (section_ref_ok(name))
continue;
- secname = secstrings + sechdrs[sym->st_shndx].sh_name;
- if (section(secname))
- warn_sec_mismatch(modname, name, elf, sym, r);
+ for (rela = start; rela < stop; rela++) {
+ r.r_offset = TO_NATIVE(rela->r_offset);
+#if KERNEL_ELFCLASS == ELFCLASS64
+ if (hdr->e_machine == EM_MIPS) {
+ r_sym = ELF64_MIPS_R_SYM(rela->r_info);
+ r_sym = TO_NATIVE(r_sym);
+ } else {
+ r.r_info = TO_NATIVE(rela->r_info);
+ r_sym = ELF_R_SYM(r.r_info);
+ }
+#else
+ r.r_info = TO_NATIVE(rela->r_info);
+ r_sym = ELF_R_SYM(r.r_info);
+#endif
+ r.r_addend = TO_NATIVE(rela->r_addend);
+ sym = elf->symtab_start + r_sym;
+ /* Skip special sections */
+ if (sym->st_shndx >= SHN_LORESERVE)
+ continue;
+
+ secname = secstrings +
+ sechdrs[sym->st_shndx].sh_name;
+ if (section(secname))
+ warn_sec_mismatch(modname, name,
+ elf, sym, r);
+ }
+ } else if (sechdrs[i].sh_type == SHT_REL) {
+ Elf_Rel *rel;
+ Elf_Rel *start = (void *)hdr + sechdrs[i].sh_offset;
+ Elf_Rel *stop = (void*)start + sechdrs[i].sh_size;
+ name += strlen(".rel");
+ if (section_ref_ok(name))
+ continue;
+
+ for (rel = start; rel < stop; rel++) {
+ r.r_offset = TO_NATIVE(rel->r_offset);
+#if KERNEL_ELFCLASS == ELFCLASS64
+ if (hdr->e_machine == EM_MIPS) {
+ r_sym = ELF64_MIPS_R_SYM(rel->r_info);
+ r_sym = TO_NATIVE(r_sym);
+ } else {
+ r.r_info = TO_NATIVE(rel->r_info);
+ r_sym = ELF_R_SYM(r.r_info);
+ }
+#else
+ r.r_info = TO_NATIVE(rel->r_info);
+ r_sym = ELF_R_SYM(r.r_info);
+#endif
+ r.r_addend = 0;
+ sym = elf->symtab_start + r_sym;
+ /* Skip special sections */
+ if (sym->st_shndx >= SHN_LORESERVE)
+ continue;
+
+ secname = secstrings +
+ sechdrs[sym->st_shndx].sh_name;
+ if (section(secname))
+ warn_sec_mismatch(modname, name,
+ elf, sym, r);
+ }
}
}
}
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index b14255c..861d866 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -21,6 +21,7 @@
#define ELF_ST_BIND ELF32_ST_BIND
#define ELF_ST_TYPE ELF32_ST_TYPE
+#define Elf_Rel Elf32_Rel
#define Elf_Rela Elf32_Rela
#define ELF_R_SYM ELF32_R_SYM
#define ELF_R_TYPE ELF32_R_TYPE
@@ -34,11 +35,31 @@
#define ELF_ST_BIND ELF64_ST_BIND
#define ELF_ST_TYPE ELF64_ST_TYPE
+#define Elf_Rel Elf64_Rel
#define Elf_Rela Elf64_Rela
#define ELF_R_SYM ELF64_R_SYM
#define ELF_R_TYPE ELF64_R_TYPE
#endif
+/* The 64-bit MIPS ELF ABI uses an unusual reloc format. */
+typedef struct
+{
+ Elf32_Word r_sym; /* Symbol index */
+ unsigned char r_ssym; /* Special symbol for 2nd relocation */
+ unsigned char r_type3; /* 3rd relocation type */
+ unsigned char r_type2; /* 2nd relocation type */
+ unsigned char r_type1; /* 1st relocation type */
+} _Elf64_Mips_R_Info;
+
+typedef union
+{
+ Elf64_Xword r_info_number;
+ _Elf64_Mips_R_Info r_info_fields;
+} _Elf64_Mips_R_Info_union;
+
+#define ELF64_MIPS_R_SYM(i) \
+ ((__extension__ (_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_sym)
+
#if KERNEL_ELFDATA != HOST_ELFDATA
static inline void __endian(const void *src, void *dest, unsigned int size)
@@ -48,8 +69,6 @@
((unsigned char*)dest)[i] = ((unsigned char*)src)[size - i-1];
}
-
-
#define TO_NATIVE(x) \
({ \
typeof(x) __x; \
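
ELF64_MIPS_R_SYM() uses GCC's cast-to-union extension to view the 64-bit
r_info word through the MIPS-specific field layout and pull out the 32-bit
symbol index, which the callers then pass through TO_NATIVE() for
cross-endian builds. A standalone sketch of the same reinterpretation using
memcpy instead of the extension (and ignoring the cross-endian step handled
separately in modpost):

  /* Sketch: extract the symbol index from a MIPS64-style r_info word by
   * viewing the same 8 bytes through the MIPS field layout. */
  #include <stdio.h>
  #include <stdint.h>
  #include <string.h>

  struct mips_r_info {
          uint32_t r_sym;                 /* symbol index */
          unsigned char r_ssym;           /* special symbol, 2nd relocation */
          unsigned char r_type3;          /* 3rd relocation type */
          unsigned char r_type2;          /* 2nd relocation type */
          unsigned char r_type1;          /* 1st relocation type */
  };

  static uint32_t mips64_r_sym(uint64_t r_info)
  {
          struct mips_r_info fields;

          memcpy(&fields, &r_info, sizeof(fields));
          return fields.r_sym;
  }

  int main(void)
  {
          struct mips_r_info fields = { .r_sym = 42, .r_type1 = 3 };
          uint64_t r_info;

          memcpy(&r_info, &fields, sizeof(r_info));
          printf("r_sym = %u\n", mips64_r_sym(r_info));
          return 0;
  }
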
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index d987048..21dad41 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3231,7 +3231,7 @@
goto out;
/* Handle mapped IPv4 packets arriving via IPv6 sockets */
- if (family == PF_INET6 && skb->protocol == ntohs(ETH_P_IP))
+ if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
family = PF_INET;
read_lock_bh(&sk->sk_callback_lock);
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 7177e98..c284dbb 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -594,6 +594,10 @@
*scontext_len = strlen(initial_sid_to_string[sid]) + 1;
scontextp = kmalloc(*scontext_len,GFP_ATOMIC);
+ if (!scontextp) {
+ rc = -ENOMEM;
+ goto out;
+ }
strcpy(scontextp, initial_sid_to_string[sid]);
*scontext = scontextp;
goto out;
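
The added check keeps the strcpy() that follows from writing through a failed
GFP_ATOMIC allocation and returns -ENOMEM to the caller instead. The general
shape, sketched in userspace with illustrative names:

  /* Sketch: test every allocation before writing through it and report
   * the failure, rather than crashing on a NULL pointer. */
  #include <errno.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  static int copy_context(const char *src, char **out, size_t *out_len)
  {
          size_t len = strlen(src) + 1;
          char *p = malloc(len);

          if (!p)
                  return -ENOMEM;         /* the case the hunk adds */

          strcpy(p, src);
          *out = p;
          *out_len = len;
          return 0;
  }

  int main(void)
  {
          char *ctx;
          size_t len;

          if (copy_context("kernel", &ctx, &len) == 0) {
                  printf("%s (%zu bytes)\n", ctx, len);
                  free(ctx);
          }
          return 0;
  }
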
diff --git a/sound/drivers/mpu401/mpu401.c b/sound/drivers/mpu401/mpu401.c
index da7ef26..77b0600 100644
--- a/sound/drivers/mpu401/mpu401.c
+++ b/sound/drivers/mpu401/mpu401.c
@@ -151,7 +151,7 @@
MODULE_DEVICE_TABLE(pnp, snd_mpu401_pnpids);
-static int __init snd_mpu401_pnp(int dev, struct pnp_dev *device,
+static int __devinit snd_mpu401_pnp(int dev, struct pnp_dev *device,
const struct pnp_device_id *id)
{
if (!pnp_port_valid(device, 0) ||
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c
index a36ec1d..e6945db 100644
--- a/sound/isa/es18xx.c
+++ b/sound/isa/es18xx.c
@@ -85,6 +85,8 @@
#include <linux/pnp.h>
#include <linux/isapnp.h>
#include <linux/moduleparam.h>
+#include <linux/delay.h>
+
#include <asm/io.h>
#include <asm/dma.h>
#include <sound/core.h>
diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c
index 49796be..e04fa49 100644
--- a/sound/oss/ad1848.c
+++ b/sound/oss/ad1848.c
@@ -2026,7 +2026,8 @@
if (irq > 0)
{
devc->dev_no = my_dev;
- if (request_irq(devc->irq, adintr, 0, devc->name, (void *)my_dev) < 0)
+ if (request_irq(devc->irq, adintr, 0, devc->name,
+ (void *)(long)my_dev) < 0)
{
printk(KERN_WARNING "ad1848: Unable to allocate IRQ\n");
/* Don't free it either then.. */
@@ -2175,7 +2176,7 @@
if (!share_dma)
{
if (devc->irq > 0) /* There is no point in freeing irq, if it wasn't allocated */
- free_irq(devc->irq, (void *)devc->dev_no);
+ free_irq(devc->irq, (void *)(long)devc->dev_no);
sound_free_dma(dma_playback);
@@ -2204,7 +2205,7 @@
unsigned char c930_stat = 0;
int cnt = 0;
- dev = (int)dev_id;
+ dev = (long)dev_id;
devc = (ad1848_info *) audio_devs[dev]->devc;
interrupt_again: /* Jump back here if int status doesn't reset */
@@ -2900,7 +2901,8 @@
return(dev);
}
-static struct pnp_dev *ad1848_init_generic(struct pnp_card *bus, struct address_info *hw_config, int slot)
+static struct pnp_dev __init *ad1848_init_generic(struct pnp_card *bus,
+ struct address_info *hw_config, int slot)
{
/* Configure Audio device */
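
Casting the device index through (long) on the way into the void * interrupt
cookie, and back out with (long) in the handler, keeps the conversion the
same width as a pointer on 64-bit targets, where a direct int/pointer cast
truncates or warns. A sketch of the round trip, using intptr_t as the
portable spelling of what the driver does with long:

  /* Sketch: pass a small integer through a void * cookie and back by
   * widening to a pointer-sized integer first. */
  #include <stdint.h>
  #include <stdio.h>

  static void handler(void *cookie)
  {
          int dev = (int)(intptr_t)cookie;        /* recover the index */

          printf("interrupt for device %d\n", dev);
  }

  int main(void)
  {
          int my_dev = 3;

          handler((void *)(intptr_t)my_dev);      /* widen, then to void * */
          return 0;
  }
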
diff --git a/sound/oss/nm256_audio.c b/sound/oss/nm256_audio.c
index 7de079b..6e662ac 100644
--- a/sound/oss/nm256_audio.c
+++ b/sound/oss/nm256_audio.c
@@ -960,7 +960,7 @@
/* Installs the AC97 mixer into CARD. */
-static int __init
+static int __devinit
nm256_install_mixer (struct nm256_info *card)
{
int mixer;
@@ -995,7 +995,7 @@
* RAM.
*/
-static void __init
+static void __devinit
nm256_peek_for_sig (struct nm256_info *card)
{
u32 port1offset
@@ -1056,7 +1056,7 @@
card->playing = 0;
card->recording = 0;
card->rev = rev;
- spin_lock_init(&card->lock);
+ spin_lock_init(&card->lock);
/* Init the memory port info. */
for (x = 0; x < 2; x++) {