Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/pcmcia/ds.c
Fix up merge failures with Linus's head and fix new compile failures.
Signed-off-by: David Howells <dhowells@redhat.com>
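For reference, the pattern this merge has to resolve against throughout the tree is the new work_struct API: work functions now take a struct work_struct * and recover their context with container_of() instead of being handed a void * data argument at INIT_WORK()/DECLARE_WORK() time. A minimal sketch of the conversion, illustrative only and with hypothetical names (not part of the patch):

/* Sketch only: the general old-to-new workqueue conversion shape. */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
	struct work_struct work;	/* embedded so the handler can find us */
	int state;
};

/* Old style: static void example_handler(void *data);
 *            INIT_WORK(&dev->work, example_handler, dev);
 * New style below: no data argument, use container_of() instead.
 */
static void example_handler(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, work);

	dev->state++;
}

static void example_setup(struct example_dev *dev)
{
	INIT_WORK(&dev->work, example_handler);
	schedule_work(&dev->work);
}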
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 605dedf..b359974 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -60,16 +60,16 @@
static int sharpsl_fatal_check(void);
static int sharpsl_average_value(int ad);
static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
+static void sharpsl_charge_toggle(struct work_struct *private_);
+static void sharpsl_battery_thread(struct work_struct *private_);
/*
* Variables
*/
struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
+DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
@@ -116,7 +116,7 @@
EXPORT_SYMBOL(sharpsl_battery_kick);
-static void sharpsl_battery_thread(void *private_)
+static void sharpsl_battery_thread(struct work_struct *private_)
{
int voltage, percent, apm_status, i = 0;
@@ -128,7 +128,7 @@
/* Corgi cannot confirm when battery fully charged so periodically kick! */
if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
&& time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
while(1) {
voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
@@ -212,7 +212,7 @@
sharpsl_pm_led(SHARPSL_LED_OFF);
sharpsl_pm.charge_mode = CHRG_OFF;
- schedule_work(&sharpsl_bat);
+ schedule_delayed_work(&sharpsl_bat, 0);
}
static void sharpsl_charge_error(void)
@@ -222,7 +222,7 @@
sharpsl_pm.charge_mode = CHRG_ERROR;
}
-static void sharpsl_charge_toggle(void *private_)
+static void sharpsl_charge_toggle(struct work_struct *private_)
{
dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
@@ -254,7 +254,7 @@
else if (sharpsl_pm.charge_mode == CHRG_ON)
sharpsl_charge_off();
- schedule_work(&sharpsl_bat);
+ schedule_delayed_work(&sharpsl_bat, 0);
}
@@ -279,10 +279,10 @@
sharpsl_charge_off();
} else if (sharpsl_pm.full_count < 2) {
dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
} else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
} else {
sharpsl_charge_off();
sharpsl_pm.charge_mode = CHRG_DONE;
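The sharpsl_pm conversion above also shows the delayed_work half of the change: work items that were scheduled with a mix of schedule_work() and schedule_delayed_work() become delayed_work items, with an explicit delay of 0 replacing the immediate case. A minimal sketch, hypothetical names, illustrative only:

/* Sketch only: statically declared delayed work, re-armed from its handler. */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void poll_battery(struct work_struct *work);
static DECLARE_DELAYED_WORK(battery_poll, poll_battery);

static void poll_battery(struct work_struct *work)
{
	/* ...read hardware state... */
	schedule_delayed_work(&battery_poll, HZ);	/* re-arm in one second */
}

static void kick_battery_poll(void)
{
	/* delay of 0 replaces the old schedule_work() on a plain work item */
	schedule_delayed_work(&battery_poll, 0);
}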
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f225a08..9d2346f 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -323,7 +323,8 @@
cancel_delayed_work(&irda_config->gpio_expa);
PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
- schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+ schedule_delayed_work(&irda_config->gpio_expa, 0);
return 0;
}
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index dbc555d..cbe909b 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -74,7 +74,7 @@
.rows = 8,
.cols = 8,
.keymap = nokia770_keymap,
- .keymapsize = ARRAY_SIZE(nokia770_keymap)
+ .keymapsize = ARRAY_SIZE(nokia770_keymap),
.delay = 4,
};
@@ -191,7 +191,7 @@
printk("HP connected\n");
}
-static void codec_delayed_power_down(void *arg)
+static void codec_delayed_power_down(struct work_struct *work)
{
down(&audio_pwr_sem);
if (audio_pwr_state == -1)
@@ -200,7 +200,7 @@
up(&audio_pwr_sem);
}
-static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
+static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
static void nokia770_audio_pwr_down(void)
{
diff --git a/arch/arm/mach-omap1/leds-osk.c b/arch/arm/mach-omap1/leds-osk.c
index 3b29e59..0cbf1b0 100644
--- a/arch/arm/mach-omap1/leds-osk.c
+++ b/arch/arm/mach-omap1/leds-osk.c
@@ -35,7 +35,7 @@
static u8 tps_leds_change;
-static void tps_work(void *unused)
+static void tps_work(struct work_struct *unused)
{
for (;;) {
u8 leds;
@@ -61,7 +61,7 @@
}
}
-static DECLARE_WORK(work, tps_work, NULL);
+static DECLARE_WORK(work, tps_work);
#ifdef CONFIG_OMAP_OSK_MISTRAL
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 26a95a6..3b1ad1d 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -206,7 +206,8 @@
cancel_delayed_work(&irda_config->gpio_expa);
PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
- schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+ schedule_delayed_work(&irda_config->gpio_expa, 0);
return 0;
}
diff --git a/arch/arm/mach-pxa/akita-ioexp.c b/arch/arm/mach-pxa/akita-ioexp.c
index 1b39874..12d2fe0 100644
--- a/arch/arm/mach-pxa/akita-ioexp.c
+++ b/arch/arm/mach-pxa/akita-ioexp.c
@@ -36,11 +36,11 @@
static int max7310_write(struct i2c_client *client, int address, int data);
static struct i2c_client max7310_template;
-static void akita_ioexp_work(void *private_);
+static void akita_ioexp_work(struct work_struct *private_);
static struct device *akita_ioexp_device;
static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
-DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
+DECLARE_WORK(akita_ioexp, akita_ioexp_work);
/*
@@ -158,7 +158,7 @@
EXPORT_SYMBOL(akita_set_ioexp);
EXPORT_SYMBOL(akita_reset_ioexp);
-static void akita_ioexp_work(void *private_)
+static void akita_ioexp_work(struct work_struct *private_)
{
if (akita_ioexp_device)
max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 1f9153a..6b5d351 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -51,10 +51,10 @@
}
}
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
{
on_each_cpu(mce_checkregs, NULL, 1, 1);
schedule_delayed_work(&mce_work, MCE_RATE);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 4bb8b77..02a9b66 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1049,13 +1049,15 @@
struct warm_boot_cpu_info {
struct completion *complete;
+ struct work_struct task;
int apicid;
int cpu;
};
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
{
- struct warm_boot_cpu_info *info = p;
+ struct warm_boot_cpu_info *info =
+ container_of(work, struct warm_boot_cpu_info, task);
do_boot_cpu(info->apicid, info->cpu);
complete(info->complete);
}
@@ -1064,7 +1066,6 @@
{
DECLARE_COMPLETION_ONSTACK(done);
struct warm_boot_cpu_info info;
- struct work_struct task;
int apicid, ret;
struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
@@ -1089,7 +1090,7 @@
info.complete = &done;
info.apicid = apicid;
info.cpu = cpu;
- INIT_WORK(&task, do_warm_boot_cpu, &info);
+ INIT_WORK(&info.task, do_warm_boot_cpu);
tsc_sync_disabled = 1;
@@ -1097,7 +1098,7 @@
clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
KERNEL_PGD_PTRS);
flush_tlb_all();
- schedule_work(&task);
+ schedule_work(&info.task);
wait_for_completion(&done);
tsc_sync_disabled = 0;
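The warm-boot conversion above shows the on-stack variant of the same pattern: the work_struct is embedded in the argument block so the handler can find its arguments with container_of(), and a completion keeps the stack frame alive until the work has run. A sketch with hypothetical names, not part of the patch:

/* Sketch only: on-stack work item plus completion. */
#include <linux/completion.h>
#include <linux/workqueue.h>

struct boot_info {
	struct work_struct task;
	struct completion done;
	int cpu;
};

static void do_boot(struct work_struct *work)
{
	struct boot_info *info = container_of(work, struct boot_info, task);

	/* ...boot info->cpu... */
	complete(&info->done);
}

static void boot_one_cpu(int cpu)
{
	struct boot_info info = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(info.done),
	};

	INIT_WORK(&info.task, do_boot);
	schedule_work(&info.task);
	wait_for_completion(&info.done);	/* frame stays valid until the work ran */
}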
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index fbc9582..9810c8c 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -217,7 +217,7 @@
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
{
unsigned int cpu;
@@ -306,7 +306,7 @@
{
int ret;
- INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+ INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (!ret)
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index caab986..b62f0c4 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -209,7 +209,7 @@
}
#endif
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
{
printk(KERN_ERR "simserial: do_softint called\n");
}
@@ -698,7 +698,7 @@
info->flags = sstate->flags;
info->xmit_fifo_size = sstate->xmit_fifo_size;
info->line = line;
- INIT_WORK(&info->work, do_softint, info);
+ INIT_WORK(&info->work, do_softint);
info->state = sstate;
if (sstate->info) {
kfree(info);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a9..6bedd97 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -678,7 +678,7 @@
* disable the cmc interrupt vector.
*/
static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}
@@ -690,7 +690,7 @@
* enable the cmc interrupt vector.
*/
static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}
@@ -1247,8 +1247,8 @@
monarch_cpu = -1;
}
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
/*
* ia64_mca_cmc_int_handler
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f56..b21ddec 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@
}
struct create_idle {
+ struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
{
- struct create_idle *c_idle = _c_idle;
+ struct create_idle *c_idle =
+ container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
@@ -482,10 +484,10 @@
{
int timeout;
struct create_idle c_idle = {
+ .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
.cpu = cpu,
.done = COMPLETION_INITIALIZER(c_idle.done),
};
- DECLARE_WORK(work, do_fork_idle, &c_idle);
c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) {
@@ -497,9 +499,9 @@
* We can't use kernel_thread since we must avoid to reschedule the child.
*/
if (!keventd_up() || current_is_keventd())
- work.func(work.data);
+ c_idle.work.func(&c_idle.work);
else {
- schedule_work(&work);
+ schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
}
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index f06a144..2c82412 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -319,7 +319,7 @@
static int channel_open = 0;
/* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
{
if (!channel_open) {
if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@
return;
}
- INIT_WORK(&work, sp_work, NULL);
+ INIT_WORK(&work, sp_work);
queue_work(workqueue, &work);
} else
queue_work(workqueue, &work);
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
index 31bcdae..0e83776 100644
--- a/arch/powerpc/platforms/embedded6xx/ls_uart.c
+++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
@@ -14,7 +14,7 @@
static struct work_struct wd_work;
-static void wd_stop(void *unused)
+static void wd_stop(struct work_struct *unused)
{
const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
int i = 0, rescue = 8;
@@ -122,7 +122,7 @@
ls_uart_init();
- INIT_WORK(&wd_work, wd_stop, NULL);
+ INIT_WORK(&wd_work, wd_stop);
schedule_work(&wd_work);
return 0;
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index afa593a..c3a8941 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -18,11 +18,11 @@
#define OLD_BACKLIGHT_MAX 15
-static void pmac_backlight_key_worker(void *data);
-static void pmac_backlight_set_legacy_worker(void *data);
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);
-static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
-static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
/* Although these variables are used in interrupt context, it makes no sense to
* protect them. No user is able to produce enough key events per second and
@@ -94,7 +94,7 @@
return level;
}
-static void pmac_backlight_key_worker(void *data)
+static void pmac_backlight_key_worker(struct work_struct *work)
{
if (atomic_read(&kernel_backlight_disabled))
return;
@@ -166,7 +166,7 @@
return error;
}
-static void pmac_backlight_set_legacy_worker(void *data)
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
{
if (atomic_read(&kernel_backlight_disabled))
return;
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 1370774..49037ed 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -37,8 +37,8 @@
/* EEH event workqueue setup. */
static DEFINE_SPINLOCK(eeh_eventlist_lock);
LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(void *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+static void eeh_thread_launcher(struct work_struct *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
/* Serialize reset sequences for a given pci device */
DEFINE_MUTEX(eeh_event_mutex);
@@ -103,7 +103,7 @@
* eeh_thread_launcher
* @dummy - unused
*/
-static void eeh_thread_launcher(void *dummy)
+static void eeh_thread_launcher(struct work_struct *dummy)
{
if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
printk(KERN_ERR "Failed to start EEH daemon\n");
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index 2e1943e..709952c 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -385,6 +385,7 @@
phy_info_t *phy;
struct work_struct phy_relink;
struct work_struct phy_display_config;
+ struct net_device *dev;
uint sequence_done;
@@ -1391,10 +1392,11 @@
NULL
};
-static void mii_display_status(void *data)
+static void mii_display_status(struct work_struct *work)
{
- struct net_device *dev = data;
- volatile struct fcc_enet_private *fep = dev->priv;
+ volatile struct fcc_enet_private *fep =
+ container_of(work, struct fcc_enet_private, phy_relink);
+ struct net_device *dev = fep->dev;
uint s = fep->phy_status;
if (!fep->link && !fep->old_link) {
@@ -1428,10 +1430,12 @@
printk(".\n");
}
-static void mii_display_config(void *data)
+static void mii_display_config(struct work_struct *work)
{
- struct net_device *dev = data;
- volatile struct fcc_enet_private *fep = dev->priv;
+ volatile struct fcc_enet_private *fep =
+ container_of(work, struct fcc_enet_private,
+ phy_display_config);
+ struct net_device *dev = fep->dev;
uint s = fep->phy_status;
printk("%s: config: auto-negotiation ", dev->name);
@@ -1758,8 +1762,9 @@
cep->phy_id_done = 0;
cep->phy_addr = fip->fc_phyaddr;
mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
- INIT_WORK(&cep->phy_relink, mii_display_status, dev);
- INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
+ INIT_WORK(&cep->phy_relink, mii_display_status);
+ INIT_WORK(&cep->phy_display_config, mii_display_config);
+ cep->dev = dev;
#endif /* CONFIG_USE_MDIO */
fip++;
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
index 2f9fa9e..e6c28fb 100644
--- a/arch/ppc/8xx_io/fec.c
+++ b/arch/ppc/8xx_io/fec.c
@@ -173,6 +173,7 @@
uint phy_speed;
phy_info_t *phy;
struct work_struct phy_task;
+ struct net_device *dev;
uint sequence_done;
@@ -1263,10 +1264,11 @@
printk(".\n");
}
-static void mii_display_config(void *priv)
+static void mii_display_config(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *)priv;
- struct fec_enet_private *fep = dev->priv;
+ struct fec_enet_private *fep =
+ container_of(work, struct fec_enet_private, phy_task);
+ struct net_device *dev = fep->dev;
volatile uint *s = &(fep->phy_status);
printk("%s: config: auto-negotiation ", dev->name);
@@ -1295,10 +1297,11 @@
fep->sequence_done = 1;
}
-static void mii_relink(void *priv)
+static void mii_relink(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *)priv;
- struct fec_enet_private *fep = dev->priv;
+ struct fec_enet_private *fep =
+ container_of(work, struct fec_enet_private, phy_task);
+ struct net_device *dev = fep->dev;
int duplex;
fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
@@ -1325,7 +1328,8 @@
{
struct fec_enet_private *fep = dev->priv;
- INIT_WORK(&fep->phy_task, mii_relink, (void *)dev);
+ fep->dev = dev;
+ INIT_WORK(&fep->phy_task, mii_relink);
schedule_work(&fep->phy_task);
}
@@ -1333,7 +1337,8 @@
{
struct fec_enet_private *fep = dev->priv;
- INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev);
+ fep->dev = dev;
+ INIT_WORK(&fep->phy_task, mii_display_config);
schedule_work(&fep->phy_task);
}
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index af1e8fc..67d5cf9 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -92,8 +92,8 @@
* Work queue
*/
static struct workqueue_struct *appldata_wq;
-static void appldata_work_fn(void *data);
-static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
+static void appldata_work_fn(struct work_struct *work);
+static DECLARE_WORK(appldata_work, appldata_work_fn);
/*
@@ -125,7 +125,7 @@
*
* call data gathering function for each (active) module
*/
-static void appldata_work_fn(void *data)
+static void appldata_work_fn(struct work_struct *work)
{
struct list_head *lh;
struct appldata_ops *ops;
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 3576b3c..7d4190e 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -638,7 +638,7 @@
return -1;
}
-void chan_interrupt(struct list_head *chans, struct work_struct *task,
+void chan_interrupt(struct list_head *chans, struct delayed_work *task,
struct tty_struct *tty, int irq)
{
struct list_head *ele, *next;
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 7b17216..96f0189 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -56,7 +56,7 @@
static LIST_HEAD(mc_requests);
-static void mc_work_proc(void *unused)
+static void mc_work_proc(struct work_struct *unused)
{
struct mconsole_entry *req;
unsigned long flags;
@@ -72,7 +72,7 @@
}
}
-static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
+static DECLARE_WORK(mconsole_work, mc_work_proc);
static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
{
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index ec9eb8b..286bc0b 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -99,6 +99,7 @@
* same device, since it tests for (dev->flags & IFF_UP). So
* there's no harm in delaying the device shutdown. */
schedule_work(&close_work);
+#error this is not permitted - close_work will go out of scope
goto out;
}
reactivate_fd(lp->fd, UM_ETH_IRQ);
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index ce9f373..6dfe632 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -132,7 +132,7 @@
DECLARE_MUTEX(ports_sem);
struct list_head ports = LIST_HEAD_INIT(ports);
-void port_work_proc(void *unused)
+void port_work_proc(struct work_struct *unused)
{
struct port_list *port;
struct list_head *ele;
@@ -150,7 +150,7 @@
local_irq_restore(flags);
}
-DECLARE_WORK(port_work, port_work_proc, NULL);
+DECLARE_WORK(port_work, port_work_proc);
static irqreturn_t port_interrupt(int irq, void *data)
{
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index bbea888..c7587fc 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -306,8 +306,8 @@
*/
static int check_interval = 5 * 60; /* 5 minutes */
-static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static void mcheck_timer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
static void mcheck_check_cpu(void *info)
{
@@ -315,7 +315,7 @@
do_machine_check(NULL, 0);
}
-static void mcheck_timer(void *data)
+static void mcheck_timer(struct work_struct *work)
{
on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
schedule_delayed_work(&mcheck_work, check_interval * HZ);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 62c2e74..9800147 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -753,14 +753,16 @@
}
struct create_idle {
+ struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
-void do_fork_idle(void *_c_idle)
+void do_fork_idle(struct work_struct *work)
{
- struct create_idle *c_idle = _c_idle;
+ struct create_idle *c_idle =
+ container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
@@ -775,10 +777,10 @@
int timeout;
unsigned long start_rip;
struct create_idle c_idle = {
+ .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
.cpu = cpu,
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
- DECLARE_WORK(work, do_fork_idle, &c_idle);
/* allocate memory for gdts of secondary cpus. Hotplug is considered */
if (!cpu_gdt_descr[cpu].address &&
@@ -825,9 +827,9 @@
* thread.
*/
if (!keventd_up() || current_is_keventd())
- work.func(work.data);
+ c_idle.work.func(&c_idle.work);
else {
- schedule_work(&work);
+ schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
}
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index e3ef544..9f05bc9 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -563,7 +563,7 @@
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *v)
{
unsigned int cpu;
for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@
static int __init cpufreq_tsc(void)
{
- INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+ INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER))
cpufreq_init = 1;
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 0024211..5934c4b 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1274,9 +1274,10 @@
*
* FIXME! dispatch queue is not a queue at all!
*/
-static void as_work_handler(void *data)
+static void as_work_handler(struct work_struct *work)
{
- struct request_queue *q = data;
+ struct as_data *ad = container_of(work, struct as_data, antic_work);
+ struct request_queue *q = ad->q;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@@ -1332,7 +1333,7 @@
ad->antic_timer.function = as_antic_timeout;
ad->antic_timer.data = (unsigned long)q;
init_timer(&ad->antic_timer);
- INIT_WORK(&ad->antic_work, as_work_handler, q);
+ INIT_WORK(&ad->antic_work, as_work_handler);
INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e9019ed..84e9be0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1840,9 +1840,11 @@
return 1;
}
-static void cfq_kick_queue(void *data)
+static void cfq_kick_queue(struct work_struct *work)
{
- request_queue_t *q = data;
+ struct cfq_data *cfqd =
+ container_of(work, struct cfq_data, unplug_work);
+ request_queue_t *q = cfqd->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@@ -1986,7 +1988,7 @@
cfqd->idle_class_timer.function = cfq_idle_class_timer;
cfqd->idle_class_timer.data = (unsigned long) cfqd;
- INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0f82e12..cc6e95f 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -34,7 +34,7 @@
*/
#include <scsi/scsi_cmnd.h>
-static void blk_unplug_work(void *data);
+static void blk_unplug_work(struct work_struct *work);
static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
@@ -227,7 +227,7 @@
if (q->unplug_delay == 0)
q->unplug_delay = 1;
- INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+ INIT_WORK(&q->unplug_work, blk_unplug_work);
q->unplug_timer.function = blk_unplug_timeout;
q->unplug_timer.data = (unsigned long)q;
@@ -1631,9 +1631,9 @@
}
}
-static void blk_unplug_work(void *data)
+static void blk_unplug_work(struct work_struct *work)
{
- request_queue_t *q = data;
+ request_queue_t *q = container_of(work, request_queue_t, unplug_work);
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c
index 9b5b156..2ebffb8 100644
--- a/crypto/cryptomgr.c
+++ b/crypto/cryptomgr.c
@@ -40,9 +40,10 @@
char template[CRYPTO_MAX_ALG_NAME];
};
-static void cryptomgr_probe(void *data)
+static void cryptomgr_probe(struct work_struct *work)
{
- struct cryptomgr_param *param = data;
+ struct cryptomgr_param *param =
+ container_of(work, struct cryptomgr_param, work);
struct crypto_template *tmpl;
struct crypto_instance *inst;
int err;
@@ -112,7 +113,7 @@
param->larval.type = larval->alg.cra_flags;
param->larval.mask = larval->mask;
- INIT_WORK(&param->work, cryptomgr_probe, param);
+ INIT_WORK(&param->work, cryptomgr_probe);
schedule_work(&param->work);
return NOTIFY_STOP;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 068fe4f..02b30ae 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -50,6 +50,7 @@
struct acpi_os_dpc {
acpi_osd_exec_callback function;
void *context;
+ struct work_struct work;
};
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -564,12 +565,9 @@
acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}
-static void acpi_os_execute_deferred(void *context)
+static void acpi_os_execute_deferred(struct work_struct *work)
{
- struct acpi_os_dpc *dpc = NULL;
-
-
- dpc = (struct acpi_os_dpc *)context;
+ struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
if (!dpc) {
printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
return;
@@ -602,7 +600,6 @@
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
- struct work_struct *task;
ACPI_FUNCTION_TRACE("os_queue_for_execution");
@@ -615,28 +612,22 @@
/*
* Allocate/initialize DPC structure. Note that this memory will be
- * freed by the callee. The kernel handles the tq_struct list in a
+ * freed by the callee. The kernel handles the work_struct list in a
* way that allows us to also free its memory inside the callee.
* Because we may want to schedule several tasks with different
* parameters we can't use the approach some kernel code uses of
- * having a static tq_struct.
- * We can save time and code by allocating the DPC and tq_structs
- * from the same memory.
+ * having a static work_struct.
*/
- dpc =
- kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
- GFP_ATOMIC);
+ dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
if (!dpc)
return_ACPI_STATUS(AE_NO_MEMORY);
dpc->function = function;
dpc->context = context;
- task = (void *)(dpc + 1);
- INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
-
- if (!queue_work(kacpid_wq, task)) {
+ INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+ if (!queue_work(kacpid_wq, &dpc->work)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Call to queue_work() failed.\n"));
kfree(dpc);
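The ACPI change above also drops the old trick of allocating a separate work_struct immediately after the context structure; with the new API the work_struct is simply embedded in the allocation and the handler recovers the context with container_of(). A sketch with hypothetical names, illustrative only:

/* Sketch only: dynamically allocated context with embedded work_struct. */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_call {
	void (*function)(void *context);
	void *context;
	struct work_struct work;
};

static void run_deferred_call(struct work_struct *work)
{
	struct deferred_call *dc = container_of(work, struct deferred_call, work);

	dc->function(dc->context);
	kfree(dc);	/* the handler owns and frees the allocation */
}

static int queue_deferred_call(void (*fn)(void *), void *ctx)
{
	struct deferred_call *dc = kmalloc(sizeof(*dc), GFP_ATOMIC);

	if (!dc)
		return -ENOMEM;
	dc->function = fn;
	dc->context = ctx;
	INIT_WORK(&dc->work, run_deferred_call);
	schedule_work(&dc->work);
	return 0;
}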
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f8ec389..8816e30 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1081,7 +1081,7 @@
* ata_port_queue_task - Queue port_task
* @ap: The ata_port to queue port_task for
* @fn: workqueue function to be scheduled
- * @data: data value to pass to workqueue function
+ * @data: data for @fn to use
* @delay: delay time for workqueue function
*
* Schedule @fn(@data) for execution after @delay jiffies using
@@ -1096,7 +1096,7 @@
* LOCKING:
* Inherited from caller.
*/
-void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
+void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
unsigned long delay)
{
int rc;
@@ -1104,12 +1104,10 @@
if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
return;
- PREPARE_WORK(&ap->port_task, fn, data);
+ PREPARE_DELAYED_WORK(&ap->port_task, fn);
+ ap->port_task_data = data;
- if (!delay)
- rc = queue_work(ata_wq, &ap->port_task);
- else
- rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+ rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
/* rc == 0 means that another user is using port task */
WARN_ON(rc == 0);
@@ -4588,10 +4586,11 @@
return poll_next;
}
-static void ata_pio_task(void *_data)
+static void ata_pio_task(struct work_struct *work)
{
- struct ata_queued_cmd *qc = _data;
- struct ata_port *ap = qc->ap;
+ struct ata_port *ap =
+ container_of(work, struct ata_port, port_task.work);
+ struct ata_queued_cmd *qc = ap->port_task_data;
u8 status;
int poll_next;
@@ -5635,9 +5634,9 @@
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
- INIT_WORK(&ap->port_task, NULL, NULL);
- INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
- INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
+ INIT_DELAYED_WORK(&ap->port_task, NULL);
+ INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+ INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 76a85df..08ad44b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -332,7 +332,7 @@
if (ap->pflags & ATA_PFLAG_LOADING)
ap->pflags &= ~ATA_PFLAG_LOADING;
else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
- queue_work(ata_aux_wq, &ap->hotplug_task);
+ queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
if (ap->pflags & ATA_PFLAG_RECOVERED)
ata_port_printk(ap, KERN_INFO, "EH complete\n");
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 8eaace9..664e137 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2963,7 +2963,7 @@
/**
* ata_scsi_hotplug - SCSI part of hotplug
- * @data: Pointer to ATA port to perform SCSI hotplug on
+ * @work: Pointer to ATA port to perform SCSI hotplug on
*
* Perform SCSI part of hotplug. It's executed from a separate
* workqueue after EH completes. This is necessary because SCSI
@@ -2973,9 +2973,10 @@
* LOCKING:
* Kernel thread context (may sleep).
*/
-void ata_scsi_hotplug(void *data)
+void ata_scsi_hotplug(struct work_struct *work)
{
- struct ata_port *ap = data;
+ struct ata_port *ap =
+ container_of(work, struct ata_port, hotplug_task.work);
int i;
if (ap->pflags & ATA_PFLAG_UNLOADING) {
@@ -3076,7 +3077,7 @@
/**
* ata_scsi_dev_rescan - initiate scsi_rescan_device()
- * @data: Pointer to ATA port to perform scsi_rescan_device()
+ * @work: Pointer to ATA port to perform scsi_rescan_device()
*
* After ATA pass thru (SAT) commands are executed successfully,
* libata need to propagate the changes to SCSI layer. This
@@ -3086,9 +3087,10 @@
* LOCKING:
* Kernel thread context (may sleep).
*/
-void ata_scsi_dev_rescan(void *data)
+void ata_scsi_dev_rescan(struct work_struct *work)
{
- struct ata_port *ap = data;
+ struct ata_port *ap =
+ container_of(work, struct ata_port, scsi_rescan_task);
unsigned long flags;
unsigned int i;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 107b2b5..81ae41d 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -94,7 +94,7 @@
extern void ata_scsi_scan_host(struct ata_port *ap);
extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_hotplug(void *data);
+extern void ata_scsi_hotplug(struct work_struct *work);
extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
@@ -124,7 +124,7 @@
unsigned int (*actor) (struct ata_scsi_args *args,
u8 *rbuf, unsigned int buflen));
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
-extern void ata_scsi_dev_rescan(void *data);
+extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_bus_probe(struct ata_port *ap);
/* libata-eh.c */
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 87b17c3..f407861 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -135,7 +135,7 @@
int flags);
static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
char *page);
-static void idt77252_softint(void *dev_id);
+static void idt77252_softint(struct work_struct *work);
static struct atmdev_ops idt77252_ops =
@@ -2866,9 +2866,10 @@
}
static void
-idt77252_softint(void *dev_id)
+idt77252_softint(struct work_struct *work)
{
- struct idt77252_dev *card = dev_id;
+ struct idt77252_dev *card =
+ container_of(work, struct idt77252_dev, tqueue);
u32 stat;
int done;
@@ -3697,7 +3698,7 @@
card->pcidev = pcidev;
sprintf(card->name, "idt77252-%d", card->index);
- INIT_WORK(&card->tqueue, idt77252_softint, (void *)card);
+ INIT_WORK(&card->tqueue, idt77252_softint);
membase = pci_resource_start(pcidev, 1);
srambase = pci_resource_start(pcidev, 2);
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 6d11122..2308e83 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -159,7 +159,7 @@
void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
void aoecmd_ata_rsp(struct sk_buff *);
void aoecmd_cfg_rsp(struct sk_buff *);
-void aoecmd_sleepwork(void *vp);
+void aoecmd_sleepwork(struct work_struct *);
struct sk_buff *new_skb(ulong);
int aoedev_init(void);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8a13b1a..97f7f53 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -408,9 +408,9 @@
/* this function performs work that has been deferred until sleeping is OK
*/
void
-aoecmd_sleepwork(void *vp)
+aoecmd_sleepwork(struct work_struct *work)
{
- struct aoedev *d = (struct aoedev *) vp;
+ struct aoedev *d = container_of(work, struct aoedev, work);
if (d->flags & DEVFL_GDALLOC)
aoeblk_gdalloc(d);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 6125921..05a9719 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -88,7 +88,7 @@
kfree(d);
return NULL;
}
- INIT_WORK(&d->work, aoecmd_sleepwork, d);
+ INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
init_timer(&d->timer);
d->timer.data = (ulong) d;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e6d3a8..3f1b382 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -992,11 +992,11 @@
{
}
-static DECLARE_WORK(floppy_work, NULL, NULL);
+static DECLARE_WORK(floppy_work, NULL);
static void schedule_bh(void (*handler) (void))
{
- PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL);
+ PREPARE_WORK(&floppy_work, (work_func_t)handler);
schedule_work(&floppy_work);
}
@@ -1008,7 +1008,7 @@
spin_lock_irqsave(&floppy_lock, flags);
do_floppy = NULL;
- PREPARE_WORK(&floppy_work, (void *)empty, NULL);
+ PREPARE_WORK(&floppy_work, (work_func_t)empty);
del_timer(&fd_timer);
spin_unlock_irqrestore(&floppy_lock, flags);
}
@@ -1868,7 +1868,7 @@
printk("fdc_busy=%lu\n", fdc_busy);
if (do_floppy)
printk("do_floppy=%p\n", do_floppy);
- if (floppy_work.pending)
+ if (work_pending(&floppy_work))
printk("floppy_work.func=%p\n", floppy_work.func);
if (timer_pending(&fd_timer))
printk("fd_timer.function=%p\n", fd_timer.function);
@@ -4498,7 +4498,7 @@
printk("floppy timer still active:%s\n", timeout_message);
if (timer_pending(&fd_timer))
printk("auxiliary floppy timer still active\n");
- if (floppy_work.pending)
+ if (work_pending(&floppy_work))
printk("work still pending\n");
#endif
old_fdc = fdc;
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 40a11e5..9d9bff2 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -352,19 +352,19 @@
static void run_fsm(void);
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
-static DECLARE_WORK(fsm_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);
static void schedule_fsm(void)
{
if (!nice)
- schedule_work(&fsm_tq);
+ schedule_delayed_work(&fsm_tq, 0);
else
schedule_delayed_work(&fsm_tq, nice-1);
}
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
{
run_fsm();
}
diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h
index 932342d..bc37032 100644
--- a/drivers/block/paride/pseudo.h
+++ b/drivers/block/paride/pseudo.h
@@ -35,7 +35,7 @@
#include <linux/sched.h>
#include <linux/workqueue.h>
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
static void (* ps_continuation)(void);
static int (* ps_ready)(void);
@@ -45,7 +45,7 @@
static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
-static DECLARE_WORK(ps_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);
static void ps_set_intr(void (*continuation)(void),
int (*ready)(void),
@@ -63,14 +63,14 @@
if (!ps_tq_active) {
ps_tq_active = 1;
if (!ps_nice)
- schedule_work(&ps_tq);
+ schedule_delayed_work(&ps_tq, 0);
else
schedule_delayed_work(&ps_tq, ps_nice-1);
}
spin_unlock_irqrestore(&ps_spinlock,flags);
}
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
{
void (*con)(void);
unsigned long flags;
@@ -92,7 +92,7 @@
}
ps_tq_active = 1;
if (!ps_nice)
- schedule_work(&ps_tq);
+ schedule_delayed_work(&ps_tq, 0);
else
schedule_delayed_work(&ps_tq, ps_nice-1);
spin_unlock_irqrestore(&ps_spinlock,flags);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 47d6975..54509eb 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1244,9 +1244,10 @@
return IRQ_RETVAL(handled);
}
-static void carm_fsm_task (void *_data)
+static void carm_fsm_task (struct work_struct *work)
{
- struct carm_host *host = _data;
+ struct carm_host *host =
+ container_of(work, struct carm_host, fsm_task);
unsigned long flags;
unsigned int state;
int rc, i, next_dev;
@@ -1619,7 +1620,7 @@
host->pdev = pdev;
host->flags = pci_dac ? FL_DAC : 0;
spin_lock_init(&host->lock);
- INIT_WORK(&host->fsm_task, carm_fsm_task, host);
+ INIT_WORK(&host->fsm_task, carm_fsm_task);
init_completion(&host->probe_comp);
for (i = 0; i < ARRAY_SIZE(host->req); i++)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 0d5c73f..2098eff 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -376,7 +376,7 @@
int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
-static void ub_reset_task(void *arg);
+static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
struct ub_capacity *ret);
@@ -1558,9 +1558,9 @@
schedule_work(&sc->reset_work);
}
-static void ub_reset_task(void *arg)
+static void ub_reset_task(struct work_struct *work)
{
- struct ub_dev *sc = arg;
+ struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
unsigned long flags;
struct list_head *p;
struct ub_lun *lun;
@@ -2179,7 +2179,7 @@
usb_init_urb(&sc->work_urb);
tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
atomic_set(&sc->poison, 0);
- INIT_WORK(&sc->reset_work, ub_reset_task, sc);
+ INIT_WORK(&sc->reset_work, ub_reset_task);
init_waitqueue_head(&sc->reset_wait);
init_timer(&sc->work_timer);
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 5167517..9256985 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -157,9 +157,10 @@
}
}
-static void bcm203x_work(void *user_data)
+static void bcm203x_work(struct work_struct *work)
{
- struct bcm203x_data *data = user_data;
+ struct bcm203x_data *data =
+ container_of(work, struct bcm203x_data, work);
if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
BT_ERR("Can't submit URB");
@@ -246,7 +247,7 @@
release_firmware(firmware);
- INIT_WORK(&data->work, bcm203x_work, (void *) data);
+ INIT_WORK(&data->work, bcm203x_work);
usb_set_intfdata(intf, data);
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index e608dad..acb2de5 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -926,9 +926,10 @@
* had to poll every port to see if that port needed servicing.
*/
static void
-do_softint(void *private_)
+do_softint(struct work_struct *work)
{
- struct cyclades_port *info = (struct cyclades_port *) private_;
+ struct cyclades_port *info =
+ container_of(work, struct cyclades_port, tqueue);
struct tty_struct *tty;
tty = info->tty;
@@ -5328,7 +5329,7 @@
info->blocked_open = 0;
info->default_threshold = 0;
info->default_timeout = 0;
- INIT_WORK(&info->tqueue, do_softint, info);
+ INIT_WORK(&info->tqueue, do_softint);
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
init_waitqueue_head(&info->shutdown_wait);
@@ -5403,7 +5404,7 @@
info->blocked_open = 0;
info->default_threshold = 0;
info->default_timeout = 0;
- INIT_WORK(&info->tqueue, do_softint, info);
+ INIT_WORK(&info->tqueue, do_softint);
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
init_waitqueue_head(&info->shutdown_wait);
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 60c1695..806f9ce 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -500,9 +500,9 @@
static void
-via_dmablit_workqueue(void *data)
+via_dmablit_workqueue(struct work_struct *work)
{
- drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+ drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
drm_device_t *dev = blitq->dev;
unsigned long irqsave;
drm_via_sg_info_t *cur_sg;
@@ -571,7 +571,7 @@
DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
}
DRM_INIT_WAITQUEUE(&blitq->busy_queue);
- INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
+ INIT_WORK(&blitq->wq, via_dmablit_workqueue);
init_timer(&blitq->poll_timer);
blitq->poll_timer.function = &via_dmablit_timer;
blitq->poll_timer.data = (unsigned long) blitq;
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 706733c..7c71eb7 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -200,7 +200,7 @@
static int info_ioctl(struct tty_struct *, struct file *,
unsigned int, unsigned long);
static void pc_set_termios(struct tty_struct *, struct termios *);
-static void do_softint(void *);
+static void do_softint(struct work_struct *work);
static void pc_stop(struct tty_struct *);
static void pc_start(struct tty_struct *);
static void pc_throttle(struct tty_struct * tty);
@@ -1505,7 +1505,7 @@
ch->brdchan = bc;
ch->mailbox = gd;
- INIT_WORK(&ch->tqueue, do_softint, ch);
+ INIT_WORK(&ch->tqueue, do_softint);
ch->board = &boards[crd];
spin_lock_irqsave(&epca_lock, flags);
@@ -2566,9 +2566,9 @@
/* --------------------- Begin do_softint ----------------------- */
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
{ /* Begin do_softint */
- struct channel *ch = (struct channel *) private_;
+ struct channel *ch = container_of(work, struct channel, tqueue);
/* Called in response to a modem change event */
if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */
struct tty_struct *tty = ch->tty;
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index 15a4ea8..93b5519 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -723,9 +723,10 @@
* -------------------------------------------------------------------
*/
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
{
- struct esp_struct *info = (struct esp_struct *) private_;
+ struct esp_struct *info =
+ container_of(work, struct esp_struct, tqueue);
struct tty_struct *tty;
tty = info->tty;
@@ -746,9 +747,10 @@
* do_serial_hangup() -> tty->hangup() -> esp_hangup()
*
*/
-static void do_serial_hangup(void *private_)
+static void do_serial_hangup(struct work_struct *work)
{
- struct esp_struct *info = (struct esp_struct *) private_;
+ struct esp_struct *info =
+ container_of(work, struct esp_struct, tqueue_hangup);
struct tty_struct *tty;
tty = info->tty;
@@ -2501,8 +2503,8 @@
info->magic = ESP_MAGIC;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
- INIT_WORK(&info->tqueue, do_softint, info);
- INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+ INIT_WORK(&info->tqueue, do_softint);
+ INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
info->config.rx_timeout = rx_timeout;
info->config.flow_on = flow_on;
info->config.flow_off = flow_off;
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 817dc40..23b25ad 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -102,7 +102,7 @@
* Routine to poll RTC seconds field for change as often as possible,
* after first RTC_UIE use timer to reduce polling
*/
-static void genrtc_troutine(void *data)
+static void genrtc_troutine(struct work_struct *work)
{
unsigned int tmp = get_rtc_ss();
@@ -255,7 +255,7 @@
irq_active = 1;
stop_rtc_timers = 0;
lostint = 0;
- INIT_WORK(&genrtc_task, genrtc_troutine, NULL);
+ INIT_WORK(&genrtc_task, genrtc_troutine);
oldsecs = get_rtc_ss();
init_timer(&timer_task);
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 2cf63e7..82a41d5 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -69,7 +69,7 @@
#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
struct hvsi_struct {
- struct work_struct writer;
+ struct delayed_work writer;
struct work_struct handshaker;
wait_queue_head_t emptyq; /* woken when outbuf is emptied */
wait_queue_head_t stateq; /* woken when HVSI state changes */
@@ -744,9 +744,10 @@
return 0;
}
-static void hvsi_handshaker(void *arg)
+static void hvsi_handshaker(struct work_struct *work)
{
- struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+ struct hvsi_struct *hp =
+ container_of(work, struct hvsi_struct, handshaker);
if (hvsi_handshake(hp) >= 0)
return;
@@ -951,9 +952,10 @@
}
/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
-static void hvsi_write_worker(void *arg)
+static void hvsi_write_worker(struct work_struct *work)
{
- struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+ struct hvsi_struct *hp =
+ container_of(work, struct hvsi_struct, writer.work);
unsigned long flags;
#ifdef DEBUG
static long start_j = 0;
@@ -1287,8 +1289,8 @@
}
hp = &hvsi_ports[hvsi_count];
- INIT_WORK(&hp->writer, hvsi_write_worker, hp);
- INIT_WORK(&hp->handshaker, hvsi_handshaker, hp);
+ INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
+ INIT_WORK(&hp->handshaker, hvsi_handshaker);
init_waitqueue_head(&hp->emptyq);
init_waitqueue_head(&hp->stateq);
spin_lock_init(&hp->lock);
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index 54d93f0..c213fdbdb 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -84,8 +84,8 @@
static void serviceOutgoingFifo(i2eBordStrPtr);
// Functions defined in ip2.c as part of interrupt handling
-static void do_input(void *);
-static void do_status(void *);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
//***************
//* Debug Data *
@@ -331,8 +331,8 @@
pCh->ClosingWaitTime = 30*HZ;
// Initialize task queue objects
- INIT_WORK(&pCh->tqueue_input, do_input, pCh);
- INIT_WORK(&pCh->tqueue_status, do_status, pCh);
+ INIT_WORK(&pCh->tqueue_input, do_input);
+ INIT_WORK(&pCh->tqueue_status, do_status);
#ifdef IP2DEBUG_TRACE
pCh->trace = ip2trace;
@@ -1573,7 +1573,7 @@
#ifdef USE_IQ
schedule_work(&pCh->tqueue_input);
#else
- do_input(pCh);
+ do_input(&pCh->tqueue_input);
#endif
// Note we do not need to maintain any flow-control credits at this
@@ -1810,7 +1810,7 @@
#ifdef USE_IQ
schedule_work(&pCh->tqueue_status);
#else
- do_status(pCh);
+ do_status(&pCh->tqueue_status);
#endif
}
}
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index a3f32d4..cda2459 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -189,12 +189,12 @@
unsigned int set, unsigned int clear);
static void set_irq(int, int);
-static void ip2_interrupt_bh(i2eBordStrPtr pB);
+static void ip2_interrupt_bh(struct work_struct *work);
static irqreturn_t ip2_interrupt(int irq, void *dev_id);
static void ip2_poll(unsigned long arg);
static inline void service_all_boards(void);
-static void do_input(void *p);
-static void do_status(void *p);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
static void ip2_wait_until_sent(PTTY,int);
@@ -918,7 +918,7 @@
pCh++;
}
ex_exit:
- INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB);
+ INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh);
return;
err_release_region:
@@ -1125,8 +1125,8 @@
/******************************************************************************/
-/* Function: ip2_interrupt_bh(pB) */
-/* Parameters: pB - pointer to the board structure */
+/* Function: ip2_interrupt_bh(work) */
+/* Parameters: work - pointer to the board structure */
/* Returns: Nothing */
/* */
/* Description: */
@@ -1135,8 +1135,9 @@
/* */
/******************************************************************************/
static void
-ip2_interrupt_bh(i2eBordStrPtr pB)
+ip2_interrupt_bh(struct work_struct *work)
{
+ i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt);
// pB better well be set or we have a problem! We can only get
// here from the IMMEDIATE queue. Here, we process the boards.
// Checking pB doesn't cost much and it saves us from the sanity checkers.
@@ -1245,9 +1246,9 @@
ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
}
-static void do_input(void *p)
+static void do_input(struct work_struct *work)
{
- i2ChanStrPtr pCh = p;
+ i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input);
unsigned long flags;
ip2trace(CHANN, ITRC_INPUT, 21, 0 );
@@ -1279,9 +1280,9 @@
}
}
-static void do_status(void *p)
+static void do_status(struct work_struct *work)
{
- i2ChanStrPtr pCh = p;
+ i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status);
int status;
status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) );
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 58c955e..1637c1d 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -530,9 +530,9 @@
/* Interrupt handlers */
-static void isicom_bottomhalf(void *data)
+static void isicom_bottomhalf(struct work_struct *work)
{
- struct isi_port *port = (struct isi_port *) data;
+ struct isi_port *port = container_of(work, struct isi_port, bh_tqueue);
struct tty_struct *tty = port->tty;
if (!tty)
@@ -1474,9 +1474,9 @@
}
/* hangup et all */
-static void do_isicom_hangup(void *data)
+static void do_isicom_hangup(struct work_struct *work)
{
- struct isi_port *port = data;
+ struct isi_port *port = container_of(work, struct isi_port, hangup_tq);
struct tty_struct *tty;
tty = port->tty;
@@ -1966,8 +1966,8 @@
port->channel = channel;
port->close_delay = 50 * HZ/100;
port->closing_wait = 3000 * HZ/100;
- INIT_WORK(&port->hangup_tq, do_isicom_hangup, port);
- INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port);
+ INIT_WORK(&port->hangup_tq, do_isicom_hangup);
+ INIT_WORK(&port->bh_tqueue, isicom_bottomhalf);
port->status = 0;
init_waitqueue_head(&port->open_wait);
init_waitqueue_head(&port->close_wait);
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 96cb1f0..2d025a9 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -222,7 +222,7 @@
/*
* static functions:
*/
-static void do_moxa_softint(void *);
+static void do_moxa_softint(struct work_struct *);
static int moxa_open(struct tty_struct *, struct file *);
static void moxa_close(struct tty_struct *, struct file *);
static int moxa_write(struct tty_struct *, const unsigned char *, int);
@@ -363,7 +363,7 @@
for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
ch->type = PORT_16550A;
ch->port = i;
- INIT_WORK(&ch->tqueue, do_moxa_softint, ch);
+ INIT_WORK(&ch->tqueue, do_moxa_softint);
ch->tty = NULL;
ch->close_delay = 5 * HZ / 10;
ch->closing_wait = 30 * HZ;
@@ -509,9 +509,9 @@
module_init(moxa_init);
module_exit(moxa_exit);
-static void do_moxa_softint(void *private_)
+static void do_moxa_softint(struct work_struct *work)
{
- struct moxa_str *ch = (struct moxa_str *) private_;
+ struct moxa_str *ch = container_of(work, struct moxa_str, tqueue);
struct tty_struct *tty;
if (ch && (tty = ch->tty)) {
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 048d911..5ed2486 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -389,7 +389,7 @@
/* static void mxser_poll(unsigned long); */
static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *);
-static void mxser_do_softint(void *);
+static void mxser_do_softint(struct work_struct *);
static int mxser_open(struct tty_struct *, struct file *);
static void mxser_close(struct tty_struct *, struct file *);
static int mxser_write(struct tty_struct *, const unsigned char *, int);
@@ -590,7 +590,7 @@
info->custom_divisor = hwconf->baud_base[i] * 16;
info->close_delay = 5 * HZ / 10;
info->closing_wait = 30 * HZ;
- INIT_WORK(&info->tqueue, mxser_do_softint, info);
+ INIT_WORK(&info->tqueue, mxser_do_softint);
info->normal_termios = mxvar_sdriver->init_termios;
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
@@ -917,9 +917,10 @@
return 0;
}
-static void mxser_do_softint(void *private_)
+static void mxser_do_softint(struct work_struct *work)
{
- struct mxser_struct *info = private_;
+ struct mxser_struct *info =
+ container_of(work, struct mxser_struct, tqueue);
struct tty_struct *tty;
tty = info->tty;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index f9f7250..1bd1229 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -421,7 +421,7 @@
/*
* Bottom half interrupt handlers
*/
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
static void bh_transmit(MGSLPC_INFO *info);
static void bh_status(MGSLPC_INFO *info);
@@ -547,7 +547,7 @@
memset(info, 0, sizeof(MGSLPC_INFO));
info->magic = MGSLPC_MAGIC;
- INIT_WORK(&info->task, bh_handler, info);
+ INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
@@ -835,9 +835,9 @@
return rc;
}
-static void bh_handler(void* Context)
+static void bh_handler(struct work_struct *work)
{
- MGSLPC_INFO *info = (MGSLPC_INFO*)Context;
+ MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
int action;
if (!info)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d40df30..4c6782a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1422,9 +1422,9 @@
static unsigned int ip_cnt;
-static void rekey_seq_generator(void *private_);
+static void rekey_seq_generator(struct work_struct *work);
-static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
/*
* Lock avoidance:
@@ -1438,7 +1438,7 @@
* happen, and even if that happens only a not perfectly compliant
* ISN is generated, nothing fatal.
*/
-static void rekey_seq_generator(void *private_)
+static void rekey_seq_generator(struct work_struct *work)
{
struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index c084149..fc87070 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -765,7 +765,7 @@
sonypi_device.bluetooth_power = state;
}
-static void input_keyrelease(void *data)
+static void input_keyrelease(struct work_struct *work)
{
struct sonypi_keypress kp;
@@ -1412,7 +1412,7 @@
goto err_inpdev_unregister;
}
- INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL);
+ INIT_WORK(&sonypi_device.input_work, input_keyrelease);
}
sonypi_enable(0);
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 7e1bd95..99137ab 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -2261,9 +2261,10 @@
* do_sx_hangup() -> tty->hangup() -> sx_hangup()
*
*/
-static void do_sx_hangup(void *private_)
+static void do_sx_hangup(struct work_struct *work)
{
- struct specialix_port *port = (struct specialix_port *) private_;
+ struct specialix_port *port =
+ container_of(work, struct specialix_port, tqueue_hangup);
struct tty_struct *tty;
func_enter();
@@ -2336,9 +2337,10 @@
}
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
{
- struct specialix_port *port = (struct specialix_port *) private_;
+ struct specialix_port *port =
+ container_of(work, struct specialix_port, tqueue);
struct tty_struct *tty;
func_enter();
@@ -2411,8 +2413,8 @@
memset(sx_port, 0, sizeof(sx_port));
for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
sx_port[i].magic = SPECIALIX_MAGIC;
- INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]);
- INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]);
+ INIT_WORK(&sx_port[i].tqueue, do_softint);
+ INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup);
sx_port[i].close_delay = 50 * HZ/100;
sx_port[i].closing_wait = 3000 * HZ/100;
init_waitqueue_head(&sx_port[i].open_wait);
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 06784ad..147c30d 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -802,7 +802,7 @@
/*
* Bottom half interrupt handlers
*/
-static void mgsl_bh_handler(void* Context);
+static void mgsl_bh_handler(struct work_struct *work);
static void mgsl_bh_receive(struct mgsl_struct *info);
static void mgsl_bh_transmit(struct mgsl_struct *info);
static void mgsl_bh_status(struct mgsl_struct *info);
@@ -1071,9 +1071,10 @@
/*
* Perform bottom half processing of work items queued by ISR.
*/
-static void mgsl_bh_handler(void* Context)
+static void mgsl_bh_handler(struct work_struct *work)
{
- struct mgsl_struct *info = (struct mgsl_struct*)Context;
+ struct mgsl_struct *info =
+ container_of(work, struct mgsl_struct, task);
int action;
if (!info)
@@ -4337,7 +4338,7 @@
} else {
memset(info, 0, sizeof(struct mgsl_struct));
info->magic = MGSL_MAGIC;
- INIT_WORK(&info->task, mgsl_bh_handler, info);
+ INIT_WORK(&info->task, mgsl_bh_handler);
info->max_frame_size = 4096;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index d4334c7..07f34d4 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -485,7 +485,7 @@
static void set_rate(struct slgt_info *info, u32 data_rate);
static int bh_action(struct slgt_info *info);
-static void bh_handler(void* context);
+static void bh_handler(struct work_struct *work);
static void bh_transmit(struct slgt_info *info);
static void isr_serial(struct slgt_info *info);
static void isr_rdma(struct slgt_info *info);
@@ -1878,9 +1878,9 @@
/*
* perform bottom half processing
*/
-static void bh_handler(void* context)
+static void bh_handler(struct work_struct *work)
{
- struct slgt_info *info = context;
+ struct slgt_info *info = container_of(work, struct slgt_info, task);
int action;
if (!info)
@@ -3326,7 +3326,7 @@
} else {
memset(info, 0, sizeof(struct slgt_info));
info->magic = MGSL_MAGIC;
- INIT_WORK(&info->task, bh_handler, info);
+ INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
info->raw_rx_size = DMABUFSIZE;
info->close_delay = 5*HZ/10;
@@ -4799,6 +4799,6 @@
spin_lock_irqsave(&info->lock, flags);
info->pending_bh |= BH_RECEIVE;
spin_unlock_irqrestore(&info->lock, flags);
- bh_handler(info);
+ bh_handler(&info->task);
}
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 3e932b6..13a5724 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -602,7 +602,7 @@
static void set_rate(SLMP_INFO *info, u32 data_rate);
static int bh_action(SLMP_INFO *info);
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
static void bh_receive(SLMP_INFO *info);
static void bh_transmit(SLMP_INFO *info);
static void bh_status(SLMP_INFO *info);
@@ -2063,9 +2063,9 @@
/* Perform bottom half processing of work items queued by ISR.
*/
-void bh_handler(void* Context)
+void bh_handler(struct work_struct *work)
{
- SLMP_INFO *info = (SLMP_INFO*)Context;
+ SLMP_INFO *info = container_of(work, SLMP_INFO, task);
int action;
if (!info)
@@ -3805,7 +3805,7 @@
} else {
memset(info, 0, sizeof(SLMP_INFO));
info->magic = MGSL_MAGIC;
- INIT_WORK(&info->task, bh_handler, info);
+ INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5f49280..c64f5bc 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -219,13 +219,13 @@
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
-static void moom_callback(void *ignored)
+static void moom_callback(struct work_struct *ignored)
{
out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
GFP_KERNEL, 0);
}
-static DECLARE_WORK(moom_work, moom_callback, NULL);
+static DECLARE_WORK(moom_work, moom_callback);
static void sysrq_handle_moom(int key, struct tty_struct *tty)
{
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 6e1329d..774fa86 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -325,9 +325,9 @@
schedule_work(&chip->work);
}
-static void timeout_work(void *ptr)
+static void timeout_work(struct work_struct *work)
{
- struct tpm_chip *chip = ptr;
+ struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
down(&chip->buffer_mutex);
atomic_set(&chip->data_pending, 0);
@@ -1105,7 +1105,7 @@
init_MUTEX(&chip->tpm_mutex);
INIT_LIST_HEAD(&chip->list);
- INIT_WORK(&chip->work, timeout_work, chip);
+ INIT_WORK(&chip->work, timeout_work);
init_timer(&chip->user_read_timer);
chip->user_read_timer.function = user_reader_timeout;
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 50dc492..b3cfc8b 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1254,7 +1254,7 @@
/**
* do_tty_hangup - actual handler for hangup events
- * @data: tty device
+ * @work: tty device
*
* This can be called by the "eventd" kernel thread. That is process
* synchronous but doesn't hold any locks, so we need to make sure we
@@ -1274,9 +1274,10 @@
* tasklist_lock to walk task list for hangup event
*
*/
-static void do_tty_hangup(void *data)
+static void do_tty_hangup(struct work_struct *work)
{
- struct tty_struct *tty = (struct tty_struct *) data;
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, hangup_work);
struct file * cons_filp = NULL;
struct file *filp, *f = NULL;
struct task_struct *p;
@@ -1433,7 +1434,7 @@
printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
#endif
- do_tty_hangup((void *) tty);
+ do_tty_hangup(&tty->hangup_work);
}
EXPORT_SYMBOL(tty_vhangup);
@@ -3304,12 +3305,13 @@
* Nasty bug: do_SAK is being called in interrupt context. This can
* deadlock. We punt it up to process context. AKPM - 16Mar2001
*/
-static void __do_SAK(void *arg)
+static void __do_SAK(struct work_struct *work)
{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, SAK_work);
#ifdef TTY_SOFT_SAK
tty_hangup(tty);
#else
- struct tty_struct *tty = arg;
struct task_struct *g, *p;
int session;
int i;
@@ -3388,7 +3390,7 @@
{
if (!tty)
return;
- PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
+ PREPARE_WORK(&tty->SAK_work, __do_SAK);
schedule_work(&tty->SAK_work);
}
@@ -3396,7 +3398,7 @@
/**
* flush_to_ldisc
- * @private_: tty structure passed from work queue.
+ * @work: tty structure passed from work queue.
*
* This routine is called out of the software interrupt to flush data
* from the buffer chain to the line discipline.
@@ -3406,9 +3408,10 @@
* receive_buf method is single threaded for each tty instance.
*/
-static void flush_to_ldisc(void *private_)
+static void flush_to_ldisc(struct work_struct *work)
{
- struct tty_struct *tty = (struct tty_struct *) private_;
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, buf.work.work);
unsigned long flags;
struct tty_ldisc *disc;
struct tty_buffer *tbuf, *head;
@@ -3553,7 +3556,7 @@
spin_unlock_irqrestore(&tty->buf.lock, flags);
if (tty->low_latency)
- flush_to_ldisc((void *) tty);
+ flush_to_ldisc(&tty->buf.work.work);
else
schedule_delayed_work(&tty->buf.work, 1);
}
@@ -3580,17 +3583,17 @@
tty->overrun_time = jiffies;
tty->buf.head = tty->buf.tail = NULL;
tty_buffer_init(tty);
- INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+ INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
init_MUTEX(&tty->buf.pty_sem);
mutex_init(&tty->termios_mutex);
init_waitqueue_head(&tty->write_wait);
init_waitqueue_head(&tty->read_wait);
- INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
+ INIT_WORK(&tty->hangup_work, do_tty_hangup);
mutex_init(&tty->atomic_read_lock);
mutex_init(&tty->atomic_write_lock);
spin_lock_init(&tty->read_lock);
INIT_LIST_HEAD(&tty->tty_files);
- INIT_WORK(&tty->SAK_work, NULL, NULL);
+ INIT_WORK(&tty->SAK_work, NULL);
}
/*
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 87587b4..75ff028 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -155,7 +155,7 @@
static void set_vesa_blanking(char __user *p);
static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
-static void console_callback(void *ignored);
+static void console_callback(struct work_struct *ignored);
static void blank_screen_t(unsigned long dummy);
static void set_palette(struct vc_data *vc);
@@ -174,7 +174,7 @@
static int blankinterval = 10*60*HZ;
static int vesa_off_interval;
-static DECLARE_WORK(console_work, console_callback, NULL);
+static DECLARE_WORK(console_work, console_callback);
/*
* fg_console is the current virtual console,
@@ -2154,7 +2154,7 @@
* with other console code and prevention of re-entrancy is
* ensured with console_sem.
*/
-static void console_callback(void *ignored)
+static void console_callback(struct work_struct *ignored)
{
acquire_console_sem();
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 05f8ce2..b418b16 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,9 +31,11 @@
#include <linux/connector.h>
#include <linux/delay.h>
-void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(struct work_struct *work)
{
- struct cn_callback_data *d = data;
+ struct cn_callback_entry *cbq =
+ container_of(work, struct cn_callback_entry, work.work);
+ struct cn_callback_data *d = &cbq->data;
d->callback(d->callback_priv);
@@ -57,7 +59,7 @@
memcpy(&cbq->id.id, id, sizeof(struct cb_id));
cbq->data.callback = callback;
- INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
+ INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper);
return cbq;
}
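Where an item becomes a struct delayed_work (as with cn_callback_entry above), the handler still receives a struct work_struct *; it points at the work member inside the delayed_work, so container_of() goes through member.work. Sketch only, with made-up names:

#include <linux/workqueue.h>

struct foo_entry {
	struct delayed_work work;	/* delayed_work wraps a work_struct */
	void (*callback)(void *priv);
	void *priv;
};

static void foo_wrapper(struct work_struct *w)
{
	/* w is &entry->work.work, hence the two-level member path */
	struct foo_entry *entry = container_of(w, struct foo_entry, work.work);

	entry->callback(entry->priv);
}

static void foo_entry_init(struct foo_entry *entry)
{
	INIT_DELAYED_WORK(&entry->work, foo_wrapper);
	/* a delay of 0 stands in for the old schedule_work() */
	schedule_delayed_work(&entry->work, 0);
}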
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index b49bacf..5e7cd45 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -135,40 +135,39 @@
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
- if (likely(!test_bit(0, &__cbq->work.pending) &&
+ if (likely(!test_bit(WORK_STRUCT_PENDING,
+ &__cbq->work.work.management) &&
__cbq->data.ddata == NULL)) {
__cbq->data.callback_priv = msg;
__cbq->data.ddata = data;
__cbq->data.destruct_data = destruct_data;
- if (queue_work(dev->cbdev->cn_queue,
- &__cbq->work))
+ if (queue_delayed_work(
+ dev->cbdev->cn_queue,
+ &__cbq->work, 0))
err = 0;
} else {
- struct work_struct *w;
struct cn_callback_data *d;
- w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
- if (w) {
- d = (struct cn_callback_data *)(w+1);
-
+ __cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC);
+ if (__cbq) {
+ d = &__cbq->data;
d->callback_priv = msg;
d->callback = __cbq->data.callback;
d->ddata = data;
d->destruct_data = destruct_data;
- d->free = w;
+ d->free = __cbq;
- INIT_LIST_HEAD(&w->entry);
- w->pending = 0;
- w->func = &cn_queue_wrapper;
- w->data = d;
- init_timer(&w->timer);
+ INIT_DELAYED_WORK(&__cbq->work,
+ &cn_queue_wrapper);
- if (queue_work(dev->cbdev->cn_queue, w))
+ if (queue_delayed_work(
+ dev->cbdev->cn_queue,
+ &__cbq->work, 0))
err = 0;
else {
- kfree(w);
+ kfree(__cbq);
err = -EINVAL;
}
} else
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index dd0c262..7a7c6e6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -42,7 +42,7 @@
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
-static void handle_update(void *data);
+static void handle_update(struct work_struct *work);
/**
* Two notifier lists: the "policy" list is involved in the
@@ -665,7 +665,7 @@
mutex_init(&policy->lock);
mutex_lock(&policy->lock);
init_completion(&policy->kobj_unregister);
- INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
+ INIT_WORK(&policy->update, handle_update);
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
@@ -895,9 +895,11 @@
}
-static void handle_update(void *data)
+static void handle_update(struct work_struct *work)
{
- unsigned int cpu = (unsigned int)(long)data;
+ struct cpufreq_policy *policy =
+ container_of(work, struct cpufreq_policy, update);
+ unsigned int cpu = policy->cpu;
dprintk("handle_update for cpu %u called\n", cpu);
cpufreq_update_policy(cpu);
}
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c4c578d..5ef5ede 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -59,7 +59,7 @@
#define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
struct cpu_dbs_info_s {
struct cpufreq_policy *cur_policy;
@@ -82,7 +82,7 @@
* is recursive for the same process. -Venki
*/
static DEFINE_MUTEX (dbs_mutex);
-static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
struct dbs_tuners {
unsigned int sampling_rate;
@@ -420,7 +420,7 @@
}
}
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
{
int i;
lock_cpu_hotplug();
@@ -435,7 +435,6 @@
static inline void dbs_timer_init(void)
{
- INIT_WORK(&dbs_work, do_dbs_timer, NULL);
schedule_delayed_work(&dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
return;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bf8aa45..e1cc511 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,13 +47,17 @@
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
+
+/* Sampling types */
+enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
cputime64_t prev_cpu_idle;
cputime64_t prev_cpu_wall;
struct cpufreq_policy *cur_policy;
- struct work_struct work;
+ struct delayed_work work;
+ enum dbs_sample sample_type;
unsigned int enable;
struct cpufreq_frequency_table *freq_table;
unsigned int freq_lo;
@@ -407,30 +411,31 @@
}
}
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
{
unsigned int cpu = smp_processor_id();
struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ enum dbs_sample sample_type = dbs_info->sample_type;
/* We want all CPUs to do sampling nearly on same jiffy */
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+ /* Permit rescheduling of this work item */
+ work_release(work);
+
delay -= jiffies % delay;
if (!dbs_info->enable)
return;
/* Common NORMAL_SAMPLE setup */
- INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+ dbs_info->sample_type = DBS_NORMAL_SAMPLE;
if (!dbs_tuners_ins.powersave_bias ||
- (unsigned long) data == DBS_NORMAL_SAMPLE) {
+ sample_type == DBS_NORMAL_SAMPLE) {
lock_cpu_hotplug();
dbs_check_cpu(dbs_info);
unlock_cpu_hotplug();
if (dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
- INIT_WORK(&dbs_info->work, do_dbs_timer,
- (void *)DBS_SUB_SAMPLE);
+ dbs_info->sample_type = DBS_SUB_SAMPLE;
delay = dbs_info->freq_hi_jiffies;
}
} else {
@@ -449,7 +454,8 @@
delay -= jiffies % delay;
ondemand_powersave_bias_init();
- INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+ INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+ dbs_info->sample_type = DBS_NORMAL_SAMPLE;
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
}
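Statically declared items that previously mixed schedule_work() and schedule_delayed_work() become DECLARE_DELAYED_WORK throughout (rekey_work, dbs_work and friends); any per-invocation state now has to live in the owning object or at file scope, since there is no data argument left to carry it. A sketch under those assumptions, names illustrative only:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void foo_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(foo_poll_work, foo_poll);

/* file-scope state replaces the old void *data argument */
static unsigned long foo_interval = HZ;

static void foo_poll(struct work_struct *work)
{
	/* do the periodic work, then rearm ourselves */
	schedule_delayed_work(&foo_poll_work, foo_interval);
}

static void foo_start(void)
{
	/* immediate kick: delay 0 replaces the old schedule_work() */
	schedule_delayed_work(&foo_poll_work, 0);
}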
diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c
index 4630f19..15edf40 100644
--- a/drivers/i2c/chips/ds1374.c
+++ b/drivers/i2c/chips/ds1374.c
@@ -140,12 +140,14 @@
return t1;
}
-static void ds1374_set_work(void *arg)
+static ulong new_time;
+
+static void ds1374_set_work(struct work_struct *work)
{
ulong t1, t2;
int limit = 10; /* arbitrary retry limit */
- t1 = *(ulong *) arg;
+ t1 = new_time;
mutex_lock(&ds1374_mutex);
@@ -167,11 +169,9 @@
"can't confirm time set from rtc chip\n");
}
-static ulong new_time;
-
static struct workqueue_struct *ds1374_workqueue;
-static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time);
+static DECLARE_WORK(ds1374_work, ds1374_set_work);
int ds1374_set_rtc_time(ulong nowtime)
{
@@ -180,7 +180,7 @@
if (in_interrupt())
queue_work(ds1374_workqueue, &ds1374_work);
else
- ds1374_set_work(&new_time);
+ ds1374_set_work(NULL);
return 0;
}
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index d90a3a1..8f4378a 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -31,9 +31,10 @@
#include "config_roms.h"
-static void delayed_reset_bus(void * __reset_info)
+static void delayed_reset_bus(struct work_struct *work)
{
- struct hpsb_host *host = (struct hpsb_host*)__reset_info;
+ struct hpsb_host *host =
+ container_of(work, struct hpsb_host, delayed_reset.work);
int generation = host->csr.generation + 1;
/* The generation field rolls over to 2 rather than 0 per IEEE
@@ -145,7 +146,7 @@
atomic_set(&h->generation, 0);
- INIT_WORK(&h->delayed_reset, delayed_reset_bus, h);
+ INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus);
init_timer(&h->timeout);
h->timeout.data = (unsigned long) h;
@@ -234,7 +235,7 @@
* Config ROM in the near future. */
reset_delay = HZ;
- PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host);
+ PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus);
schedule_delayed_work(&host->delayed_reset, reset_delay);
return 0;
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index bc6dbfa..d553e38 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -62,7 +62,7 @@
struct class_device class_dev;
int update_config_rom;
- struct work_struct delayed_reset;
+ struct delayed_work delayed_reset;
unsigned int config_roms;
struct list_head addr_space;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 6986ac1..cd156d4 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -493,20 +493,25 @@
scsi_unblock_requests(scsi_id->scsi_host);
}
-static void sbp2util_write_orb_pointer(void *p)
+static void sbp2util_write_orb_pointer(struct work_struct *work)
{
+ struct scsi_id_instance_data *scsi_id =
+ container_of(work, struct scsi_id_instance_data,
+ protocol_work.work);
quadlet_t data[2];
- data[0] = ORB_SET_NODE_ID(
- ((struct scsi_id_instance_data *)p)->hi->host->node_id);
- data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma;
+ data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id);
+ data[1] = scsi_id->last_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
- sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8);
+ sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8);
}
-static void sbp2util_write_doorbell(void *p)
+static void sbp2util_write_doorbell(struct work_struct *work)
{
- sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4);
+ struct scsi_id_instance_data *scsi_id =
+ container_of(work, struct scsi_id_instance_data,
+ protocol_work.work);
+ sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
}
/*
@@ -843,7 +848,7 @@
INIT_LIST_HEAD(&scsi_id->scsi_list);
spin_lock_init(&scsi_id->sbp2_command_orb_lock);
atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
- INIT_WORK(&scsi_id->protocol_work, NULL, NULL);
+ INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL);
ud->device.driver_data = scsi_id;
@@ -2047,11 +2052,10 @@
* We do not accept new commands until the job is over.
*/
scsi_block_requests(scsi_id->scsi_host);
- PREPARE_WORK(&scsi_id->protocol_work,
+ PREPARE_DELAYED_WORK(&scsi_id->protocol_work,
last_orb ? sbp2util_write_doorbell:
- sbp2util_write_orb_pointer,
- scsi_id);
- schedule_work(&scsi_id->protocol_work);
+ sbp2util_write_orb_pointer);
+ schedule_delayed_work(&scsi_id->protocol_work, 0);
}
}
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index abbe48e..1b16d6b 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -348,7 +348,7 @@
unsigned workarounds;
atomic_t state;
- struct work_struct protocol_work;
+ struct delayed_work protocol_work;
};
/* For use in scsi_id_instance_data.state */
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 7767a11..af93979 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -55,11 +55,11 @@
int status;
};
-static void process_req(void *data);
+static void process_req(struct work_struct *work);
static DEFINE_MUTEX(lock);
static LIST_HEAD(req_list);
-static DECLARE_WORK(work, process_req, NULL);
+static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;
void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -215,7 +215,7 @@
return ret;
}
-static void process_req(void *data)
+static void process_req(struct work_struct *work)
{
struct addr_req *req, *temp_req;
struct sockaddr_in *src_in, *dst_in;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 20e9f64..98272fb 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -285,9 +285,10 @@
kfree(tprops);
}
-static void ib_cache_task(void *work_ptr)
+static void ib_cache_task(struct work_struct *_work)
{
- struct ib_update_work *work = work_ptr;
+ struct ib_update_work *work =
+ container_of(_work, struct ib_update_work, work);
ib_cache_update(work->device, work->port_num);
kfree(work);
@@ -306,7 +307,7 @@
event->event == IB_EVENT_CLIENT_REREGISTER) {
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (work) {
- INIT_WORK(&work->work, ib_cache_task, work);
+ INIT_WORK(&work->work, ib_cache_task);
work->device = event->device;
work->port_num = event->element.port_num;
schedule_work(&work->work);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e5dc453..79c937b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -101,7 +101,7 @@
};
struct cm_work {
- struct work_struct work;
+ struct delayed_work work;
struct list_head list;
struct cm_port *port;
struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
@@ -161,7 +161,7 @@
atomic_t work_count;
};
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
@@ -668,8 +668,7 @@
return ERR_PTR(-ENOMEM);
timewait_info->work.local_id = local_id;
- INIT_WORK(&timewait_info->work.work, cm_work_handler,
- &timewait_info->work);
+ INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
return timewait_info;
}
@@ -2995,9 +2994,9 @@
}
}
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
{
- struct cm_work *work = data;
+ struct cm_work *work = container_of(_work, struct cm_work, work.work);
int ret;
switch (work->cm_event.event) {
@@ -3087,12 +3086,12 @@
* we need to find the cm_id once we're in the context of the
* worker thread, rather than holding a reference on it.
*/
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->local_id = cm_id->local_id;
work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED;
- queue_work(cm.wq, &work->work);
+ queue_delayed_work(cm.wq, &work->work, 0);
out:
return ret;
}
@@ -3191,11 +3190,11 @@
return;
}
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = (struct cm_port *)mad_agent->context;
- queue_work(cm.wq, &work->work);
+ queue_delayed_work(cm.wq, &work->work, 0);
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index cf48f26..985a6b5 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1340,9 +1340,9 @@
return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
-static void cma_work_handler(void *data)
+static void cma_work_handler(struct work_struct *_work)
{
- struct cma_work *work = data;
+ struct cma_work *work = container_of(_work, struct cma_work, work);
struct rdma_id_private *id_priv = work->id;
int destroy = 0;
@@ -1373,7 +1373,7 @@
return -ENOMEM;
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1430,7 +1430,7 @@
return -ENOMEM;
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1583,7 +1583,7 @@
}
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ADDR_QUERY;
work->new_state = CMA_ADDR_RESOLVED;
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index cf797d7..1039ad5 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -828,9 +828,9 @@
* thread asleep on the destroy_comp list vs. an object destroyed
* here synchronously when the last reference is removed.
*/
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
{
- struct iwcm_work *work = arg;
+ struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
struct iw_cm_event levent;
struct iwcm_id_private *cm_id_priv = work->cm_id;
unsigned long flags;
@@ -900,7 +900,7 @@
goto out;
}
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_WORK(&work->work, cm_work_handler);
work->cm_id = cm_id_priv;
work->event = *iw_event;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 3f9c162..15f38d9 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -65,8 +65,8 @@
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void timeout_sends(void *data);
-static void local_completions(void *data);
+static void timeout_sends(struct work_struct *work);
+static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
struct ib_mad_agent_private *agent_priv,
u8 mgmt_class);
@@ -356,10 +356,9 @@
INIT_LIST_HEAD(&mad_agent_priv->wait_list);
INIT_LIST_HEAD(&mad_agent_priv->done_list);
INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
- INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+ INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
INIT_LIST_HEAD(&mad_agent_priv->local_list);
- INIT_WORK(&mad_agent_priv->local_work, local_completions,
- mad_agent_priv);
+ INIT_WORK(&mad_agent_priv->local_work, local_completions);
atomic_set(&mad_agent_priv->refcount, 1);
init_completion(&mad_agent_priv->comp);
@@ -2198,12 +2197,12 @@
/*
* IB MAD completion callback
*/
-static void ib_mad_completion_handler(void *data)
+static void ib_mad_completion_handler(struct work_struct *work)
{
struct ib_mad_port_private *port_priv;
struct ib_wc wc;
- port_priv = (struct ib_mad_port_private *)data;
+ port_priv = container_of(work, struct ib_mad_port_private, work);
ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2323,7 @@
}
EXPORT_SYMBOL(ib_cancel_mad);
-static void local_completions(void *data)
+static void local_completions(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_local_private *local;
@@ -2334,7 +2333,8 @@
struct ib_wc wc;
struct ib_mad_send_wc mad_send_wc;
- mad_agent_priv = (struct ib_mad_agent_private *)data;
+ mad_agent_priv =
+ container_of(work, struct ib_mad_agent_private, local_work);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2434,14 +2434,15 @@
return ret;
}
-static void timeout_sends(void *data)
+static void timeout_sends(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags, delay;
- mad_agent_priv = (struct ib_mad_agent_private *)data;
+ mad_agent_priv = container_of(work, struct ib_mad_agent_private,
+ timed_work.work);
mad_send_wc.vendor_err = 0;
spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2799,7 +2800,7 @@
ret = -ENOMEM;
goto error8;
}
- INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
+ INIT_WORK(&port_priv->work, ib_mad_completion_handler);
spin_lock_irqsave(&ib_mad_port_list_lock, flags);
list_add_tail(&port_priv->port_list, &ib_mad_port_list);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d06b590..d5548e7 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -102,7 +102,7 @@
struct list_head send_list;
struct list_head wait_list;
struct list_head done_list;
- struct work_struct timed_work;
+ struct delayed_work timed_work;
unsigned long timeout;
struct list_head local_list;
struct work_struct local_work;
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 1ef79d0..3663fd7 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -45,8 +45,8 @@
struct mad_rmpp_recv {
struct ib_mad_agent_private *agent;
struct list_head list;
- struct work_struct timeout_work;
- struct work_struct cleanup_work;
+ struct delayed_work timeout_work;
+ struct delayed_work cleanup_work;
struct completion comp;
enum rmpp_state state;
spinlock_t lock;
@@ -233,9 +233,10 @@
}
}
-static void recv_timeout_handler(void *data)
+static void recv_timeout_handler(struct work_struct *work)
{
- struct mad_rmpp_recv *rmpp_recv = data;
+ struct mad_rmpp_recv *rmpp_recv =
+ container_of(work, struct mad_rmpp_recv, timeout_work.work);
struct ib_mad_recv_wc *rmpp_wc;
unsigned long flags;
@@ -254,9 +255,10 @@
ib_free_recv_mad(rmpp_wc);
}
-static void recv_cleanup_handler(void *data)
+static void recv_cleanup_handler(struct work_struct *work)
{
- struct mad_rmpp_recv *rmpp_recv = data;
+ struct mad_rmpp_recv *rmpp_recv =
+ container_of(work, struct mad_rmpp_recv, cleanup_work.work);
unsigned long flags;
spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
@@ -285,8 +287,8 @@
rmpp_recv->agent = agent;
init_completion(&rmpp_recv->comp);
- INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
- INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
+ INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
+ INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
spin_lock_init(&rmpp_recv->lock);
rmpp_recv->state = RMPP_STATE_ACTIVE;
atomic_set(&rmpp_recv->refcount, 1);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1706d3c..e45afba 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -360,9 +360,10 @@
kfree(sm_ah);
}
-static void update_sm_ah(void *port_ptr)
+static void update_sm_ah(struct work_struct *work)
{
- struct ib_sa_port *port = port_ptr;
+ struct ib_sa_port *port =
+ container_of(work, struct ib_sa_port, update_task);
struct ib_sa_sm_ah *new_ah, *old_ah;
struct ib_port_attr port_attr;
struct ib_ah_attr ah_attr;
@@ -992,8 +993,7 @@
if (IS_ERR(sa_dev->port[i].agent))
goto err;
- INIT_WORK(&sa_dev->port[i].update_task,
- update_sm_ah, &sa_dev->port[i]);
+ INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
}
ib_set_client_data(device, &sa_client, sa_dev);
@@ -1010,7 +1010,7 @@
goto err;
for (i = 0; i <= e - s; ++i)
- update_sm_ah(&sa_dev->port[i]);
+ update_sm_ah(&sa_dev->port[i].update_task);
return;
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index efe147d..db12cc0 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -179,9 +179,10 @@
up_write(&current->mm->mmap_sem);
}
-static void ib_umem_account(void *work_ptr)
+static void ib_umem_account(struct work_struct *_work)
{
- struct ib_umem_account_work *work = work_ptr;
+ struct ib_umem_account_work *work =
+ container_of(_work, struct ib_umem_account_work, work);
down_write(&work->mm->mmap_sem);
work->mm->locked_vm -= work->diff;
@@ -216,7 +217,7 @@
return;
}
- INIT_WORK(&work->work, ib_umem_account, work);
+ INIT_WORK(&work->work, ib_umem_account);
work->mm = mm;
work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 413754b..8536aeb 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -214,9 +214,10 @@
unsigned long num_pages;
};
-static void user_pages_account(void *ptr)
+static void user_pages_account(struct work_struct *_work)
{
- struct ipath_user_pages_work *work = ptr;
+ struct ipath_user_pages_work *work =
+ container_of(_work, struct ipath_user_pages_work, work);
down_write(&work->mm->mmap_sem);
work->mm->locked_vm -= work->num_pages;
@@ -242,7 +243,7 @@
goto bail;
- INIT_WORK(&work->work, user_pages_account, work);
+ INIT_WORK(&work->work, user_pages_account);
work->mm = mm;
work->num_pages = num_pages;
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index cd044ea..e948158 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -57,7 +57,7 @@
module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
-static void catas_reset(void *work_ptr)
+static void catas_reset(struct work_struct *work)
{
struct mthca_dev *dev, *tmpdev;
LIST_HEAD(tlist);
@@ -203,7 +203,7 @@
int __init mthca_catas_init(void)
{
- INIT_WORK(&catas_work, catas_reset, NULL);
+ INIT_WORK(&catas_work, catas_reset);
catas_wq = create_singlethread_workqueue("mthca_catas");
if (!catas_wq)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index f2b6185..9954799 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -136,11 +136,11 @@
struct list_head multicast_list;
struct rb_root multicast_tree;
- struct work_struct pkey_task;
- struct work_struct mcast_task;
+ struct delayed_work pkey_task;
+ struct delayed_work mcast_task;
struct work_struct flush_task;
struct work_struct restart_task;
- struct work_struct ah_reap_task;
+ struct delayed_work ah_reap_task;
struct ib_device *ca;
u8 port;
@@ -254,13 +254,13 @@
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_ah *address, u32 qpn);
-void ipoib_reap_ah(void *dev_ptr);
+void ipoib_reap_ah(struct work_struct *work);
void ipoib_flush_paths(struct net_device *dev);
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_ib_dev_flush(void *dev);
+void ipoib_ib_dev_flush(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
int ipoib_ib_dev_open(struct net_device *dev);
@@ -271,10 +271,10 @@
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
void ipoib_dev_cleanup(struct net_device *dev);
-void ipoib_mcast_join_task(void *dev_ptr);
+void ipoib_mcast_join_task(struct work_struct *work);
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
-void ipoib_mcast_restart_task(void *dev_ptr);
+void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
@@ -312,7 +312,7 @@
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
-void ipoib_pkey_poll(void *dev);
+void ipoib_pkey_poll(struct work_struct *work);
int ipoib_pkey_dev_delay_open(struct net_device *dev);
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 8bf5e9e..f10fba5d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -400,10 +400,11 @@
spin_unlock_irq(&priv->tx_lock);
}
-void ipoib_reap_ah(void *dev_ptr)
+void ipoib_reap_ah(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+ struct net_device *dev = priv->dev;
__ipoib_reap_ah(dev);
@@ -613,10 +614,11 @@
return 0;
}
-void ipoib_ib_dev_flush(void *_dev)
+void ipoib_ib_dev_flush(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *)_dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
+ struct ipoib_dev_priv *cpriv, *priv =
+ container_of(work, struct ipoib_dev_priv, flush_task);
+ struct net_device *dev = priv->dev;
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
@@ -638,14 +640,14 @@
*/
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
ipoib_ib_dev_up(dev);
- ipoib_mcast_restart_task(dev);
+ ipoib_mcast_restart_task(&priv->restart_task);
}
mutex_lock(&priv->vlan_mutex);
/* Flush any child interfaces too */
list_for_each_entry(cpriv, &priv->child_intfs, list)
- ipoib_ib_dev_flush(cpriv->dev);
+ ipoib_ib_dev_flush(&cpriv->flush_task);
mutex_unlock(&priv->vlan_mutex);
}
@@ -672,10 +674,11 @@
* change async notification is available.
*/
-void ipoib_pkey_poll(void *dev_ptr)
+void ipoib_pkey_poll(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, pkey_task.work);
+ struct net_device *dev = priv->dev;
ipoib_pkey_dev_check_presence(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5ba3154..c092802 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -940,11 +940,11 @@
INIT_LIST_HEAD(&priv->dead_ahs);
INIT_LIST_HEAD(&priv->multicast_list);
- INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev);
- INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev);
- INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev);
- INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
- INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev);
+ INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll);
+ INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
+ INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush);
+ INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+ INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}
struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d282d65..b04b72c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -399,7 +399,8 @@
mcast->backoff = 1;
mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue,
+ &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
complete(&mcast->done);
return;
@@ -435,7 +436,8 @@
if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
if (status == -ETIMEDOUT)
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+ 0);
else
queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
mcast->backoff * HZ);
@@ -517,10 +519,11 @@
mcast->query_id = ret;
}
-void ipoib_mcast_join_task(void *dev_ptr)
+void ipoib_mcast_join_task(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, mcast_task.work);
+ struct net_device *dev = priv->dev;
if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
return;
@@ -610,7 +613,7 @@
mutex_lock(&mcast_mutex);
if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
spin_lock_irq(&priv->lock);
@@ -818,10 +821,11 @@
}
}
-void ipoib_mcast_restart_task(void *dev_ptr)
+void ipoib_mcast_restart_task(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, restart_task);
+ struct net_device *dev = priv->dev;
struct dev_mc_list *mclist;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 18a0000..693b770 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -48,7 +48,7 @@
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static void iser_comp_error_worker(void *data);
+static void iser_comp_error_worker(struct work_struct *work);
static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
@@ -480,8 +480,7 @@
init_waitqueue_head(&ib_conn->wait);
atomic_set(&ib_conn->post_recv_buf_count, 0);
atomic_set(&ib_conn->post_send_buf_count, 0);
- INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker,
- ib_conn);
+ INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
@@ -754,9 +753,10 @@
return ret_val;
}
-static void iser_comp_error_worker(void *data)
+static void iser_comp_error_worker(struct work_struct *work)
{
- struct iser_conn *ib_conn = data;
+ struct iser_conn *ib_conn =
+ container_of(work, struct iser_conn, comperror_work);
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 64ab5fc..a628959 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -390,9 +390,10 @@
wait_for_completion(&target->done);
}
-static void srp_remove_work(void *target_ptr)
+static void srp_remove_work(struct work_struct *work)
{
- struct srp_target_port *target = target_ptr;
+ struct srp_target_port *target =
+ container_of(work, struct srp_target_port, work);
spin_lock_irq(target->scsi_host->host_lock);
if (target->state != SRP_TARGET_DEAD) {
@@ -575,7 +576,7 @@
spin_lock_irq(target->scsi_host->host_lock);
if (target->state == SRP_TARGET_CONNECTING) {
target->state = SRP_TARGET_DEAD;
- INIT_WORK(&target->work, srp_remove_work, target);
+ INIT_WORK(&target->work, srp_remove_work);
schedule_work(&target->work);
}
spin_unlock_irq(target->scsi_host->host_lock);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index cbb9366..8451b29 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -567,9 +567,9 @@
* interrupt context.
*/
-static void atkbd_event_work(void *data)
+static void atkbd_event_work(struct work_struct *work)
{
- struct atkbd *atkbd = data;
+ struct atkbd *atkbd = container_of(work, struct atkbd, event_work);
mutex_lock(&atkbd->event_mutex);
@@ -943,7 +943,7 @@
atkbd->dev = dev;
ps2_init(&atkbd->ps2dev, serio);
- INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd);
+ INIT_WORK(&atkbd->event_work, atkbd_event_work);
mutex_init(&atkbd->event_mutex);
switch (serio->id.type) {
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index 979b93e..b7f049b 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -572,9 +572,9 @@
* were in.
*/
static void
-lkkbd_reinit (void *data)
+lkkbd_reinit (struct work_struct *work)
{
- struct lkkbd *lk = data;
+ struct lkkbd *lk = container_of(work, struct lkkbd, tq);
int division;
unsigned char leds_on = 0;
unsigned char leds_off = 0;
@@ -651,7 +651,7 @@
lk->serio = serio;
lk->dev = input_dev;
- INIT_WORK (&lk->tq, lkkbd_reinit, lk);
+ INIT_WORK (&lk->tq, lkkbd_reinit);
lk->bell_volume = bell_volume;
lk->keyclick_volume = keyclick_volume;
lk->ctrlclick_volume = ctrlclick_volume;
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index cac4781..6cd887c 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -208,9 +208,9 @@
* were in.
*/
-static void sunkbd_reinit(void *data)
+static void sunkbd_reinit(struct work_struct *work)
{
- struct sunkbd *sunkbd = data;
+ struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
@@ -248,7 +248,7 @@
sunkbd->serio = serio;
sunkbd->dev = input_dev;
init_waitqueue_head(&sunkbd->wait);
- INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd);
+ INIT_WORK(&sunkbd->tq, sunkbd_reinit);
snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys);
serio_set_drvdata(serio, sunkbd);
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 6f9b2c7..52bb222 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -888,9 +888,10 @@
* psmouse_resync() attempts to re-validate current protocol.
*/
-static void psmouse_resync(void *p)
+static void psmouse_resync(struct work_struct *work)
{
- struct psmouse *psmouse = p, *parent = NULL;
+ struct psmouse *parent = NULL, *psmouse =
+ container_of(work, struct psmouse, resync_work);
struct serio *serio = psmouse->ps2dev.serio;
psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
int failed = 0, enabled = 0;
@@ -1121,7 +1122,7 @@
goto out;
ps2_init(&psmouse->ps2dev, serio);
- INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
+ INIT_WORK(&psmouse->resync_work, psmouse_resync);
psmouse->dev = input_dev;
snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index e5b1b60..b3e84d3 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -251,9 +251,9 @@
* ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.)
*/
-static void ps2_execute_scheduled_command(void *data)
+static void ps2_execute_scheduled_command(struct work_struct *work)
{
- struct ps2work *ps2work = data;
+ struct ps2work *ps2work = container_of(work, struct ps2work, work);
ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command);
kfree(ps2work);
@@ -278,7 +278,7 @@
ps2work->ps2dev = ps2dev;
ps2work->command = command;
memcpy(ps2work->param, param, send);
- INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work);
+ INIT_WORK(&ps2work->work, ps2_execute_scheduled_command);
if (!schedule_work(&ps2work->work)) {
kfree(ps2work);
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c
index 6ae6eb3..946c38c 100644
--- a/drivers/isdn/act2000/capi.c
+++ b/drivers/isdn/act2000/capi.c
@@ -627,8 +627,10 @@
}
void
-actcapi_dispatch(act2000_card *card)
+actcapi_dispatch(struct work_struct *work)
{
+ struct act2000_card *card =
+ container_of(work, struct act2000_card, rcv_tq);
struct sk_buff *skb;
actcapi_msg *msg;
__u16 ccmd;
diff --git a/drivers/isdn/act2000/capi.h b/drivers/isdn/act2000/capi.h
index 49f453c..e55f6a9 100644
--- a/drivers/isdn/act2000/capi.h
+++ b/drivers/isdn/act2000/capi.h
@@ -356,7 +356,7 @@
extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *);
extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *);
extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8);
-extern void actcapi_dispatch(act2000_card *);
+extern void actcapi_dispatch(struct work_struct *);
#ifdef DEBUG_MSG
extern void actcapi_debug_msg(struct sk_buff *skb, int);
#else
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c
index d89dcde..90593e2 100644
--- a/drivers/isdn/act2000/module.c
+++ b/drivers/isdn/act2000/module.c
@@ -192,8 +192,11 @@
}
static void
-act2000_transmit(struct act2000_card *card)
+act2000_transmit(struct work_struct *work)
{
+ struct act2000_card *card =
+ container_of(work, struct act2000_card, snd_tq);
+
switch (card->bus) {
case ACT2000_BUS_ISA:
act2000_isa_send(card);
@@ -207,8 +210,11 @@
}
static void
-act2000_receive(struct act2000_card *card)
+act2000_receive(struct work_struct *work)
{
+ struct act2000_card *card =
+ container_of(work, struct act2000_card, poll_tq);
+
switch (card->bus) {
case ACT2000_BUS_ISA:
act2000_isa_receive(card);
@@ -227,7 +233,7 @@
act2000_card * card = (act2000_card *)data;
unsigned long flags;
- act2000_receive(card);
+ act2000_receive(&card->poll_tq);
spin_lock_irqsave(&card->lock, flags);
mod_timer(&card->ptimer, jiffies+3);
spin_unlock_irqrestore(&card->lock, flags);
@@ -578,9 +584,9 @@
skb_queue_head_init(&card->sndq);
skb_queue_head_init(&card->rcvq);
skb_queue_head_init(&card->ackq);
- INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card);
- INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card);
- INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card);
+ INIT_WORK(&card->snd_tq, act2000_transmit);
+ INIT_WORK(&card->rcv_tq, actcapi_dispatch);
+ INIT_WORK(&card->poll_tq, act2000_receive);
init_timer(&card->ptimer);
card->interface.owner = THIS_MODULE;
card->interface.channels = ACT2000_BCH;
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 8c4fcb9..783a255 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -208,9 +208,10 @@
}
}
-static void notify_handler(void *data)
+static void notify_handler(struct work_struct *work)
{
- struct capi_notifier *np = data;
+ struct capi_notifier *np =
+ container_of(work, struct capi_notifier, work);
switch (np->cmd) {
case KCI_CONTRUP:
@@ -235,7 +236,7 @@
if (!np)
return -ENOMEM;
- INIT_WORK(&np->work, notify_handler, np);
+ INIT_WORK(&np->work, notify_handler);
np->cmd = cmd;
np->controller = controller;
np->applid = applid;
@@ -248,10 +249,11 @@
/* -------- Receiver ------------------------------------------ */
-static void recv_handler(void *_ap)
+static void recv_handler(struct work_struct *work)
{
struct sk_buff *skb;
- struct capi20_appl *ap = (struct capi20_appl *) _ap;
+ struct capi20_appl *ap =
+ container_of(work, struct capi20_appl, recv_work);
if ((!ap) || (ap->release_in_progress))
return;
@@ -527,7 +529,7 @@
ap->callback = NULL;
init_MUTEX(&ap->recv_sem);
skb_queue_head_init(&ap->recv_queue);
- INIT_WORK(&ap->recv_work, recv_handler, (void *)ap);
+ INIT_WORK(&ap->recv_work, recv_handler);
ap->release_in_progress = 0;
write_unlock_irqrestore(&application_lock, flags);
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index bec5901..3b19cae 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -232,9 +232,10 @@
static void
-Amd7930_bh(struct IsdnCardState *cs)
+Amd7930_bh(struct work_struct *work)
{
-
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
struct PStack *stptr;
if (!cs)
@@ -789,7 +790,7 @@
void __devinit
setup_Amd7930(struct IsdnCardState *cs)
{
- INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs);
+ INIT_WORK(&cs->tqueue, Amd7930_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
cs->dbusytimer.data = (long) cs;
init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 785b085..cede72c 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1137,7 +1137,6 @@
cs->tx_skb = NULL;
cs->tx_cnt = 0;
cs->event = 0;
- cs->tqueue.data = cs;
skb_queue_head_init(&cs->rq);
skb_queue_head_init(&cs->sq);
@@ -1554,7 +1553,7 @@
static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg);
static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs);
static void hisax_bc_close(struct BCState *bcs);
-static void hisax_bh(struct IsdnCardState *cs);
+static void hisax_bh(struct work_struct *work);
static void EChannel_proc_rcv(struct hisax_d_if *d_if);
int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
@@ -1586,7 +1585,7 @@
hisax_d_if->cs = cs;
cs->hw.hisax_d_if = hisax_d_if;
cs->cardmsg = hisax_cardmsg;
- INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs);
+ INIT_WORK(&cs->tqueue, hisax_bh);
cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1;
for (i = 0; i < 2; i++) {
cs->bcs[i].BC_SetStack = hisax_bc_setstack;
@@ -1618,8 +1617,10 @@
schedule_work(&cs->tqueue);
}
-static void hisax_bh(struct IsdnCardState *cs)
+static void hisax_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
struct PStack *st;
int pr;
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index d852c9d..de9b1a4 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -1083,8 +1083,9 @@
/* bottom half handler for interrupt */
/*************************************/
static void
-hfc4s8s_bh(hfc4s8s_hw * hw)
+hfc4s8s_bh(struct work_struct *work)
{
+ hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
u_char b;
struct hfc4s8s_l1 *l1p;
volatile u_char *fifo_stat;
@@ -1550,7 +1551,7 @@
goto out;
}
- INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw);
+ INIT_WORK(&hw->tqueue, hfc4s8s_bh);
if (request_irq
(hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
index 6360e82..8d98644 100644
--- a/drivers/isdn/hisax/hfc_2bds0.c
+++ b/drivers/isdn/hisax/hfc_2bds0.c
@@ -549,10 +549,11 @@
}
static void
-hfcd_bh(struct IsdnCardState *cs)
+hfcd_bh(struct work_struct *work)
{
- if (!cs)
- return;
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
+
if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
switch (cs->dc.hfcd.ph_state) {
case (0):
@@ -1072,5 +1073,5 @@
cs->dbusytimer.function = (void *) hfc_dbusy_timer;
cs->dbusytimer.data = (long) cs;
init_timer(&cs->dbusytimer);
- INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs);
+ INIT_WORK(&cs->tqueue, hfcd_bh);
}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 93f60b5..5db0a85 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1506,8 +1506,10 @@
/* handle L1 state changes */
/***************************/
static void
-hfcpci_bh(struct IsdnCardState *cs)
+hfcpci_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
u_long flags;
// struct PStack *stptr;
@@ -1722,7 +1724,7 @@
Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
/* At this point the needed PCI config is done */
/* fifos are still not enabled */
- INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs);
+ INIT_WORK(&cs->tqueue, hfcpci_bh);
cs->setstack_d = setstack_hfcpci;
cs->BC_Send_Data = &hfcpci_send_data;
cs->readisac = NULL;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 954d153..4fd09d2 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1251,8 +1251,10 @@
/* handle L1 state changes */
/***************************/
static void
-hfcsx_bh(struct IsdnCardState *cs)
+hfcsx_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
u_long flags;
if (!cs)
@@ -1499,7 +1501,7 @@
cs->dbusytimer.function = (void *) hfcsx_dbusy_timer;
cs->dbusytimer.data = (long) cs;
init_timer(&cs->dbusytimer);
- INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs);
+ INIT_WORK(&cs->tqueue, hfcsx_bh);
cs->readisac = NULL;
cs->writeisac = NULL;
cs->readisacfifo = NULL;
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index da70692..682cac3 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -77,8 +77,10 @@
}
static void
-icc_bh(struct IsdnCardState *cs)
+icc_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
struct PStack *stptr;
if (!cs)
@@ -674,7 +676,7 @@
void __devinit
setup_icc(struct IsdnCardState *cs)
{
- INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs);
+ INIT_WORK(&cs->tqueue, icc_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
cs->dbusytimer.data = (long) cs;
init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 282f349..4e9f238 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -81,8 +81,10 @@
}
static void
-isac_bh(struct IsdnCardState *cs)
+isac_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
struct PStack *stptr;
if (!cs)
@@ -674,7 +676,7 @@
void __devinit
setup_isac(struct IsdnCardState *cs)
{
- INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs);
+ INIT_WORK(&cs->tqueue, isac_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
cs->dbusytimer.data = (long) cs;
init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 674af67..6f1a658 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -437,8 +437,10 @@
#define B_LL_OK 10
static void
-isar_bh(struct BCState *bcs)
+isar_bh(struct work_struct *work)
{
+ struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
BChannel_bh(bcs);
if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event))
ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR);
@@ -1580,7 +1582,7 @@
cs->bcs[i].mode = 0;
cs->bcs[i].hw.isar.dpath = i + 1;
modeisar(&cs->bcs[i], 0, 0);
- INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]);
+ INIT_WORK(&cs->bcs[i].tqueue, isar_bh);
}
}
diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c
index bab3568..a14204e 100644
--- a/drivers/isdn/hisax/isdnl1.c
+++ b/drivers/isdn/hisax/isdnl1.c
@@ -315,8 +315,10 @@
}
void
-BChannel_bh(struct BCState *bcs)
+BChannel_bh(struct work_struct *work)
{
+ struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
if (!bcs)
return;
if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event))
@@ -362,7 +364,7 @@
bcs->cs = cs;
bcs->channel = bc;
- INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs);
+ INIT_WORK(&bcs->tqueue, BChannel_bh);
spin_lock_init(&bcs->aclock);
bcs->BC_SetStack = NULL;
bcs->BC_Close = NULL;
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index 1655341..3aeceaf 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -101,8 +101,10 @@
}
static void
-W6692_bh(struct IsdnCardState *cs)
+W6692_bh(struct work_struct *work)
{
+ struct IsdnCardState *cs =
+ container_of(work, struct IsdnCardState, tqueue);
struct PStack *stptr;
if (!cs)
@@ -1070,7 +1072,7 @@
id_list[cs->subtyp].card_name, cs->irq,
cs->hw.w6692.iobase);
- INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs);
+ INIT_WORK(&cs->tqueue, W6692_bh);
cs->readW6692 = &ReadW6692;
cs->writeW6692 = &WriteW6692;
cs->readisacfifo = &ReadISACfifo;
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f8d6ae6..2e4daeb 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -984,9 +984,9 @@
/*
* called from tq_immediate
*/
-static void isdn_net_softint(void *private)
+static void isdn_net_softint(struct work_struct *work)
{
- isdn_net_local *lp = private;
+ isdn_net_local *lp = container_of(work, isdn_net_local, tqueue);
struct sk_buff *skb;
spin_lock_bh(&lp->xmit_lock);
@@ -2596,7 +2596,7 @@
netdev->local->netdev = netdev;
netdev->local->next = netdev->local;
- INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local);
+ INIT_WORK(&netdev->local->tqueue, isdn_net_softint);
spin_lock_init(&netdev->local->xmit_lock);
netdev->local->isdn_device = -1;
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c
index 6ead5e1..1966f34 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/isdn/pcbit/drv.c
@@ -68,8 +68,6 @@
static int pcbit_check_msn(struct pcbit_dev *dev, char *msn);
-extern void pcbit_deliver(void * data);
-
int pcbit_init_dev(int board, int mem_base, int irq)
{
struct pcbit_dev *dev;
@@ -129,7 +127,7 @@
memset(dev->b2, 0, sizeof(struct pcbit_chan));
dev->b2->id = 1;
- INIT_WORK(&dev->qdelivery, pcbit_deliver, dev);
+ INIT_WORK(&dev->qdelivery, pcbit_deliver);
/*
* interrupts
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index 937fd21..0c9f6df 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -67,7 +67,6 @@
* Prototypes
*/
-void pcbit_deliver(void *data);
static void pcbit_transmit(struct pcbit_dev *dev);
static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack);
@@ -299,11 +298,12 @@
*/
void
-pcbit_deliver(void *data)
+pcbit_deliver(struct work_struct *work)
{
struct frame_buf *frame;
unsigned long flags, msg;
- struct pcbit_dev *dev = (struct pcbit_dev *) data;
+ struct pcbit_dev *dev =
+ container_of(work, struct pcbit_dev, qdelivery);
spin_lock_irqsave(&dev->lock, flags);
diff --git a/drivers/isdn/pcbit/pcbit.h b/drivers/isdn/pcbit/pcbit.h
index 388bace..19c18e8 100644
--- a/drivers/isdn/pcbit/pcbit.h
+++ b/drivers/isdn/pcbit/pcbit.h
@@ -166,4 +166,6 @@
#define L2_RUNNING 5
#define L2_ERROR 6
+extern void pcbit_deliver(struct work_struct *work);
+
#endif
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index f1b6f56..5ed41fe 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -48,7 +48,8 @@
} ____cacheline_aligned;
struct rackmeter_cpu {
- struct work_struct sniffer;
+ struct delayed_work sniffer;
+ struct rackmeter *rm;
cputime64_t prev_wall;
cputime64_t prev_idle;
int zero;
@@ -208,11 +209,12 @@
rackmeter_do_pause(rm, 0);
}
-static void rackmeter_do_timer(void *data)
+static void rackmeter_do_timer(struct work_struct *work)
{
- struct rackmeter *rm = data;
+ struct rackmeter_cpu *rcpu =
+ container_of(work, struct rackmeter_cpu, sniffer.work);
+ struct rackmeter *rm = rcpu->rm;
unsigned int cpu = smp_processor_id();
- struct rackmeter_cpu *rcpu = &rm->cpu[cpu];
cputime64_t cur_jiffies, total_idle_ticks;
unsigned int total_ticks, idle_ticks;
int i, offset, load, cumm, pause;
@@ -263,8 +265,10 @@
* on those machines yet
*/
- INIT_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer, rm);
- INIT_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer, rm);
+ rm->cpu[0].rm = rm;
+ INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
+ rm->cpu[1].rm = rm;
+ INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);
for_each_online_cpu(cpu) {
struct rackmeter_cpu *rcpu;
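In rack-meter.c the work item sits in a per-CPU sub-structure, so container_of() can only recover that sub-structure; the conversion therefore stores a back-pointer to the parent that the old void-pointer argument used to supply. A hedged sketch of the same shape, with hypothetical names (foo_parent/foo_child stand in for the real structures):

#include <linux/workqueue.h>

struct foo_parent;

struct foo_child {
	struct delayed_work sniffer;
	struct foo_parent *parent;	/* back-pointer added by the conversion */
};

struct foo_parent {
	struct foo_child cpu[2];
};

static void foo_do_timer(struct work_struct *work)
{
	/* container_of() on the ".work" member yields the per-CPU child ... */
	struct foo_child *child = container_of(work, struct foo_child, sniffer.work);
	/* ... and the stored back-pointer yields what "void *data" used to carry */
	struct foo_parent *parent = child->parent;

	(void)parent;	/* sketch only */
}

static void foo_init(struct foo_parent *p)
{
	int i;

	for (i = 0; i < 2; i++) {
		p->cpu[i].parent = p;
		INIT_DELAYED_WORK(&p->cpu[i].sniffer, foo_do_timer);
	}
}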
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 4f724cd..6dde27a 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -601,7 +601,7 @@
* sysfs visibility
*/
-static void smu_expose_childs(void *unused)
+static void smu_expose_childs(struct work_struct *unused)
{
struct device_node *np;
@@ -611,7 +611,7 @@
&smu->of_dev->dev);
}
-static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
+static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
static int smu_platform_probe(struct of_device* dev,
const struct of_device_id *match)
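Statically declared work items lose their data argument the same way; a handler that never needed per-item state simply ignores its parameter, as smu.c does above. Sketch with hypothetical names:

#include <linux/workqueue.h>

static void foo_expose_childs(struct work_struct *unused)
{
	/* nothing to recover: this handler has no per-item state */
}

/* two-argument form: the old third (data) argument is gone */
static DECLARE_WORK(foo_expose_childs_work, foo_expose_childs);

/* callers are unchanged: schedule_work(&foo_expose_childs_work); */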
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 08a40f4..ed2d4ef 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -458,11 +458,11 @@
* interrupt context.
*/
static struct workqueue_struct *_kcryptd_workqueue;
-static void kcryptd_do_work(void *data);
+static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_queue_io(struct crypt_io *io)
{
- INIT_WORK(&io->work, kcryptd_do_work, io);
+ INIT_WORK(&io->work, kcryptd_do_work);
queue_work(_kcryptd_workqueue, &io->work);
}
@@ -618,9 +618,9 @@
dec_pending(io, crypt_convert(cc, &ctx));
}
-static void kcryptd_do_work(void *data)
+static void kcryptd_do_work(struct work_struct *work)
{
- struct crypt_io *io = data;
+ struct crypt_io *io = container_of(work, struct crypt_io, work);
if (io->post_process)
process_read_endio(io);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d754e0b..e77ee6f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -104,8 +104,8 @@
static kmem_cache_t *_mpio_cache;
struct workqueue_struct *kmultipathd;
-static void process_queued_ios(void *data);
-static void trigger_event(void *data);
+static void process_queued_ios(struct work_struct *work);
+static void trigger_event(struct work_struct *work);
/*-----------------------------------------------
@@ -173,8 +173,8 @@
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
m->queue_io = 1;
- INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
- INIT_WORK(&m->trigger_event, trigger_event, m);
+ INIT_WORK(&m->process_queued_ios, process_queued_ios);
+ INIT_WORK(&m->trigger_event, trigger_event);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
@@ -379,9 +379,10 @@
}
}
-static void process_queued_ios(void *data)
+static void process_queued_ios(struct work_struct *work)
{
- struct multipath *m = (struct multipath *) data;
+ struct multipath *m =
+ container_of(work, struct multipath, process_queued_ios);
struct hw_handler *hwh = &m->hw_handler;
struct pgpath *pgpath = NULL;
unsigned init_required = 0, must_queue = 1;
@@ -421,9 +422,10 @@
* An event is triggered whenever a path is taken out of use.
* Includes path failure and PG bypass.
*/
-static void trigger_event(void *data)
+static void trigger_event(struct work_struct *work)
{
- struct multipath *m = (struct multipath *) data;
+ struct multipath *m =
+ container_of(work, struct multipath, trigger_event);
dm_table_event(m->ti->table);
}
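dm-mpath.c shows one object embedding two independent work items; each handler names the member it was queued through, so container_of() resolves to the same multipath object from either entry point. Sketch, hypothetical names:

#include <linux/workqueue.h>

struct foo_mpath {
	struct work_struct process_ios;
	struct work_struct trigger_event;
};

static void foo_process_ios(struct work_struct *work)
{
	struct foo_mpath *m = container_of(work, struct foo_mpath, process_ios);

	(void)m;	/* sketch only */
}

static void foo_trigger_event(struct work_struct *work)
{
	struct foo_mpath *m = container_of(work, struct foo_mpath, trigger_event);

	(void)m;	/* sketch only */
}

static void foo_mpath_init(struct foo_mpath *m)
{
	INIT_WORK(&m->process_ios, foo_process_ios);
	INIT_WORK(&m->trigger_event, foo_trigger_event);
}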
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 48a653b..fc8cbb1 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -883,7 +883,7 @@
do_writes(ms, &writes);
}
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
{
struct mirror_set *ms;
@@ -1269,7 +1269,7 @@
dm_dirty_log_exit();
return r;
}
- INIT_WORK(&_kmirrord_work, do_work, NULL);
+ INIT_WORK(&_kmirrord_work, do_work);
r = dm_register_target(&mirror_target);
if (r < 0) {
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 5281e00..91c7aa1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,7 +40,7 @@
#define SNAPSHOT_PAGES 256
struct workqueue_struct *ksnapd;
-static void flush_queued_bios(void *data);
+static void flush_queued_bios(struct work_struct *work);
struct pending_exception {
struct exception e;
@@ -528,7 +528,7 @@
}
bio_list_init(&s->queued_bios);
- INIT_WORK(&s->queued_bios_work, flush_queued_bios, s);
+ INIT_WORK(&s->queued_bios_work, flush_queued_bios);
/* Add snapshot to the list of snapshots for this origin */
/* Exceptions aren't triggered till snapshot_resume() is called */
@@ -603,9 +603,10 @@
}
}
-static void flush_queued_bios(void *data)
+static void flush_queued_bios(struct work_struct *work)
{
- struct dm_snapshot *s = (struct dm_snapshot *) data;
+ struct dm_snapshot *s =
+ container_of(work, struct dm_snapshot, queued_bios_work);
struct bio *queued_bios;
unsigned long flags;
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index f1db6ef..b3c0149 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -417,7 +417,7 @@
/*
* kcopyd does this every time it's woken up.
*/
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
{
/*
* The order that these are called is *very* important.
@@ -628,7 +628,7 @@
}
kcopyd_clients++;
- INIT_WORK(&_kcopyd_work, do_work, NULL);
+ INIT_WORK(&_kcopyd_work, do_work);
mutex_unlock(&kcopyd_init_lock);
return 0;
}
diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c
index 0689324..6e16680 100644
--- a/drivers/media/dvb/b2c2/flexcop-pci.c
+++ b/drivers/media/dvb/b2c2/flexcop-pci.c
@@ -63,7 +63,7 @@
unsigned long last_irq;
- struct work_struct irq_check_work;
+ struct delayed_work irq_check_work;
struct flexcop_device *fc_dev;
};
@@ -97,9 +97,10 @@
return 0;
}
-static void flexcop_pci_irq_check_work(void *data)
+static void flexcop_pci_irq_check_work(struct work_struct *work)
{
- struct flexcop_pci *fc_pci = data;
+ struct flexcop_pci *fc_pci =
+ container_of(work, struct flexcop_pci, irq_check_work.work);
struct flexcop_device *fc = fc_pci->fc_dev;
flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714);
@@ -371,7 +372,7 @@
if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
goto err_fc_exit;
- INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci);
+ INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
return ret;
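flexcop-pci.c is typical of the work items that get re-armed with a timeout: the field becomes a struct delayed_work, it is set up with INIT_DELAYED_WORK(), and the handler's container_of() must name the embedded ".work" member. A sketch, hypothetical names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo_pci {
	struct delayed_work irq_check_work;	/* was: struct work_struct */
};

static void foo_irq_check(struct work_struct *work)
{
	struct foo_pci *fc =
		container_of(work, struct foo_pci, irq_check_work.work);

	/* re-arm; schedule_delayed_work() replaces the old schedule_work() */
	schedule_delayed_work(&fc->irq_check_work, msecs_to_jiffies(100));
}

static void foo_probe(struct foo_pci *fc)
{
	INIT_DELAYED_WORK(&fc->irq_check_work, foo_irq_check);
	/* "run it now" is expressed as a zero delay */
	schedule_delayed_work(&fc->irq_check_work, 0);
}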
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index 8a7dd50..206c13e 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -128,7 +128,7 @@
struct dvbt_set_parameters_msg param;
struct dvbt_get_status_msg status;
- struct work_struct query_work;
+ struct delayed_work query_work;
wait_queue_head_t poll_wq;
int pending_fe_events;
@@ -142,7 +142,7 @@
#ifdef ENABLE_RC
struct input_dev *rc_input_dev;
char phys[64];
- struct work_struct rc_query_work;
+ struct delayed_work rc_query_work;
int rc_input_event;
u32 rc_last_code;
unsigned long last_event_jiffies;
@@ -723,9 +723,10 @@
#ifdef ENABLE_RC
-static void cinergyt2_query_rc (void *data)
+static void cinergyt2_query_rc (struct work_struct *work)
{
- struct cinergyt2 *cinergyt2 = data;
+ struct cinergyt2 *cinergyt2 =
+ container_of(work, struct cinergyt2, rc_query_work.work);
char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS };
struct cinergyt2_rc_event rc_events[12];
int n, len, i;
@@ -806,7 +807,7 @@
strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys));
cinergyt2->rc_input_event = KEY_MAX;
cinergyt2->rc_last_code = ~0;
- INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2);
+ INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc);
input_dev->name = DRIVER_NAME " remote control";
input_dev->phys = cinergyt2->phys;
@@ -847,9 +848,10 @@
#endif /* ENABLE_RC */
-static void cinergyt2_query (void *data)
+static void cinergyt2_query (struct work_struct *work)
{
- struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data;
+ struct cinergyt2 *cinergyt2 =
+ container_of(work, struct cinergyt2, query_work.work);
char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS };
struct dvbt_get_status_msg *s = &cinergyt2->status;
uint8_t lock_bits;
@@ -893,7 +895,7 @@
mutex_init(&cinergyt2->sem);
init_waitqueue_head (&cinergyt2->poll_wq);
- INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2);
+ INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query);
cinergyt2->udev = interface_to_usbdev(intf);
cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8859ab7..ebf4dc5 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -127,6 +127,7 @@
int in_use;
struct net_device_stats stats;
u16 pid;
+ struct net_device *net;
struct dvb_net *host;
struct dmx_demux *demux;
struct dmx_section_feed *secfeed;
@@ -1123,10 +1124,11 @@
}
-static void wq_set_multicast_list (void *data)
+static void wq_set_multicast_list (struct work_struct *work)
{
- struct net_device *dev = data;
- struct dvb_net_priv *priv = dev->priv;
+ struct dvb_net_priv *priv =
+ container_of(work, struct dvb_net_priv, set_multicast_list_wq);
+ struct net_device *dev = priv->net;
dvb_net_feed_stop(dev);
priv->rx_mode = RX_MODE_UNI;
@@ -1167,9 +1169,11 @@
}
-static void wq_restart_net_feed (void *data)
+static void wq_restart_net_feed (struct work_struct *work)
{
- struct net_device *dev = data;
+ struct dvb_net_priv *priv =
+ container_of(work, struct dvb_net_priv, restart_net_feed_wq);
+ struct net_device *dev = priv->net;
if (netif_running(dev)) {
dvb_net_feed_stop(dev);
@@ -1276,6 +1280,7 @@
dvbnet->device[if_num] = net;
priv = net->priv;
+ priv->net = net;
priv->demux = dvbnet->demux;
priv->pid = pid;
priv->rx_mode = RX_MODE_UNI;
@@ -1284,8 +1289,8 @@
priv->feedtype = feedtype;
reset_ule(priv);
- INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net);
- INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net);
+ INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
+ INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
mutex_init(&priv->mutex);
net->base_addr = pid;
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 0a3a0b6..794e447 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -13,9 +13,10 @@
*
* TODO: Fix the repeat rate of the input device.
*/
-static void dvb_usb_read_remote_control(void *data)
+static void dvb_usb_read_remote_control(struct work_struct *work)
{
- struct dvb_usb_device *d = data;
+ struct dvb_usb_device *d =
+ container_of(work, struct dvb_usb_device, rc_query_work.work);
u32 event;
int state;
@@ -128,7 +129,7 @@
input_register_device(d->rc_input_dev);
- INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d);
+ INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control);
info("schedule remote query interval to %d msecs.", d->props.rc_interval);
schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval));
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 376c45a..0d721731 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -369,7 +369,7 @@
/* remote control */
struct input_dev *rc_input_dev;
char rc_phys[64];
- struct work_struct rc_query_work;
+ struct delayed_work rc_query_work;
u32 last_event;
int last_state;
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c
index 41f4b8d..b12cec9 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/media/video/cpia_pp.c
@@ -82,6 +82,8 @@
struct pardevice *pdev;
struct parport *port;
struct work_struct cb_task;
+ void (*cb_func)(void *cbdata);
+ void *cb_data;
int open_count;
wait_queue_head_t wq_stream;
/* image state flags */
@@ -130,6 +132,20 @@
#define PARPORT_CHUNK_SIZE PAGE_SIZE
+static void cpia_pp_run_callback(struct work_struct *work)
+{
+ void (*cb_func)(void *cbdata);
+ void *cb_data;
+ struct pp_cam_entry *cam;
+
+ cam = container_of(work, struct pp_cam_entry, cb_task);
+ cb_func = cam->cb_func;
+ cb_data = cam->cb_data;
+ work_release(work);
+
+ cb_func(cb_data);
+}
+
/****************************************************************************
*
* CPiA-specific low-level parport functions for nibble uploads
@@ -664,7 +680,9 @@
int retval = 0;
if(cam->port->irq != PARPORT_IRQ_NONE) {
- INIT_WORK(&cam->cb_task, cb, cbdata);
+ cam->cb_func = cb;
+ cam->cb_data = cbdata;
+ INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback);
} else {
retval = -1;
}
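cpia_pp.c is the awkward case: the old code queued an arbitrary callback plus argument, so the conversion stores both in the camera structure and runs them from a fixed handler. It uses the INIT_WORK_NAR()/work_release() variant that existed during this transition, where the handler releases the item explicitly before calling out. A rough sketch of that shape, hypothetical names, assuming that transitional API:

#include <linux/workqueue.h>

struct foo_cam {
	struct work_struct cb_task;
	void (*cb_func)(void *cbdata);	/* stored callback (was INIT_WORK's fn) */
	void *cb_data;			/* stored argument (was INIT_WORK's data) */
};

static void foo_run_callback(struct work_struct *work)
{
	struct foo_cam *cam = container_of(work, struct foo_cam, cb_task);
	void (*cb_func)(void *cbdata) = cam->cb_func;
	void *cb_data = cam->cb_data;

	work_release(work);	/* NAR item: release it before running the callback */
	cb_func(cb_data);
}

static void foo_queue_callback(struct foo_cam *cam,
			       void (*cb)(void *cbdata), void *cbdata)
{
	cam->cb_func = cb;
	cam->cb_data = cbdata;
	INIT_WORK_NAR(&cam->cb_task, foo_run_callback);
	schedule_work(&cam->cb_task);
}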
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 57e1c02..e60a0a5 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -145,9 +145,9 @@
schedule_work(&ir->work);
}
-static void cx88_ir_work(void *data)
+static void cx88_ir_work(struct work_struct *work)
{
- struct cx88_IR *ir = data;
+ struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
unsigned long timeout;
cx88_ir_handle_key(ir);
@@ -308,7 +308,7 @@
core->ir = ir;
if (ir->polling) {
- INIT_WORK(&ir->work, cx88_ir_work, ir);
+ INIT_WORK(&ir->work, cx88_ir_work);
init_timer(&ir->timer);
ir->timer.function = ir_timer;
ir->timer.data = (unsigned long)ir;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 1457b16..ab87e7b 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -268,9 +268,9 @@
schedule_work(&ir->work);
}
-static void ir_work(void *data)
+static void ir_work(struct work_struct *work)
{
- struct IR_i2c *ir = data;
+ struct IR_i2c *ir = container_of(work, struct IR_i2c, work);
ir_key_poll(ir);
mod_timer(&ir->timer, jiffies+HZ/10);
}
@@ -400,7 +400,7 @@
ir->input->name,ir->input->phys,adap->name);
/* start polling via eventd */
- INIT_WORK(&ir->work, ir_work, ir);
+ INIT_WORK(&ir->work, ir_work);
init_timer(&ir->timer);
ir->timer.function = ir_timer;
ir->timer.data = (unsigned long)ir;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c
index f129f31..cf12974 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.c
@@ -45,16 +45,21 @@
}
-static void pvr2_context_poll(struct pvr2_context *mp)
+static void pvr2_context_poll(struct work_struct *work)
{
+ struct pvr2_context *mp =
+ container_of(work, struct pvr2_context, workpoll);
pvr2_context_enter(mp); do {
pvr2_hdw_poll(mp->hdw);
} while (0); pvr2_context_exit(mp);
}
-static void pvr2_context_setup(struct pvr2_context *mp)
+static void pvr2_context_setup(struct work_struct *work)
{
+ struct pvr2_context *mp =
+ container_of(work, struct pvr2_context, workinit);
+
pvr2_context_enter(mp); do {
if (!pvr2_hdw_dev_ok(mp->hdw)) break;
pvr2_hdw_setup(mp->hdw);
@@ -92,8 +97,8 @@
}
mp->workqueue = create_singlethread_workqueue("pvrusb2");
- INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp);
- INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp);
+ INIT_WORK(&mp->workinit, pvr2_context_setup);
+ INIT_WORK(&mp->workpoll, pvr2_context_poll);
queue_work(mp->workqueue,&mp->workinit);
done:
return mp;
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c
index 7b9859c..92eabf8 100644
--- a/drivers/media/video/saa6588.c
+++ b/drivers/media/video/saa6588.c
@@ -324,9 +324,9 @@
schedule_work(&s->work);
}
-static void saa6588_work(void *data)
+static void saa6588_work(struct work_struct *work)
{
- struct saa6588 *s = (struct saa6588 *)data;
+ struct saa6588 *s = container_of(work, struct saa6588, work);
saa6588_i2c_poll(s);
mod_timer(&s->timer, jiffies + msecs_to_jiffies(20));
@@ -419,7 +419,7 @@
saa6588_configure(s);
/* start polling via eventd */
- INIT_WORK(&s->work, saa6588_work, s);
+ INIT_WORK(&s->work, saa6588_work);
init_timer(&s->timer);
s->timer.function = saa6588_timer;
s->timer.data = (unsigned long)s;
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 65d0440..daaae87 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -343,9 +343,10 @@
.minor = -1,
};
-static void empress_signal_update(void* data)
+static void empress_signal_update(struct work_struct *work)
{
- struct saa7134_dev* dev = (struct saa7134_dev*) data;
+ struct saa7134_dev* dev =
+ container_of(work, struct saa7134_dev, empress_workqueue);
if (dev->nosignal) {
dprintk("no video signal\n");
@@ -378,7 +379,7 @@
"%s empress (%s)", dev->name,
saa7134_boards[dev->board].name);
- INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev);
+ INIT_WORK(&dev->empress_workqueue, empress_signal_update);
err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
empress_nr[dev->nr]);
@@ -399,7 +400,7 @@
sizeof(struct saa7134_buf),
dev);
- empress_signal_update(dev);
+ empress_signal_update(&dev->empress_workqueue);
return 0;
}
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 1dd4917..ef2b55e 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1018,9 +1018,10 @@
}
static void
-mptfc_setup_reset(void *arg)
+mptfc_setup_reset(struct work_struct *work)
{
- MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+ MPT_ADAPTER *ioc =
+ container_of(work, MPT_ADAPTER, fc_setup_reset_work);
u64 pn;
struct mptfc_rport_info *ri;
@@ -1043,9 +1044,10 @@
}
static void
-mptfc_rescan_devices(void *arg)
+mptfc_rescan_devices(struct work_struct *work)
{
- MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+ MPT_ADAPTER *ioc =
+ container_of(work, MPT_ADAPTER, fc_rescan_work);
int ii;
u64 pn;
struct mptfc_rport_info *ri;
@@ -1154,8 +1156,8 @@
}
spin_lock_init(&ioc->fc_rescan_work_lock);
- INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc);
- INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc);
+ INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices);
+ INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset);
spin_lock_irqsave(&ioc->FreeQlock, flags);
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 314c3a2..b7c4407 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -111,7 +111,8 @@
u32 total_received;
struct net_device_stats stats; /* Per device statistics */
- struct work_struct post_buckets_task;
+ struct delayed_work post_buckets_task;
+ struct net_device *dev;
unsigned long post_buckets_active;
};
@@ -132,7 +133,7 @@
static int mpt_lan_open(struct net_device *dev);
static int mpt_lan_reset(struct net_device *dev);
static int mpt_lan_close(struct net_device *dev);
-static void mpt_lan_post_receive_buckets(void *dev_id);
+static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
int priority);
static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
@@ -345,7 +346,7 @@
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
} else {
- mpt_lan_post_receive_buckets(dev);
+ mpt_lan_post_receive_buckets(priv);
netif_wake_queue(dev);
}
@@ -441,7 +442,7 @@
dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
- mpt_lan_post_receive_buckets(dev);
+ mpt_lan_post_receive_buckets(priv);
printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
IOC_AND_NETDEV_NAMES_s_s(dev));
@@ -854,7 +855,7 @@
if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
if (priority) {
- schedule_work(&priv->post_buckets_task);
+ schedule_delayed_work(&priv->post_buckets_task, 0);
} else {
schedule_delayed_work(&priv->post_buckets_task, 1);
dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
@@ -1188,10 +1189,9 @@
/* Simple SGE's only at the moment */
static void
-mpt_lan_post_receive_buckets(void *dev_id)
+mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
- struct net_device *dev = dev_id;
- struct mpt_lan_priv *priv = dev->priv;
+ struct net_device *dev = priv->dev;
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
MPT_FRAME_HDR *mf;
LANReceivePostRequest_t *pRecvReq;
@@ -1335,6 +1335,13 @@
clear_bit(0, &priv->post_buckets_active);
}
+static void
+mpt_lan_post_receive_buckets_work(struct work_struct *work)
+{
+ mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
+ post_buckets_task.work));
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
@@ -1350,11 +1357,13 @@
priv = netdev_priv(dev);
+ priv->dev = dev;
priv->mpt_dev = mpt_dev;
priv->pnum = pnum;
- memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
- INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
+ memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
+ INIT_DELAYED_WORK(&priv->post_buckets_task,
+ mpt_lan_post_receive_buckets_work);
priv->post_buckets_active = 0;
dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
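mptlan.c keeps mpt_lan_post_receive_buckets() callable directly (it is also invoked from the open and receive paths), so the conversion adds a thin wrapper whose only job is the container_of() step when the routine runs as deferred work. Sketch, hypothetical names:

#include <linux/workqueue.h>

struct foo_priv {
	struct delayed_work post_buckets_task;
	int buckets_posted;
};

/* still callable directly from other driver paths */
static void foo_post_buckets(struct foo_priv *priv)
{
	priv->buckets_posted++;
}

/* thin wrapper used only as the work handler */
static void foo_post_buckets_work(struct work_struct *work)
{
	foo_post_buckets(container_of(work, struct foo_priv,
				      post_buckets_task.work));
}

static void foo_setup(struct foo_priv *priv)
{
	INIT_DELAYED_WORK(&priv->post_buckets_task, foo_post_buckets_work);
	schedule_delayed_work(&priv->post_buckets_task, 0);	/* queue now */
	foo_post_buckets(priv);					/* or call directly */
}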
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b752a47..4f0c530 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2006,9 +2006,10 @@
*(Mutex LOCKED)
*/
static void
-mptsas_discovery_work(void * arg)
+mptsas_discovery_work(struct work_struct *work)
{
- struct mptsas_discovery_event *ev = arg;
+ struct mptsas_discovery_event *ev =
+ container_of(work, struct mptsas_discovery_event, work);
MPT_ADAPTER *ioc = ev->ioc;
mutex_lock(&ioc->sas_discovery_mutex);
@@ -2068,9 +2069,9 @@
* Work queue thread to clear the persistency table
*/
static void
-mptsas_persist_clear_table(void * arg)
+mptsas_persist_clear_table(struct work_struct *work)
{
- MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+ MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
}
@@ -2093,9 +2094,10 @@
* Work queue thread to handle SAS hotplug events
*/
static void
-mptsas_hotplug_work(void *arg)
+mptsas_hotplug_work(struct work_struct *work)
{
- struct mptsas_hotplug_event *ev = arg;
+ struct mptsas_hotplug_event *ev =
+ container_of(work, struct mptsas_hotplug_event, work);
MPT_ADAPTER *ioc = ev->ioc;
struct mptsas_phyinfo *phy_info;
struct sas_rphy *rphy;
@@ -2341,7 +2343,7 @@
break;
}
- INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+ INIT_WORK(&ev->work, mptsas_hotplug_work);
ev->ioc = ioc;
ev->handle = le16_to_cpu(sas_event_data->DevHandle);
ev->parent_handle =
@@ -2366,7 +2368,7 @@
* Persistent table is full.
*/
INIT_WORK(&ioc->sas_persist_task,
- mptsas_persist_clear_table, (void *)ioc);
+ mptsas_persist_clear_table);
schedule_work(&ioc->sas_persist_task);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
@@ -2395,7 +2397,7 @@
return;
}
- INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+ INIT_WORK(&ev->work, mptsas_hotplug_work);
ev->ioc = ioc;
ev->id = raid_event_data->VolumeID;
ev->event_type = MPTSAS_IGNORE_EVENT;
@@ -2474,7 +2476,7 @@
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (!ev)
return;
- INIT_WORK(&ev->work, mptsas_discovery_work, ev);
+ INIT_WORK(&ev->work, mptsas_discovery_work);
ev->ioc = ioc;
schedule_work(&ev->work);
};
@@ -2511,8 +2513,7 @@
break;
case MPI_EVENT_PERSISTENT_TABLE_FULL:
INIT_WORK(&ioc->sas_persist_task,
- mptsas_persist_clear_table,
- (void *)ioc);
+ mptsas_persist_clear_table);
schedule_work(&ioc->sas_persist_task);
break;
case MPI_EVENT_SAS_DISCOVERY:
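The mptsas hotplug and discovery events are allocated records with the work item embedded inside, so the record no longer needs to be passed to INIT_WORK(); the handler digs it back out and disposes of it. A sketch of that pattern, hypothetical names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct foo_event {
	struct work_struct work;
	u16 handle;
};

static void foo_event_work(struct work_struct *work)
{
	struct foo_event *ev = container_of(work, struct foo_event, work);

	/* ... act on ev->handle ... */
	kfree(ev);	/* the record owns itself once queued */
}

static int foo_queue_event(u16 handle)
{
	struct foo_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return -ENOMEM;
	INIT_WORK(&ev->work, foo_event_work);
	ev->handle = handle;
	schedule_work(&ev->work);
	return 0;
}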
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index e4cc3dd..f422c0d 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -646,9 +646,10 @@
int disk;
};
-static void mpt_work_wrapper(void *data)
+static void mpt_work_wrapper(struct work_struct *work)
{
- struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+ struct work_queue_wrapper *wqw =
+ container_of(work, struct work_queue_wrapper, work);
struct _MPT_SCSI_HOST *hd = wqw->hd;
struct Scsi_Host *shost = hd->ioc->sh;
struct scsi_device *sdev;
@@ -695,7 +696,7 @@
disk);
return;
}
- INIT_WORK(&wqw->work, mpt_work_wrapper, wqw);
+ INIT_WORK(&wqw->work, mpt_work_wrapper);
wqw->hd = hd;
wqw->disk = disk;
@@ -784,9 +785,10 @@
* renegotiate for a given target
*/
static void
-mptspi_dv_renegotiate_work(void *data)
+mptspi_dv_renegotiate_work(struct work_struct *work)
{
- struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+ struct work_queue_wrapper *wqw =
+ container_of(work, struct work_queue_wrapper, work);
struct _MPT_SCSI_HOST *hd = wqw->hd;
struct scsi_device *sdev;
@@ -804,7 +806,7 @@
if (!wqw)
return;
- INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
+ INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
wqw->hd = hd;
schedule_work(&wqw->work);
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 6413022..7fc7399 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -232,7 +232,7 @@
break;
}
- INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt);
+ INIT_WORK(&evt->work, drv->event);
queue_work(drv->event_queue, &evt->work);
return 1;
}
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index a235064..9e529d8 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -371,8 +371,10 @@
* new LCT and if the buffer for the LCT was too small sends a LCT NOTIFY
* again, otherwise send LCT NOTIFY to get informed on next LCT change.
*/
-static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work)
+static void i2o_exec_lct_modified(struct work_struct *_work)
{
+ struct i2o_exec_lct_notify_work *work =
+ container_of(_work, struct i2o_exec_lct_notify_work, work);
u32 change_ind = 0;
struct i2o_controller *c = work->c;
@@ -439,8 +441,7 @@
work->c = c;
- INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified,
- work);
+ INIT_WORK(&work->work, i2o_exec_lct_modified);
queue_work(i2o_exec_driver.event_queue, &work->work);
return 1;
}
@@ -460,13 +461,15 @@
/**
* i2o_exec_event - Event handling function
- * @evt: Event which occurs
+ * @work: Work item of the event that occurred
*
* Handles events sent by the Executive device. At the moment does not do
* anything useful.
*/
-static void i2o_exec_event(struct i2o_event *evt)
+static void i2o_exec_event(struct work_struct *work)
{
+ struct i2o_event *evt = container_of(work, struct i2o_event, work);
+
if (likely(evt->i2o_dev))
osm_debug("Event received from device: %d\n",
evt->i2o_dev->lct_data.tid);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index eaba81b..70ae002 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -419,16 +419,18 @@
/**
* i2o_block_delayed_request_fn - delayed request queue function
- * delayed_request: the delayed request with the queue to start
+ * @work: the delayed request with the queue to start
*
* If the request queue is stopped for a disk, and there is no open
* request, a new event is created, which calls this function to start
* the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
* be started again.
*/
-static void i2o_block_delayed_request_fn(void *delayed_request)
+static void i2o_block_delayed_request_fn(struct work_struct *work)
{
- struct i2o_block_delayed_request *dreq = delayed_request;
+ struct i2o_block_delayed_request *dreq =
+ container_of(work, struct i2o_block_delayed_request,
+ work.work);
struct request_queue *q = dreq->queue;
unsigned long flags;
@@ -538,8 +540,9 @@
return 1;
};
-static void i2o_block_event(struct i2o_event *evt)
+static void i2o_block_event(struct work_struct *work)
{
+ struct i2o_event *evt = container_of(work, struct i2o_event, work);
osm_debug("event received\n");
kfree(evt);
};
@@ -938,8 +941,8 @@
continue;
dreq->queue = q;
- INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
- dreq);
+ INIT_DELAYED_WORK(&dreq->work,
+ i2o_block_delayed_request_fn);
if (!queue_delayed_work(i2o_block_driver.event_queue,
&dreq->work,
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h
index 4fdaa5b..d9fdc95 100644
--- a/drivers/message/i2o/i2o_block.h
+++ b/drivers/message/i2o/i2o_block.h
@@ -96,7 +96,7 @@
/* I2O Block device delayed request */
struct i2o_block_delayed_request {
- struct work_struct work;
+ struct delayed_work work;
struct request_queue *queue;
};
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 1ba8754..2ab7add 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -33,9 +33,10 @@
spin_unlock_irqrestore(&fm->lock, flags);
}
-static void tifm_7xx1_remove_media(void *adapter)
+static void tifm_7xx1_remove_media(struct work_struct *work)
{
- struct tifm_adapter *fm = adapter;
+ struct tifm_adapter *fm =
+ container_of(work, struct tifm_adapter, media_remover);
unsigned long flags;
int cnt;
struct tifm_dev *sock;
@@ -169,9 +170,10 @@
return base_addr + ((sock_num + 1) << 10);
}
-static void tifm_7xx1_insert_media(void *adapter)
+static void tifm_7xx1_insert_media(struct work_struct *work)
{
- struct tifm_adapter *fm = adapter;
+ struct tifm_adapter *fm =
+ container_of(work, struct tifm_adapter, media_inserter);
unsigned long flags;
tifm_media_id media_id;
char *card_name = "xx";
@@ -261,7 +263,7 @@
spin_unlock_irqrestore(&fm->lock, flags);
flush_workqueue(fm->wq);
- tifm_7xx1_remove_media(fm);
+ tifm_7xx1_remove_media(&fm->media_remover);
pci_set_power_state(dev, PCI_D3hot);
pci_disable_device(dev);
@@ -328,8 +330,8 @@
if (!fm->sockets)
goto err_out_free;
- INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm);
- INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm);
+ INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
+ INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
fm->eject = tifm_7xx1_eject;
pci_set_drvdata(dev, fm);
@@ -384,7 +386,7 @@
flush_workqueue(fm->wq);
- tifm_7xx1_remove_media(fm);
+ tifm_7xx1_remove_media(&fm->media_remover);
writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
free_irq(dev->irq, fm);
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 9d19002..6f2a282 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -1419,18 +1419,16 @@
*/
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
- if (delay)
- mmc_schedule_delayed_work(&host->detect, delay);
- else
- mmc_schedule_work(&host->detect);
+ mmc_schedule_delayed_work(&host->detect, delay);
}
EXPORT_SYMBOL(mmc_detect_change);
-static void mmc_rescan(void *data)
+static void mmc_rescan(struct work_struct *work)
{
- struct mmc_host *host = data;
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, detect.work);
struct list_head *l, *n;
unsigned char power_mode;
@@ -1513,7 +1511,7 @@
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_LIST_HEAD(&host->cards);
- INIT_WORK(&host->detect, mmc_rescan, host);
+ INIT_DELAYED_WORK(&host->detect, mmc_rescan);
/*
* By default, hosts do not support SGIO or large requests.
@@ -1611,7 +1609,7 @@
*/
int mmc_resume_host(struct mmc_host *host)
{
- mmc_rescan(host);
+ mmc_rescan(&host->detect.work);
return 0;
}
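Since the handler's only argument is now the work item, call sites that used to run it synchronously (mmc_resume_host() above, and similar spots in the tifm and saa7134 hunks) pass the address of the embedded item; for a delayed_work that is its ".work" member. Sketch, hypothetical names:

#include <linux/workqueue.h>

struct foo_host {
	struct delayed_work detect;
};

static void foo_rescan(struct work_struct *work)
{
	struct foo_host *host = container_of(work, struct foo_host, detect.work);

	(void)host;	/* probe the bus, sketch only */
}

static int foo_resume_host(struct foo_host *host)
{
	/* synchronous call: hand the handler its own work member */
	foo_rescan(&host->detect.work);
	return 0;
}

static void foo_add_host(struct foo_host *host)
{
	INIT_DELAYED_WORK(&host->detect, foo_rescan);
	schedule_delayed_work(&host->detect, 0);
}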
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h
index cd5e0ab..149affe 100644
--- a/drivers/mmc/mmc.h
+++ b/drivers/mmc/mmc.h
@@ -20,6 +20,6 @@
void mmc_free_host_sysfs(struct mmc_host *host);
int mmc_schedule_work(struct work_struct *work);
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay);
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay);
void mmc_flush_scheduled_work(void);
#endif
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index ac53296..e334acd 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -321,17 +321,9 @@
static struct workqueue_struct *workqueue;
/*
- * Internal function. Schedule work in the MMC work queue.
- */
-int mmc_schedule_work(struct work_struct *work)
-{
- return queue_work(workqueue, work);
-}
-
-/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay)
{
return queue_delayed_work(workqueue, work, delay);
}
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c
index 0fdc55b..e846499 100644
--- a/drivers/mmc/tifm_sd.c
+++ b/drivers/mmc/tifm_sd.c
@@ -99,7 +99,7 @@
struct mmc_request *req;
struct work_struct cmd_handler;
- struct work_struct abort_handler;
+ struct delayed_work abort_handler;
wait_queue_head_t can_eject;
size_t written_blocks;
@@ -496,9 +496,9 @@
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_end_cmd(void *data)
+static void tifm_sd_end_cmd(struct work_struct *work)
{
- struct tifm_sd *host = data;
+ struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
struct tifm_dev *sock = host->dev;
struct mmc_host *mmc = tifm_get_drvdata(sock);
struct mmc_request *mrq;
@@ -608,9 +608,9 @@
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_end_cmd_nodma(void *data)
+static void tifm_sd_end_cmd_nodma(struct work_struct *work)
{
- struct tifm_sd *host = (struct tifm_sd*)data;
+ struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
struct tifm_dev *sock = host->dev;
struct mmc_host *mmc = tifm_get_drvdata(sock);
struct mmc_request *mrq;
@@ -661,11 +661,14 @@
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_abort(void *data)
+static void tifm_sd_abort(struct work_struct *work)
{
+ struct tifm_sd *host =
+ container_of(work, struct tifm_sd, abort_handler.work);
+
printk(KERN_ERR DRIVER_NAME
": card failed to respond for a long period of time");
- tifm_eject(((struct tifm_sd*)data)->dev);
+ tifm_eject(host->dev);
}
static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -762,9 +765,9 @@
.get_ro = tifm_sd_ro
};
-static void tifm_sd_register_host(void *data)
+static void tifm_sd_register_host(struct work_struct *work)
{
- struct tifm_sd *host = (struct tifm_sd*)data;
+ struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
struct tifm_dev *sock = host->dev;
struct mmc_host *mmc = tifm_get_drvdata(sock);
unsigned long flags;
@@ -772,8 +775,7 @@
spin_lock_irqsave(&sock->lock, flags);
host->flags |= HOST_REG;
PREPARE_WORK(&host->cmd_handler,
- no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
- data);
+ no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd);
spin_unlock_irqrestore(&sock->lock, flags);
dev_dbg(&sock->dev, "adding host\n");
mmc_add_host(mmc);
@@ -799,8 +801,8 @@
host->dev = sock;
host->clk_div = 61;
init_waitqueue_head(&host->can_eject);
- INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host);
- INIT_WORK(&host->abort_handler, tifm_sd_abort, host);
+ INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
+ INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
tifm_set_drvdata(sock, mmc);
sock->signal_irq = tifm_sd_signal_irq;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index d02ed51..931028f 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -594,7 +594,7 @@
u32 rx_config;
struct rtl_extra_stats xstats;
- struct work_struct thread;
+ struct delayed_work thread;
struct mii_if_info mii;
unsigned int regs_len;
@@ -636,8 +636,8 @@
static void rtl8139_set_rx_mode (struct net_device *dev);
static void __set_rx_mode (struct net_device *dev);
static void rtl8139_hw_start (struct net_device *dev);
-static void rtl8139_thread (void *_data);
-static void rtl8139_tx_timeout_task(void *_data);
+static void rtl8139_thread (struct work_struct *work);
+static void rtl8139_tx_timeout_task(struct work_struct *work);
static const struct ethtool_ops rtl8139_ethtool_ops;
/* write MMIO register, with flush */
@@ -1010,7 +1010,7 @@
(debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
spin_lock_init (&tp->lock);
spin_lock_init (&tp->rx_lock);
- INIT_WORK(&tp->thread, rtl8139_thread, dev);
+ INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
tp->mii.dev = dev;
tp->mii.mdio_read = mdio_read;
tp->mii.mdio_write = mdio_write;
@@ -1596,15 +1596,16 @@
RTL_R8 (Config1));
}
-static void rtl8139_thread (void *_data)
+static void rtl8139_thread (struct work_struct *work)
{
- struct net_device *dev = _data;
- struct rtl8139_private *tp = netdev_priv(dev);
+ struct rtl8139_private *tp =
+ container_of(work, struct rtl8139_private, thread.work);
+ struct net_device *dev = tp->mii.dev;
unsigned long thr_delay = next_tick;
if (tp->watchdog_fired) {
tp->watchdog_fired = 0;
- rtl8139_tx_timeout_task(_data);
+ rtl8139_tx_timeout_task(work);
} else if (rtnl_trylock()) {
rtl8139_thread_iter (dev, tp, tp->mmio_addr);
rtnl_unlock ();
@@ -1646,10 +1647,11 @@
/* XXX account for unsent Tx packets in tp->stats.tx_dropped */
}
-static void rtl8139_tx_timeout_task (void *_data)
+static void rtl8139_tx_timeout_task (struct work_struct *work)
{
- struct net_device *dev = _data;
- struct rtl8139_private *tp = netdev_priv(dev);
+ struct rtl8139_private *tp =
+ container_of(work, struct rtl8139_private, thread.work);
+ struct net_device *dev = tp->mii.dev;
void __iomem *ioaddr = tp->mmio_addr;
int i;
u8 tmp8;
@@ -1695,7 +1697,7 @@
struct rtl8139_private *tp = netdev_priv(dev);
if (!tp->have_thread) {
- INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev);
+ INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task);
schedule_delayed_work(&tp->thread, next_tick);
} else
tp->watchdog_fired = 1;
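8139too reuses one delayed_work for two handlers; because both recover the private structure from the same member, the thread handler can forward its work pointer straight to the timeout handler. Sketch, hypothetical names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct delayed_work thread;
	int watchdog_fired;
};

static void foo_tx_timeout_task(struct work_struct *work)
{
	struct foo_priv *tp = container_of(work, struct foo_priv, thread.work);

	tp->watchdog_fired = 0;
	/* ... recover the transmitter ... */
}

static void foo_thread(struct work_struct *work)
{
	struct foo_priv *tp = container_of(work, struct foo_priv, thread.work);

	if (tp->watchdog_fired)
		foo_tx_timeout_task(work);	/* same item, same container */
	else
		schedule_delayed_work(&tp->thread, HZ);
}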
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index fc2f1d1..5bacb75 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -4411,9 +4411,9 @@
}
static void
-bnx2_reset_task(void *data)
+bnx2_reset_task(struct work_struct *work)
{
- struct bnx2 *bp = data;
+ struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
if (!netif_running(bp->dev))
return;
@@ -5702,7 +5702,7 @@
bp->pdev = pdev;
spin_lock_init(&bp->phy_lock);
- INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
+ INIT_WORK(&bp->reset_task, bnx2_reset_task);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index fd2cc13..c812648 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4066,9 +4066,9 @@
return 0;
}
-static void cas_reset_task(void *data)
+static void cas_reset_task(struct work_struct *work)
{
- struct cas *cp = (struct cas *) data;
+ struct cas *cp = container_of(work, struct cas, reset_task);
#if 0
int pending = atomic_read(&cp->reset_task_pending);
#else
@@ -5006,7 +5006,7 @@
atomic_set(&cp->reset_task_pending_spare, 0);
atomic_set(&cp->reset_task_pending_mtu, 0);
#endif
- INIT_WORK(&cp->reset_task, cas_reset_task, cp);
+ INIT_WORK(&cp->reset_task, cas_reset_task);
/* Default link parameters */
if (link_mode >= 0 && link_mode <= 6)
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index b265941..74758d2 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -279,7 +279,7 @@
struct petp *tp;
struct port_info port[MAX_NPORTS];
- struct work_struct stats_update_task;
+ struct delayed_work stats_update_task;
struct timer_list stats_update_timer;
spinlock_t tpi_lock;
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
index 60901f2..cf91434 100644
--- a/drivers/net/chelsio/cphy.h
+++ b/drivers/net/chelsio/cphy.h
@@ -91,7 +91,7 @@
int state; /* Link status state machine */
adapter_t *adapter; /* associated adapter */
- struct work_struct phy_update;
+ struct delayed_work phy_update;
u16 bmsr;
int count;
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 53bec673..de48ead 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -953,10 +953,11 @@
* Periodic accumulation of MAC statistics. This is used only if the MAC
* does not have any other way to prevent stats counter overflow.
*/
-static void mac_stats_task(void *data)
+static void mac_stats_task(struct work_struct *work)
{
int i;
- struct adapter *adapter = data;
+ struct adapter *adapter =
+ container_of(work, struct adapter, stats_update_task.work);
for_each_port(adapter, i) {
struct port_info *p = &adapter->port[i];
@@ -977,9 +978,10 @@
/*
* Processes elmer0 external interrupts in process context.
*/
-static void ext_intr_task(void *data)
+static void ext_intr_task(struct work_struct *work)
{
- struct adapter *adapter = data;
+ struct adapter *adapter =
+ container_of(work, struct adapter, ext_intr_handler_task);
t1_elmer0_ext_intr_handler(adapter);
@@ -1113,9 +1115,9 @@
spin_lock_init(&adapter->mac_lock);
INIT_WORK(&adapter->ext_intr_handler_task,
- ext_intr_task, adapter);
- INIT_WORK(&adapter->stats_update_task, mac_stats_task,
- adapter);
+ ext_intr_task);
+ INIT_DELAYED_WORK(&adapter->stats_update_task,
+ mac_stats_task);
pci_set_drvdata(pdev, netdev);
}
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index 0b90014..c7731b6 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -93,9 +93,11 @@
return cphy_cause_link_change;
}
-static void my3216_poll(void *arg)
+static void my3216_poll(struct work_struct *work)
{
- my3126_interrupt_handler(arg);
+ struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
+
+ my3126_interrupt_handler(cphy);
}
static int my3126_set_loopback(struct cphy *cphy, int on)
@@ -171,7 +173,7 @@
if (cphy)
cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
- INIT_WORK(&cphy->phy_update, my3216_poll, cphy);
+ INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
cphy->bmsr = 0;
return (cphy);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3a8df47..03bf164 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2102,9 +2102,10 @@
schedule_work(&nic->tx_timeout_task);
}
-static void e100_tx_timeout_task(struct net_device *netdev)
+static void e100_tx_timeout_task(struct work_struct *work)
{
- struct nic *nic = netdev_priv(netdev);
+ struct nic *nic = container_of(work, struct nic, tx_timeout_task);
+ struct net_device *netdev = nic->netdev;
DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
readb(&nic->csr->scb.status));
@@ -2637,8 +2638,7 @@
nic->blink_timer.function = e100_blink_led;
nic->blink_timer.data = (unsigned long)nic;
- INIT_WORK(&nic->tx_timeout_task,
- (void (*)(void *))e100_tx_timeout_task, netdev);
+ INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
if((err = e100_alloc(nic))) {
DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 32dde0a..73f3a85 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -190,7 +190,7 @@
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
-static void e1000_reset_task(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
struct sk_buff *skb);
@@ -914,8 +914,7 @@
adapter->phy_info_timer.function = &e1000_update_phy_info;
adapter->phy_info_timer.data = (unsigned long) adapter;
- INIT_WORK(&adapter->reset_task,
- (void (*)(void *))e1000_reset_task, netdev);
+ INIT_WORK(&adapter->reset_task, e1000_reset_task);
e1000_check_options(adapter);
@@ -3306,9 +3305,10 @@
}
static void
-e1000_reset_task(struct net_device *netdev)
+e1000_reset_task(struct work_struct *work)
{
- struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_adapter *adapter =
+ container_of(work, struct e1000_adapter, reset_task);
e1000_reinit_locked(adapter);
}
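The pattern for the NIC drivers above (e100, e1000) and below: the old argument was the net_device, so the handler now recovers the adapter from the work item and reaches the device through a pointer the adapter stores at probe time. Sketch, hypothetical names:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct foo_adapter {
	struct work_struct reset_task;
	struct net_device *netdev;	/* filled in at probe time */
};

static void foo_reset_task(struct work_struct *work)
{
	struct foo_adapter *adapter =
		container_of(work, struct foo_adapter, reset_task);
	struct net_device *netdev = adapter->netdev;

	if (!netif_running(netdev))
		return;
	/* ... reinitialise the hardware ... */
}

static void foo_probe_init(struct foo_adapter *adapter, struct net_device *netdev)
{
	adapter->netdev = netdev;
	INIT_WORK(&adapter->reset_task, foo_reset_task);
}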
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 6ad6961..83fa32f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2224,11 +2224,12 @@
return ret;
}
-static void ehea_reset_port(void *data)
+static void ehea_reset_port(struct work_struct *work)
{
int ret;
- struct net_device *dev = data;
- struct ehea_port *port = netdev_priv(dev);
+ struct ehea_port *port =
+ container_of(work, struct ehea_port, reset_task);
+ struct net_device *dev = port->netdev;
port->resets++;
down(&port->port_lock);
@@ -2379,7 +2380,7 @@
dev->tx_timeout = &ehea_tx_watchdog;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
- INIT_WORK(&port->reset_task, ehea_reset_port, dev);
+ INIT_WORK(&port->reset_task, ehea_reset_port);
ehea_set_ethtool_ops(dev);
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 1ed9ccc..3c33d6f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -168,8 +168,9 @@
int magic;
struct pardevice *pdev;
+ struct net_device *dev;
unsigned int work_running;
- struct work_struct run_work;
+ struct delayed_work run_work;
unsigned int modem;
unsigned int bitrate;
unsigned char stat;
@@ -659,16 +660,18 @@
#define GETTICK(x)
#endif /* __i386__ */
-static void epp_bh(struct net_device *dev)
+static void epp_bh(struct work_struct *work)
{
+ struct net_device *dev;
struct baycom_state *bc;
struct parport *pp;
unsigned char stat;
unsigned char tmp[2];
unsigned int time1 = 0, time2 = 0, time3 = 0;
int cnt, cnt2;
-
- bc = netdev_priv(dev);
+
+ bc = container_of(work, struct baycom_state, run_work.work);
+ dev = bc->dev;
if (!bc->work_running)
return;
baycom_int_freq(bc);
@@ -889,7 +892,7 @@
return -EBUSY;
}
dev->irq = /*pp->irq*/ 0;
- INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev);
+ INIT_DELAYED_WORK(&bc->run_work, epp_bh);
bc->work_running = 1;
bc->modem = EPP_CONVENTIONAL;
if (eppconfig(bc))
@@ -1213,6 +1216,7 @@
/*
* initialize part of the baycom_state struct
*/
+ bc->dev = dev;
bc->magic = BAYCOM_MAGIC;
bc->cfg.fclk = 19666600;
bc->cfg.bps = 9600;
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index b32c52e..f0c61f3 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -560,9 +560,9 @@
return ret;
}
-static void mcs_speed_work(void *arg)
+static void mcs_speed_work(struct work_struct *work)
{
- struct mcs_cb *mcs = arg;
+ struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);
struct net_device *netdev = mcs->netdev;
mcs_speed_change(mcs);
@@ -927,7 +927,7 @@
irda_qos_bits_to_value(&mcs->qos);
/* Speed change work initialisation*/
- INIT_WORK(&mcs->work, mcs_speed_work, mcs);
+ INIT_WORK(&mcs->work, mcs_speed_work);
/* Override the network functions we need to use */
ndev->hard_start_xmit = mcs_hard_xmit;
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 9fa294a..2a57bc6 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -22,7 +22,7 @@
struct sir_fsm {
struct semaphore sem;
- struct work_struct work;
+ struct delayed_work work;
unsigned state, substate;
int param;
int result;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 3b5854d..17b0c3a 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -100,9 +100,9 @@
* Both must be unlocked/restarted on completion - but only on final exit.
*/
-static void sirdev_config_fsm(void *data)
+static void sirdev_config_fsm(struct work_struct *work)
{
- struct sir_dev *dev = data;
+ struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
struct sir_fsm *fsm = &dev->fsm;
int next_state;
int ret = -1;
@@ -309,8 +309,8 @@
fsm->param = param;
fsm->result = 0;
- INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
- queue_work(irda_sir_wq, &fsm->work);
+ INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
+ queue_delayed_work(irda_sir_wq, &fsm->work, 0);
return 0;
}
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 2284e2c..d6f4f18 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -166,7 +166,7 @@
struct veth_lpar_connection {
HvLpIndex remote_lp;
- struct work_struct statemachine_wq;
+ struct delayed_work statemachine_wq;
struct veth_msg *msgs;
int num_events;
struct veth_cap_data local_caps;
@@ -456,7 +456,7 @@
static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
{
- schedule_work(&cnx->statemachine_wq);
+ schedule_delayed_work(&cnx->statemachine_wq, 0);
}
static void veth_take_cap(struct veth_lpar_connection *cnx,
@@ -638,9 +638,11 @@
}
/* FIXME: The gotos here are a bit dubious */
-static void veth_statemachine(void *p)
+static void veth_statemachine(struct work_struct *work)
{
- struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p;
+ struct veth_lpar_connection *cnx =
+ container_of(work, struct veth_lpar_connection,
+ statemachine_wq.work);
int rlp = cnx->remote_lp;
int rc;
@@ -827,7 +829,7 @@
cnx->remote_lp = rlp;
spin_lock_init(&cnx->lock);
- INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx);
+ INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine);
init_timer(&cnx->ack_timer);
cnx->ack_timer.function = veth_timed_ack;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 7b12721..e628126 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -106,7 +106,7 @@
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
void ixgb_set_ethtool_ops(struct net_device *netdev);
static void ixgb_tx_timeout(struct net_device *dev);
-static void ixgb_tx_timeout_task(struct net_device *dev);
+static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
@@ -489,8 +489,7 @@
adapter->watchdog_timer.function = &ixgb_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
- INIT_WORK(&adapter->tx_timeout_task,
- (void (*)(void *))ixgb_tx_timeout_task, netdev);
+ INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev)))
@@ -1493,9 +1492,10 @@
}
static void
-ixgb_tx_timeout_task(struct net_device *netdev)
+ixgb_tx_timeout_task(struct work_struct *work)
{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_adapter *adapter =
+ container_of(work, struct ixgb_adapter, tx_timeout_task);
adapter->tx_timeout_count++;
ixgb_down(adapter, TRUE);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 36350e6..38df428 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2615,9 +2615,10 @@
* This watchdog is used to check whether the board has suffered
* from a parity error and needs to be recovered.
*/
-static void myri10ge_watchdog(void *arg)
+static void myri10ge_watchdog(struct work_struct *work)
{
- struct myri10ge_priv *mgp = arg;
+ struct myri10ge_priv *mgp =
+ container_of(work, struct myri10ge_priv, watchdog_work);
u32 reboot;
int status;
u16 cmd, vendor;
@@ -2887,7 +2888,7 @@
(unsigned long)mgp);
SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
- INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
+ INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
status = register_netdev(netdev);
if (status != 0) {
dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index d925053..9c588af 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -714,6 +714,7 @@
spinlock_t lock;
struct work_struct watchdog_task;
struct work_struct tx_timeout_task;
+ struct net_device *netdev;
struct timer_list watchdog_timer;
u32 curr_window;
@@ -921,7 +922,7 @@
struct netxen_port *port);
int netxen_nic_rx_has_work(struct netxen_adapter *adapter);
int netxen_nic_tx_has_work(struct netxen_adapter *adapter);
-void netxen_watchdog_task(unsigned long v);
+void netxen_watchdog_task(struct work_struct *work);
void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
u32 ringid);
void netxen_process_cmd_ring(unsigned long data);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 0dca029..eae1823 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -710,12 +710,13 @@
return rv;
}
-void netxen_watchdog_task(unsigned long v)
+void netxen_watchdog_task(struct work_struct *work)
{
int port_num;
struct netxen_port *port;
struct net_device *netdev;
- struct netxen_adapter *adapter = (struct netxen_adapter *)v;
+ struct netxen_adapter *adapter =
+ container_of(work, struct netxen_adapter, watchdog_task);
if (netxen_nic_check_temp(adapter))
return;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 1cb662d..df0bb36 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -64,7 +64,7 @@
static int netxen_nic_close(struct net_device *netdev);
static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
static void netxen_tx_timeout(struct net_device *netdev);
-static void netxen_tx_timeout_task(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
static void netxen_watchdog(unsigned long);
static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
static int netxen_nic_ioctl(struct net_device *netdev,
@@ -274,8 +274,7 @@
adapter->ahw.xg_linkup = 0;
adapter->watchdog_timer.function = &netxen_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
- INIT_WORK(&adapter->watchdog_task,
- (void (*)(void *))netxen_watchdog_task, adapter);
+ INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
adapter->ahw.pdev = pdev;
adapter->proc_cmd_buf_counter = 0;
pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id);
@@ -379,8 +378,8 @@
dev_addr);
}
}
- INIT_WORK(&adapter->tx_timeout_task,
- (void (*)(void *))netxen_tx_timeout_task, netdev);
+ adapter->netdev = netdev;
+ INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
netif_carrier_off(netdev);
netif_stop_queue(netdev);
@@ -938,18 +937,20 @@
schedule_work(&adapter->tx_timeout_task);
}
-static void netxen_tx_timeout_task(struct net_device *netdev)
+static void netxen_tx_timeout_task(struct work_struct *work)
{
- struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
+ struct netxen_adapter *adapter =
+ container_of(work, struct netxen_adapter, tx_timeout_task);
+ struct net_device *netdev = adapter->netdev;
unsigned long flags;
printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
netxen_nic_driver_name, netdev->name);
- spin_lock_irqsave(&port->adapter->lock, flags);
+ spin_lock_irqsave(&adapter->lock, flags);
netxen_nic_close(netdev);
netxen_nic_open(netdev);
- spin_unlock_irqrestore(&port->adapter->lock, flags);
+ spin_unlock_irqrestore(&adapter->lock, flags);
netdev->trans_start = jiffies;
netif_wake_queue(netdev);
}
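
Several of these handlers used to take the struct net_device directly; once the work item is embedded in the adapter, the netdev has to be reachable from the adapter instead, which is why the netxen change adds a netdev back-pointer filled in at probe time. Roughly, with a hypothetical adapter type:

	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	struct my_adapter {
		struct net_device *netdev;	/* back-pointer added for the handler */
		struct work_struct tx_timeout_task;
	};

	static void my_tx_timeout_task(struct work_struct *work)
	{
		struct my_adapter *adapter =
			container_of(work, struct my_adapter, tx_timeout_task);
		struct net_device *netdev = adapter->netdev;

		netif_wake_queue(netdev);	/* stand-in for the real recovery path */
	}

	static void my_probe_init(struct my_adapter *adapter, struct net_device *netdev)
	{
		adapter->netdev = netdev;	/* must be set before the work can run */
		INIT_WORK(&adapter->tx_timeout_task, my_tx_timeout_task);
	}
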
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index b0127c7..312e0e3 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -427,6 +427,7 @@
u8 __iomem *base;
struct pci_dev *pci_dev;
+ struct net_device *ndev;
#ifdef NS83820_VLAN_ACCEL_SUPPORT
struct vlan_group *vlgrp;
@@ -631,10 +632,10 @@
}
/* REFILL */
-static inline void queue_refill(void *_dev)
+static inline void queue_refill(struct work_struct *work)
{
- struct net_device *ndev = _dev;
- struct ns83820 *dev = PRIV(ndev);
+ struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
+ struct net_device *ndev = dev->ndev;
rx_refill(ndev, GFP_KERNEL);
if (dev->rx_info.up)
@@ -1841,6 +1842,7 @@
ndev = alloc_etherdev(sizeof(struct ns83820));
dev = PRIV(ndev);
+ dev->ndev = ndev;
err = -ENOMEM;
if (!dev)
goto out;
@@ -1853,7 +1855,7 @@
SET_MODULE_OWNER(ndev);
SET_NETDEV_DEV(ndev, &pci_dev->dev);
- INIT_WORK(&dev->tq_refill, queue_refill, ndev);
+ INIT_WORK(&dev->tq_refill, queue_refill);
tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
err = pci_enable_device(pci_dev);
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 69813406..8478dca 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -332,6 +332,7 @@
*/
typedef struct local_info_t {
+ struct net_device *dev;
struct pcmcia_device *p_dev;
dev_node_t node;
struct net_device_stats stats;
@@ -353,7 +354,7 @@
*/
static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void do_tx_timeout(struct net_device *dev);
-static void xirc2ps_tx_timeout_task(void *data);
+static void xirc2ps_tx_timeout_task(struct work_struct *work);
static struct net_device_stats *do_get_stats(struct net_device *dev);
static void set_addresses(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
@@ -567,6 +568,7 @@
if (!dev)
return -ENOMEM;
local = netdev_priv(dev);
+ local->dev = dev;
local->p_dev = link;
link->priv = dev;
@@ -591,7 +593,7 @@
#ifdef HAVE_TX_TIMEOUT
dev->tx_timeout = do_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
- INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev);
+ INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task);
#endif
return xirc2ps_config(link);
@@ -1324,9 +1326,11 @@
/*====================================================================*/
static void
-xirc2ps_tx_timeout_task(void *data)
+xirc2ps_tx_timeout_task(struct work_struct *work)
{
- struct net_device *dev = data;
+ local_info_t *local =
+ container_of(work, local_info_t, tx_timeout_task);
+ struct net_device *dev = local->dev;
/* reset the card */
do_reset(dev,1);
dev->trans_start = jiffies;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 88237bd..4044bb1 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -397,7 +397,7 @@
EXPORT_SYMBOL(phy_start_aneg);
-static void phy_change(void *data);
+static void phy_change(struct work_struct *work);
static void phy_timer(unsigned long data);
/* phy_start_machine:
@@ -555,7 +555,7 @@
{
int err = 0;
- INIT_WORK(&phydev->phy_queue, phy_change, phydev);
+ INIT_WORK(&phydev->phy_queue, phy_change);
if (request_irq(phydev->irq, phy_interrupt,
IRQF_SHARED,
@@ -598,10 +598,11 @@
/* Scheduled by the phy_interrupt/timer to handle PHY changes */
-static void phy_change(void *data)
+static void phy_change(struct work_struct *work)
{
int err;
- struct phy_device *phydev = data;
+ struct phy_device *phydev =
+ container_of(work, struct phy_device, phy_queue);
err = phy_disable_interrupts(phydev);
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 71afb27..6bb085f 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -138,9 +138,9 @@
#define PLIP_NIBBLE_WAIT 3000
/* Bottom halves */
-static void plip_kick_bh(struct net_device *dev);
-static void plip_bh(struct net_device *dev);
-static void plip_timer_bh(struct net_device *dev);
+static void plip_kick_bh(struct work_struct *work);
+static void plip_bh(struct work_struct *work);
+static void plip_timer_bh(struct work_struct *work);
/* Interrupt handler */
static void plip_interrupt(int irq, void *dev_id);
@@ -207,9 +207,10 @@
struct net_local {
struct net_device_stats enet_stats;
+ struct net_device *dev;
struct work_struct immediate;
- struct work_struct deferred;
- struct work_struct timer;
+ struct delayed_work deferred;
+ struct delayed_work timer;
struct plip_local snd_data;
struct plip_local rcv_data;
struct pardevice *pardev;
@@ -306,11 +307,11 @@
nl->nibble = PLIP_NIBBLE_WAIT;
/* Initialize task queue structures */
- INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev);
- INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev);
+ INIT_WORK(&nl->immediate, plip_bh);
+ INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
if (dev->irq == -1)
- INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev);
+ INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
spin_lock_init(&nl->lock);
}
@@ -319,9 +320,10 @@
This routine is kicked by do_timer().
Request `plip_bh' to be invoked. */
static void
-plip_kick_bh(struct net_device *dev)
+plip_kick_bh(struct work_struct *work)
{
- struct net_local *nl = netdev_priv(dev);
+ struct net_local *nl =
+ container_of(work, struct net_local, deferred.work);
if (nl->is_deferred)
schedule_work(&nl->immediate);
@@ -362,9 +364,9 @@
/* Bottom half handler of PLIP. */
static void
-plip_bh(struct net_device *dev)
+plip_bh(struct work_struct *work)
{
- struct net_local *nl = netdev_priv(dev);
+ struct net_local *nl = container_of(work, struct net_local, immediate);
struct plip_local *snd = &nl->snd_data;
struct plip_local *rcv = &nl->rcv_data;
plip_func f;
@@ -372,20 +374,21 @@
nl->is_deferred = 0;
f = connection_state_table[nl->connection];
- if ((r = (*f)(dev, nl, snd, rcv)) != OK
- && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
+ if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
+ && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
nl->is_deferred = 1;
schedule_delayed_work(&nl->deferred, 1);
}
}
static void
-plip_timer_bh(struct net_device *dev)
+plip_timer_bh(struct work_struct *work)
{
- struct net_local *nl = netdev_priv(dev);
+ struct net_local *nl =
+ container_of(work, struct net_local, timer.work);
if (!(atomic_read (&nl->kill_timer))) {
- plip_interrupt (-1, dev);
+ plip_interrupt (-1, nl->dev);
schedule_delayed_work(&nl->timer, 1);
}
@@ -1284,6 +1287,7 @@
}
nl = netdev_priv(dev);
+ nl->dev = dev;
nl->pardev = parport_register_device(port, name, plip_preempt,
plip_wakeup, plip_interrupt,
0, dev);
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index ec640f6..d79d141 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2008,7 +2008,7 @@
"%s: Another function issued a reset to the "
"chip. ISR value = %x.\n", ndev->name, value);
}
- queue_work(qdev->workqueue, &qdev->reset_work);
+ queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
spin_unlock(&qdev->adapter_lock);
} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
ql_disable_interrupts(qdev);
@@ -3182,11 +3182,13 @@
/*
* Wake up the worker to process this event.
*/
- queue_work(qdev->workqueue, &qdev->tx_timeout_work);
+ queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}
-static void ql_reset_work(struct ql3_adapter *qdev)
+static void ql_reset_work(struct work_struct *work)
{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, reset_work.work);
struct net_device *ndev = qdev->ndev;
u32 value;
struct ql_tx_buf_cb *tx_cb;
@@ -3278,9 +3280,12 @@
}
}
-static void ql_tx_timeout_work(struct ql3_adapter *qdev)
+static void ql_tx_timeout_work(struct work_struct *work)
{
- ql_cycle_adapter(qdev,QL_DO_RESET);
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, tx_timeout_work.work);
+
+ ql_cycle_adapter(qdev, QL_DO_RESET);
}
static void ql_get_board_info(struct ql3_adapter *qdev)
@@ -3459,9 +3464,8 @@
netif_stop_queue(ndev);
qdev->workqueue = create_singlethread_workqueue(ndev->name);
- INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
- INIT_WORK(&qdev->tx_timeout_work,
- (void (*)(void *))ql_tx_timeout_work, qdev);
+ INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
+ INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
init_timer(&qdev->adapter_timer);
qdev->adapter_timer.function = ql3xxx_timer;
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 65da2c0..ea94de7 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -1186,8 +1186,8 @@
u32 numPorts;
struct net_device_stats stats;
struct workqueue_struct *workqueue;
- struct work_struct reset_work;
- struct work_struct tx_timeout_work;
+ struct delayed_work reset_work;
+ struct delayed_work tx_timeout_work;
u32 max_frame_size;
};
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 45d3ca4..85a392f 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -424,6 +424,7 @@
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev; /* Index of PCI device */
+ struct net_device *dev;
struct net_device_stats stats; /* statistics of net device */
spinlock_t lock; /* spin lock flag */
u32 msg_enable;
@@ -455,7 +456,7 @@
void (*phy_reset_enable)(void __iomem *);
unsigned int (*phy_reset_pending)(void __iomem *);
unsigned int (*link_ok)(void __iomem *);
- struct work_struct task;
+ struct delayed_work task;
unsigned wol_enabled : 1;
};
@@ -1510,6 +1511,7 @@
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
tp = netdev_priv(dev);
+ tp->dev = dev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
/* enable device (incl. PCI PM wakeup and hotplug setup) */
@@ -1782,7 +1784,7 @@
if (retval < 0)
goto err_free_rx;
- INIT_WORK(&tp->task, NULL, dev);
+ INIT_DELAYED_WORK(&tp->task, NULL);
rtl8169_hw_start(dev);
@@ -2105,11 +2107,11 @@
tp->cur_tx = tp->dirty_tx = 0;
}
-static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
+static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
{
struct rtl8169_private *tp = netdev_priv(dev);
- PREPARE_WORK(&tp->task, task, dev);
+ PREPARE_DELAYED_WORK(&tp->task, task);
schedule_delayed_work(&tp->task, 4);
}
@@ -2128,9 +2130,11 @@
netif_poll_enable(dev);
}
-static void rtl8169_reinit_task(void *_data)
+static void rtl8169_reinit_task(struct work_struct *work)
{
- struct net_device *dev = _data;
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, task.work);
+ struct net_device *dev = tp->dev;
int ret;
if (netif_running(dev)) {
@@ -2153,10 +2157,11 @@
}
}
-static void rtl8169_reset_task(void *_data)
+static void rtl8169_reset_task(struct work_struct *work)
{
- struct net_device *dev = _data;
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, task.work);
+ struct net_device *dev = tp->dev;
if (!netif_running(dev))
return;
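
The r8169 helper also shows that handlers are now passed around as work_func_t (a function taking struct work_struct *) rather than void (*)(void *); PREPARE_DELAYED_WORK() only rebinds the function, since the data is implied by the work item itself. A small illustration under the same assumptions, with made-up names:

	#include <linux/workqueue.h>

	struct my_nic {
		struct delayed_work task;
	};

	static void my_reset_task(struct work_struct *work)
	{
		struct my_nic *nic = container_of(work, struct my_nic, task.work);

		(void)nic;			/* reset logic would go here */
	}

	static void my_schedule(struct my_nic *nic, work_func_t handler)
	{
		/* Rebind the handler; the delayed_work already identifies the nic. */
		PREPARE_DELAYED_WORK(&nic->task, handler);
		schedule_delayed_work(&nic->task, 4);	/* a few jiffies, as in the driver */
	}
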
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 33569ec..250cdbe 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -5872,9 +5872,9 @@
* Description: Sets the link status for the adapter
*/
-static void s2io_set_link(unsigned long data)
+static void s2io_set_link(struct work_struct *work)
{
- nic_t *nic = (nic_t *) data;
+ nic_t *nic = container_of(work, nic_t, set_link_task);
struct net_device *dev = nic->dev;
XENA_dev_config_t __iomem *bar0 = nic->bar0;
register u64 val64;
@@ -6379,10 +6379,10 @@
* spin lock.
*/
-static void s2io_restart_nic(unsigned long data)
+static void s2io_restart_nic(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *) data;
- nic_t *sp = dev->priv;
+ nic_t *sp = container_of(work, nic_t, rst_timer_task);
+ struct net_device *dev = sp->dev;
s2io_card_down(sp);
if (s2io_card_up(sp)) {
@@ -6992,10 +6992,8 @@
dev->tx_timeout = &s2io_tx_watchdog;
dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
- INIT_WORK(&sp->rst_timer_task,
- (void (*)(void *)) s2io_restart_nic, dev);
- INIT_WORK(&sp->set_link_task,
- (void (*)(void *)) s2io_set_link, sp);
+ INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
+ INIT_WORK(&sp->set_link_task, s2io_set_link);
pci_save_state(sp->pdev);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 12b719f..3b0bafd 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -1000,7 +1000,7 @@
static irqreturn_t s2io_isr(int irq, void *dev_id);
static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
static const struct ethtool_ops netdev_ethtool_ops;
-static void s2io_set_link(unsigned long data);
+static void s2io_set_link(struct work_struct *work);
static int s2io_set_swapper(nic_t * sp);
static void s2io_card_down(nic_t *nic);
static int s2io_card_up(nic_t *nic);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index aaba458..b70ed79 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -280,6 +280,7 @@
struct sis190_private {
void __iomem *mmio_addr;
struct pci_dev *pci_dev;
+ struct net_device *dev;
struct net_device_stats stats;
spinlock_t lock;
u32 rx_buf_sz;
@@ -897,10 +898,11 @@
netif_start_queue(dev);
}
-static void sis190_phy_task(void * data)
+static void sis190_phy_task(struct work_struct *work)
{
- struct net_device *dev = data;
- struct sis190_private *tp = netdev_priv(dev);
+ struct sis190_private *tp =
+ container_of(work, struct sis190_private, phy_task);
+ struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->mmio_addr;
int phy_id = tp->mii_if.phy_id;
u16 val;
@@ -1047,7 +1049,7 @@
if (rc < 0)
goto err_free_rx_1;
- INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+ INIT_WORK(&tp->phy_task, sis190_phy_task);
sis190_request_timer(dev);
@@ -1436,6 +1438,7 @@
SET_NETDEV_DEV(dev, &pdev->dev);
tp = netdev_priv(dev);
+ tp->dev = dev;
tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
rc = pci_enable_device(pdev);
@@ -1798,7 +1801,7 @@
sis190_init_rxfilter(dev);
- INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+ INIT_WORK(&tp->phy_task, sis190_phy_task);
dev->open = sis190_open;
dev->stop = sis190_close;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 5513907..b60f045 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -1327,10 +1327,11 @@
* Since internal PHY is wired to a level triggered pin, can't
* get an interrupt when carrier is detected.
*/
-static void xm_link_timer(void *arg)
+static void xm_link_timer(struct work_struct *work)
{
- struct net_device *dev = arg;
- struct skge_port *skge = netdev_priv(arg);
+ struct skge_port *skge =
+ container_of(work, struct skge_port, link_thread.work);
+ struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
@@ -3072,9 +3073,9 @@
* because accessing phy registers requires spin wait which might
* cause excess interrupt latency.
*/
-static void skge_extirq(void *arg)
+static void skge_extirq(struct work_struct *work)
{
- struct skge_hw *hw = arg;
+ struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
int port;
mutex_lock(&hw->phy_mutex);
@@ -3456,7 +3457,7 @@
skge->port = port;
/* Only used for Genesis XMAC */
- INIT_WORK(&skge->link_thread, xm_link_timer, dev);
+ INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
if (hw->chip_id != CHIP_ID_GENESIS) {
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
@@ -3543,7 +3544,7 @@
hw->pdev = pdev;
mutex_init(&hw->phy_mutex);
- INIT_WORK(&hw->phy_work, skge_extirq, hw);
+ INIT_WORK(&hw->phy_work, skge_extirq);
spin_lock_init(&hw->hw_lock);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 537c0aa..23e5275 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2456,7 +2456,7 @@
struct net_device_stats net_stats;
- struct work_struct link_thread;
+ struct delayed_work link_thread;
enum pause_control flow_control;
enum pause_status flow_status;
u8 rx_csum;
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 95b6478..e62a958 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -210,6 +210,7 @@
/* work queue */
struct work_struct phy_configure;
+ struct net_device *dev;
int work_pending;
spinlock_t lock;
@@ -1114,10 +1115,11 @@
* of autonegotiation.) If the RPC ANEG bit is cleared, the selection
* is controlled by the RPC SPEED and RPC DPLX bits.
*/
-static void smc_phy_configure(void *data)
+static void smc_phy_configure(struct work_struct *work)
{
- struct net_device *dev = data;
- struct smc_local *lp = netdev_priv(dev);
+ struct smc_local *lp =
+ container_of(work, struct smc_local, phy_configure);
+ struct net_device *dev = lp->dev;
void __iomem *ioaddr = lp->base;
int phyaddr = lp->mii.phy_id;
int my_phy_caps; /* My PHY capabilities */
@@ -1592,7 +1594,7 @@
/* Configure the PHY, initialize the link state */
if (lp->phy_type != 0)
- smc_phy_configure(dev);
+ smc_phy_configure(&lp->phy_configure);
else {
spin_lock_irq(&lp->lock);
smc_10bt_check_media(dev, 1);
@@ -1972,7 +1974,8 @@
#endif
tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
- INIT_WORK(&lp->phy_configure, smc_phy_configure, dev);
+ INIT_WORK(&lp->phy_configure, smc_phy_configure);
+ lp->dev = dev;
lp->mii.phy_id_mask = 0x1f;
lp->mii.reg_num_mask = 0x1f;
lp->mii.force_media = 0;
@@ -2322,7 +2325,7 @@
smc_reset(ndev);
smc_enable(ndev);
if (lp->phy_type != 0)
- smc_phy_configure(ndev);
+ smc_phy_configure(&lp->phy_configure);
netif_device_attach(ndev);
}
}
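
smc91x also invokes its handler synchronously in a couple of places; after the conversion those call sites pass the address of the embedded work item so the handler's container_of() still resolves correctly. Sketch only, with hypothetical names:

	#include <linux/workqueue.h>

	struct my_local {
		struct work_struct phy_configure;
		int phy_type;
	};

	static void my_phy_configure(struct work_struct *work)
	{
		struct my_local *lp = container_of(work, struct my_local, phy_configure);

		lp->phy_type = 1;		/* stand-in for the real MII setup */
	}

	static void my_open(struct my_local *lp)
	{
		INIT_WORK(&lp->phy_configure, my_phy_configure);
		/* Direct call: hand over the work item, not the device pointer. */
		my_phy_configure(&lp->phy_configure);
	}
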
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 13e0a43..ebb6aa3 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1939,10 +1939,11 @@
* called as task when tx hangs, resets interface (if interface is up)
*/
static void
-spider_net_tx_timeout_task(void *data)
+spider_net_tx_timeout_task(struct work_struct *work)
{
- struct net_device *netdev = data;
- struct spider_net_card *card = netdev_priv(netdev);
+ struct spider_net_card *card =
+ container_of(work, struct spider_net_card, tx_timeout_task);
+ struct net_device *netdev = card->netdev;
if (!(netdev->flags & IFF_UP))
goto out;
@@ -2116,7 +2117,7 @@
card = netdev_priv(netdev);
card->netdev = netdev;
card->msg_enable = SPIDER_NET_DEFAULT_MSG;
- INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev);
+ INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
init_waitqueue_head(&card->waitq);
atomic_set(&card->tx_timeout_task_counter, 0);
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index cf44e72..785e4a5 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2282,9 +2282,9 @@
}
}
-static void gem_reset_task(void *data)
+static void gem_reset_task(struct work_struct *work)
{
- struct gem *gp = (struct gem *) data;
+ struct gem *gp = container_of(work, struct gem, reset_task);
mutex_lock(&gp->pm_mutex);
@@ -3044,7 +3044,7 @@
gp->link_timer.function = gem_link_timer;
gp->link_timer.data = (unsigned long) gp;
- INIT_WORK(&gp->reset_task, gem_reset_task, gp);
+ INIT_WORK(&gp->reset_task, gem_reset_task);
gp->lstate = link_down;
gp->timer_ticks = 0;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index c20bb99..d9123c9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3654,9 +3654,9 @@
}
#endif
-static void tg3_reset_task(void *_data)
+static void tg3_reset_task(struct work_struct *work)
{
- struct tg3 *tp = _data;
+ struct tg3 *tp = container_of(work, struct tg3, reset_task);
unsigned int restart_timer;
tg3_full_lock(tp, 0);
@@ -11734,7 +11734,7 @@
#endif
spin_lock_init(&tp->lock);
spin_lock_init(&tp->indirect_lock);
- INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
+ INIT_WORK(&tp->reset_task, tg3_reset_task);
tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
if (tp->regs == 0UL) {
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index e14f5a0..f85f002 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -296,6 +296,7 @@
static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent);
static void TLan_tx_timeout( struct net_device *dev);
+static void TLan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
static u32 TLan_HandleInvalid( struct net_device *, u16 );
@@ -562,6 +563,7 @@
priv = netdev_priv(dev);
priv->pciDev = pdev;
+ priv->dev = dev;
/* Is this a PCI device? */
if (pdev) {
@@ -634,7 +636,7 @@
/* This will be used when we get an adapter error from
* within our irq handler */
- INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev);
+ INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
spin_lock_init(&priv->lock);
@@ -1040,6 +1042,25 @@
}
+ /***************************************************************
+ * TLan_tx_timeout_work
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * work work item of device which timed out
+ *
+ **************************************************************/
+
+static void TLan_tx_timeout_work(struct work_struct *work)
+{
+ TLanPrivateInfo *priv =
+ container_of(work, TLanPrivateInfo, tlan_tqueue);
+
+ TLan_tx_timeout(priv->dev);
+}
+
+
/***************************************************************
* TLan_StartTx
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index a44e2f2..41ce0b6 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -170,6 +170,7 @@
typedef struct tlan_private_tag {
struct net_device *nextDevice;
struct pci_dev *pciDev;
+ struct net_device *dev;
void *dmaStorage;
dma_addr_t dmaStorageDMA;
unsigned int dmaSize;
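
Where a routine must keep its net_device-based signature because it is also installed as dev->tx_timeout, the tlan change adds a thin work_struct wrapper instead of altering the original function, together with a dev back-pointer in the private struct. The same idea in miniature, assuming invented types:

	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	struct my_priv {
		struct net_device *dev;		/* back-pointer for the wrapper */
		struct work_struct tqueue;
	};

	static void my_tx_timeout(struct net_device *dev)
	{
		netif_wake_queue(dev);		/* placeholder for the real recovery */
	}

	static void my_tx_timeout_work(struct work_struct *work)
	{
		struct my_priv *priv = container_of(work, struct my_priv, tqueue);

		my_tx_timeout(priv->dev);	/* forward to the netdev-based routine */
	}

	static void my_init(struct my_priv *priv, struct net_device *dev)
	{
		priv->dev = dev;
		INIT_WORK(&priv->tqueue, my_tx_timeout_work);
	}
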
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index fa3a2bb..942b839 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -26,10 +26,11 @@
/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
of available transceivers. */
-void t21142_media_task(void *data)
+void t21142_media_task(struct work_struct *work)
{
- struct net_device *dev = data;
- struct tulip_private *tp = netdev_priv(dev);
+ struct tulip_private *tp =
+ container_of(work, struct tulip_private, media_work);
+ struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int csr12 = ioread32(ioaddr + CSR12);
int next_tick = 60*HZ;
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index 066e5d6..df326fe 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -18,10 +18,11 @@
#include "tulip.h"
-void tulip_media_task(void *data)
+void tulip_media_task(struct work_struct *work)
{
- struct net_device *dev = data;
- struct tulip_private *tp = netdev_priv(dev);
+ struct tulip_private *tp =
+ container_of(work, struct tulip_private, media_work);
+ struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
u32 csr12 = ioread32(ioaddr + CSR12);
int next_tick = 2*HZ;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index ad107f4..25f25da 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -44,7 +44,7 @@
int valid_intrs; /* CSR7 interrupt enable settings */
int flags;
void (*media_timer) (unsigned long);
- void (*media_task) (void *);
+ work_func_t media_task;
};
@@ -392,6 +392,7 @@
int csr12_shadow;
int pad0; /* Used for 8-byte alignment */
struct work_struct media_work;
+ struct net_device *dev;
};
@@ -406,7 +407,7 @@
/* 21142.c */
extern u16 t21142_csr14[];
-void t21142_media_task(void *data);
+void t21142_media_task(struct work_struct *work);
void t21142_start_nway(struct net_device *dev);
void t21142_lnk_change(struct net_device *dev, int csr5);
@@ -444,7 +445,7 @@
void pnic_timer(unsigned long data);
/* timer.c */
-void tulip_media_task(void *data);
+void tulip_media_task(struct work_struct *work);
void mxic_timer(unsigned long data);
void comet_timer(unsigned long data);
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 0aee618..5a35354 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1367,6 +1367,7 @@
* it is zeroed and aligned in alloc_etherdev
*/
tp = netdev_priv(dev);
+ tp->dev = dev;
tp->rx_ring = pci_alloc_consistent(pdev,
sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
@@ -1389,7 +1390,7 @@
tp->timer.data = (unsigned long)dev;
tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
- INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev);
+ INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
dev->base_addr = (unsigned long)ioaddr;
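
The tulip board table keeps a per-chip media handler; switching its type to work_func_t lets one INIT_WORK() call accept whichever handler the table supplies. A reduced version of that arrangement, with made-up chip entries:

	#include <linux/workqueue.h>

	struct my_chip_info {
		const char *name;
		work_func_t media_task;		/* was: void (*)(void *) */
	};

	static void my_21142_media_task(struct work_struct *work) { /* ... */ }
	static void my_generic_media_task(struct work_struct *work) { /* ... */ }

	static const struct my_chip_info my_tbl[] = {
		{ "21142",   my_21142_media_task },
		{ "generic", my_generic_media_task },
	};

	struct my_tulip {
		struct work_struct media_work;
		int chip_id;
	};

	static void my_setup(struct my_tulip *tp)
	{
		INIT_WORK(&tp->media_work, my_tbl[tp->chip_id].media_task);
	}
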
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 931cbdf6..b2a23ae 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -125,8 +125,8 @@
static int cpc_tty_chars_in_buffer(struct tty_struct *tty);
static void cpc_tty_flush_buffer(struct tty_struct *tty);
static void cpc_tty_hangup(struct tty_struct *tty);
-static void cpc_tty_rx_work(void *data);
-static void cpc_tty_tx_work(void *data);
+static void cpc_tty_rx_work(struct work_struct *work);
+static void cpc_tty_tx_work(struct work_struct *work);
static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len);
static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
@@ -261,8 +261,8 @@
cpc_tty->tty_minor = port + CPC_TTY_MINOR_START;
cpc_tty->pc300dev = pc300dev;
- INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty);
- INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port);
+ INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work);
+ INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work);
cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL;
@@ -659,21 +659,23 @@
* o call the line disc. read
* o free memory
*/
-static void cpc_tty_rx_work(void * data)
+static void cpc_tty_rx_work(struct work_struct *work)
{
+ st_cpc_tty_area *cpc_tty;
unsigned long port;
int i, j;
- st_cpc_tty_area *cpc_tty;
volatile st_cpc_rx_buf *buf;
char flags=0,flg_rx=1;
struct tty_ldisc *ld;
if (cpc_tty_cnt == 0) return;
-
for (i=0; (i < 4) && flg_rx ; i++) {
flg_rx = 0;
- port = (unsigned long)data;
+
+ cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work);
+ port = cpc_tty - cpc_tty_area;
+
for (j=0; j < CPC_TTY_NPORTS; j++) {
cpc_tty = &cpc_tty_area[port];
@@ -882,9 +884,10 @@
* o if need call line discipline wakeup
* o call wake_up_interruptible
*/
-static void cpc_tty_tx_work(void *data)
+static void cpc_tty_tx_work(struct work_struct *work)
{
- st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data;
+ st_cpc_tty_area *cpc_tty =
+ container_of(work, st_cpc_tty_area, tty_tx_work);
struct tty_struct *tty;
CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index 94dfb92..8286678 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -819,7 +819,7 @@
struct tasklet_struct isr_tasklet;
/* Periodic tasks */
- struct work_struct periodic_work;
+ struct delayed_work periodic_work;
unsigned int periodic_state;
struct work_struct restart_work;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 5b3c273..2ec2e5a 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3215,9 +3215,10 @@
schedule_delayed_work(&bcm->periodic_work, HZ * 15);
}
-static void bcm43xx_periodic_work_handler(void *d)
+static void bcm43xx_periodic_work_handler(struct work_struct *work)
{
- struct bcm43xx_private *bcm = d;
+ struct bcm43xx_private *bcm =
+ container_of(work, struct bcm43xx_private, periodic_work.work);
struct net_device *net_dev = bcm->net_dev;
unsigned long flags;
u32 savedirqs = 0;
@@ -3279,11 +3280,11 @@
void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
{
- struct work_struct *work = &(bcm->periodic_work);
+ struct delayed_work *work = &bcm->periodic_work;
assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
- INIT_WORK(work, bcm43xx_periodic_work_handler, bcm);
- schedule_work(work);
+ INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler);
+ schedule_delayed_work(work, 0);
}
static void bcm43xx_security_init(struct bcm43xx_private *bcm)
@@ -3635,7 +3636,7 @@
bcm43xx_periodic_tasks_setup(bcm);
/*FIXME: This should be handled by softmac instead. */
- schedule_work(&bcm->softmac->associnfo.work);
+ schedule_delayed_work(&bcm->softmac->associnfo.work, 0);
out:
mutex_unlock(&(bcm)->mutex);
@@ -4182,9 +4183,10 @@
/* Hard-reset the chip. Do not call this directly.
* Use bcm43xx_controller_restart()
*/
-static void bcm43xx_chip_reset(void *_bcm)
+static void bcm43xx_chip_reset(struct work_struct *work)
{
- struct bcm43xx_private *bcm = _bcm;
+ struct bcm43xx_private *bcm =
+ container_of(work, struct bcm43xx_private, restart_work);
struct bcm43xx_phyinfo *phy;
int err = -ENODEV;
@@ -4211,7 +4213,7 @@
if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
return;
printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
- INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm);
+ INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset);
schedule_work(&bcm->restart_work);
}
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
index e663518..e89c890 100644
--- a/drivers/net/wireless/hostap/hostap.h
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -35,7 +35,7 @@
struct net_device_stats *hostap_get_stats(struct net_device *dev);
void hostap_setup_dev(struct net_device *dev, local_info_t *local,
int main_dev);
-void hostap_set_multicast_list_queue(void *data);
+void hostap_set_multicast_list_queue(struct work_struct *work);
int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked);
int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked);
void hostap_cleanup(local_info_t *local);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index ba13125..08bc57a 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -49,10 +49,10 @@
static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
static void hostap_event_expired_sta(struct net_device *dev,
struct sta_info *sta);
-static void handle_add_proc_queue(void *data);
+static void handle_add_proc_queue(struct work_struct *work);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void handle_wds_oper_queue(void *data);
+static void handle_wds_oper_queue(struct work_struct *work);
static void prism2_send_mgmt(struct net_device *dev,
u16 type_subtype, char *body,
int body_len, u8 *addr, u16 tx_cb_idx);
@@ -807,7 +807,7 @@
INIT_LIST_HEAD(&ap->sta_list);
/* Initialize task queue structure for AP management */
- INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap);
+ INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue);
ap->tx_callback_idx =
hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
@@ -815,7 +815,7 @@
printk(KERN_WARNING "%s: failed to register TX callback for "
"AP\n", local->dev->name);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local);
+ INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue);
ap->tx_callback_auth =
hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
@@ -1062,9 +1062,10 @@
}
-static void handle_add_proc_queue(void *data)
+static void handle_add_proc_queue(struct work_struct *work)
{
- struct ap_data *ap = (struct ap_data *) data;
+ struct ap_data *ap = container_of(work, struct ap_data,
+ add_sta_proc_queue);
struct sta_info *sta;
char name[20];
struct add_sta_proc_data *entry, *prev;
@@ -1952,9 +1953,11 @@
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void handle_wds_oper_queue(void *data)
+static void handle_wds_oper_queue(struct work_struct *work)
{
- local_info_t *local = data;
+ struct ap_data *ap = container_of(work, struct ap_data,
+ wds_oper_queue);
+ local_info_t *local = ap->local;
struct wds_oper_data *entry, *prev;
spin_lock_bh(&local->lock);
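
In hostap, handle_wds_oper_queue() used to be handed local directly, but the work item actually lives inside ap_data; the conversion therefore resolves to the ap first and reaches local through a member. A sketch with invented types, mirroring that two-step lookup:

	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct my_local {
		spinlock_t lock;
	};

	struct my_ap_data {
		struct my_local *local;		/* owner of this AP state */
		struct work_struct wds_oper_queue;
	};

	static void my_wds_oper_queue(struct work_struct *work)
	{
		struct my_ap_data *ap =
			container_of(work, struct my_ap_data, wds_oper_queue);
		struct my_local *local = ap->local;	/* old "data" pointer, one hop away */

		spin_lock_bh(&local->lock);
		/* process the queued entries here */
		spin_unlock_bh(&local->lock);
	}

	static void my_ap_init(struct my_ap_data *ap, struct my_local *local)
	{
		ap->local = local;
		INIT_WORK(&ap->wds_oper_queue, my_wds_oper_queue);
	}
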
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ed00ebb..c19e686 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1645,9 +1645,9 @@
/* Called only as scheduled task after noticing card timeout in interrupt
* context */
-static void handle_reset_queue(void *data)
+static void handle_reset_queue(struct work_struct *work)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = container_of(work, local_info_t, reset_queue);
printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
prism2_hw_reset(local->dev);
@@ -2896,9 +2896,10 @@
/* Called only as a scheduled task when communications quality values should
* be updated. */
-static void handle_comms_qual_update(void *data)
+static void handle_comms_qual_update(struct work_struct *work)
{
- local_info_t *local = data;
+ local_info_t *local =
+ container_of(work, local_info_t, comms_qual_update);
prism2_update_comms_qual(local->dev);
}
@@ -3050,9 +3051,9 @@
}
-static void handle_set_tim_queue(void *data)
+static void handle_set_tim_queue(struct work_struct *work)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = container_of(work, local_info_t, set_tim_queue);
struct set_tim_data *entry;
u16 val;
@@ -3209,15 +3210,15 @@
local->scan_channel_mask = 0xffff;
/* Initialize task queue structures */
- INIT_WORK(&local->reset_queue, handle_reset_queue, local);
+ INIT_WORK(&local->reset_queue, handle_reset_queue);
INIT_WORK(&local->set_multicast_list_queue,
- hostap_set_multicast_list_queue, local->dev);
+ hostap_set_multicast_list_queue);
- INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local);
+ INIT_WORK(&local->set_tim_queue, handle_set_tim_queue);
INIT_LIST_HEAD(&local->set_tim_list);
spin_lock_init(&local->set_tim_lock);
- INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local);
+ INIT_WORK(&local->comms_qual_update, handle_comms_qual_update);
/* Initialize tasklets for handling hardware IRQ related operations
* outside hw IRQ handler */
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 50f72d8..5fd2b1a 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -474,9 +474,9 @@
/* Called only as scheduled task after receiving info frames (used to avoid
* pending too much time in HW IRQ handler). */
-static void handle_info_queue(void *data)
+static void handle_info_queue(struct work_struct *work)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = container_of(work, local_info_t, info_queue);
if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS,
&local->pending_info))
@@ -493,7 +493,7 @@
{
skb_queue_head_init(&local->info_list);
#ifndef PRISM2_NO_STATION_MODES
- INIT_WORK(&local->info_queue, handle_info_queue, local);
+ INIT_WORK(&local->info_queue, handle_info_queue);
#endif /* PRISM2_NO_STATION_MODES */
}
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 53374fc..0796be9 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -767,14 +767,14 @@
/* TODO: to be further implemented as soon as Prism2 fully supports
* GroupAddresses and correct documentation is available */
-void hostap_set_multicast_list_queue(void *data)
+void hostap_set_multicast_list_queue(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *) data;
+ local_info_t *local =
+ container_of(work, local_info_t, set_multicast_list_queue);
+ struct net_device *dev = local->dev;
struct hostap_interface *iface;
- local_info_t *local;
iface = netdev_priv(dev);
- local = iface->local;
if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
local->is_promisc)) {
printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 79607b8..1bcd352 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -316,7 +316,7 @@
struct ipw2100_fw *fw);
static int ipw2100_ucode_download(struct ipw2100_priv *priv,
struct ipw2100_fw *fw);
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv);
+static void ipw2100_wx_event_work(struct work_struct *work);
static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev);
static struct iw_handler_def ipw2100_wx_handler_def;
@@ -679,7 +679,8 @@
queue_delayed_work(priv->workqueue, &priv->reset_work,
priv->reset_backoff * HZ);
else
- queue_work(priv->workqueue, &priv->reset_work);
+ queue_delayed_work(priv->workqueue, &priv->reset_work,
+ 0);
if (priv->reset_backoff < MAX_RESET_BACKOFF)
priv->reset_backoff++;
@@ -1873,8 +1874,10 @@
netif_stop_queue(priv->net_dev);
}
-static void ipw2100_reset_adapter(struct ipw2100_priv *priv)
+static void ipw2100_reset_adapter(struct work_struct *work)
{
+ struct ipw2100_priv *priv =
+ container_of(work, struct ipw2100_priv, reset_work.work);
unsigned long flags;
union iwreq_data wrqu = {
.ap_addr = {
@@ -2071,9 +2074,9 @@
return;
if (priv->status & STATUS_SECURITY_UPDATED)
- queue_work(priv->workqueue, &priv->security_work);
+ queue_delayed_work(priv->workqueue, &priv->security_work, 0);
- queue_work(priv->workqueue, &priv->wx_event_work);
+ queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
}
static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -5524,8 +5527,11 @@
return err;
}
-static void ipw2100_security_work(struct ipw2100_priv *priv)
+static void ipw2100_security_work(struct work_struct *work)
{
+ struct ipw2100_priv *priv =
+ container_of(work, struct ipw2100_priv, security_work.work);
+
/* If we happen to have reconnected before we get a chance to
* process this, then update the security settings--which causes
* a disassociation to occur */
@@ -5748,7 +5754,7 @@
priv->reset_backoff = 0;
mutex_unlock(&priv->action_mutex);
- ipw2100_reset_adapter(priv);
+ ipw2100_reset_adapter(&priv->reset_work.work);
return 0;
done:
@@ -5910,9 +5916,10 @@
.get_drvinfo = ipw_ethtool_get_drvinfo,
};
-static void ipw2100_hang_check(void *adapter)
+static void ipw2100_hang_check(struct work_struct *work)
{
- struct ipw2100_priv *priv = adapter;
+ struct ipw2100_priv *priv =
+ container_of(work, struct ipw2100_priv, hang_check.work);
unsigned long flags;
u32 rtc = 0xa5a5a5a5;
u32 len = sizeof(rtc);
@@ -5952,9 +5959,10 @@
spin_unlock_irqrestore(&priv->low_lock, flags);
}
-static void ipw2100_rf_kill(void *adapter)
+static void ipw2100_rf_kill(struct work_struct *work)
{
- struct ipw2100_priv *priv = adapter;
+ struct ipw2100_priv *priv =
+ container_of(work, struct ipw2100_priv, rf_kill.work);
unsigned long flags;
spin_lock_irqsave(&priv->low_lock, flags);
@@ -6103,14 +6111,11 @@
priv->workqueue = create_workqueue(DRV_NAME);
- INIT_WORK(&priv->reset_work,
- (void (*)(void *))ipw2100_reset_adapter, priv);
- INIT_WORK(&priv->security_work,
- (void (*)(void *))ipw2100_security_work, priv);
- INIT_WORK(&priv->wx_event_work,
- (void (*)(void *))ipw2100_wx_event_work, priv);
- INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
- INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
+ INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
+ INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
+ INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
+ INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
+ INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
ipw2100_irq_tasklet, (unsigned long)priv);
@@ -8281,8 +8286,10 @@
.get_wireless_stats = ipw2100_wx_wireless_stats,
};
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv)
+static void ipw2100_wx_event_work(struct work_struct *work)
{
+ struct ipw2100_priv *priv =
+ container_of(work, struct ipw2100_priv, wx_event_work.work);
union iwreq_data wrqu;
int len = ETH_ALEN;
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
index 55b7227..de7d384 100644
--- a/drivers/net/wireless/ipw2100.h
+++ b/drivers/net/wireless/ipw2100.h
@@ -583,11 +583,11 @@
struct tasklet_struct irq_tasklet;
struct workqueue_struct *workqueue;
- struct work_struct reset_work;
- struct work_struct security_work;
- struct work_struct wx_event_work;
- struct work_struct hang_check;
- struct work_struct rf_kill;
+ struct delayed_work reset_work;
+ struct delayed_work security_work;
+ struct delayed_work wx_event_work;
+ struct delayed_work hang_check;
+ struct delayed_work rf_kill;
u32 interrupts;
int tx_interrupts;
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index c692d01..e82e56b 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -187,9 +187,9 @@
static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
static void ipw_rx_queue_replenish(void *);
static int ipw_up(struct ipw_priv *);
-static void ipw_bg_up(void *);
+static void ipw_bg_up(struct work_struct *work);
static void ipw_down(struct ipw_priv *);
-static void ipw_bg_down(void *);
+static void ipw_bg_down(struct work_struct *work);
static int ipw_config(struct ipw_priv *);
static int init_supported_rates(struct ipw_priv *priv,
struct ipw_supported_rates *prates);
@@ -862,11 +862,12 @@
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void ipw_bg_led_link_on(void *data)
+static void ipw_bg_led_link_on(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, led_link_on.work);
mutex_lock(&priv->mutex);
- ipw_led_link_on(data);
+ ipw_led_link_on(priv);
mutex_unlock(&priv->mutex);
}
@@ -906,11 +907,12 @@
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void ipw_bg_led_link_off(void *data)
+static void ipw_bg_led_link_off(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, led_link_off.work);
mutex_lock(&priv->mutex);
- ipw_led_link_off(data);
+ ipw_led_link_off(priv);
mutex_unlock(&priv->mutex);
}
@@ -985,11 +987,12 @@
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void ipw_bg_led_activity_off(void *data)
+static void ipw_bg_led_activity_off(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, led_act_off.work);
mutex_lock(&priv->mutex);
- ipw_led_activity_off(data);
+ ipw_led_activity_off(priv);
mutex_unlock(&priv->mutex);
}
@@ -2228,11 +2231,12 @@
}
}
-static void ipw_bg_adapter_restart(void *data)
+static void ipw_bg_adapter_restart(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, adapter_restart);
mutex_lock(&priv->mutex);
- ipw_adapter_restart(data);
+ ipw_adapter_restart(priv);
mutex_unlock(&priv->mutex);
}
@@ -2249,11 +2253,12 @@
}
}
-static void ipw_bg_scan_check(void *data)
+static void ipw_bg_scan_check(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, scan_check.work);
mutex_lock(&priv->mutex);
- ipw_scan_check(data);
+ ipw_scan_check(priv);
mutex_unlock(&priv->mutex);
}
@@ -3831,17 +3836,19 @@
return 1;
}
-static void ipw_bg_disassociate(void *data)
+static void ipw_bg_disassociate(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, disassociate);
mutex_lock(&priv->mutex);
- ipw_disassociate(data);
+ ipw_disassociate(priv);
mutex_unlock(&priv->mutex);
}
-static void ipw_system_config(void *data)
+static void ipw_system_config(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, system_config);
#ifdef CONFIG_IPW2200_PROMISCUOUS
if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
@@ -4208,11 +4215,12 @@
IPW_STATS_INTERVAL);
}
-static void ipw_bg_gather_stats(void *data)
+static void ipw_bg_gather_stats(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, gather_stats.work);
mutex_lock(&priv->mutex);
- ipw_gather_stats(data);
+ ipw_gather_stats(priv);
mutex_unlock(&priv->mutex);
}
@@ -4268,8 +4276,8 @@
if (!(priv->status & STATUS_ROAMING)) {
priv->status |= STATUS_ROAMING;
if (!(priv->status & STATUS_SCANNING))
- queue_work(priv->workqueue,
- &priv->request_scan);
+ queue_delayed_work(priv->workqueue,
+ &priv->request_scan, 0);
}
return;
}
@@ -4607,8 +4615,8 @@
#ifdef CONFIG_IPW2200_MONITOR
if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
priv->status |= STATUS_SCAN_FORCED;
- queue_work(priv->workqueue,
- &priv->request_scan);
+ queue_delayed_work(priv->workqueue,
+ &priv->request_scan, 0);
break;
}
priv->status &= ~STATUS_SCAN_FORCED;
@@ -4631,8 +4639,8 @@
/* Don't schedule if we aborted the scan */
priv->status &= ~STATUS_ROAMING;
} else if (priv->status & STATUS_SCAN_PENDING)
- queue_work(priv->workqueue,
- &priv->request_scan);
+ queue_delayed_work(priv->workqueue,
+ &priv->request_scan, 0);
else if (priv->config & CFG_BACKGROUND_SCAN
&& priv->status & STATUS_ASSOCIATED)
queue_delayed_work(priv->workqueue,
@@ -5055,11 +5063,12 @@
ipw_rx_queue_restock(priv);
}
-static void ipw_bg_rx_queue_replenish(void *data)
+static void ipw_bg_rx_queue_replenish(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, rx_replenish);
mutex_lock(&priv->mutex);
- ipw_rx_queue_replenish(data);
+ ipw_rx_queue_replenish(priv);
mutex_unlock(&priv->mutex);
}
@@ -5489,9 +5498,10 @@
return 1;
}
-static void ipw_merge_adhoc_network(void *data)
+static void ipw_merge_adhoc_network(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, merge_networks);
struct ieee80211_network *network = NULL;
struct ipw_network_match match = {
.network = priv->assoc_network
@@ -5948,11 +5958,12 @@
priv->assoc_request.beacon_interval);
}
-static void ipw_bg_adhoc_check(void *data)
+static void ipw_bg_adhoc_check(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, adhoc_check.work);
mutex_lock(&priv->mutex);
- ipw_adhoc_check(data);
+ ipw_adhoc_check(priv);
mutex_unlock(&priv->mutex);
}
@@ -6299,19 +6310,26 @@
return err;
}
-static int ipw_request_passive_scan(struct ipw_priv *priv) {
- return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
-}
-
-static int ipw_request_scan(struct ipw_priv *priv) {
- return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
-}
-
-static void ipw_bg_abort_scan(void *data)
+static void ipw_request_passive_scan(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, request_passive_scan);
+ ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
+}
+
+static void ipw_request_scan(struct work_struct *work)
+{
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, request_scan.work);
+ ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
+}
+
+static void ipw_bg_abort_scan(struct work_struct *work)
+{
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, abort_scan);
mutex_lock(&priv->mutex);
- ipw_abort_scan(data);
+ ipw_abort_scan(priv);
mutex_unlock(&priv->mutex);
}
@@ -7084,9 +7102,10 @@
/*
* background support to run QoS activate functionality
*/
-static void ipw_bg_qos_activate(void *data)
+static void ipw_bg_qos_activate(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, qos_activate);
if (priv == NULL)
return;
@@ -7394,11 +7413,12 @@
priv->status &= ~STATUS_ROAMING;
}
-static void ipw_bg_roam(void *data)
+static void ipw_bg_roam(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, roam);
mutex_lock(&priv->mutex);
- ipw_roam(data);
+ ipw_roam(priv);
mutex_unlock(&priv->mutex);
}
@@ -7479,8 +7499,8 @@
&priv->request_scan,
SCAN_INTERVAL);
else
- queue_work(priv->workqueue,
- &priv->request_scan);
+ queue_delayed_work(priv->workqueue,
+ &priv->request_scan, 0);
}
return 0;
@@ -7491,11 +7511,12 @@
return 1;
}
-static void ipw_bg_associate(void *data)
+static void ipw_bg_associate(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, associate);
mutex_lock(&priv->mutex);
- ipw_associate(data);
+ ipw_associate(priv);
mutex_unlock(&priv->mutex);
}
@@ -9410,7 +9431,7 @@
IPW_DEBUG_WX("Start scan\n");
- queue_work(priv->workqueue, &priv->request_scan);
+ queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
return 0;
}
@@ -10547,11 +10568,12 @@
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void ipw_bg_rf_kill(void *data)
+static void ipw_bg_rf_kill(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, rf_kill.work);
mutex_lock(&priv->mutex);
- ipw_rf_kill(data);
+ ipw_rf_kill(priv);
mutex_unlock(&priv->mutex);
}
@@ -10582,11 +10604,12 @@
queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
}
-static void ipw_bg_link_up(void *data)
+static void ipw_bg_link_up(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, link_up);
mutex_lock(&priv->mutex);
- ipw_link_up(data);
+ ipw_link_up(priv);
mutex_unlock(&priv->mutex);
}
@@ -10606,15 +10629,16 @@
if (!(priv->status & STATUS_EXIT_PENDING)) {
/* Queue up another scan... */
- queue_work(priv->workqueue, &priv->request_scan);
+ queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
}
}
-static void ipw_bg_link_down(void *data)
+static void ipw_bg_link_down(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, link_down);
mutex_lock(&priv->mutex);
- ipw_link_down(data);
+ ipw_link_down(priv);
mutex_unlock(&priv->mutex);
}
@@ -10626,38 +10650,30 @@
init_waitqueue_head(&priv->wait_command_queue);
init_waitqueue_head(&priv->wait_state);
- INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
- INIT_WORK(&priv->associate, ipw_bg_associate, priv);
- INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
- INIT_WORK(&priv->system_config, ipw_system_config, priv);
- INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
- INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
- INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
- INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
- INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
- INIT_WORK(&priv->request_scan,
- (void (*)(void *))ipw_request_scan, priv);
- INIT_WORK(&priv->request_passive_scan,
- (void (*)(void *))ipw_request_passive_scan, priv);
- INIT_WORK(&priv->gather_stats,
- (void (*)(void *))ipw_bg_gather_stats, priv);
- INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
- INIT_WORK(&priv->roam, ipw_bg_roam, priv);
- INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
- INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
- INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
- INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
- priv);
- INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
- priv);
- INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
- priv);
- INIT_WORK(&priv->merge_networks,
- (void (*)(void *))ipw_merge_adhoc_network, priv);
+ INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
+ INIT_WORK(&priv->associate, ipw_bg_associate);
+ INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
+ INIT_WORK(&priv->system_config, ipw_system_config);
+ INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
+ INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
+ INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
+ INIT_WORK(&priv->up, ipw_bg_up);
+ INIT_WORK(&priv->down, ipw_bg_down);
+ INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
+ INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
+ INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
+ INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
+ INIT_WORK(&priv->roam, ipw_bg_roam);
+ INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
+ INIT_WORK(&priv->link_up, ipw_bg_link_up);
+ INIT_WORK(&priv->link_down, ipw_bg_link_down);
+ INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
+ INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
+ INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
+ INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
#ifdef CONFIG_IPW2200_QOS
- INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
- priv);
+ INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
#endif /* CONFIG_IPW2200_QOS */
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
@@ -11190,7 +11206,8 @@
/* If configure to try and auto-associate, kick
* off a scan. */
- queue_work(priv->workqueue, &priv->request_scan);
+ queue_delayed_work(priv->workqueue,
+ &priv->request_scan, 0);
return 0;
}
@@ -11211,11 +11228,12 @@
return -EIO;
}
-static void ipw_bg_up(void *data)
+static void ipw_bg_up(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, up);
mutex_lock(&priv->mutex);
- ipw_up(data);
+ ipw_up(priv);
mutex_unlock(&priv->mutex);
}
@@ -11282,11 +11300,12 @@
ipw_led_radio_off(priv);
}
-static void ipw_bg_down(void *data)
+static void ipw_bg_down(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, down);
mutex_lock(&priv->mutex);
- ipw_down(data);
+ ipw_down(priv);
mutex_unlock(&priv->mutex);
}
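Every ipw2200 hunk above is the same mechanical transformation: the handler drops its void * argument and recovers its context from the work item with container_of(), and any item that is ever queued with a delay moves from work_struct to delayed_work. A minimal sketch of the pattern, using an invented struct foo purely for illustration (not part of the patch):

#include <linux/workqueue.h>

struct foo {
	struct work_struct plain_work;	/* only ever queued immediately */
	struct delayed_work timed_work;	/* may be queued with a delay */
	int state;
};

/* New-style handler for a plain work_struct. */
static void foo_plain_handler(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, plain_work);

	f->state++;
}

/* For delayed_work the work_struct is embedded, hence the extra ".work". */
static void foo_timed_handler(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, timed_work.work);

	f->state++;
}

static void foo_setup(struct foo *f)
{
	/* Neither initialiser takes a context pointer any more. */
	INIT_WORK(&f->plain_work, foo_plain_handler);
	INIT_DELAYED_WORK(&f->timed_work, foo_timed_handler);

	schedule_work(&f->plain_work);
	schedule_delayed_work(&f->timed_work, 0);	/* 0 == run as soon as possible */
}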
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index dad5eed..626a240 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1290,21 +1290,21 @@
struct workqueue_struct *workqueue;
- struct work_struct adhoc_check;
+ struct delayed_work adhoc_check;
struct work_struct associate;
struct work_struct disassociate;
struct work_struct system_config;
struct work_struct rx_replenish;
- struct work_struct request_scan;
+ struct delayed_work request_scan;
struct work_struct request_passive_scan;
struct work_struct adapter_restart;
- struct work_struct rf_kill;
+ struct delayed_work rf_kill;
struct work_struct up;
struct work_struct down;
- struct work_struct gather_stats;
+ struct delayed_work gather_stats;
struct work_struct abort_scan;
struct work_struct roam;
- struct work_struct scan_check;
+ struct delayed_work scan_check;
struct work_struct link_up;
struct work_struct link_down;
@@ -1319,9 +1319,9 @@
u32 led_ofdm_on;
u32 led_ofdm_off;
- struct work_struct led_link_on;
- struct work_struct led_link_off;
- struct work_struct led_act_off;
+ struct delayed_work led_link_on;
+ struct delayed_work led_link_off;
+ struct delayed_work led_act_off;
struct work_struct merge_networks;
struct ipw_cmd_log *cmdlog;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 336caba..936c888 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -980,9 +980,11 @@
}
/* Search scan results for requested BSSID, join it if found */
-static void orinoco_join_ap(struct net_device *dev)
+static void orinoco_join_ap(struct work_struct *work)
{
- struct orinoco_private *priv = netdev_priv(dev);
+ struct orinoco_private *priv =
+ container_of(work, struct orinoco_private, join_work);
+ struct net_device *dev = priv->ndev;
struct hermes *hw = &priv->hw;
int err;
unsigned long flags;
@@ -1055,9 +1057,11 @@
}
/* Send new BSSID to userspace */
-static void orinoco_send_wevents(struct net_device *dev)
+static void orinoco_send_wevents(struct work_struct *work)
{
- struct orinoco_private *priv = netdev_priv(dev);
+ struct orinoco_private *priv =
+ container_of(work, struct orinoco_private, wevent_work);
+ struct net_device *dev = priv->ndev;
struct hermes *hw = &priv->hw;
union iwreq_data wrqu;
int err;
@@ -1864,9 +1868,11 @@
/* This must be called from user context, without locks held - use
* schedule_work() */
-static void orinoco_reset(struct net_device *dev)
+static void orinoco_reset(struct work_struct *work)
{
- struct orinoco_private *priv = netdev_priv(dev);
+ struct orinoco_private *priv =
+ container_of(work, struct orinoco_private, reset_work);
+ struct net_device *dev = priv->ndev;
struct hermes *hw = &priv->hw;
int err;
unsigned long flags;
@@ -2434,9 +2440,9 @@
priv->hw_unavailable = 1; /* orinoco_init() must clear this
* before anything else touches the
* hardware */
- INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev);
- INIT_WORK(&priv->join_work, (void (*)(void *))orinoco_join_ap, dev);
- INIT_WORK(&priv->wevent_work, (void (*)(void *))orinoco_send_wevents, dev);
+ INIT_WORK(&priv->reset_work, orinoco_reset);
+ INIT_WORK(&priv->join_work, orinoco_join_ap);
+ INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
netif_carrier_off(dev);
priv->last_linkstatus = 0xffff;
@@ -3608,7 +3614,7 @@
printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name);
/* Firmware reset */
- orinoco_reset(dev);
+ orinoco_reset(&priv->reset_work);
} else {
printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
@@ -4154,7 +4160,7 @@
return 0;
if (priv->broken_disableport) {
- orinoco_reset(dev);
+ orinoco_reset(&priv->reset_work);
return 0;
}
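The orinoco handlers used to be handed the net_device rather than the private structure, so container_of() alone is not enough; the private struct must carry a way back to the other object (priv->ndev above). A sketch of that shape, with invented names and an assumed back-pointer, purely to show the idea:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_priv {
	struct net_device *ndev;	/* back-pointer assumed to exist */
	struct work_struct reset_work;
};

static void example_reset(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, reset_work);
	struct net_device *dev = priv->ndev;

	netif_carrier_off(dev);		/* stand-in for the real reset logic */
}

static void example_init(struct example_priv *priv, struct net_device *dev)
{
	priv->ndev = dev;
	INIT_WORK(&priv->reset_work, example_reset);
}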
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 4a20e45..a87eb51 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -157,8 +157,9 @@
* schedule_work(), thus we can as well use sleeping semaphore
* locking */
void
-prism54_update_stats(islpci_private *priv)
+prism54_update_stats(struct work_struct *work)
{
+ islpci_private *priv = container_of(work, islpci_private, stats_work);
char *data;
int j;
struct obj_bss bss, *bss2;
@@ -2493,9 +2494,10 @@
* interrupt context, no locks held.
*/
void
-prism54_process_trap(void *data)
+prism54_process_trap(struct work_struct *work)
{
- struct islpci_mgmtframe *frame = data;
+ struct islpci_mgmtframe *frame =
+ container_of(work, struct islpci_mgmtframe, ws);
struct net_device *ndev = frame->ndev;
enum oid_num_t n = mgt_oidtonum(frame->header->oid);
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index e8183d3..bcfbfb9 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -31,12 +31,12 @@
void prism54_mib_init(islpci_private *);
struct iw_statistics *prism54_get_wireless_stats(struct net_device *);
-void prism54_update_stats(islpci_private *);
+void prism54_update_stats(struct work_struct *);
void prism54_acl_init(struct islpci_acl *);
void prism54_acl_clean(struct islpci_acl *);
-void prism54_process_trap(void *);
+void prism54_process_trap(struct work_struct *);
void prism54_wpa_bss_ie_init(islpci_private *priv);
void prism54_wpa_bss_ie_clean(islpci_private *priv);
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 1e0603c..f057fd9 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -860,11 +860,10 @@
priv->state_off = 1;
/* initialize workqueue's */
- INIT_WORK(&priv->stats_work,
- (void (*)(void *)) prism54_update_stats, priv);
+ INIT_WORK(&priv->stats_work, prism54_update_stats);
priv->stats_timestamp = 0;
- INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv);
+ INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
priv->reset_task_pending = 0;
/* allocate various memory areas */
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 676d838..b112291 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -480,9 +480,9 @@
}
void
-islpci_do_reset_and_wake(void *data)
+islpci_do_reset_and_wake(struct work_struct *work)
{
- islpci_private *priv = data;
+ islpci_private *priv = container_of(work, islpci_private, reset_task);
islpci_reset(priv, 1);
priv->reset_task_pending = 0;
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index 2678945..5bf820d 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -67,6 +67,6 @@
int islpci_eth_transmit(struct sk_buff *, struct net_device *);
int islpci_eth_receive(islpci_private *);
void islpci_eth_tx_timeout(struct net_device *);
-void islpci_do_reset_and_wake(void *data);
+void islpci_do_reset_and_wake(struct work_struct *);
#endif /* _ISL_GEN_H */
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 036a875..2246f79 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -386,7 +386,7 @@
/* Create work to handle trap out of interrupt
* context. */
- INIT_WORK(&frame->ws, prism54_process_trap, frame);
+ INIT_WORK(&frame->ws, prism54_process_trap);
schedule_work(&frame->ws);
} else {
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 2696f95..f1573a9 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -32,8 +32,8 @@
static void ieee_init(struct ieee80211_device *ieee);
static void softmac_init(struct ieee80211softmac_device *sm);
-static void set_rts_cts_work(void *d);
-static void set_basic_rates_work(void *d);
+static void set_rts_cts_work(struct work_struct *work);
+static void set_basic_rates_work(struct work_struct *work);
static void housekeeping_init(struct zd_mac *mac);
static void housekeeping_enable(struct zd_mac *mac);
@@ -48,8 +48,8 @@
memset(mac, 0, sizeof(*mac));
spin_lock_init(&mac->lock);
mac->netdev = netdev;
- INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac);
- INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac);
+ INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
+ INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work);
ieee_init(ieee);
softmac_init(ieee80211_priv(netdev));
@@ -366,9 +366,10 @@
spin_unlock_irqrestore(&mac->lock, flags);
}
-static void set_rts_cts_work(void *d)
+static void set_rts_cts_work(struct work_struct *work)
{
- struct zd_mac *mac = d;
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, set_rts_cts_work.work);
unsigned long flags;
u8 rts_rate;
unsigned int short_preamble;
@@ -387,9 +388,10 @@
try_enable_tx(mac);
}
-static void set_basic_rates_work(void *d)
+static void set_basic_rates_work(struct work_struct *work)
{
- struct zd_mac *mac = d;
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, set_basic_rates_work.work);
unsigned long flags;
u16 basic_rates;
@@ -467,12 +469,13 @@
if (need_set_rts_cts && !mac->updating_rts_rate) {
mac->updating_rts_rate = 1;
netif_stop_queue(mac->netdev);
- queue_work(zd_workqueue, &mac->set_rts_cts_work);
+ queue_delayed_work(zd_workqueue, &mac->set_rts_cts_work, 0);
}
if (need_set_rates && !mac->updating_basic_rates) {
mac->updating_basic_rates = 1;
netif_stop_queue(mac->netdev);
- queue_work(zd_workqueue, &mac->set_basic_rates_work);
+ queue_delayed_work(zd_workqueue, &mac->set_basic_rates_work,
+ 0);
}
spin_unlock_irqrestore(&mac->lock, flags);
}
@@ -1182,9 +1185,10 @@
#define LINK_LED_WORK_DELAY HZ
-static void link_led_handler(void *p)
+static void link_led_handler(struct work_struct *work)
{
- struct zd_mac *mac = p;
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, housekeeping.link_led_work.work);
struct zd_chip *chip = &mac->chip;
struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev);
int is_associated;
@@ -1205,7 +1209,7 @@
static void housekeeping_init(struct zd_mac *mac)
{
- INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac);
+ INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler);
}
static void housekeeping_enable(struct zd_mac *mac)
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 5dcfb25..d4e8b87 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -119,7 +119,7 @@
#define ZD_RX_ERROR 0x80
struct housekeeping {
- struct work_struct link_led_work;
+ struct delayed_work link_led_work;
};
#define ZD_MAC_STATS_BUFFER_SIZE 16
@@ -133,8 +133,8 @@
struct iw_statistics iw_stats;
struct housekeeping housekeeping;
- struct work_struct set_rts_cts_work;
- struct work_struct set_basic_rates_work;
+ struct delayed_work set_rts_cts_work;
+ struct delayed_work set_basic_rates_work;
unsigned int stats_count;
u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
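Note that container_of() takes a full member path, so zd1211rw's LED handler can reach the outer zd_mac even though the delayed_work lives inside the embedded housekeeping struct. A small sketch with invented names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct inner {
	struct delayed_work led_work;
};

struct outer {
	int led_state;
	struct inner housekeeping;
};

static void led_handler(struct work_struct *work)
{
	/* Walks back through two levels of embedding in one step. */
	struct outer *o =
		container_of(work, struct outer, housekeeping.led_work.work);

	o->led_state ^= 1;
}

static void led_init(struct outer *o)
{
	INIT_DELAYED_WORK(&o->housekeeping.led_work, led_handler);
	schedule_delayed_work(&o->housekeeping.led_work, HZ);
}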
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index fc4bc9b..a83c3db 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -29,7 +29,7 @@
struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
-static void wq_sync_buffer(void *);
+static void wq_sync_buffer(struct work_struct *work);
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;
@@ -65,7 +65,7 @@
b->sample_received = 0;
b->sample_lost_overflow = 0;
b->cpu = i;
- INIT_WORK(&b->work, wq_sync_buffer, b);
+ INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
}
return 0;
@@ -282,9 +282,10 @@
* By using schedule_delayed_work_on and then schedule_delayed_work
* we guarantee this will stay on the correct cpu
*/
-static void wq_sync_buffer(void * data)
+static void wq_sync_buffer(struct work_struct *work)
{
- struct oprofile_cpu_buffer * b = data;
+ struct oprofile_cpu_buffer * b =
+ container_of(work, struct oprofile_cpu_buffer, work.work);
if (b->cpu != smp_processor_id()) {
printk("WQ on CPU%d, prefer CPU%d\n",
smp_processor_id(), b->cpu);
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 09abb80..49900d9 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -43,7 +43,7 @@
unsigned long sample_lost_overflow;
unsigned long backtrace_aborted;
int cpu;
- struct work_struct work;
+ struct delayed_work work;
} ____cacheline_aligned;
extern struct oprofile_cpu_buffer cpu_buffer[];
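The oprofile buffer is the periodic flavour of the same thing: a delayed_work whose handler re-arms itself. A minimal sketch of a self-rescheduling work item, assuming an invented per-buffer struct:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct sample_buf {
	struct delayed_work work;
	unsigned long flushed;
};

static void buf_sync(struct work_struct *work)
{
	struct sample_buf *b = container_of(work, struct sample_buf, work.work);

	b->flushed++;					/* stand-in for the real flush */
	schedule_delayed_work(&b->work, HZ / 10);	/* re-arm, as wq_sync_buffer does */
}

static void buf_init(struct sample_buf *b)
{
	INIT_DELAYED_WORK(&b->work, buf_sync);
	schedule_delayed_work(&b->work, HZ / 10);
}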
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index ea2087c..5075769 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -70,7 +70,7 @@
struct hotplug_slot *hotplug_slot;
struct list_head slot_list;
char name[SLOT_NAME_SIZE];
- struct work_struct work; /* work for button event */
+ struct delayed_work work; /* work for button event */
struct mutex lock;
};
@@ -187,7 +187,7 @@
extern int shpchp_unconfigure_device(struct slot *p_slot);
extern void shpchp_remove_ctrl_files(struct controller *ctrl);
extern void cleanup_slots(struct controller *ctrl);
-extern void queue_pushbutton_work(void *data);
+extern void queue_pushbutton_work(struct work_struct *work);
#ifdef CONFIG_ACPI
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 235c18a..4eac85b 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -159,7 +159,7 @@
goto error_info;
slot->number = sun;
- INIT_WORK(&slot->work, queue_pushbutton_work, slot);
+ INIT_DELAYED_WORK(&slot->work, queue_pushbutton_work);
/* register this slot with the hotplug pci core */
hotplug_slot->private = slot;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index c39901d..158ac78 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -36,7 +36,7 @@
#include "../pci.h"
#include "shpchp.h"
-static void interrupt_event_handler(void *data);
+static void interrupt_event_handler(struct work_struct *work);
static int shpchp_enable_slot(struct slot *p_slot);
static int shpchp_disable_slot(struct slot *p_slot);
@@ -50,7 +50,7 @@
info->event_type = event_type;
info->p_slot = p_slot;
- INIT_WORK(&info->work, interrupt_event_handler, info);
+ INIT_WORK(&info->work, interrupt_event_handler);
schedule_work(&info->work);
@@ -408,9 +408,10 @@
* Handles all pending events and exits.
*
*/
-static void shpchp_pushbutton_thread(void *data)
+static void shpchp_pushbutton_thread(struct work_struct *work)
{
- struct pushbutton_work_info *info = data;
+ struct pushbutton_work_info *info =
+ container_of(work, struct pushbutton_work_info, work);
struct slot *p_slot = info->p_slot;
mutex_lock(&p_slot->lock);
@@ -436,9 +437,9 @@
kfree(info);
}
-void queue_pushbutton_work(void *data)
+void queue_pushbutton_work(struct work_struct *work)
{
- struct slot *p_slot = data;
+ struct slot *p_slot = container_of(work, struct slot, work.work);
struct pushbutton_work_info *info;
info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -447,7 +448,7 @@
return;
}
info->p_slot = p_slot;
- INIT_WORK(&info->work, shpchp_pushbutton_thread, info);
+ INIT_WORK(&info->work, shpchp_pushbutton_thread);
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
@@ -541,9 +542,9 @@
}
}
-static void interrupt_event_handler(void *data)
+static void interrupt_event_handler(struct work_struct *work)
{
- struct event_info *info = data;
+ struct event_info *info = container_of(work, struct event_info, work);
struct slot *p_slot = info->p_slot;
mutex_lock(&p_slot->lock);
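shpchp mixes two styles: the slot's pushbutton work is a long-lived delayed_work, while each interrupt event is a freshly allocated event_info whose embedded work_struct is initialised just before queueing. A sketch of the allocated flavour, with invented names; the freeing point is an assumption for illustration:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_event {
	struct work_struct work;
	int type;
};

static void my_event_handler(struct work_struct *work)
{
	struct my_event *ev = container_of(work, struct my_event, work);

	/* ... act on ev->type ... */
	kfree(ev);			/* handler owns the allocation */
}

static int my_queue_event(int type)
{
	struct my_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;
	ev->type = type;
	INIT_WORK(&ev->work, my_event_handler);
	schedule_work(&ev->work);
	return 0;
}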
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 04c43ef..55866b6 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -160,7 +160,7 @@
rpc->e_lock = SPIN_LOCK_UNLOCKED;
rpc->rpd = dev;
- INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev);
+ INIT_WORK(&rpc->dpc_handler, aer_isr);
rpc->prod_idx = rpc->cons_idx = 0;
mutex_init(&rpc->rpc_mutex);
init_waitqueue_head(&rpc->wait_release);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index daf0cad..3c0a58f 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -118,7 +118,7 @@
extern void aer_enable_rootport(struct aer_rpc *rpc);
extern void aer_delete_rootport(struct aer_rpc *rpc);
extern int aer_init(struct pcie_device *dev);
-extern void aer_isr(void *context);
+extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
extern int aer_osc_setup(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 1c7e660..08e1303 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -690,14 +690,14 @@
/**
* aer_isr - consume errors detected by root port
- * @context: pointer to a private data of pcie device
+ * @work: definition of this work item
*
* Invoked, as DPC, when root port records new detected error
**/
-void aer_isr(void *context)
+void aer_isr(struct work_struct *work)
{
- struct pcie_device *p_device = (struct pcie_device *) context;
- struct aer_rpc *rpc = get_service_data(p_device);
+ struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
+ struct pcie_device *p_device = rpc->rpd;
struct aer_err_source *e_src;
mutex_lock(&rpc->rpc_mutex);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 45df12e..7355eb4 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -675,9 +675,10 @@
}
-static void pcmcia_delayed_add_device(void *data)
+static void pcmcia_delayed_add_device(struct work_struct *work)
{
- struct pcmcia_socket *s = data;
+ struct pcmcia_socket *s =
+ container_of(work, struct pcmcia_socket, device_add);
ds_dbg(1, "adding additional device to %d\n", s->sock);
pcmcia_device_add(s, s->pcmcia_state.mfc_pfc);
s->pcmcia_state.device_add_pending = 0;
@@ -1349,7 +1350,7 @@
init_waitqueue_head(&socket->queue);
#endif
INIT_LIST_HEAD(&socket->devices_list);
- INIT_WORK(&socket->device_add, pcmcia_delayed_add_device, socket);
+ INIT_WORK(&socket->device_add, pcmcia_delayed_add_device);
memset(&socket->pcmcia_state, 0, sizeof(u8));
socket->device_count = 0;
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 814b9e1..828b329 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -53,9 +53,10 @@
* Routine to poll RTC seconds field for change as often as possible,
* after first RTC_UIE use timer to reduce polling
*/
-static void rtc_uie_task(void *data)
+static void rtc_uie_task(struct work_struct *work)
{
- struct rtc_device *rtc = data;
+ struct rtc_device *rtc =
+ container_of(work, struct rtc_device, uie_task);
struct rtc_time tm;
int num = 0;
int err;
@@ -411,7 +412,7 @@
spin_lock_init(&rtc->irq_lock);
init_waitqueue_head(&rtc->irq_queue);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
- INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc);
+ INIT_WORK(&rtc->uie_task, rtc_uie_task);
setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
#endif
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index a6aa910..bb3cb33 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -849,7 +849,7 @@
hostdata->issue_queue = NULL;
hostdata->disconnected_queue = NULL;
- INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata);
+ INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
#ifdef NCR5380_STATS
for (i = 0; i < 8; ++i) {
@@ -1016,7 +1016,7 @@
/* Run the coroutine if it isn't already running. */
/* Kick off command processing */
- schedule_work(&hostdata->coroutine);
+ schedule_delayed_work(&hostdata->coroutine, 0);
return 0;
}
@@ -1033,9 +1033,10 @@
* host lock and called routines may take the isa dma lock.
*/
-static void NCR5380_main(void *p)
+static void NCR5380_main(struct work_struct *work)
{
- struct NCR5380_hostdata *hostdata = p;
+ struct NCR5380_hostdata *hostdata =
+ container_of(work, struct NCR5380_hostdata, coroutine.work);
struct Scsi_Host *instance = hostdata->host;
Scsi_Cmnd *tmp, *prev;
int done;
@@ -1221,7 +1222,7 @@
} /* if BASR_IRQ */
spin_unlock_irqrestore(instance->host_lock, flags);
if(!done)
- schedule_work(&hostdata->coroutine);
+ schedule_delayed_work(&hostdata->coroutine, 0);
} while (!done);
return IRQ_HANDLED;
}
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 1bc73de..713a108 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -271,7 +271,7 @@
unsigned long time_expires; /* in jiffies, set prior to sleeping */
int select_time; /* timer in select for target response */
volatile Scsi_Cmnd *selecting;
- struct work_struct coroutine; /* our co-routine */
+ struct delayed_work coroutine; /* our co-routine */
#ifdef NCR5380_STATS
unsigned timebase; /* Base for time calcs */
long time_read[8]; /* time to do reads */
@@ -298,7 +298,7 @@
#ifndef DONT_USE_INTR
static irqreturn_t NCR5380_intr(int irq, void *dev_id);
#endif
-static void NCR5380_main(void *ptr);
+static void NCR5380_main(struct work_struct *work);
static void NCR5380_print_options(struct Scsi_Host *instance);
#ifdef NDEBUG
static void NCR5380_print_phase(struct Scsi_Host *instance);
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 306f46b..0cec742 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1443,7 +1443,7 @@
* Run service completions on the card with interrupts enabled.
*
*/
-static void run(void)
+static void run(struct work_struct *work)
{
struct aha152x_hostdata *hd;
@@ -1499,7 +1499,7 @@
HOSTDATA(shpnt)->service=1;
/* Poke the BH handler */
- INIT_WORK(&aha152x_tq, (void *) run, NULL);
+ INIT_WORK(&aha152x_tq, run);
schedule_work(&aha152x_tq);
}
DO_UNLOCK(flags);
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index e31f612..0464c18 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -36,7 +36,7 @@
int base_hi; /* Hi Base address for ECP-ISA chipset */
int mode; /* Transfer mode */
struct scsi_cmnd *cur_cmd; /* Current queued command */
- struct work_struct imm_tq; /* Polling interrupt stuff */
+ struct delayed_work imm_tq; /* Polling interrupt stuff */
unsigned long jstart; /* Jiffies at start */
unsigned failed:1; /* Failure flag */
unsigned dp:1; /* Data phase present */
@@ -733,9 +733,9 @@
* the scheduler's task queue to generate a stream of call-backs and
* complete the request when the drive is ready.
*/
-static void imm_interrupt(void *data)
+static void imm_interrupt(struct work_struct *work)
{
- imm_struct *dev = (imm_struct *) data;
+ imm_struct *dev = container_of(work, imm_struct, imm_tq.work);
struct scsi_cmnd *cmd = dev->cur_cmd;
struct Scsi_Host *host = cmd->device->host;
unsigned long flags;
@@ -745,7 +745,6 @@
return;
}
if (imm_engine(dev, cmd)) {
- INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev);
schedule_delayed_work(&dev->imm_tq, 1);
return;
}
@@ -953,8 +952,7 @@
cmd->result = DID_ERROR << 16; /* default return code */
cmd->SCp.phase = 0; /* bus free */
- INIT_WORK(&dev->imm_tq, imm_interrupt, dev);
- schedule_work(&dev->imm_tq);
+ schedule_delayed_work(&dev->imm_tq, 0);
imm_pb_claim(dev);
@@ -1225,7 +1223,7 @@
else
ports = 8;
- INIT_WORK(&dev->imm_tq, imm_interrupt, dev);
+ INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt);
err = -ENOMEM;
host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
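Because the work item no longer carries a data pointer, imm (and ppa below) can stop re-running INIT_WORK before every submission: the item is initialised once at detect time and only rescheduled afterwards. The shape of that, sketched with invented names:

#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work tq;
	int busy;
};

static void my_interrupt(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, tq.work);

	if (dev->busy) {
		schedule_delayed_work(&dev->tq, 1);	/* poll again next jiffy */
		return;
	}
	/* ... complete the command ... */
}

static void my_detect(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->tq, my_interrupt);	/* once, at probe/detect */
}

static void my_queuecommand(struct my_dev *dev)
{
	dev->busy = 1;
	schedule_delayed_work(&dev->tq, 0);		/* no INIT_WORK needed here */
}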
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2d83fbb..ccd4daf 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2307,7 +2307,7 @@
/**
* ipr_worker_thread - Worker thread
- * @data: ioa config struct
+ * @work: ioa config struct
*
* Called at task level from a work thread. This function takes care
* of adding and removing device from the mid-layer as configuration
@@ -2316,13 +2316,14 @@
* Return value:
* nothing
**/
-static void ipr_worker_thread(void *data)
+static void ipr_worker_thread(struct work_struct *work)
{
unsigned long lock_flags;
struct ipr_resource_entry *res;
struct scsi_device *sdev;
struct ipr_dump *dump;
- struct ipr_ioa_cfg *ioa_cfg = data;
+ struct ipr_ioa_cfg *ioa_cfg =
+ container_of(work, struct ipr_ioa_cfg, work_q);
u8 bus, target, lun;
int did_work;
@@ -7121,7 +7122,7 @@
INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
INIT_LIST_HEAD(&ioa_cfg->free_res_q);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
- INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
+ INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
init_waitqueue_head(&ioa_cfg->reset_wait_q);
ioa_cfg->sdt_state = INACTIVE;
if (ipr_enable_cache)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 5d88621..e11b23c 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -719,9 +719,10 @@
return rc;
}
-static void iscsi_xmitworker(void *data)
+static void iscsi_xmitworker(struct work_struct *work)
{
- struct iscsi_conn *conn = data;
+ struct iscsi_conn *conn =
+ container_of(work, struct iscsi_conn, xmitwork);
int rc;
/*
* serialize Xmit worker on a per-connection basis.
@@ -1512,7 +1513,7 @@
if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
goto mgmtqueue_alloc_fail;
- INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn);
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_mtask used for the login/text sequences */
spin_lock_bh(&session->lock);
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d977bd4..fb7df7b 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -647,10 +647,12 @@
* Discover process only interrogates devices in order to discover the
* domain.
*/
-static void sas_discover_domain(void *data)
+static void sas_discover_domain(struct work_struct *work)
{
int error = 0;
- struct asd_sas_port *port = data;
+ struct sas_discovery_event *ev =
+ container_of(work, struct sas_discovery_event, work);
+ struct asd_sas_port *port = ev->port;
sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
&port->disc.pending);
@@ -692,10 +694,12 @@
current->pid, error);
}
-static void sas_revalidate_domain(void *data)
+static void sas_revalidate_domain(struct work_struct *work)
{
int res = 0;
- struct asd_sas_port *port = data;
+ struct sas_discovery_event *ev =
+ container_of(work, struct sas_discovery_event, work);
+ struct asd_sas_port *port = ev->port;
sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
&port->disc.pending);
@@ -722,7 +726,7 @@
BUG_ON(ev >= DISC_NUM_EVENTS);
sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
- &disc->disc_work[ev], port->ha->core.shost);
+ &disc->disc_work[ev].work, port->ha->core.shost);
return 0;
}
@@ -737,13 +741,15 @@
{
int i;
- static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = {
+ static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
};
spin_lock_init(&disc->disc_event_lock);
disc->pending = 0;
- for (i = 0; i < DISC_NUM_EVENTS; i++)
- INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port);
+ for (i = 0; i < DISC_NUM_EVENTS; i++) {
+ INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
+ disc->disc_work[i].port = port;
+ }
}
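libsas previously pointed whole arrays of work_structs at a shared port/phy/ha pointer; with the pointer argument gone, each array slot becomes a small wrapper that pairs the work_struct with that pointer, and the function tables switch to the work_func_t type from <linux/workqueue.h>. A sketch of the wrapper idea, with invented names:

#include <linux/workqueue.h>

struct my_port {
	int discoveries;
};

struct my_disc_event {
	struct work_struct work;
	struct my_port *port;		/* what the old handlers were passed */
};

static void my_discover_domain(struct work_struct *work)
{
	struct my_disc_event *ev =
		container_of(work, struct my_disc_event, work);

	ev->port->discoveries++;	/* stand-in for the real discovery */
}

/* work_func_t is the new handler type: void (*)(struct work_struct *). */
static const work_func_t my_disc_fns[] = {
	my_discover_domain,
};

static void my_init_disc(struct my_port *port, struct my_disc_event *evs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		INIT_WORK(&evs[i].work, my_disc_fns[i]);
		evs[i].port = port;
	}
}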
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 19110ed..d83392e 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -31,7 +31,7 @@
BUG_ON(event >= HA_NUM_EVENTS);
sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
- &sas_ha->ha_events[event], sas_ha->core.shost);
+ &sas_ha->ha_events[event].work, sas_ha->core.shost);
}
static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
@@ -41,7 +41,7 @@
BUG_ON(event >= PORT_NUM_EVENTS);
sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
- &phy->port_events[event], ha->core.shost);
+ &phy->port_events[event].work, ha->core.shost);
}
static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
@@ -51,12 +51,12 @@
BUG_ON(event >= PHY_NUM_EVENTS);
sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
- &phy->phy_events[event], ha->core.shost);
+ &phy->phy_events[event].work, ha->core.shost);
}
int sas_init_events(struct sas_ha_struct *sas_ha)
{
- static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = {
+ static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
[HAE_RESET] = sas_hae_reset,
};
@@ -64,8 +64,10 @@
spin_lock_init(&sas_ha->event_lock);
- for (i = 0; i < HA_NUM_EVENTS; i++)
- INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha);
+ for (i = 0; i < HA_NUM_EVENTS; i++) {
+ INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
+ sas_ha->ha_events[i].ha = sas_ha;
+ }
sas_ha->notify_ha_event = notify_ha_event;
sas_ha->notify_port_event = notify_port_event;
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 0fb347b..d65bc4e 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -65,9 +65,11 @@
/* ---------- HA events ---------- */
-void sas_hae_reset(void *data)
+void sas_hae_reset(struct work_struct *work)
{
- struct sas_ha_struct *ha = data;
+ struct sas_ha_event *ev =
+ container_of(work, struct sas_ha_event, work);
+ struct sas_ha_struct *ha = ev->ha;
sas_begin_event(HAE_RESET, &ha->event_lock,
&ha->pending);
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index bffcee4..137d7e4 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -60,11 +60,11 @@
void sas_deform_port(struct asd_sas_phy *phy);
-void sas_porte_bytes_dmaed(void *);
-void sas_porte_broadcast_rcvd(void *);
-void sas_porte_link_reset_err(void *);
-void sas_porte_timer_event(void *);
-void sas_porte_hard_reset(void *);
+void sas_porte_bytes_dmaed(struct work_struct *work);
+void sas_porte_broadcast_rcvd(struct work_struct *work);
+void sas_porte_link_reset_err(struct work_struct *work);
+void sas_porte_timer_event(struct work_struct *work);
+void sas_porte_hard_reset(struct work_struct *work);
int sas_notify_lldd_dev_found(struct domain_device *);
void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -75,7 +75,7 @@
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
-void sas_hae_reset(void *);
+void sas_hae_reset(struct work_struct *work);
static inline void sas_queue_event(int event, spinlock_t *lock,
unsigned long *pending,
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 9340cdb..b459c4b 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -30,9 +30,11 @@
/* ---------- Phy events ---------- */
-static void sas_phye_loss_of_signal(void *data)
+static void sas_phye_loss_of_signal(struct work_struct *work)
{
- struct asd_sas_phy *phy = data;
+ struct asd_sas_event *ev =
+ container_of(work, struct asd_sas_event, work);
+ struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
&phy->phy_events_pending);
@@ -40,18 +42,22 @@
sas_deform_port(phy);
}
-static void sas_phye_oob_done(void *data)
+static void sas_phye_oob_done(struct work_struct *work)
{
- struct asd_sas_phy *phy = data;
+ struct asd_sas_event *ev =
+ container_of(work, struct asd_sas_event, work);
+ struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
&phy->phy_events_pending);
phy->error = 0;
}
-static void sas_phye_oob_error(void *data)
+static void sas_phye_oob_error(struct work_struct *work)
{
- struct asd_sas_phy *phy = data;
+ struct asd_sas_event *ev =
+ container_of(work, struct asd_sas_event, work);
+ struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *i =
@@ -80,9 +86,11 @@
}
}
-static void sas_phye_spinup_hold(void *data)
+static void sas_phye_spinup_hold(struct work_struct *work)
{
- struct asd_sas_phy *phy = data;
+ struct asd_sas_event *ev =
+ container_of(work, struct asd_sas_event, work);
+ struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
@@ -100,14 +108,14 @@
{
int i;
- static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = {
+ static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
[PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
[PHYE_OOB_DONE] = sas_phye_oob_done,
[PHYE_OOB_ERROR] = sas_phye_oob_error,
[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
};
- static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = {
+ static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
[PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
[PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
[PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
@@ -122,13 +130,18 @@
phy->error = 0;
INIT_LIST_HEAD(&phy->port_phy_el);
- for (k = 0; k < PORT_NUM_EVENTS; k++)
- INIT_WORK(&phy->port_events[k], sas_port_event_fns[k],
- phy);
+ for (k = 0; k < PORT_NUM_EVENTS; k++) {
+ INIT_WORK(&phy->port_events[k].work,
+ sas_port_event_fns[k]);
+ phy->port_events[k].phy = phy;
+ }
- for (k = 0; k < PHY_NUM_EVENTS; k++)
- INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k],
- phy);
+ for (k = 0; k < PHY_NUM_EVENTS; k++) {
+ INIT_WORK(&phy->phy_events[k].work,
+ sas_phy_event_fns[k]);
+ phy->phy_events[k].phy = phy;
+ }
+
phy->port = NULL;
phy->ha = sas_ha;
spin_lock_init(&phy->frame_rcvd_lock);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 253cdcf..971c37c 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -181,9 +181,11 @@
/* ---------- SAS port events ---------- */
-void sas_porte_bytes_dmaed(void *data)
+void sas_porte_bytes_dmaed(struct work_struct *work)
{
- struct asd_sas_phy *phy = data;
+ struct asd_sas_event *ev =
+ container_of(work, struct asd_sas_event, work);
+ struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
&phy->port_events_pending);
@@ -191,11 +193,13 @@
sas_form_port(phy);
}
-void sas_porte_broadcast_rcvd(void *data)
+void sas_porte_broadcast_rcvd(struct work_struct *work)
{
+ struct asd_sas_event *ev =
+ container_of(work, struct asd_sas_event, work);
+ struct asd_sas_phy *phy = ev->phy;
unsigned long flags;
u32 prim;
- struct asd_sas_phy *phy = data;
sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
&phy->port_events_pending);
@@ -208,9 +212,11 @@
sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
}
-void sas_porte_link_reset_err(void *data)
+void sas_porte_link_reset_err(struct work_struct *work)
{
- struct asd_sas_phy *phy = data;
+ struct asd_sas_event *ev =
+ container_of(work, struct asd_sas_event, work);
+ struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
&phy->port_events_pending);
@@ -218,9 +224,11 @@
sas_deform_port(phy);
}
-void sas_porte_timer_event(void *data)
+void sas_porte_timer_event(struct work_struct *work)
{
- struct asd_sas_phy *phy = data;
+ struct asd_sas_event *ev =
+ container_of(work, struct asd_sas_event, work);
+ struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
&phy->port_events_pending);
@@ -228,9 +236,11 @@
sas_deform_port(phy);
}
-void sas_porte_hard_reset(void *data)
+void sas_porte_hard_reset(struct work_struct *work)
{
- struct asd_sas_phy *phy = data;
+ struct asd_sas_event *ev =
+ container_of(work, struct asd_sas_event, work);
+ struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
&phy->port_events_pending);
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 89a2a9f..584ba4d 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -31,7 +31,7 @@
int base; /* Actual port address */
int mode; /* Transfer mode */
struct scsi_cmnd *cur_cmd; /* Current queued command */
- struct work_struct ppa_tq; /* Polling interrupt stuff */
+ struct delayed_work ppa_tq; /* Polling interrupt stuff */
unsigned long jstart; /* Jiffies at start */
unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */
unsigned int failed:1; /* Failure flag */
@@ -627,9 +627,9 @@
* the scheduler's task queue to generate a stream of call-backs and
* complete the request when the drive is ready.
*/
-static void ppa_interrupt(void *data)
+static void ppa_interrupt(struct work_struct *work)
{
- ppa_struct *dev = (ppa_struct *) data;
+ ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
struct scsi_cmnd *cmd = dev->cur_cmd;
if (!cmd) {
@@ -637,7 +637,6 @@
return;
}
if (ppa_engine(dev, cmd)) {
- dev->ppa_tq.data = (void *) dev;
schedule_delayed_work(&dev->ppa_tq, 1);
return;
}
@@ -822,8 +821,7 @@
cmd->result = DID_ERROR << 16; /* default return code */
cmd->SCp.phase = 0; /* bus free */
- dev->ppa_tq.data = dev;
- schedule_work(&dev->ppa_tq);
+ schedule_delayed_work(&dev->ppa_tq, 0);
ppa_pb_claim(dev);
@@ -1086,7 +1084,7 @@
else
ports = 8;
- INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev);
+ INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt);
err = -ENOMEM;
host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index db9d88e..969c9e4 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -961,9 +961,10 @@
* the mid-level tries to sleep when it reaches the driver threshold
* "host->can_queue". This can cause a panic if we were in our interrupt code.
**/
-static void qla4xxx_do_dpc(void *data)
+static void qla4xxx_do_dpc(struct work_struct *work)
{
- struct scsi_qla_host *ha = (struct scsi_qla_host *) data;
+ struct scsi_qla_host *ha =
+ container_of(work, struct scsi_qla_host, dpc_work);
struct ddb_entry *ddb_entry, *dtemp;
DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
@@ -1253,7 +1254,7 @@
ret = -ENODEV;
goto probe_failed;
}
- INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha);
+ INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
ret = request_irq(pdev->irq, qla4xxx_intr_handler,
SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 4d65614..14e635a 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -437,9 +437,10 @@
goto retry;
}
-static void scsi_target_reap_usercontext(void *data)
+static void scsi_target_reap_usercontext(struct work_struct *work)
{
- struct scsi_target *starget = data;
+ struct scsi_target *starget =
+ container_of(work, struct scsi_target, ew.work);
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
unsigned long flags;
@@ -475,7 +476,7 @@
starget->state = STARGET_DEL;
spin_unlock_irqrestore(shost->host_lock, flags);
execute_in_process_context(scsi_target_reap_usercontext,
- starget, &starget->ew);
+ &starget->ew);
return;
}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e1a9166..259c90c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -218,16 +218,16 @@
put_device(&sdev->sdev_gendev);
}
-static void scsi_device_dev_release_usercontext(void *data)
+static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
- struct device *dev = data;
struct scsi_device *sdev;
struct device *parent;
struct scsi_target *starget;
unsigned long flags;
- parent = dev->parent;
- sdev = to_scsi_device(dev);
+ sdev = container_of(work, struct scsi_device, ew.work);
+
+ parent = sdev->sdev_gendev.parent;
starget = to_scsi_target(parent);
spin_lock_irqsave(sdev->host->host_lock, flags);
@@ -258,7 +258,7 @@
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
- execute_in_process_context(scsi_device_dev_release_usercontext, dev,
+ execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}
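The scsi_target and scsi_device release paths use execute_in_process_context(), which lost its data argument in the same API change: the callback now receives the work_struct embedded in the caller-supplied execute_work and digs its object out with container_of(). A minimal sketch, assuming an invented object with an "ew" member:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
	struct execute_work ew;		/* struct execute_work wraps a work_struct */
	int id;
};

static void my_release_usercontext(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, ew.work);

	kfree(obj);
}

static void my_release(struct my_obj *obj)
{
	/* Runs the callback directly if already in process context,
	 * otherwise queues it; either way it receives &obj->ew.work. */
	execute_in_process_context(my_release_usercontext, &obj->ew);
}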
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 38c215a..3571ce8 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -241,9 +241,9 @@
#define FC_MGMTSRVR_PORTID 0x00000a
-static void fc_timeout_deleted_rport(void *data);
-static void fc_timeout_fail_rport_io(void *data);
-static void fc_scsi_scan_rport(void *data);
+static void fc_timeout_deleted_rport(struct work_struct *work);
+static void fc_timeout_fail_rport_io(struct work_struct *work);
+static void fc_scsi_scan_rport(struct work_struct *work);
/*
* Attribute counts pre object type...
@@ -1613,7 +1613,7 @@
* 1 on success / 0 already queued / < 0 for error
**/
static int
-fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
+fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
unsigned long delay)
{
if (unlikely(!fc_host_devloss_work_q(shost))) {
@@ -1625,9 +1625,6 @@
return -EINVAL;
}
- if (delay == 0)
- return queue_work(fc_host_devloss_work_q(shost), work);
-
return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
}
@@ -1712,12 +1709,13 @@
* fc_starget_delete - called to delete the scsi decendents of an rport
* (target and all sdevs)
*
- * @data: remote port to be operated on.
+ * @work: remote port to be operated on.
**/
static void
-fc_starget_delete(void *data)
+fc_starget_delete(struct work_struct *work)
{
- struct fc_rport *rport = (struct fc_rport *)data;
+ struct fc_rport *rport =
+ container_of(work, struct fc_rport, stgt_delete_work);
struct Scsi_Host *shost = rport_to_shost(rport);
unsigned long flags;
struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1751,12 +1749,13 @@
/**
* fc_rport_final_delete - finish rport termination and delete it.
*
- * @data: remote port to be deleted.
+ * @work: remote port to be deleted.
**/
static void
-fc_rport_final_delete(void *data)
+fc_rport_final_delete(struct work_struct *work)
{
- struct fc_rport *rport = (struct fc_rport *)data;
+ struct fc_rport *rport =
+ container_of(work, struct fc_rport, rport_delete_work);
struct device *dev = &rport->dev;
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1770,7 +1769,7 @@
/* Delete SCSI target and sdevs */
if (rport->scsi_target_id != -1)
- fc_starget_delete(data);
+ fc_starget_delete(&rport->stgt_delete_work);
else if (i->f->dev_loss_tmo_callbk)
i->f->dev_loss_tmo_callbk(rport);
else if (i->f->terminate_rport_io)
@@ -1829,11 +1828,11 @@
rport->channel = channel;
rport->fast_io_fail_tmo = -1;
- INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport);
- INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport);
- INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport);
- INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport);
- INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport);
+ INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
+ INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
+ INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
+ INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
+ INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
spin_lock_irqsave(shost->host_lock, flags);
@@ -1963,7 +1962,7 @@
}
if (match) {
- struct work_struct *work =
+ struct delayed_work *work =
&rport->dev_loss_work;
memcpy(&rport->node_name, &ids->node_name,
@@ -2267,12 +2266,13 @@
* was a SCSI target (thus was blocked), and failed
* to return in the alloted time.
*
- * @data: rport target that failed to reappear in the alloted time.
+ * @work: rport target that failed to reappear in the allotted time.
**/
static void
-fc_timeout_deleted_rport(void *data)
+fc_timeout_deleted_rport(struct work_struct *work)
{
- struct fc_rport *rport = (struct fc_rport *)data;
+ struct fc_rport *rport =
+ container_of(work, struct fc_rport, dev_loss_work.work);
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
unsigned long flags;
@@ -2366,15 +2366,16 @@
* fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a
* disconnected SCSI target.
*
- * @data: rport to terminate io on.
+ * @work: rport to terminate io on.
*
* Notes: Only requests the failure of the io, not that all are flushed
* prior to returning.
**/
static void
-fc_timeout_fail_rport_io(void *data)
+fc_timeout_fail_rport_io(struct work_struct *work)
{
- struct fc_rport *rport = (struct fc_rport *)data;
+ struct fc_rport *rport =
+ container_of(work, struct fc_rport, fail_io_work.work);
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -2387,12 +2388,13 @@
/**
* fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
*
- * @data: remote port to be scanned.
+ * @work: remote port to be scanned.
**/
static void
-fc_scsi_scan_rport(void *data)
+fc_scsi_scan_rport(struct work_struct *work)
{
- struct fc_rport *rport = (struct fc_rport *)data;
+ struct fc_rport *rport =
+ container_of(work, struct fc_rport, scan_work);
struct Scsi_Host *shost = rport_to_shost(rport);
unsigned long flags;
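fc_queue_devloss_work() can drop its special case because queueing a delayed_work with a delay of zero puts it on the workqueue immediately, so one call covers both paths. Sketched, with an invented wrapper:

#include <linux/workqueue.h>

static int my_queue_devloss(struct workqueue_struct *wq,
			    struct delayed_work *dwork, unsigned long delay)
{
	/* delay == 0 behaves like queue_work(): no timer, immediate queueing. */
	return queue_delayed_work(wq, dwork, delay);
}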
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 9b25124..9c22f13 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -234,9 +234,11 @@
return 0;
}
-static void session_recovery_timedout(void *data)
+static void session_recovery_timedout(struct work_struct *work)
{
- struct iscsi_cls_session *session = data;
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session,
+ recovery_work.work);
dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
"out after %d secs\n", session->recovery_tmo);
@@ -276,7 +278,7 @@
session->transport = transport;
session->recovery_tmo = 120;
- INIT_WORK(&session->recovery_work, session_recovery_timedout, session);
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
INIT_LIST_HEAD(&session->host_list);
INIT_LIST_HEAD(&session->sess_list);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 9f070f0..3fded48 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -964,9 +964,10 @@
};
static void
-spi_dv_device_work_wrapper(void *data)
+spi_dv_device_work_wrapper(struct work_struct *work)
{
- struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+ struct work_queue_wrapper *wqw =
+ container_of(work, struct work_queue_wrapper, work);
struct scsi_device *sdev = wqw->sdev;
kfree(wqw);
@@ -1006,7 +1007,7 @@
return;
}
- INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw);
+ INIT_WORK(&wqw->work, spi_dv_device_work_wrapper);
wqw->sdev = sdev;
schedule_work(&wqw->work);
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 72025df..494d9b8 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -148,7 +148,7 @@
void (*cs_control)(u32 command);
};
-static void pump_messages(void *data);
+static void pump_messages(struct work_struct *work);
static int flush(struct driver_data *drv_data)
{
@@ -884,9 +884,10 @@
}
}
-static void pump_messages(void *data)
+static void pump_messages(struct work_struct *work)
{
- struct driver_data *drv_data = data;
+ struct driver_data *drv_data =
+ container_of(work, struct driver_data, pump_messages);
unsigned long flags;
/* Lock queue and check for queue work */
@@ -1098,7 +1099,7 @@
tasklet_init(&drv_data->pump_transfers,
pump_transfers, (unsigned long)drv_data);
- INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data);
+ INIT_WORK(&drv_data->pump_messages, pump_messages);
drv_data->workqueue = create_singlethread_workqueue(
drv_data->master->cdev.dev->bus_id);
if (drv_data->workqueue == NULL)
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index a23862e..08c1c57 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -265,9 +265,10 @@
* Drivers can provide word-at-a-time i/o primitives, or provide
* transfer-at-a-time ones to leverage dma or fifo hardware.
*/
-static void bitbang_work(void *_bitbang)
+static void bitbang_work(struct work_struct *work)
{
- struct spi_bitbang *bitbang = _bitbang;
+ struct spi_bitbang *bitbang =
+ container_of(work, struct spi_bitbang, work);
unsigned long flags;
spin_lock_irqsave(&bitbang->lock, flags);
@@ -456,7 +457,7 @@
if (!bitbang->master || !bitbang->chipselect)
return -EINVAL;
- INIT_WORK(&bitbang->work, bitbang_work, bitbang);
+ INIT_WORK(&bitbang->work, bitbang_work);
spin_lock_init(&bitbang->lock);
INIT_LIST_HEAD(&bitbang->queue);
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index e656563..3dfa3e4 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -158,7 +158,7 @@
const struct cxacru_modem_type *modem_type;
int line_status;
- struct work_struct poll_work;
+ struct delayed_work poll_work;
/* contol handles */
struct mutex cm_serialize;
@@ -347,7 +347,7 @@
return 0;
}
-static void cxacru_poll_status(struct cxacru_data *instance);
+static void cxacru_poll_status(struct work_struct *work);
static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
struct atm_dev *atm_dev)
@@ -376,12 +376,14 @@
}
/* Start status polling */
- cxacru_poll_status(instance);
+ cxacru_poll_status(&instance->poll_work.work);
return 0;
}
-static void cxacru_poll_status(struct cxacru_data *instance)
+static void cxacru_poll_status(struct work_struct *work)
{
+ struct cxacru_data *instance =
+ container_of(work, struct cxacru_data, poll_work.work);
u32 buf[CXINF_MAX] = {};
struct usbatm_data *usbatm = instance->usbatm;
struct atm_dev *atm_dev = usbatm->atm_dev;
@@ -720,7 +722,7 @@
mutex_init(&instance->cm_serialize);
- INIT_WORK(&instance->poll_work, (void *)cxacru_poll_status, instance);
+ INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status);
usbatm_instance->driver_data = instance;
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index a823486..8ed6c75 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -142,7 +142,7 @@
struct speedtch_params params; /* set in probe, constant afterwards */
- struct work_struct status_checker;
+ struct delayed_work status_checker;
unsigned char last_status;
@@ -498,8 +498,11 @@
return ret;
}
-static void speedtch_check_status(struct speedtch_instance_data *instance)
+static void speedtch_check_status(struct work_struct *work)
{
+ struct speedtch_instance_data *instance =
+ container_of(work, struct speedtch_instance_data,
+ status_checker.work);
struct usbatm_data *usbatm = instance->usbatm;
struct atm_dev *atm_dev = usbatm->atm_dev;
unsigned char *buf = instance->scratch_buffer;
@@ -576,7 +579,7 @@
{
struct speedtch_instance_data *instance = (void *)data;
- schedule_work(&instance->status_checker);
+ schedule_delayed_work(&instance->status_checker, 0);
/* The following check is racy, but the race is harmless */
if (instance->poll_delay < MAX_POLL_DELAY)
@@ -596,7 +599,7 @@
if (int_urb) {
ret = usb_submit_urb(int_urb, GFP_ATOMIC);
if (!ret)
- schedule_work(&instance->status_checker);
+ schedule_delayed_work(&instance->status_checker, 0);
else {
atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
@@ -640,7 +643,7 @@
if ((int_urb = instance->int_urb)) {
ret = usb_submit_urb(int_urb, GFP_ATOMIC);
- schedule_work(&instance->status_checker);
+ schedule_delayed_work(&instance->status_checker, 0);
if (ret < 0) {
atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
goto fail;
@@ -855,7 +858,7 @@
usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
- INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance);
+ INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status);
instance->status_checker.timer.function = speedtch_status_poll;
instance->status_checker.timer.data = (unsigned long)instance;
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index c137c04..f2d196f 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -655,9 +655,9 @@
/*
* The uea_load_page() function must be called within a process context
*/
-static void uea_load_page(void *xsc)
+static void uea_load_page(struct work_struct *work)
{
- struct uea_softc *sc = xsc;
+ struct uea_softc *sc = container_of(work, struct uea_softc, task);
u16 pageno = sc->pageno;
u16 ovl = sc->ovl;
struct block_info bi;
@@ -1348,7 +1348,7 @@
uea_enters(INS_TO_USBDEV(sc));
- INIT_WORK(&sc->task, uea_load_page, sc);
+ INIT_WORK(&sc->task, uea_load_page);
init_waitqueue_head(&sc->sync_q);
init_waitqueue_head(&sc->cmv_ack_wait);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ec3438d..7f1fa95 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -421,9 +421,9 @@
schedule_work(&acm->work);
}
-static void acm_softint(void *private)
+static void acm_softint(struct work_struct *work)
{
- struct acm *acm = private;
+ struct acm *acm = container_of(work, struct acm, work);
dbg("Entering acm_softint.");
if (!ACM_READY(acm))
@@ -927,7 +927,7 @@
acm->rx_buflimit = num_rx_buf;
acm->urb_task.func = acm_rx_tasklet;
acm->urb_task.data = (unsigned long) acm;
- INIT_WORK(&acm->work, acm_softint, acm);
+ INIT_WORK(&acm->work, acm_softint);
spin_lock_init(&acm->throttle_lock);
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0ce393e..9be41ed 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -68,7 +68,7 @@
unsigned has_indicators:1;
u8 indicator[USB_MAXCHILDREN];
- struct work_struct leds;
+ struct delayed_work leds;
};
@@ -218,9 +218,10 @@
#define LED_CYCLE_PERIOD ((2*HZ)/3)
-static void led_work (void *__hub)
+static void led_work (struct work_struct *work)
{
- struct usb_hub *hub = __hub;
+ struct usb_hub *hub =
+ container_of(work, struct usb_hub, leds.work);
struct usb_device *hdev = hub->hdev;
unsigned i;
unsigned changed = 0;
@@ -405,9 +406,10 @@
* talking to TTs must queue control transfers (not just bulk and iso), so
* both can talk to the same hub concurrently.
*/
-static void hub_tt_kevent (void *arg)
+static void hub_tt_kevent (struct work_struct *work)
{
- struct usb_hub *hub = arg;
+ struct usb_hub *hub =
+ container_of(work, struct usb_hub, tt.kevent);
unsigned long flags;
spin_lock_irqsave (&hub->tt.lock, flags);
@@ -694,7 +696,7 @@
spin_lock_init (&hub->tt.lock);
INIT_LIST_HEAD (&hub->tt.clear_list);
- INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub);
+ INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
switch (hdev->descriptor.bDeviceProtocol) {
case 0:
break;
@@ -938,7 +940,7 @@
INIT_LIST_HEAD(&hub->event_list);
hub->intfdev = &intf->dev;
hub->hdev = hdev;
- INIT_WORK(&hub->leds, led_work, hub);
+ INIT_DELAYED_WORK(&hub->leds, led_work);
usb_set_intfdata (intf, hub);
intf->needs_remote_wakeup = 1;
@@ -2381,7 +2383,7 @@
/* hub LEDs are probably harder to miss than syslog */
if (hub->has_indicators) {
hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
- schedule_work (&hub->leds);
+ schedule_delayed_work (&hub->leds, 0);
}
}
kfree(qual);
@@ -2555,7 +2557,7 @@
if (hub->has_indicators) {
hub->indicator[port1-1] =
INDICATOR_AMBER_BLINK;
- schedule_work (&hub->leds);
+ schedule_delayed_work (&hub->leds, 0);
}
status = -ENOTCONN; /* Don't retry */
goto loop_disable;
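The hub's LED handler shows why delayed-work handlers name the member as leds.work: at this point in the tree a struct delayed_work is, roughly, a work_struct plus the timer that fires it, so the callback receives a pointer to the embedded work_struct and container_of() has to step through that inner member. Roughly:

#include <linux/workqueue.h>

/*
 * Approximate layout (from include/linux/workqueue.h of this era):
 *
 *      struct delayed_work {
 *              struct work_struct work;
 *              struct timer_list  timer;
 *      };
 *
 * so the callback is handed &hub->leds.work, and container_of() must
 * name the inner member explicitly.
 */
struct usb_hub_sketch {                    /* stand-in for struct usb_hub */
        struct delayed_work leds;
};

static void led_work_sketch(struct work_struct *work)
{
        struct usb_hub_sketch *hub =
                container_of(work, struct usb_hub_sketch, leds.work);

        (void)hub;                         /* blink the port indicators ... */
}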
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 29b0fa9..7390b67 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1501,9 +1501,10 @@
};
/* Worker routine for usb_driver_set_configuration() */
-static void driver_set_config_work(void *_req)
+static void driver_set_config_work(struct work_struct *work)
{
- struct set_config_request *req = _req;
+ struct set_config_request *req =
+ container_of(work, struct set_config_request, work);
usb_lock_device(req->udev);
usb_set_configuration(req->udev, req->config);
@@ -1541,7 +1542,7 @@
return -ENOMEM;
req->udev = udev;
req->config = config;
- INIT_WORK(&req->work, driver_set_config_work, req);
+ INIT_WORK(&req->work, driver_set_config_work);
usb_get_dev(udev);
if (!schedule_work(&req->work)) {
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 81cb525..02426d0 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -203,9 +203,10 @@
#ifdef CONFIG_USB_SUSPEND
/* usb_autosuspend_work - callback routine to autosuspend a USB device */
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
{
- struct usb_device *udev = _udev;
+ struct usb_device *udev =
+ container_of(work, struct usb_device, autosuspend.work);
usb_pm_lock(udev);
udev->auto_pm = 1;
@@ -215,7 +216,7 @@
#else
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
{}
#endif /* CONFIG_USB_SUSPEND */
@@ -304,7 +305,7 @@
#ifdef CONFIG_PM
mutex_init(&dev->pm_mutex);
- INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev);
+ INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
#endif
return dev;
}
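A small but necessary companion change in usb.c: the !CONFIG_USB_SUSPEND stub takes the new prototype too, because the single INIT_DELAYED_WORK() call shown above is type-checked against void (*)(struct work_struct *) in both configurations. The shape, reduced to a sketch:

#include <linux/workqueue.h>

#ifdef CONFIG_USB_SUSPEND
static void autosuspend_work_sketch(struct work_struct *work)
{
        /* real handler: container_of(work, ..., autosuspend.work), etc. */
}
#else
/* same signature, empty body, so the shared INIT_DELAYED_WORK() compiles */
static void autosuspend_work_sketch(struct work_struct *work)
{}
#endif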
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 3bd1dfe..d15bf22 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -1833,9 +1833,9 @@
spin_unlock_irqrestore(&dev->req_lock, flags);
}
-static void eth_work (void *_dev)
+static void eth_work (struct work_struct *work)
{
- struct eth_dev *dev = _dev;
+ struct eth_dev *dev = container_of(work, struct eth_dev, work);
if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) {
if (netif_running (dev->net))
@@ -2398,7 +2398,7 @@
dev = netdev_priv(net);
spin_lock_init (&dev->lock);
spin_lock_init (&dev->req_lock);
- INIT_WORK (&dev->work, eth_work, dev);
+ INIT_WORK (&dev->work, eth_work);
INIT_LIST_HEAD (&dev->tx_reqs);
INIT_LIST_HEAD (&dev->rx_reqs);
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index ef54e310b..a9d7119 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -163,7 +163,7 @@
u16 queue_next;
struct urb *urb_list[ENDP_QUEUE_SIZE];
struct list_head urb_more;
- struct work_struct scheduler;
+ struct delayed_work scheduler;
};
struct u132_ring {
unsigned in_use:1;
@@ -171,7 +171,7 @@
u8 number;
struct u132 *u132;
struct u132_endp *curr_endp;
- struct work_struct scheduler;
+ struct delayed_work scheduler;
};
#define OHCI_QUIRK_AMD756 0x01
#define OHCI_QUIRK_SUPERIO 0x02
@@ -198,7 +198,7 @@
u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
int flags;
unsigned long next_statechange;
- struct work_struct monitor;
+ struct delayed_work monitor;
int num_endpoints;
struct u132_addr addr[MAX_U132_ADDRS];
struct u132_udev udev[MAX_U132_UDEVS];
@@ -310,7 +310,7 @@
if (delta > 0) {
if (queue_delayed_work(workqueue, &ring->scheduler, delta))
return;
- } else if (queue_work(workqueue, &ring->scheduler))
+ } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
return;
kref_put(&u132->kref, u132_hcd_delete);
return;
@@ -389,12 +389,8 @@
static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(workqueue, &endp->scheduler, delta))
- kref_get(&endp->kref);
- } else if (queue_work(workqueue, &endp->scheduler))
- kref_get(&endp->kref);
- return;
+ if (queue_delayed_work(workqueue, &endp->scheduler, delta))
+ kref_get(&endp->kref);
}
static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
@@ -410,24 +406,14 @@
static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(workqueue, &u132->monitor, delta)) {
- kref_get(&u132->kref);
- }
- } else if (queue_work(workqueue, &u132->monitor))
- kref_get(&u132->kref);
- return;
+ if (queue_delayed_work(workqueue, &u132->monitor, delta))
+ kref_get(&u132->kref);
}
static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(workqueue, &u132->monitor, delta))
- return;
- } else if (queue_work(workqueue, &u132->monitor))
- return;
- kref_put(&u132->kref, u132_hcd_delete);
- return;
+ if (!queue_delayed_work(workqueue, &u132->monitor, delta))
+ kref_put(&u132->kref, u132_hcd_delete);
}
static void u132_monitor_cancel_work(struct u132 *u132)
@@ -489,9 +475,9 @@
return 0;
}
-static void u132_hcd_monitor_work(void *data)
+static void u132_hcd_monitor_work(struct work_struct *work)
{
- struct u132 *u132 = data;
+ struct u132 *u132 = container_of(work, struct u132, monitor.work);
if (u132->going > 1) {
dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
, u132->going);
@@ -1315,15 +1301,14 @@
}
}
-static void u132_hcd_ring_work_scheduler(void *data);
-static void u132_hcd_endp_work_scheduler(void *data);
/*
* this work function is only executed from the work queue
*
*/
-static void u132_hcd_ring_work_scheduler(void *data)
+static void u132_hcd_ring_work_scheduler(struct work_struct *work)
{
- struct u132_ring *ring = data;
+ struct u132_ring *ring =
+ container_of(work, struct u132_ring, scheduler.work);
struct u132 *u132 = ring->u132;
down(&u132->scheduler_lock);
if (ring->in_use) {
@@ -1382,10 +1367,11 @@
}
}
-static void u132_hcd_endp_work_scheduler(void *data)
+static void u132_hcd_endp_work_scheduler(struct work_struct *work)
{
struct u132_ring *ring;
- struct u132_endp *endp = data;
+ struct u132_endp *endp =
+ container_of(work, struct u132_endp, scheduler.work);
struct u132 *u132 = endp->u132;
down(&u132->scheduler_lock);
ring = endp->ring;
@@ -1943,7 +1929,7 @@
if (!endp) {
return -ENOMEM;
}
- INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+ INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
ring = endp->ring = &u132->ring[0];
@@ -2032,7 +2018,7 @@
if (!endp) {
return -ENOMEM;
}
- INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+ INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
endp->dequeueing = 0;
@@ -2117,7 +2103,7 @@
if (!endp) {
return -ENOMEM;
}
- INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+ INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
ring = endp->ring = &u132->ring[0];
@@ -3096,10 +3082,10 @@
ring->number = rings + 1;
ring->length = 0;
ring->curr_endp = NULL;
- INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler,
- (void *)ring);
+ INIT_DELAYED_WORK(&ring->scheduler,
+ u132_hcd_ring_work_scheduler);
} down(&u132->sw_lock);
- INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132);
+ INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
while (ports-- > 0) {
struct u132_port *port = &u132->port[ports];
port->u132 = u132;
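The u132-hcd helpers collapse the old two-branch queueing because queue_delayed_work() with a zero delay covers what queue_work() used to do here; the return value still says whether the item was freshly queued, which is what keeps the kref accounting balanced. The two idioms used above, in isolation (workqueue, kref and release function are placeholders):

#include <linux/kref.h>
#include <linux/workqueue.h>

/* "queue plus take a reference": get a ref only if the item was not
 * already pending, mirroring u132_monitor_queue_work() above */
static void sketch_queue_work(struct workqueue_struct *wq,
                              struct delayed_work *dwork,
                              struct kref *kref, unsigned int delta)
{
        if (queue_delayed_work(wq, dwork, delta))
                kref_get(kref);
}

/* "requeue from inside the handler": the caller already holds a ref,
 * so drop it if requeueing failed, mirroring u132_monitor_requeue_work() */
static void sketch_requeue_work(struct workqueue_struct *wq,
                                struct delayed_work *dwork,
                                struct kref *kref,
                                void (*release)(struct kref *),
                                unsigned int delta)
{
        if (!queue_delayed_work(wq, dwork, delta))
                kref_put(kref, release);
}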
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index a49644b..4295bab 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -969,9 +969,10 @@
}
/* Workqueue routine to reset the device or clear a halt */
-static void hid_reset(void *_hid)
+static void hid_reset(struct work_struct *work)
{
- struct hid_device *hid = (struct hid_device *) _hid;
+ struct hid_device *hid =
+ container_of(work, struct hid_device, reset_work);
int rc_lock, rc = 0;
if (test_bit(HID_CLEAR_HALT, &hid->iofl)) {
@@ -2043,7 +2044,7 @@
init_waitqueue_head(&hid->wait);
- INIT_WORK(&hid->reset_work, hid_reset, hid);
+ INIT_WORK(&hid->reset_work, hid_reset);
setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid);
spin_lock_init(&hid->inlock);
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index ba30ca6..02cbb7f 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -76,7 +76,7 @@
char *urbdata; /* interrupt URB data buffer */
char *msgdata; /* control message data buffer */
- struct work_struct work;
+ struct delayed_work work;
int button_pressed;
spinlock_t lock;
};
@@ -117,7 +117,7 @@
case ACD_BTN_BRIGHT_UP:
case ACD_BTN_BRIGHT_DOWN:
pdata->button_pressed = 1;
- queue_work(wq, &pdata->work);
+ queue_delayed_work(wq, &pdata->work, 0);
break;
case ACD_BTN_NONE:
default:
@@ -184,9 +184,10 @@
.max_brightness = 0xFF
};
-static void appledisplay_work(void *private)
+static void appledisplay_work(struct work_struct *work)
{
- struct appledisplay *pdata = private;
+ struct appledisplay *pdata =
+ container_of(work, struct appledisplay, work.work);
int retval;
up(&pdata->bd->sem);
@@ -238,7 +239,7 @@
pdata->udev = udev;
spin_lock_init(&pdata->lock);
- INIT_WORK(&pdata->work, appledisplay_work, pdata);
+ INIT_DELAYED_WORK(&pdata->work, appledisplay_work);
/* Allocate buffer for control messages */
pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index cb0ba31..18b1925 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -156,9 +156,9 @@
struct usb_device *udev;
struct usb_interface *interface;
struct usb_class_driver *class;
- struct work_struct status_work;
- struct work_struct command_work;
- struct work_struct respond_work;
+ struct delayed_work status_work;
+ struct delayed_work command_work;
+ struct delayed_work respond_work;
struct u132_platform_data platform_data;
struct resource resources[0];
struct platform_device platform_dev;
@@ -210,23 +210,14 @@
static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
- return;
- } else if (queue_work(status_queue, &ftdi->status_work))
- return;
- kref_put(&ftdi->kref, ftdi_elan_delete);
- return;
+ if (!queue_delayed_work(status_queue, &ftdi->status_work, delta))
+ kref_put(&ftdi->kref, ftdi_elan_delete);
}
static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
- kref_get(&ftdi->kref);
- } else if (queue_work(status_queue, &ftdi->status_work))
- kref_get(&ftdi->kref);
- return;
+ if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
+ kref_get(&ftdi->kref);
}
static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
@@ -237,25 +228,14 @@
static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(command_queue, &ftdi->command_work,
- delta))
- return;
- } else if (queue_work(command_queue, &ftdi->command_work))
- return;
- kref_put(&ftdi->kref, ftdi_elan_delete);
- return;
+ if (!queue_delayed_work(command_queue, &ftdi->command_work, delta))
+ kref_put(&ftdi->kref, ftdi_elan_delete);
}
static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(command_queue, &ftdi->command_work,
- delta))
- kref_get(&ftdi->kref);
- } else if (queue_work(command_queue, &ftdi->command_work))
- kref_get(&ftdi->kref);
- return;
+ if (queue_delayed_work(command_queue, &ftdi->command_work, delta))
+ kref_get(&ftdi->kref);
}
static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
@@ -267,25 +247,14 @@
static void ftdi_response_requeue_work(struct usb_ftdi *ftdi,
unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(respond_queue, &ftdi->respond_work,
- delta))
- return;
- } else if (queue_work(respond_queue, &ftdi->respond_work))
- return;
- kref_put(&ftdi->kref, ftdi_elan_delete);
- return;
+ if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+ kref_put(&ftdi->kref, ftdi_elan_delete);
}
static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(respond_queue, &ftdi->respond_work,
- delta))
- kref_get(&ftdi->kref);
- } else if (queue_work(respond_queue, &ftdi->respond_work))
- kref_get(&ftdi->kref);
- return;
+ if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+ kref_get(&ftdi->kref);
}
static void ftdi_response_cancel_work(struct usb_ftdi *ftdi)
@@ -475,9 +444,11 @@
return;
}
-static void ftdi_elan_command_work(void *data)
+static void ftdi_elan_command_work(struct work_struct *work)
{
- struct usb_ftdi *ftdi = data;
+ struct usb_ftdi *ftdi =
+ container_of(work, struct usb_ftdi, command_work.work);
+
if (ftdi->disconnected > 0) {
ftdi_elan_put_kref(ftdi);
return;
@@ -500,9 +471,10 @@
return;
}
-static void ftdi_elan_respond_work(void *data)
+static void ftdi_elan_respond_work(struct work_struct *work)
{
- struct usb_ftdi *ftdi = data;
+ struct usb_ftdi *ftdi =
+ container_of(work, struct usb_ftdi, respond_work.work);
if (ftdi->disconnected > 0) {
ftdi_elan_put_kref(ftdi);
return;
@@ -534,9 +506,10 @@
* after the FTDI has been synchronized
*
*/
-static void ftdi_elan_status_work(void *data)
+static void ftdi_elan_status_work(struct work_struct *work)
{
- struct usb_ftdi *ftdi = data;
+ struct usb_ftdi *ftdi =
+ container_of(work, struct usb_ftdi, status_work.work);
int work_delay_in_msec = 0;
if (ftdi->disconnected > 0) {
ftdi_elan_put_kref(ftdi);
@@ -2677,12 +2650,9 @@
ftdi->class = NULL;
dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a"
"ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber);
- INIT_WORK(&ftdi->status_work, ftdi_elan_status_work,
- (void *)ftdi);
- INIT_WORK(&ftdi->command_work, ftdi_elan_command_work,
- (void *)ftdi);
- INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work,
- (void *)ftdi);
+ INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work);
+ INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work);
+ INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work);
ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000));
return 0;
} else {
diff --git a/drivers/usb/misc/phidgetkit.c b/drivers/usb/misc/phidgetkit.c
index 9110793..9659c79 100644
--- a/drivers/usb/misc/phidgetkit.c
+++ b/drivers/usb/misc/phidgetkit.c
@@ -81,8 +81,8 @@
unsigned char *data;
dma_addr_t data_dma;
- struct work_struct do_notify;
- struct work_struct do_resubmit;
+ struct delayed_work do_notify;
+ struct delayed_work do_resubmit;
unsigned long input_events;
unsigned long sensor_events;
};
@@ -374,7 +374,7 @@
}
if (kit->input_events || kit->sensor_events)
- schedule_work(&kit->do_notify);
+ schedule_delayed_work(&kit->do_notify, 0);
resubmit:
status = usb_submit_urb(urb, SLAB_ATOMIC);
@@ -384,9 +384,10 @@
kit->udev->devpath, status);
}
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
{
- struct interfacekit *kit = data;
+ struct interfacekit *kit =
+ container_of(work, struct interfacekit, do_notify.work);
int i;
char sysfs_file[8];
@@ -405,9 +406,11 @@
}
}
-static void do_resubmit(void *data)
+static void do_resubmit(struct work_struct *work)
{
- set_outputs(data);
+ struct interfacekit *kit =
+ container_of(work, struct interfacekit, do_resubmit.work);
+ set_outputs(kit);
}
#define show_set_output(value) \
@@ -575,8 +578,8 @@
kit->udev = usb_get_dev(dev);
kit->intf = intf;
- INIT_WORK(&kit->do_notify, do_notify, kit);
- INIT_WORK(&kit->do_resubmit, do_resubmit, kit);
+ INIT_DELAYED_WORK(&kit->do_notify, do_notify);
+ INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit);
usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data,
maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
interfacekit_irq, kit, endpoint->bInterval);
diff --git a/drivers/usb/misc/phidgetmotorcontrol.c b/drivers/usb/misc/phidgetmotorcontrol.c
index c3469b0..2bb4fa5 100644
--- a/drivers/usb/misc/phidgetmotorcontrol.c
+++ b/drivers/usb/misc/phidgetmotorcontrol.c
@@ -41,7 +41,7 @@
unsigned char *data;
dma_addr_t data_dma;
- struct work_struct do_notify;
+ struct delayed_work do_notify;
unsigned long input_events;
unsigned long speed_events;
unsigned long exceed_events;
@@ -148,7 +148,7 @@
set_bit(1, &mc->exceed_events);
if (mc->input_events || mc->exceed_events || mc->speed_events)
- schedule_work(&mc->do_notify);
+ schedule_delayed_work(&mc->do_notify, 0);
resubmit:
status = usb_submit_urb(urb, SLAB_ATOMIC);
@@ -159,9 +159,10 @@
mc->udev->devpath, status);
}
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
{
- struct motorcontrol *mc = data;
+ struct motorcontrol *mc =
+ container_of(work, struct motorcontrol, do_notify.work);
int i;
char sysfs_file[8];
@@ -348,7 +349,7 @@
mc->udev = usb_get_dev(dev);
mc->intf = intf;
mc->acceleration[0] = mc->acceleration[1] = 10;
- INIT_WORK(&mc->do_notify, do_notify, mc);
+ INIT_DELAYED_WORK(&mc->do_notify, do_notify);
usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data,
maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
motorcontrol_irq, mc, endpoint->bInterval);
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c
index 7c906a4..fa78326 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/usb/net/kaweth.c
@@ -222,7 +222,7 @@
int suspend_lowmem_ctrl;
int linkstate;
int opened;
- struct work_struct lowmem_work;
+ struct delayed_work lowmem_work;
struct usb_device *dev;
struct net_device *net;
@@ -530,9 +530,10 @@
kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC);
}
-static void kaweth_resubmit_tl(void *d)
+static void kaweth_resubmit_tl(struct work_struct *work)
{
- struct kaweth_device *kaweth = (struct kaweth_device *)d;
+ struct kaweth_device *kaweth =
+ container_of(work, struct kaweth_device, lowmem_work.work);
if (IS_BLOCKED(kaweth->status))
return;
@@ -1126,7 +1127,7 @@
/* kaweth is zeroed as part of alloc_netdev */
- INIT_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl, (void *)kaweth);
+ INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
SET_MODULE_OWNER(netdev);
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c
index 69eb0db..b5690b3 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/usb/net/pegasus.c
@@ -1281,9 +1281,9 @@
static struct workqueue_struct *pegasus_workqueue = NULL;
#define CARRIER_CHECK_DELAY (2 * HZ)
-static void check_carrier(void *data)
+static void check_carrier(struct work_struct *work)
{
- pegasus_t *pegasus = data;
+ pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
set_carrier(pegasus->net);
if (!(pegasus->flags & PEGASUS_UNPLUG)) {
queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
@@ -1319,7 +1319,7 @@
tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
- INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus);
+ INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
pegasus->intf = intf;
pegasus->usb = dev;
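pegasus shows the self-rearming form: the handler re-queues its own delayed_work with the full interval, so the carrier check keeps polling until the device is unplugged. The pattern, reduced to a skeleton (workqueue name, flag bit and delay are placeholders):

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define POLL_DELAY   (2 * HZ)
#define POLL_UNPLUG  0                     /* bit in ->flags */

struct poller_sketch {                     /* stand-in for pegasus_t */
        struct delayed_work carrier_check;
        unsigned long flags;
};

static struct workqueue_struct *poll_wq;   /* created at module init */

static void check_carrier_sketch(struct work_struct *work)
{
        struct poller_sketch *p =
                container_of(work, struct poller_sketch, carrier_check.work);

        /* ... sample the link state here ... */

        if (!test_bit(POLL_UNPLUG, &p->flags))          /* re-arm ourselves */
                queue_delayed_work(poll_wq, &p->carrier_check, POLL_DELAY);
}

static void start_polling_sketch(struct poller_sketch *p)
{
        INIT_DELAYED_WORK(&p->carrier_check, check_carrier_sketch);
        queue_delayed_work(poll_wq, &p->carrier_check, POLL_DELAY);
}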
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h
index 0064380..98f6898 100644
--- a/drivers/usb/net/pegasus.h
+++ b/drivers/usb/net/pegasus.h
@@ -95,7 +95,7 @@
int dev_index;
int intr_interval;
struct tasklet_struct rx_tl;
- struct work_struct carrier_check;
+ struct delayed_work carrier_check;
struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb;
struct sk_buff *rx_pool[RX_SKBS];
struct sk_buff *rx_skb;
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 7672e11..327f975 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -782,9 +782,10 @@
* especially now that control transfers can be queued.
*/
static void
-kevent (void *data)
+kevent (struct work_struct *work)
{
- struct usbnet *dev = data;
+ struct usbnet *dev =
+ container_of(work, struct usbnet, kevent);
int status;
/* usb_clear_halt() needs a thread context */
@@ -1146,7 +1147,7 @@
skb_queue_head_init (&dev->done);
dev->bh.func = usbnet_bh;
dev->bh.data = (unsigned long) dev;
- INIT_WORK (&dev->kevent, kevent, dev);
+ INIT_WORK (&dev->kevent, kevent);
dev->delay.function = usbnet_bh;
dev->delay.data = (unsigned long) dev;
init_timer (&dev->delay);
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b1b5707..86bcf63 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -92,6 +92,7 @@
struct circ_buf *rx_buf; /* read buffer */
int rx_flags; /* for throttilng */
struct work_struct rx_work; /* work cue for the receiving line */
+ struct usb_serial_port *port; /* USB port with which associated */
};
/* Private methods */
@@ -251,10 +252,11 @@
schedule_work(&port->work);
}
-static void aircable_read(void *params)
+static void aircable_read(struct work_struct *work)
{
- struct usb_serial_port *port = params;
- struct aircable_private *priv = usb_get_serial_port_data(port);
+ struct aircable_private *priv =
+ container_of(work, struct aircable_private, rx_work);
+ struct usb_serial_port *port = priv->port;
struct tty_struct *tty;
unsigned char *data;
int count;
@@ -349,7 +351,8 @@
}
priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
- INIT_WORK(&priv->rx_work, aircable_read, port);
+ priv->port = port;
+ INIT_WORK(&priv->rx_work, aircable_read);
usb_set_serial_port_data(serial->port[0], priv);
@@ -516,7 +519,7 @@
package_length - shift);
}
}
- aircable_read(port);
+ aircable_read(&priv->rx_work);
}
/* Schedule the next read _if_ we are still open */
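aircable is one of the spots where container_of() alone is not enough: the old callback received the usb_serial_port, which does not embed rx_work, so the conversion stores a back-pointer in the private structure at setup time. Sketch of that shape (field names follow the hunks above, the rest is abbreviated):

#include <linux/workqueue.h>

struct usb_serial_port;                    /* opaque here */

struct aircable_private_sketch {           /* stand-in for aircable_private */
        struct work_struct rx_work;
        struct usb_serial_port *port;      /* new: back-pointer for the handler */
};

static void aircable_read_sketch(struct work_struct *work)
{
        struct aircable_private_sketch *priv =
                container_of(work, struct aircable_private_sketch, rx_work);
        struct usb_serial_port *port = priv->port;

        /* push received bytes to the tty attached to "port" ... */
        (void)port;
}

static void aircable_setup_sketch(struct aircable_private_sketch *priv,
                                  struct usb_serial_port *port)
{
        priv->port = port;                 /* set before the work can run */
        INIT_WORK(&priv->rx_work, aircable_read_sketch);
}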
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 5e3ac28..83d0e21 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -430,13 +430,14 @@
int dp_in_close; /* close in progress */
wait_queue_head_t dp_close_wait; /* wait queue for close */
struct work_struct dp_wakeup_work;
+ struct usb_serial_port *dp_port;
};
/* Local Function Declarations */
static void digi_wakeup_write( struct usb_serial_port *port );
-static void digi_wakeup_write_lock(void *);
+static void digi_wakeup_write_lock(struct work_struct *work);
static int digi_write_oob_command( struct usb_serial_port *port,
unsigned char *buf, int count, int interruptible );
static int digi_write_inb_command( struct usb_serial_port *port,
@@ -598,11 +599,12 @@
* on writes.
*/
-static void digi_wakeup_write_lock(void *arg)
+static void digi_wakeup_write_lock(struct work_struct *work)
{
- struct usb_serial_port *port = arg;
+ struct digi_port *priv =
+ container_of(work, struct digi_port, dp_wakeup_work);
+ struct usb_serial_port *port = priv->dp_port;
unsigned long flags;
- struct digi_port *priv = usb_get_serial_port_data(port);
spin_lock_irqsave( &priv->dp_port_lock, flags );
@@ -1702,8 +1704,8 @@
init_waitqueue_head( &priv->dp_flush_wait );
priv->dp_in_close = 0;
init_waitqueue_head( &priv->dp_close_wait );
- INIT_WORK(&priv->dp_wakeup_work,
- digi_wakeup_write_lock, serial->port[i]);
+ INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
+ priv->dp_port = serial->port[i];
/* initialize write wait queue for this port */
init_waitqueue_head( &serial->port[i]->write_wait );
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 89ce277..72e4d48 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -559,7 +559,8 @@
char prev_status, diff_status; /* Used for TIOCMIWAIT */
__u8 rx_flags; /* receive state flags (throttling) */
spinlock_t rx_lock; /* spinlock for receive state */
- struct work_struct rx_work;
+ struct delayed_work rx_work;
+ struct usb_serial_port *port;
int rx_processed;
unsigned long rx_bytes;
@@ -593,7 +594,7 @@
static int ftdi_chars_in_buffer (struct usb_serial_port *port);
static void ftdi_write_bulk_callback (struct urb *urb);
static void ftdi_read_bulk_callback (struct urb *urb);
-static void ftdi_process_read (void *param);
+static void ftdi_process_read (struct work_struct *work);
static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old);
static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file);
static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear);
@@ -1201,7 +1202,8 @@
port->read_urb->transfer_buffer_length = BUFSZ;
}
- INIT_WORK(&priv->rx_work, ftdi_process_read, port);
+ INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read);
+ priv->port = port;
/* Free port's existing write urb and transfer buffer. */
if (port->write_urb) {
@@ -1640,17 +1642,18 @@
priv->rx_bytes += countread;
spin_unlock_irqrestore(&priv->rx_lock, flags);
- ftdi_process_read(port);
+ ftdi_process_read(&priv->rx_work.work);
} /* ftdi_read_bulk_callback */
-static void ftdi_process_read (void *param)
+static void ftdi_process_read (struct work_struct *work)
{ /* ftdi_process_read */
- struct usb_serial_port *port = (struct usb_serial_port*)param;
+ struct ftdi_private *priv =
+ container_of(work, struct ftdi_private, rx_work.work);
+ struct usb_serial_port *port = priv->port;
struct urb *urb;
struct tty_struct *tty;
- struct ftdi_private *priv;
char error_flag;
unsigned char *data;
@@ -2179,7 +2182,7 @@
spin_unlock_irqrestore(&priv->rx_lock, flags);
if (actually_throttled)
- schedule_work(&priv->rx_work);
+ schedule_delayed_work(&priv->rx_work, 0);
}
static int __init ftdi_init (void)
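ftdi_sio also calls its read routine synchronously from the URB completion path; with the new prototype that direct call has to hand over the work_struct embedded in the delayed_work, i.e. &priv->rx_work.work, so the handler's container_of() still lands on the right ftdi_private. In isolation (stand-in names):

#include <linux/types.h>
#include <linux/workqueue.h>

struct ftdi_private_sketch {               /* stand-in for struct ftdi_private */
        struct delayed_work rx_work;
};

static void process_read_sketch(struct work_struct *work)
{
        struct ftdi_private_sketch *priv =
                container_of(work, struct ftdi_private_sketch, rx_work.work);

        (void)priv;                        /* drain the receive buffer ... */
}

static void kick_read_sketch(struct ftdi_private_sketch *priv, bool defer)
{
        if (defer)
                schedule_delayed_work(&priv->rx_work, 0);
        else
                /* synchronous path: pass the embedded work_struct so the
                 * handler's container_of() still resolves "priv" */
                process_read_sketch(&priv->rx_work.work);
}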
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 9090051..e09a0bf 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -120,6 +120,8 @@
int tx_throttled;
struct work_struct wakeup_work;
struct work_struct unthrottle_work;
+ struct usb_serial *serial;
+ struct usb_serial_port *port;
};
@@ -175,9 +177,11 @@
};
#endif
-static void keyspan_pda_wakeup_write( struct usb_serial_port *port )
+static void keyspan_pda_wakeup_write(struct work_struct *work)
{
-
+ struct keyspan_pda_private *priv =
+ container_of(work, struct keyspan_pda_private, wakeup_work);
+ struct usb_serial_port *port = priv->port;
struct tty_struct *tty = port->tty;
/* wake up port processes */
@@ -187,8 +191,11 @@
tty_wakeup(tty);
}
-static void keyspan_pda_request_unthrottle( struct usb_serial *serial )
+static void keyspan_pda_request_unthrottle(struct work_struct *work)
{
+ struct keyspan_pda_private *priv =
+ container_of(work, struct keyspan_pda_private, unthrottle_work);
+ struct usb_serial *serial = priv->serial;
int result;
dbg(" request_unthrottle");
@@ -765,11 +772,10 @@
return (1); /* error */
usb_set_serial_port_data(serial->port[0], priv);
init_waitqueue_head(&serial->port[0]->write_wait);
- INIT_WORK(&priv->wakeup_work, (void *)keyspan_pda_wakeup_write,
- (void *)(serial->port[0]));
- INIT_WORK(&priv->unthrottle_work,
- (void *)keyspan_pda_request_unthrottle,
- (void *)(serial));
+ INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
+ INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
+ priv->serial = serial;
+ priv->port = serial->port[0];
return (0);
}
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index c1257d5..3d5072f 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -533,9 +533,10 @@
schedule_work(&port->work);
}
-static void usb_serial_port_work(void *private)
+static void usb_serial_port_work(struct work_struct *work)
{
- struct usb_serial_port *port = private;
+ struct usb_serial_port *port =
+ container_of(work, struct usb_serial_port, work);
struct tty_struct *tty;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -799,7 +800,7 @@
port->serial = serial;
spin_lock_init(&port->lock);
mutex_init(&port->mutex);
- INIT_WORK(&port->work, usb_serial_port_work, port);
+ INIT_WORK(&port->work, usb_serial_port_work);
serial->port[i] = port;
}
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 4d1cd7a..154c7d2 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -227,6 +227,7 @@
struct list_head rx_urbs_submitted;
struct list_head rx_urb_q;
struct work_struct rx_work;
+ struct usb_serial_port *port;
struct list_head tx_urbs_free;
struct list_head tx_urbs_submitted;
};
@@ -241,7 +242,7 @@
static int start_port_read(struct usb_serial_port *port);
static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head);
static struct list_head *list_first(struct list_head *head);
-static void rx_data_softint(void *private);
+static void rx_data_softint(struct work_struct *work);
static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize);
static int firm_open(struct usb_serial_port *port);
@@ -424,7 +425,8 @@
spin_lock_init(&info->lock);
info->flags = 0;
info->mcr = 0;
- INIT_WORK(&info->rx_work, rx_data_softint, port);
+ INIT_WORK(&info->rx_work, rx_data_softint);
+ info->port = port;
INIT_LIST_HEAD(&info->rx_urbs_free);
INIT_LIST_HEAD(&info->rx_urbs_submitted);
@@ -949,7 +951,7 @@
spin_unlock_irqrestore(&info->lock, flags);
if (actually_throttled)
- rx_data_softint(port);
+ rx_data_softint(&info->rx_work);
return;
}
@@ -1400,10 +1402,11 @@
}
-static void rx_data_softint(void *private)
+static void rx_data_softint(struct work_struct *work)
{
- struct usb_serial_port *port = (struct usb_serial_port *)private;
- struct whiteheat_private *info = usb_get_serial_port_data(port);
+ struct whiteheat_private *info =
+ container_of(work, struct whiteheat_private, rx_work);
+ struct usb_serial_port *port = info->port;
struct tty_struct *tty = port->tty;
struct whiteheat_urb_wrap *wrap;
struct urb *urb;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 302174b..31f476a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -383,9 +383,9 @@
softback_top = 0;
}
-static void fb_flashcursor(void *private)
+static void fb_flashcursor(struct work_struct *work)
{
- struct fb_info *info = private;
+ struct fb_info *info = container_of(work, struct fb_info, queue);
struct fbcon_ops *ops = info->fbcon_par;
struct display *p;
struct vc_data *vc = NULL;
@@ -442,7 +442,7 @@
if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
!(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) {
if (!info->queue.func)
- INIT_WORK(&info->queue, fb_flashcursor, info);
+ INIT_WORK(&info->queue, fb_flashcursor);
init_timer(&ops->cursor_timer);
ops->cursor_timer.function = cursor_timer_handler;
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 8a8ae55..38eb0b6 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -964,9 +964,10 @@
* Our LCD controller task (which is called when we blank or unblank)
* via keventd.
*/
-static void pxafb_task(void *dummy)
+static void pxafb_task(struct work_struct *work)
{
- struct pxafb_info *fbi = dummy;
+ struct pxafb_info *fbi =
+ container_of(work, struct pxafb_info, task);
u_int state = xchg(&fbi->task_state, -1);
set_ctrlr_state(fbi, state);
@@ -1159,7 +1160,7 @@
}
init_waitqueue_head(&fbi->ctrlr_wait);
- INIT_WORK(&fbi->task, pxafb_task, fbi);
+ INIT_WORK(&fbi->task, pxafb_task);
init_MUTEX(&fbi->ctrlr_sem);
return fbi;
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 90a79c7..944273c 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -110,8 +110,8 @@
};
static int v9fs_poll_proc(void *);
-static void v9fs_read_work(void *);
-static void v9fs_write_work(void *);
+static void v9fs_read_work(struct work_struct *work);
+static void v9fs_write_work(struct work_struct *work);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
poll_table * p);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
@@ -297,8 +297,8 @@
m->rbuf = NULL;
m->wpos = m->wsize = 0;
m->wbuf = NULL;
- INIT_WORK(&m->rq, v9fs_read_work, m);
- INIT_WORK(&m->wq, v9fs_write_work, m);
+ INIT_WORK(&m->rq, v9fs_read_work);
+ INIT_WORK(&m->wq, v9fs_write_work);
m->wsched = 0;
memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
m->poll_task = NULL;
@@ -458,13 +458,13 @@
/**
* v9fs_write_work - called when a transport can send some data
*/
-static void v9fs_write_work(void *a)
+static void v9fs_write_work(struct work_struct *work)
{
int n, err;
struct v9fs_mux_data *m;
struct v9fs_req *req;
- m = a;
+ m = container_of(work, struct v9fs_mux_data, wq);
if (m->err < 0) {
clear_bit(Wworksched, &m->wsched);
@@ -564,7 +564,7 @@
/**
* v9fs_read_work - called when there is some data to be read from a transport
*/
-static void v9fs_read_work(void *a)
+static void v9fs_read_work(struct work_struct *work)
{
int n, err;
struct v9fs_mux_data *m;
@@ -572,7 +572,7 @@
struct v9fs_fcall *rcall;
char *rbuf;
- m = a;
+ m = container_of(work, struct v9fs_mux_data, rq);
if (m->err < 0)
return;
diff --git a/fs/aio.c b/fs/aio.c
index 277a5f2..287a1bc 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -53,13 +53,13 @@
static struct workqueue_struct *aio_wq;
/* Used for rare fput completion. */
-static void aio_fput_routine(void *);
-static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
+static void aio_fput_routine(struct work_struct *);
+static DECLARE_WORK(fput_work, aio_fput_routine);
static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);
-static void aio_kick_handler(void *);
+static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);
/* aio_setup
@@ -227,7 +227,7 @@
INIT_LIST_HEAD(&ctx->active_reqs);
INIT_LIST_HEAD(&ctx->run_list);
- INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+ INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
if (aio_setup_ring(ctx) < 0)
goto out_freectx;
@@ -469,7 +469,7 @@
wake_up(&ctx->wait);
}
-static void aio_fput_routine(void *data)
+static void aio_fput_routine(struct work_struct *data)
{
spin_lock_irq(&fput_lock);
while (likely(!list_empty(&fput_head))) {
@@ -857,9 +857,9 @@
* space.
* Run on aiod's context.
*/
-static void aio_kick_handler(void *data)
+static void aio_kick_handler(struct work_struct *work)
{
- struct kioctx *ctx = data;
+ struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
mm_segment_t oldfs = get_fs();
int requeue;
@@ -874,7 +874,7 @@
* we're in a worker thread already, don't use queue_delayed_work,
*/
if (requeue)
- queue_work(aio_wq, &ctx->wq);
+ queue_delayed_work(aio_wq, &ctx->wq, 0);
}
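fs/aio.c covers the remaining case: work items declared statically. DECLARE_WORK() and DECLARE_DELAYED_WORK() no longer carry a data pointer, so a handler for a file-scope item finds its context some other way, here a file-scope list under its own lock. A compressed sketch of that arrangement (names are illustrative, not aio's):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(pending_lock);
static LIST_HEAD(pending_head);

static void drain_pending(struct work_struct *unused)
{
        spin_lock_irq(&pending_lock);
        while (!list_empty(&pending_head)) {
                /* pop an entry and process it ... */
                list_del_init(pending_head.next);
        }
        spin_unlock_irq(&pending_lock);
}

/* no third argument: the handler reaches its data through file scope */
static DECLARE_WORK(drain_work, drain_pending);

static void add_pending(struct list_head *entry)
{
        spin_lock_irq(&pending_lock);
        list_add_tail(entry, &pending_head);
        spin_unlock_irq(&pending_lock);
        schedule_work(&drain_work);
}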
diff --git a/fs/bio.c b/fs/bio.c
index aa4d09b..50c40ce 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -940,16 +940,16 @@
* run one bio_put() against the BIO.
*/
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*
* This runs in process context
*/
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
{
unsigned long flags;
struct bio *bio;
diff --git a/fs/file.c b/fs/file.c
index 8e81775..3787e82 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -91,8 +91,10 @@
spin_unlock(&fddef->lock);
}
-static void free_fdtable_work(struct fdtable_defer *f)
+static void free_fdtable_work(struct work_struct *work)
{
+ struct fdtable_defer *f =
+ container_of(work, struct fdtable_defer, wq);
struct fdtable *fdt;
spin_lock_bh(&f->lock);
@@ -351,7 +353,7 @@
{
struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
spin_lock_init(&fddef->lock);
- INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef);
+ INIT_WORK(&fddef->wq, free_fdtable_work);
init_timer(&fddef->timer);
fddef->timer.data = (unsigned long)fddef;
fddef->timer.function = fdtable_timer;
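The fs/file.c hunk also drops the function-pointer cast that the old interface needed. That cast was what let a mismatched handler slip through; with the two-argument INIT_WORK() the prototype is checked by the compiler, which is much of the point of this API change. A minimal sketch, old form kept as a comment:

#include <linux/workqueue.h>

struct fdtable_defer_sketch {              /* stand-in for struct fdtable_defer */
        struct work_struct wq;
};

static void free_fdtable_work_sketch(struct work_struct *work)
{
        struct fdtable_defer_sketch *f =
                container_of(work, struct fdtable_defer_sketch, wq);
        (void)f;
}

static void init_defer_sketch(struct fdtable_defer_sketch *fddef)
{
        /* old:  INIT_WORK(&fddef->wq, (void (*)(void *))fn, fddef);
         *       the cast silenced any prototype mismatch */
        INIT_WORK(&fddef->wq, free_fdtable_work_sketch);
        /* now a handler with the wrong signature fails to compile */
}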
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 78fe0fa..55f5333 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -35,7 +35,7 @@
struct greedy {
struct gfs2_holder gr_gh;
- struct work_struct gr_work;
+ struct delayed_work gr_work;
};
struct gfs2_gl_hash_bucket {
@@ -1368,9 +1368,9 @@
glops->go_xmote_th(gl, state, flags);
}
-static void greedy_work(void *data)
+static void greedy_work(struct work_struct *work)
{
- struct greedy *gr = data;
+ struct greedy *gr = container_of(work, struct greedy, gr_work.work);
struct gfs2_holder *gh = &gr->gr_gh;
struct gfs2_glock *gl = gh->gh_gl;
const struct gfs2_glock_operations *glops = gl->gl_ops;
@@ -1422,7 +1422,7 @@
gfs2_holder_init(gl, 0, 0, gh);
set_bit(HIF_GREEDY, &gh->gh_iflags);
- INIT_WORK(&gr->gr_work, greedy_work, gr);
+ INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
schedule_delayed_work(&gr->gr_work, time);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 42e3bef..72dad55 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -577,12 +577,12 @@
server->rcv.ptr = (unsigned char*)&server->rcv.buf;
server->rcv.len = 10;
server->rcv.state = 0;
- INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server);
- INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server);
+ INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
+ INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
sock->sk->sk_write_space = ncp_tcp_write_space;
} else {
- INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server);
- INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server);
+ INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
+ INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
server->timeout_tm.data = (unsigned long)server;
server->timeout_tm.function = ncpdgram_timeout_call;
}
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 11c2b25..e496d8b 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -350,9 +350,10 @@
}
}
-void ncpdgram_rcv_proc(void *s)
+void ncpdgram_rcv_proc(struct work_struct *work)
{
- struct ncp_server *server = s;
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, rcv.tq);
struct socket* sock;
sock = server->ncp_sock;
@@ -468,9 +469,10 @@
}
}
-void ncpdgram_timeout_proc(void *s)
+void ncpdgram_timeout_proc(struct work_struct *work)
{
- struct ncp_server *server = s;
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, timeout_tq);
mutex_lock(&server->rcv.creq_mutex);
__ncpdgram_timeout_proc(server);
mutex_unlock(&server->rcv.creq_mutex);
@@ -652,18 +654,20 @@
}
}
-void ncp_tcp_rcv_proc(void *s)
+void ncp_tcp_rcv_proc(struct work_struct *work)
{
- struct ncp_server *server = s;
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, rcv.tq);
mutex_lock(&server->rcv.creq_mutex);
__ncptcp_rcv_proc(server);
mutex_unlock(&server->rcv.creq_mutex);
}
-void ncp_tcp_tx_proc(void *s)
+void ncp_tcp_tx_proc(struct work_struct *work)
{
- struct ncp_server *server = s;
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, tx.tq);
mutex_lock(&server->rcv.creq_mutex);
__ncptcp_try_send(server);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 5fea638..23ab145 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -143,7 +143,7 @@
INIT_LIST_HEAD(&clp->cl_state_owners);
INIT_LIST_HEAD(&clp->cl_unused);
spin_lock_init(&clp->cl_lock);
- INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+ INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index ec1114b..371b804e 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -18,10 +18,10 @@
#define NFSDBG_FACILITY NFSDBG_VFS
-static void nfs_expire_automounts(void *list);
+static void nfs_expire_automounts(struct work_struct *work);
LIST_HEAD(nfs_automount_list);
-static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
int nfs_mountpoint_expiry_timeout = 500 * HZ;
static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
@@ -164,9 +164,9 @@
.follow_link = nfs_follow_mountpoint,
};
-static void nfs_expire_automounts(void *data)
+static void nfs_expire_automounts(struct work_struct *work)
{
- struct list_head *list = (struct list_head *)data;
+ struct list_head *list = &nfs_automount_list;
mark_mounts_for_expiry(list);
if (!list_empty(list))
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6f34667..c26cd97 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -185,7 +185,7 @@
extern void nfs4_schedule_state_renewal(struct nfs_client *);
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
extern void nfs4_kill_renewd(struct nfs_client *);
-extern void nfs4_renew_state(void *);
+extern void nfs4_renew_state(struct work_struct *);
/* nfs4state.c */
struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 7b6df18..8232985 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,9 +59,10 @@
#define NFSDBG_FACILITY NFSDBG_PROC
void
-nfs4_renew_state(void *data)
+nfs4_renew_state(struct work_struct *work)
{
- struct nfs_client *clp = (struct nfs_client *)data;
+ struct nfs_client *clp =
+ container_of(work, struct nfs_client, cl_renewd.work);
struct rpc_cred *cred;
long lease, timeout;
unsigned long last, now;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 293b649..e431e93 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1829,9 +1829,8 @@
}
static struct workqueue_struct *laundry_wq;
-static struct work_struct laundromat_work;
-static void laundromat_main(void *);
-static DECLARE_WORK(laundromat_work, laundromat_main, NULL);
+static void laundromat_main(struct work_struct *);
+static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
__be32
nfsd4_renew(clientid_t *clid)
@@ -1940,7 +1939,7 @@
}
void
-laundromat_main(void *not_used)
+laundromat_main(struct work_struct *not_used)
{
time_t t;
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 85a048b..edc91ca 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1197,10 +1197,12 @@
return status;
}
-static void ocfs2_truncate_log_worker(void *data)
+static void ocfs2_truncate_log_worker(struct work_struct *work)
{
int status;
- struct ocfs2_super *osb = data;
+ struct ocfs2_super *osb =
+ container_of(work, struct ocfs2_super,
+ osb_truncate_log_wq.work);
mlog_entry_void();
@@ -1432,7 +1434,8 @@
/* ocfs2_truncate_log_shutdown keys on the existence of
* osb->osb_tl_inode so we don't set any of the osb variables
* until we're sure all is well. */
- INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb);
+ INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
+ ocfs2_truncate_log_worker);
osb->osb_tl_bh = tl_bh;
osb->osb_tl_inode = tl_inode;
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 305cba3..4cd9a95 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -141,7 +141,7 @@
* recognizes a node going up and down in one iteration */
u64 hr_generation;
- struct work_struct hr_write_timeout_work;
+ struct delayed_work hr_write_timeout_work;
unsigned long hr_last_timeout_start;
/* Used during o2hb_check_slot to hold a copy of the block
@@ -156,9 +156,11 @@
int wc_error;
};
-static void o2hb_write_timeout(void *arg)
+static void o2hb_write_timeout(struct work_struct *work)
{
- struct o2hb_region *reg = arg;
+ struct o2hb_region *reg =
+ container_of(work, struct o2hb_region,
+ hr_write_timeout_work.work);
mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
"milliseconds\n", reg->hr_dev_name,
@@ -1404,7 +1406,7 @@
goto out;
}
- INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg);
+ INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
/*
* A node is considered live after it has beat LIVE_THRESHOLD
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 7bba98f..4705d65 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -88,7 +88,7 @@
o2quo_fence_self();
}
-static void o2quo_make_decision(void *arg)
+static void o2quo_make_decision(struct work_struct *work)
{
int quorum;
int lowest_hb, lowest_reachable = 0, fence = 0;
@@ -306,7 +306,7 @@
struct o2quo_state *qs = &o2quo_state;
spin_lock_init(&qs->qs_lock);
- INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL);
+ INIT_WORK(&qs->qs_work, o2quo_make_decision);
}
void o2quo_exit(void)
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index b650efa..9b3209d 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -140,11 +140,11 @@
[O2NET_ERR_DIED] = -EHOSTDOWN,};
/* can't quite avoid *all* internal declarations :/ */
-static void o2net_sc_connect_completed(void *arg);
-static void o2net_rx_until_empty(void *arg);
-static void o2net_shutdown_sc(void *arg);
+static void o2net_sc_connect_completed(struct work_struct *work);
+static void o2net_rx_until_empty(struct work_struct *work);
+static void o2net_shutdown_sc(struct work_struct *work);
static void o2net_listen_data_ready(struct sock *sk, int bytes);
-static void o2net_sc_send_keep_req(void *arg);
+static void o2net_sc_send_keep_req(struct work_struct *work);
static void o2net_idle_timer(unsigned long data);
static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
@@ -308,10 +308,10 @@
o2nm_node_get(node);
sc->sc_node = node;
- INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc);
- INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc);
- INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc);
- INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc);
+ INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
+ INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
+ INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
+ INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
init_timer(&sc->sc_idle_timeout);
sc->sc_idle_timeout.function = o2net_idle_timer;
@@ -342,7 +342,7 @@
sc_put(sc);
}
static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
- struct work_struct *work,
+ struct delayed_work *work,
int delay)
{
sc_get(sc);
@@ -350,7 +350,7 @@
sc_put(sc);
}
static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
- struct work_struct *work)
+ struct delayed_work *work)
{
if (cancel_delayed_work(work))
sc_put(sc);
@@ -564,9 +564,11 @@
* ourselves as state_change couldn't get the nn_lock and call set_nn_state
* itself.
*/
-static void o2net_shutdown_sc(void *arg)
+static void o2net_shutdown_sc(struct work_struct *work)
{
- struct o2net_sock_container *sc = arg;
+ struct o2net_sock_container *sc =
+ container_of(work, struct o2net_sock_container,
+ sc_shutdown_work);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
sclog(sc, "shutting down\n");
@@ -1201,9 +1203,10 @@
/* this work func is triggerd by data ready. it reads until it can read no
* more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
* our work the work struct will be marked and we'll be called again. */
-static void o2net_rx_until_empty(void *arg)
+static void o2net_rx_until_empty(struct work_struct *work)
{
- struct o2net_sock_container *sc = arg;
+ struct o2net_sock_container *sc =
+ container_of(work, struct o2net_sock_container, sc_rx_work);
int ret;
do {
@@ -1249,9 +1252,11 @@
/* called when a connect completes and after a sock is accepted. the
* rx path will see the response and mark the sc valid */
-static void o2net_sc_connect_completed(void *arg)
+static void o2net_sc_connect_completed(struct work_struct *work)
{
- struct o2net_sock_container *sc = arg;
+ struct o2net_sock_container *sc =
+ container_of(work, struct o2net_sock_container,
+ sc_connect_work);
mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
(unsigned long long)O2NET_PROTOCOL_VERSION,
@@ -1262,9 +1267,11 @@
}
/* this is called as a work_struct func. */
-static void o2net_sc_send_keep_req(void *arg)
+static void o2net_sc_send_keep_req(struct work_struct *work)
{
- struct o2net_sock_container *sc = arg;
+ struct o2net_sock_container *sc =
+ container_of(work, struct o2net_sock_container,
+ sc_keepalive_work.work);
o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
sc_put(sc);
@@ -1314,14 +1321,15 @@
* having a connect attempt fail, etc. This centralizes the logic which decides
* if a connect attempt should be made or if we should give up and all future
* transmit attempts should fail */
-static void o2net_start_connect(void *arg)
+static void o2net_start_connect(struct work_struct *work)
{
- struct o2net_node *nn = arg;
+ struct o2net_node *nn =
+ container_of(work, struct o2net_node, nn_connect_work.work);
struct o2net_sock_container *sc = NULL;
struct o2nm_node *node = NULL, *mynode = NULL;
struct socket *sock = NULL;
struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
- int ret = 0;
+ int ret = 0, stop;
/* if we're greater we initiate tx, otherwise we accept */
if (o2nm_this_node() <= o2net_num_from_nn(nn))
@@ -1342,10 +1350,9 @@
spin_lock(&nn->nn_lock);
/* see if we already have one pending or have given up */
- if (nn->nn_sc || nn->nn_persistent_error)
- arg = NULL;
+ stop = (nn->nn_sc || nn->nn_persistent_error);
spin_unlock(&nn->nn_lock);
- if (arg == NULL) /* *shrug*, needed some indicator */
+ if (stop)
goto out;
nn->nn_last_connect_attempt = jiffies;
@@ -1421,9 +1428,10 @@
return;
}
-static void o2net_connect_expired(void *arg)
+static void o2net_connect_expired(struct work_struct *work)
{
- struct o2net_node *nn = arg;
+ struct o2net_node *nn =
+ container_of(work, struct o2net_node, nn_connect_expired.work);
spin_lock(&nn->nn_lock);
if (!nn->nn_sc_valid) {
@@ -1436,9 +1444,10 @@
spin_unlock(&nn->nn_lock);
}
-static void o2net_still_up(void *arg)
+static void o2net_still_up(struct work_struct *work)
{
- struct o2net_node *nn = arg;
+ struct o2net_node *nn =
+ container_of(work, struct o2net_node, nn_still_up.work);
o2quo_hb_still_up(o2net_num_from_nn(nn));
}
@@ -1644,9 +1653,9 @@
return ret;
}
-static void o2net_accept_many(void *arg)
+static void o2net_accept_many(struct work_struct *work)
{
- struct socket *sock = arg;
+ struct socket *sock = o2net_listen_sock;
while (o2net_accept_one(sock) == 0)
cond_resched();
}
@@ -1700,7 +1709,7 @@
write_unlock_bh(&sock->sk->sk_callback_lock);
o2net_listen_sock = sock;
- INIT_WORK(&o2net_listen_work, o2net_accept_many, sock);
+ INIT_WORK(&o2net_listen_work, o2net_accept_many);
sock->sk->sk_reuse = 1;
ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
@@ -1819,9 +1828,10 @@
struct o2net_node *nn = o2net_nn_from_num(i);
spin_lock_init(&nn->nn_lock);
- INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn);
- INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn);
- INIT_WORK(&nn->nn_still_up, o2net_still_up, nn);
+ INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
+ INIT_DELAYED_WORK(&nn->nn_connect_expired,
+ o2net_connect_expired);
+ INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
/* until we see hb from a node we'll return einval */
nn->nn_persistent_error = -ENOTCONN;
init_waitqueue_head(&nn->nn_sc_wq);
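o2net_start_connect is the one place where the old API's quirk shows: the handler used to null out its own void * argument as a "give up" marker, which has no equivalent once the context comes from container_of(). The replacement is simply an explicit local decided under the lock, as sketched here (stand-in struct, the real o2net_node carries a socket container pointer rather than a void *):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct o2net_node_sketch {                 /* stand-in for struct o2net_node */
        spinlock_t nn_lock;
        struct delayed_work nn_connect_work;
        void *nn_sc;                       /* non-NULL: connect already pending */
        int nn_persistent_error;
};

static void start_connect_sketch(struct work_struct *work)
{
        struct o2net_node_sketch *nn =
                container_of(work, struct o2net_node_sketch,
                             nn_connect_work.work);
        int stop;

        spin_lock(&nn->nn_lock);
        /* previously "arg = NULL" stood in for this condition */
        stop = (nn->nn_sc || nn->nn_persistent_error);
        spin_unlock(&nn->nn_lock);
        if (stop)
                return;

        /* ... go on to build the socket and start the connect ... */
}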
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 4b46aac..daebbd3 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -86,18 +86,18 @@
* connect attempt fails and so can be self-arming. shutdown is
* careful to first mark the nn such that no connects will be attempted
* before canceling delayed connect work and flushing the queue. */
- struct work_struct nn_connect_work;
+ struct delayed_work nn_connect_work;
unsigned long nn_last_connect_attempt;
/* this is queued as nodes come up and is canceled when a connection is
* established. this expiring gives up on the node and errors out
* transmits */
- struct work_struct nn_connect_expired;
+ struct delayed_work nn_connect_expired;
/* after we give up on a socket we wait a while before deciding
* that it is still heartbeating and that we should do some
* quorum work */
- struct work_struct nn_still_up;
+ struct delayed_work nn_still_up;
};
struct o2net_sock_container {
@@ -129,7 +129,7 @@
struct work_struct sc_shutdown_work;
struct timer_list sc_idle_timeout;
- struct work_struct sc_keepalive_work;
+ struct delayed_work sc_keepalive_work;
unsigned sc_handshake_ok:1;
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index fa96818..6b6ff76 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -153,7 +153,7 @@
* called functions that cannot be directly called from the
* net message handlers for some reason, usually because
* they need to send net messages of their own. */
-void dlm_dispatch_work(void *data);
+void dlm_dispatch_work(struct work_struct *work);
struct dlm_lock_resource;
struct dlm_work_item;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index f6cdab3..420a375 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1297,7 +1297,7 @@
spin_lock_init(&dlm->work_lock);
INIT_LIST_HEAD(&dlm->work_list);
- INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm);
+ INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
kref_init(&dlm->dlm_refs);
dlm->dlm_state = DLM_CTXT_NEW;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 9d950d7..fb3e2b0 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -153,9 +153,10 @@
}
/* Worker function used during recovery. */
-void dlm_dispatch_work(void *data)
+void dlm_dispatch_work(struct work_struct *work)
{
- struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
+ struct dlm_ctxt *dlm =
+ container_of(work, struct dlm_ctxt, dispatched_work);
LIST_HEAD(tmp_list);
struct list_head *iter, *iter2;
struct dlm_work_item *item;
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index eead48b..7d2f578 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -171,15 +171,14 @@
BUG();
}
-static void user_dlm_unblock_lock(void *opaque);
+static void user_dlm_unblock_lock(struct work_struct *work);
static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
{
if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
user_dlm_grab_inode_ref(lockres);
- INIT_WORK(&lockres->l_work, user_dlm_unblock_lock,
- lockres);
+ INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
queue_work(user_dlm_worker, &lockres->l_work);
lockres->l_flags |= USER_LOCK_QUEUED;
@@ -279,10 +278,11 @@
iput(inode);
}
-static void user_dlm_unblock_lock(void *opaque)
+static void user_dlm_unblock_lock(struct work_struct *work)
{
int new_level, status;
- struct user_lock_res *lockres = (struct user_lock_res *) opaque;
+ struct user_lock_res *lockres =
+ container_of(work, struct user_lock_res, l_work);
struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index c0ad7cb..1d7f4ab 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -703,11 +703,12 @@
* NOTE: This function can and will sleep on recovery of other nodes
* during cluster locking, just like any other ocfs2 process.
*/
-void ocfs2_complete_recovery(void *data)
+void ocfs2_complete_recovery(struct work_struct *work)
{
int ret;
- struct ocfs2_super *osb = data;
- struct ocfs2_journal *journal = osb->journal;
+ struct ocfs2_journal *journal =
+ container_of(work, struct ocfs2_journal, j_recovery_work);
+ struct ocfs2_super *osb = journal->j_osb;
struct ocfs2_dinode *la_dinode, *tl_dinode;
struct ocfs2_la_recovery_item *item;
struct list_head *p, *n;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index d86cb96..899112a 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -133,7 +133,7 @@
}
/* Exported only for the journal struct init code in super.c. Do not call. */
-void ocfs2_complete_recovery(void *data);
+void ocfs2_complete_recovery(struct work_struct *work);
/*
* Journal Control:
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 0788837..b767fd7 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -285,7 +285,7 @@
/* Truncate log info */
struct inode *osb_tl_inode;
struct buffer_head *osb_tl_bh;
- struct work_struct osb_truncate_log_wq;
+ struct delayed_work osb_truncate_log_wq;
struct ocfs2_node_map osb_recovering_orphan_dirs;
unsigned int *osb_orphan_wipes;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index b099257..d9b4214 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1365,7 +1365,7 @@
spin_lock_init(&journal->j_lock);
journal->j_trans_id = (unsigned long) 1;
INIT_LIST_HEAD(&journal->j_la_cleanups);
- INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb);
+ INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
journal->j_state = OCFS2_JOURNAL_FREE;
/* get some pseudo constants for clustersize bits */
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index ac93174..7280a23 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -104,7 +104,7 @@
struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
struct reiserfs_journal_list *jl);
-static void flush_async_commits(void *p);
+static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);
/* values for join in do_journal_begin_r */
@@ -2836,7 +2836,8 @@
if (reiserfs_mounted_fs_count <= 1)
commit_wq = create_workqueue("reiserfs");
- INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
+ INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
+ journal->j_work_sb = p_s_sb;
return 0;
free_and_return:
free_journal_ram(p_s_sb);
@@ -3447,10 +3448,11 @@
/*
** writeback the pending async commits to disk
*/
-static void flush_async_commits(void *p)
+static void flush_async_commits(struct work_struct *work)
{
- struct super_block *p_s_sb = p;
- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+ struct reiserfs_journal *journal =
+ container_of(work, struct reiserfs_journal, j_work.work);
+ struct super_block *p_s_sb = journal->j_work_sb;
struct reiserfs_journal_list *jl;
struct list_head *entry;
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 09360cf..8e6b56f 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -149,9 +149,10 @@
*/
STATIC void
xfs_end_bio_delalloc(
- void *data)
+ struct work_struct *work)
{
- xfs_ioend_t *ioend = data;
+ xfs_ioend_t *ioend =
+ container_of(work, xfs_ioend_t, io_work);
xfs_destroy_ioend(ioend);
}
@@ -161,9 +162,10 @@
*/
STATIC void
xfs_end_bio_written(
- void *data)
+ struct work_struct *work)
{
- xfs_ioend_t *ioend = data;
+ xfs_ioend_t *ioend =
+ container_of(work, xfs_ioend_t, io_work);
xfs_destroy_ioend(ioend);
}
@@ -176,9 +178,10 @@
*/
STATIC void
xfs_end_bio_unwritten(
- void *data)
+ struct work_struct *work)
{
- xfs_ioend_t *ioend = data;
+ xfs_ioend_t *ioend =
+ container_of(work, xfs_ioend_t, io_work);
bhv_vnode_t *vp = ioend->io_vnode;
xfs_off_t offset = ioend->io_offset;
size_t size = ioend->io_size;
@@ -220,11 +223,11 @@
ioend->io_size = 0;
if (type == IOMAP_UNWRITTEN)
- INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+ INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
else if (type == IOMAP_DELAY)
- INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+ INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
else
- INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+ INIT_WORK(&ioend->io_work, xfs_end_bio_written);
return ioend;
}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d338284..eef4a0b 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -994,9 +994,10 @@
STATIC void
xfs_buf_iodone_work(
- void *v)
+ struct work_struct *work)
{
- xfs_buf_t *bp = (xfs_buf_t *)v;
+ xfs_buf_t *bp =
+ container_of(work, xfs_buf_t, b_iodone_work);
if (bp->b_iodone)
(*(bp->b_iodone))(bp);
@@ -1017,10 +1018,10 @@
if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
if (schedule) {
- INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+ INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
queue_work(xfslogd_workqueue, &bp->b_iodone_work);
} else {
- xfs_buf_iodone_work(bp);
+ xfs_buf_iodone_work(&bp->b_iodone_work);
}
} else {
up(&bp->b_iodonesema);
diff --git a/include/asm-arm/arch-omap/irda.h b/include/asm-arm/arch-omap/irda.h
index 805ae35..345a649 100644
--- a/include/asm-arm/arch-omap/irda.h
+++ b/include/asm-arm/arch-omap/irda.h
@@ -24,7 +24,7 @@
/* Very specific to the needs of some platforms (h3,h4)
* having calls which can sleep in irda_set_speed.
*/
- struct work_struct gpio_expa;
+ struct delayed_work gpio_expa;
int rx_channel;
int tx_channel;
unsigned long dest_start;
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 0d71c00..9e350fd 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -194,7 +194,7 @@
struct aio_ring_info ring_info;
- struct work_struct wq;
+ struct delayed_work wq;
};
/* prototypes */
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 4c02119..3ea1cd5 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -133,7 +133,7 @@
struct cn_callback_entry {
struct list_head callback_entry;
struct cn_callback *cb;
- struct work_struct work;
+ struct delayed_work work;
struct cn_queue_dev *pdev;
struct cn_callback_id id;
@@ -170,7 +170,7 @@
int cn_cb_equal(struct cb_id *, struct cb_id *);
-void cn_queue_wrapper(void *data);
+void cn_queue_wrapper(struct work_struct *work);
extern int cn_already_initialized;
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index c115e9e..1fb02e1 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -461,7 +461,7 @@
int (*reply) (struct i2o_controller *, u32, struct i2o_message *);
/* Event handler */
- void (*event) (struct i2o_event *);
+ work_func_t event;
struct workqueue_struct *event_queue; /* Event queue */
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index efe0ee4..06c58c4 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -158,7 +158,7 @@
if (t->buf.tail != NULL)
t->buf.tail->commit = t->buf.tail->used;
spin_unlock_irqrestore(&t->buf.lock, flags);
- schedule_work(&t->buf.work);
+ schedule_delayed_work(&t->buf.work, 0);
}
#endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 202283b..ab27548 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -575,8 +575,9 @@
struct ata_host *host;
struct device *dev;
- struct work_struct port_task;
- struct work_struct hotplug_task;
+ void *port_task_data;
+ struct delayed_work port_task;
+ struct delayed_work hotplug_task;
struct work_struct scsi_rescan_task;
unsigned int hsm_task_state;
@@ -755,7 +756,7 @@
extern int ata_ratelimit(void);
extern int ata_busy_sleep(struct ata_port *ap,
unsigned long timeout_pat, unsigned long timeout);
-extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
+extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
void *data, unsigned long delay);
extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
unsigned long interval_msec,
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 528e7d3..c15ae19 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -110,7 +110,7 @@
struct mmc_card *card_busy; /* the MMC card claiming host */
struct mmc_card *card_selected; /* the selected MMC card */
- struct work_struct detect;
+ struct delayed_work detect;
unsigned long private[0] ____cacheline_aligned;
};
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
index b089d95..a503052 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/include/linux/ncp_fs_sb.h
@@ -127,10 +127,10 @@
} unexpected_packet;
};
-extern void ncp_tcp_rcv_proc(void *server);
-extern void ncp_tcp_tx_proc(void *server);
-extern void ncpdgram_rcv_proc(void *server);
-extern void ncpdgram_timeout_proc(void *server);
+extern void ncp_tcp_rcv_proc(struct work_struct *work);
+extern void ncp_tcp_tx_proc(struct work_struct *work);
+extern void ncpdgram_rcv_proc(struct work_struct *work);
+extern void ncpdgram_timeout_proc(struct work_struct *work);
extern void ncpdgram_timeout_call(unsigned long server);
extern void ncp_tcp_data_ready(struct sock* sk, int len);
extern void ncp_tcp_write_space(struct sock* sk);
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 2cc9867..29930b71 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -32,7 +32,7 @@
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
struct sk_buff_head arp_tx; /* list of arp requests to reply to */
struct sk_buff_head txq;
- struct work_struct tx_work;
+ struct delayed_work tx_work;
};
void netpoll_poll(struct netpoll *np);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7ccfc7e..95796e6 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -51,7 +51,7 @@
unsigned long cl_lease_time;
unsigned long cl_last_renewal;
- struct work_struct cl_renewd;
+ struct delayed_work cl_renewd;
struct rpc_wait_queue cl_rpcwaitq;
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 62a7169..3a28742 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -249,7 +249,8 @@
int j_errno;
/* when flushing ordered buffers, throttle new ordered writers */
- struct work_struct j_work;
+ struct delayed_work j_work;
+ struct super_block *j_work_sb;
atomic_t j_async_throttle;
};
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 24accb4..0e3d91b 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -38,7 +38,7 @@
size_t subbufs_consumed; /* count of sub-buffers consumed */
struct rchan *chan; /* associated channel */
wait_queue_head_t read_wait; /* reader wait queue */
- struct work_struct wake_readers; /* reader wake-up work struct */
+ struct delayed_work wake_readers; /* reader wake-up work struct */
struct dentry *dentry; /* channel file dentry */
struct kref kref; /* channel buffer refcount */
struct page **page_array; /* array of current buffer pages */
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index a2eb9b4..4a68125 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -30,7 +30,7 @@
#define RPC_PIPE_WAIT_FOR_OPEN 1
int flags;
struct rpc_pipe_ops *ops;
- struct work_struct queue_timeout;
+ struct delayed_work queue_timeout;
};
static inline struct rpc_inode *
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 60394fb..3e04c15 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -177,7 +177,7 @@
unsigned long connect_timeout,
bind_timeout,
reestablish_timeout;
- struct work_struct connect_worker;
+ struct delayed_work connect_worker;
unsigned short port;
/*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 65321f9..f717f08 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -53,7 +53,7 @@
};
struct tty_bufhead {
- struct work_struct work;
+ struct delayed_work work;
struct semaphore pty_sem;
spinlock_t lock;
struct tty_buffer *head; /* Queue head */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 0cd73ed..aab5b1b 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -388,7 +388,7 @@
int pm_usage_cnt; /* usage counter for autosuspend */
#ifdef CONFIG_PM
- struct work_struct autosuspend; /* for delayed autosuspends */
+ struct delayed_work autosuspend; /* for delayed autosuspends */
struct mutex pm_mutex; /* protects PM operations */
unsigned auto_pm:1; /* autosuspend/resume in progress */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 9bca353..4a3ea83 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -11,12 +11,23 @@
struct workqueue_struct;
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
+
struct work_struct {
- unsigned long pending;
+ /* the first word is the work queue pointer and the flags rolled into
+ * one */
+ unsigned long management;
+#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
+#define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */
+#define WORK_STRUCT_FLAG_MASK (3UL)
+#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
struct list_head entry;
- void (*func)(void *);
- void *data;
- void *wq_data;
+ work_func_t func;
+};
+
+struct delayed_work {
+ struct work_struct work;
struct timer_list timer;
};
@@ -24,36 +35,117 @@
struct work_struct work;
};
-#define __WORK_INITIALIZER(n, f, d) { \
+#define __WORK_INITIALIZER(n, f) { \
+ .management = 0, \
.entry = { &(n).entry, &(n).entry }, \
.func = (f), \
- .data = (d), \
+ }
+
+#define __WORK_INITIALIZER_NAR(n, f) { \
+ .management = (1 << WORK_STRUCT_NOAUTOREL), \
+ .entry = { &(n).entry, &(n).entry }, \
+ .func = (f), \
+ }
+
+#define __DELAYED_WORK_INITIALIZER(n, f) { \
+ .work = __WORK_INITIALIZER((n).work, (f)), \
.timer = TIMER_INITIALIZER(NULL, 0, 0), \
}
-#define DECLARE_WORK(n, f, d) \
- struct work_struct n = __WORK_INITIALIZER(n, f, d)
+#define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \
+ .work = __WORK_INITIALIZER_NAR((n).work, (f)), \
+ .timer = TIMER_INITIALIZER(NULL, 0, 0), \
+ }
+
+#define DECLARE_WORK(n, f) \
+ struct work_struct n = __WORK_INITIALIZER(n, f)
+
+#define DECLARE_WORK_NAR(n, f) \
+ struct work_struct n = __WORK_INITIALIZER_NAR(n, f)
+
+#define DECLARE_DELAYED_WORK(n, f) \
+ struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
+
+#define DECLARE_DELAYED_WORK_NAR(n, f) \
+ struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
/*
- * initialize a work-struct's func and data pointers:
+ * initialize a work item's function pointer
*/
-#define PREPARE_WORK(_work, _func, _data) \
+#define PREPARE_WORK(_work, _func) \
do { \
- (_work)->func = _func; \
- (_work)->data = _data; \
+ (_work)->func = (_func); \
} while (0)
+#define PREPARE_DELAYED_WORK(_work, _func) \
+ PREPARE_WORK(&(_work)->work, (_func))
+
/*
- * initialize all of a work-struct:
+ * initialize all of a work item in one go
*/
-#define INIT_WORK(_work, _func, _data) \
+#define INIT_WORK(_work, _func) \
do { \
+ (_work)->management = 0; \
INIT_LIST_HEAD(&(_work)->entry); \
- (_work)->pending = 0; \
- PREPARE_WORK((_work), (_func), (_data)); \
+ PREPARE_WORK((_work), (_func)); \
+ } while (0)
+
+#define INIT_WORK_NAR(_work, _func) \
+ do { \
+ (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ PREPARE_WORK((_work), (_func)); \
+ } while (0)
+
+#define INIT_DELAYED_WORK(_work, _func) \
+ do { \
+ INIT_WORK(&(_work)->work, (_func)); \
init_timer(&(_work)->timer); \
} while (0)
+#define INIT_DELAYED_WORK_NAR(_work, _func) \
+ do { \
+ INIT_WORK_NAR(&(_work)->work, (_func)); \
+ init_timer(&(_work)->timer); \
+ } while (0)
+
+/**
+ * work_pending - Find out whether a work item is currently pending
+ * @work: The work item in question
+ */
+#define work_pending(work) \
+ test_bit(WORK_STRUCT_PENDING, &(work)->management)
+
+/**
+ * delayed_work_pending - Find out whether a delayable work item is currently
+ * pending
+ * @work: The work item in question
+ */
+#define delayed_work_pending(work) \
+ test_bit(WORK_STRUCT_PENDING, &(work)->work.management)
+
+/**
+ * work_release - Release a work item under execution
+ * @work: The work item to release
+ *
+ * This is used to release a work item that has been initialised with automatic
+ * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work
+ * function the opportunity to grab auxiliary data from the container of the
+ * work_struct before clearing the pending bit as the work_struct may be
+ * subject to deallocation the moment the pending bit is cleared.
+ *
+ * In such a case, this should be called in the work function after it has
+ * fetched any data it may require from the container of the work_struct.
+ * After this function has been called, the work_struct may be scheduled for
+ * further execution or it may be deallocated unless other precautions are
+ * taken.
+ *
+ * This should also be used to release a delayed work item.
+ */
+#define work_release(work) \
+ clear_bit(WORK_STRUCT_PENDING, &(work)->management)
+
+
extern struct workqueue_struct *__create_workqueue(const char *name,
int singlethread);
#define create_workqueue(name) __create_workqueue((name), 0)
@@ -62,39 +154,38 @@
extern void destroy_workqueue(struct workqueue_struct *wq);
extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
- struct work_struct *work, unsigned long delay);
+ struct delayed_work *work, unsigned long delay);
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
-extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
-extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
+extern int schedule_on_each_cpu(work_func_t func);
extern void flush_scheduled_work(void);
extern int current_is_keventd(void);
extern int keventd_up(void);
extern void init_workqueues(void);
-void cancel_rearming_delayed_work(struct work_struct *work);
+void cancel_rearming_delayed_work(struct delayed_work *work);
void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
- struct work_struct *);
-int execute_in_process_context(void (*fn)(void *), void *,
- struct execute_work *);
+ struct delayed_work *);
+int execute_in_process_context(work_func_t fn, struct execute_work *);
/*
* Kill off a pending schedule_delayed_work(). Note that the work callback
* function may still be running on return from cancel_delayed_work(). Run
* flush_scheduled_work() to wait on it.
*/
-static inline int cancel_delayed_work(struct work_struct *work)
+static inline int cancel_delayed_work(struct delayed_work *work)
{
int ret;
ret = del_timer_sync(&work->timer);
if (ret)
- clear_bit(0, &work->pending);
+ clear_bit(WORK_STRUCT_PENDING, &work->work.management);
return ret;
}
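For reference, a minimal sketch of the calling convention this header now expects. The names my_dev, my_handler and my_retry below are hypothetical and not part of the patch; the point is that a work function receives the work_struct pointer and recovers its containing object with container_of(), and that delayable items become struct delayed_work:

	struct my_dev {
		int			value;
		struct work_struct	work;	/* immediate work item */
		struct delayed_work	retry;	/* delayable work item */
	};

	static void my_handler(struct work_struct *work)
	{
		/* recover the containing object from the work pointer */
		struct my_dev *dev = container_of(work, struct my_dev, work);
		dev->value++;
	}

	static void my_retry(struct work_struct *work)
	{
		/* for delayed_work, the pointer is to the embedded .work member */
		struct my_dev *dev = container_of(work, struct my_dev, retry.work);
		dev->value++;
	}

	INIT_WORK(&dev->work, my_handler);
	schedule_work(&dev->work);

	INIT_DELAYED_WORK(&dev->retry, my_retry);
	schedule_delayed_work(&dev->retry, HZ);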
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h
index 617b672..8911927 100644
--- a/include/net/ieee80211softmac.h
+++ b/include/net/ieee80211softmac.h
@@ -108,8 +108,8 @@
/* Scan retries remaining */
int scan_retry;
- struct work_struct work;
- struct work_struct timeout;
+ struct delayed_work work;
+ struct delayed_work timeout;
};
struct ieee80211softmac_bss_info {
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 5f48748..f7be1ac 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -84,7 +84,7 @@
};
extern void inet_twdr_hangman(unsigned long data);
-extern void inet_twdr_twkill_work(void *data);
+extern void inet_twdr_twkill_work(struct work_struct *work);
extern void inet_twdr_twcal_tick(unsigned long data);
#if (BITS_PER_LONG == 64)
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index f8cbe40..c089f93 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1030,7 +1030,7 @@
void sctp_inq_free(struct sctp_inq *);
void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
-void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
+void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t);
/* This is the structure we use to hold outbound chunks. You push
* chunks in and they automatically pop out the other end as bundled
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 44b2f82..b09bdd8 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -201,9 +201,14 @@
void *lldd_dev;
};
+struct sas_discovery_event {
+ struct work_struct work;
+ struct asd_sas_port *port;
+};
+
struct sas_discovery {
spinlock_t disc_event_lock;
- struct work_struct disc_work[DISC_NUM_EVENTS];
+ struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
unsigned long pending;
u8 fanout_sas_addr[8];
u8 eeds_a[8];
@@ -249,14 +254,19 @@
void *lldd_port; /* not touched by the sas class code */
};
+struct asd_sas_event {
+ struct work_struct work;
+ struct asd_sas_phy *phy;
+};
+
/* The phy pretty much is controlled by the LLDD.
* The class only reads those fields.
*/
struct asd_sas_phy {
/* private: */
/* protected by ha->event_lock */
- struct work_struct port_events[PORT_NUM_EVENTS];
- struct work_struct phy_events[PHY_NUM_EVENTS];
+ struct asd_sas_event port_events[PORT_NUM_EVENTS];
+ struct asd_sas_event phy_events[PHY_NUM_EVENTS];
unsigned long port_events_pending;
unsigned long phy_events_pending;
@@ -308,10 +318,15 @@
int queue_thread_kill;
};
+struct sas_ha_event {
+ struct work_struct work;
+ struct sas_ha_struct *ha;
+};
+
struct sas_ha_struct {
/* private: */
spinlock_t event_lock;
- struct work_struct ha_events[HA_NUM_EVENTS];
+ struct sas_ha_event ha_events[HA_NUM_EVENTS];
unsigned long pending;
struct scsi_core core;
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index fd35232..798f7c7 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -206,9 +206,9 @@
u8 flags;
struct list_head peers;
struct device dev;
- struct work_struct dev_loss_work;
+ struct delayed_work dev_loss_work;
struct work_struct scan_work;
- struct work_struct fail_io_work;
+ struct delayed_work fail_io_work;
struct work_struct stgt_delete_work;
struct work_struct rport_delete_work;
} __attribute__((aligned(sizeof(unsigned long))));
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 4b95c89..d5c218d 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -176,7 +176,7 @@
/* recovery fields */
int recovery_tmo;
- struct work_struct recovery_work;
+ struct delayed_work recovery_work;
int target_id;
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 4c43521..3372039 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -511,7 +511,7 @@
#ifdef CONFIG_SND_AC97_POWER_SAVE
unsigned int power_up; /* power states */
struct workqueue_struct *power_workq;
- struct work_struct power_work;
+ struct delayed_work power_work;
#endif
struct device dev;
};
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index 11702aa..2ee0616 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -182,7 +182,7 @@
unsigned char rcs0;
unsigned char rcs1;
struct workqueue_struct *workqueue;
- struct work_struct work;
+ struct delayed_work work;
void *change_callback_private;
void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1);
};
diff --git a/ipc/util.c b/ipc/util.c
index cd8bb14..a9b7a22 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -514,6 +514,11 @@
container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}
+static void ipc_do_vfree(struct work_struct *work)
+{
+ vfree(container_of(work, struct ipc_rcu_sched, work));
+}
+
/**
* ipc_schedule_free - free ipc + rcu space
* @head: RCU callback structure for queued work
@@ -528,7 +533,7 @@
struct ipc_rcu_sched *sched =
container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);
- INIT_WORK(&sched->work, vfree, sched);
+ INIT_WORK(&sched->work, ipc_do_vfree);
schedule_work(&sched->work);
}
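Since a work function can no longer point straight at a library helper such as vfree(), each such caller grows a one-line wrapper like ipc_do_vfree() above that performs the container_of() step itself. The same shape applies elsewhere; a hypothetical sketch with kfree() (my_obj is an illustrative name, not part of the patch):

	struct my_obj {
		struct work_struct	free_work;
		/* ... payload ... */
	};

	static void my_obj_free(struct work_struct *work)
	{
		kfree(container_of(work, struct my_obj, free_work));
	}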
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 2b76dee..8d2bea0 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -114,6 +114,7 @@
#endif /* CONFIG_KMOD */
struct subprocess_info {
+ struct work_struct work;
struct completion *complete;
char *path;
char **argv;
@@ -221,9 +222,10 @@
}
/* This is run by khelper thread */
-static void __call_usermodehelper(void *data)
+static void __call_usermodehelper(struct work_struct *work)
{
- struct subprocess_info *sub_info = data;
+ struct subprocess_info *sub_info =
+ container_of(work, struct subprocess_info, work);
pid_t pid;
int wait = sub_info->wait;
@@ -264,6 +266,8 @@
{
DECLARE_COMPLETION_ONSTACK(done);
struct subprocess_info sub_info = {
+ .work = __WORK_INITIALIZER(sub_info.work,
+ __call_usermodehelper),
.complete = &done,
.path = path,
.argv = argv,
@@ -272,7 +276,6 @@
.wait = wait,
.retval = 0,
};
- DECLARE_WORK(work, __call_usermodehelper, &sub_info);
if (!khelper_wq)
return -EBUSY;
@@ -280,7 +283,7 @@
if (path[0] == '\0')
return 0;
- queue_work(khelper_wq, &work);
+ queue_work(khelper_wq, &sub_info.work);
wait_for_completion(&done);
return sub_info.retval;
}
@@ -291,6 +294,8 @@
{
DECLARE_COMPLETION(done);
struct subprocess_info sub_info = {
+ .work = __WORK_INITIALIZER(sub_info.work,
+ __call_usermodehelper),
.complete = &done,
.path = path,
.argv = argv,
@@ -298,7 +303,6 @@
.retval = 0,
};
struct file *f;
- DECLARE_WORK(work, __call_usermodehelper, &sub_info);
if (!khelper_wq)
return -EBUSY;
@@ -318,7 +322,7 @@
}
sub_info.stdin = f;
- queue_work(khelper_wq, &work);
+ queue_work(khelper_wq, &sub_info.work);
wait_for_completion(&done);
return sub_info.retval;
}
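Callers that used to declare a standalone on-stack work item with DECLARE_WORK(work, fn, &args) now embed the work_struct inside the argument block itself, as done for subprocess_info above and kthread_create_info below. A stripped-down sketch under the same assumptions (my_args, my_fn and my_wq are illustrative names only):

	struct my_args {
		struct work_struct	work;
		struct completion	done;
		int			arg;
	};

	static void my_fn(struct work_struct *work)
	{
		struct my_args *args = container_of(work, struct my_args, work);
		/* ... use args->arg ... */
		complete(&args->done);
	}

	struct my_args args = {
		.work = __WORK_INITIALIZER(args.work, my_fn),
		.arg  = 42,
	};

	init_completion(&args.done);
	queue_work(my_wq, &args.work);
	wait_for_completion(&args.done);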
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4f9c60e..1db8c72 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -31,6 +31,8 @@
/* Result passed back to kthread_create() from keventd. */
struct task_struct *result;
struct completion done;
+
+ struct work_struct work;
};
struct kthread_stop_info
@@ -111,9 +113,10 @@
}
/* We are keventd: create a thread. */
-static void keventd_create_kthread(void *_create)
+static void keventd_create_kthread(struct work_struct *work)
{
- struct kthread_create_info *create = _create;
+ struct kthread_create_info *create =
+ container_of(work, struct kthread_create_info, work);
int pid;
/* We want our own signal handler (we take no signals by default). */
@@ -154,20 +157,20 @@
...)
{
struct kthread_create_info create;
- DECLARE_WORK(work, keventd_create_kthread, &create);
create.threadfn = threadfn;
create.data = data;
init_completion(&create.started);
init_completion(&create.done);
+ INIT_WORK(&create.work, keventd_create_kthread);
/*
* The workqueue needs to start up first:
*/
if (!helper_wq)
- work.func(work.data);
+ create.work.func(&create.work);
else {
- queue_work(helper_wq, &work);
+ queue_work(helper_wq, &create.work);
wait_for_completion(&create.done);
}
if (!IS_ERR(create.result)) {
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index f1f900a..678ec73 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -16,12 +16,12 @@
* callback we use.
*/
-static void do_poweroff(void *dummy)
+static void do_poweroff(struct work_struct *dummy)
{
kernel_power_off();
}
-static DECLARE_WORK(poweroff_work, do_poweroff, NULL);
+static DECLARE_WORK(poweroff_work, do_poweroff);
static void handle_poweroff(int key, struct tty_struct *tty)
{
diff --git a/kernel/relay.c b/kernel/relay.c
index f04bbdb..2b92e8e 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -308,9 +308,10 @@
* reason waking is deferred is that calling directly from write
* causes problems if you're writing from say the scheduler.
*/
-static void wakeup_readers(void *private)
+static void wakeup_readers(struct work_struct *work)
{
- struct rchan_buf *buf = private;
+ struct rchan_buf *buf =
+ container_of(work, struct rchan_buf, wake_readers.work);
wake_up_interruptible(&buf->read_wait);
}
@@ -328,7 +329,7 @@
if (init) {
init_waitqueue_head(&buf->read_wait);
kref_init(&buf->kref);
- INIT_WORK(&buf->wake_readers, NULL, NULL);
+ INIT_DELAYED_WORK(&buf->wake_readers, NULL);
} else {
cancel_delayed_work(&buf->wake_readers);
flush_scheduled_work();
@@ -549,7 +550,8 @@
buf->padding[old_subbuf];
smp_mb();
if (waitqueue_active(&buf->read_wait)) {
- PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf);
+ PREPARE_DELAYED_WORK(&buf->wake_readers,
+ wakeup_readers);
schedule_delayed_work(&buf->wake_readers, 1);
}
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 98489d8..c87b461 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -880,7 +880,7 @@
return 0;
}
-static void deferred_cad(void *dummy)
+static void deferred_cad(struct work_struct *dummy)
{
kernel_restart(NULL);
}
@@ -892,7 +892,7 @@
*/
void ctrl_alt_del(void)
{
- static DECLARE_WORK(cad_work, deferred_cad, NULL);
+ static DECLARE_WORK(cad_work, deferred_cad);
if (C_A_D)
schedule_work(&cad_work);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 17c2f03..8d1e7cb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -80,6 +80,29 @@
return list_empty(&wq->list);
}
+static inline void set_wq_data(struct work_struct *work, void *wq)
+{
+ unsigned long new, old, res;
+
+ /* assume the pending flag is already set and that the task has already
+ * been queued on this workqueue */
+ new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+ res = work->management;
+ if (res != new) {
+ do {
+ old = res;
+ new = (unsigned long) wq;
+ new |= (old & WORK_STRUCT_FLAG_MASK);
+ res = cmpxchg(&work->management, old, new);
+ } while (res != old);
+ }
+}
+
+static inline void *get_wq_data(struct work_struct *work)
+{
+ return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+}
+
/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
struct work_struct *work)
@@ -87,7 +110,7 @@
unsigned long flags;
spin_lock_irqsave(&cwq->lock, flags);
- work->wq_data = cwq;
+ set_wq_data(work, cwq);
list_add_tail(&work->entry, &cwq->worklist);
cwq->insert_sequence++;
wake_up(&cwq->more_work);
@@ -108,7 +131,7 @@
{
int ret = 0, cpu = get_cpu();
- if (!test_and_set_bit(0, &work->pending)) {
+ if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
if (unlikely(is_single_threaded(wq)))
cpu = singlethread_cpu;
BUG_ON(!list_empty(&work->entry));
@@ -122,38 +145,42 @@
static void delayed_work_timer_fn(unsigned long __data)
{
- struct work_struct *work = (struct work_struct *)__data;
- struct workqueue_struct *wq = work->wq_data;
+ struct delayed_work *dwork = (struct delayed_work *)__data;
+ struct workqueue_struct *wq = get_wq_data(&dwork->work);
int cpu = smp_processor_id();
if (unlikely(is_single_threaded(wq)))
cpu = singlethread_cpu;
- __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+ __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}
/**
* queue_delayed_work - queue work on a workqueue after delay
* @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
* @delay: number of jiffies to wait before queueing
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*/
int fastcall queue_delayed_work(struct workqueue_struct *wq,
- struct work_struct *work, unsigned long delay)
+ struct delayed_work *dwork, unsigned long delay)
{
int ret = 0;
- struct timer_list *timer = &work->timer;
+ struct timer_list *timer = &dwork->timer;
+ struct work_struct *work = &dwork->work;
- if (!test_and_set_bit(0, &work->pending)) {
+ if (delay == 0)
+ return queue_work(wq, work);
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
BUG_ON(timer_pending(timer));
BUG_ON(!list_empty(&work->entry));
/* This stores wq for the moment, for the timer_fn */
- work->wq_data = wq;
+ set_wq_data(work, wq);
timer->expires = jiffies + delay;
- timer->data = (unsigned long)work;
+ timer->data = (unsigned long)dwork;
timer->function = delayed_work_timer_fn;
add_timer(timer);
ret = 1;
@@ -172,19 +199,20 @@
* Returns 0 if @work was already on a queue, non-zero otherwise.
*/
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
- struct work_struct *work, unsigned long delay)
+ struct delayed_work *dwork, unsigned long delay)
{
int ret = 0;
- struct timer_list *timer = &work->timer;
+ struct timer_list *timer = &dwork->timer;
+ struct work_struct *work = &dwork->work;
- if (!test_and_set_bit(0, &work->pending)) {
+ if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
BUG_ON(timer_pending(timer));
BUG_ON(!list_empty(&work->entry));
/* This stores wq for the moment, for the timer_fn */
- work->wq_data = wq;
+ set_wq_data(work, wq);
timer->expires = jiffies + delay;
- timer->data = (unsigned long)work;
+ timer->data = (unsigned long)dwork;
timer->function = delayed_work_timer_fn;
add_timer_on(timer, cpu);
ret = 1;
@@ -212,15 +240,15 @@
while (!list_empty(&cwq->worklist)) {
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
- void (*f) (void *) = work->func;
- void *data = work->data;
+ work_func_t f = work->func;
list_del_init(cwq->worklist.next);
spin_unlock_irqrestore(&cwq->lock, flags);
- BUG_ON(work->wq_data != cwq);
- clear_bit(0, &work->pending);
- f(data);
+ BUG_ON(get_wq_data(work) != cwq);
+ if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+ work_release(work);
+ f(work);
spin_lock_irqsave(&cwq->lock, flags);
cwq->remove_sequence++;
@@ -468,38 +496,37 @@
/**
* schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
*
* After waiting for a given time this puts a job in the kernel-global
* workqueue.
*/
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
- return queue_delayed_work(keventd_wq, work, delay);
+ return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
* @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
* @delay: number of jiffies to wait
*
* After waiting for a given time this puts a job in the kernel-global
* workqueue on the specified CPU.
*/
int schedule_delayed_work_on(int cpu,
- struct work_struct *work, unsigned long delay)
+ struct delayed_work *dwork, unsigned long delay)
{
- return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+ return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
* schedule_on_each_cpu - call a function on each online CPU from keventd
* @func: the function to call
- * @info: a pointer to pass to func()
*
* Returns zero on success.
* Returns -ve errno on failure.
@@ -508,7 +535,7 @@
*
* schedule_on_each_cpu() is very slow.
*/
-int schedule_on_each_cpu(void (*func)(void *info), void *info)
+int schedule_on_each_cpu(work_func_t func)
{
int cpu;
struct work_struct *works;
@@ -519,7 +546,7 @@
mutex_lock(&workqueue_mutex);
for_each_online_cpu(cpu) {
- INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+ INIT_WORK(per_cpu_ptr(works, cpu), func);
__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
per_cpu_ptr(works, cpu));
}
@@ -539,12 +566,12 @@
* cancel_rearming_delayed_workqueue - reliably kill off a delayed
* work whose handler rearms the delayed work.
* @wq: the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
*/
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
- struct work_struct *work)
+ struct delayed_work *dwork)
{
- while (!cancel_delayed_work(work))
+ while (!cancel_delayed_work(dwork))
flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -552,18 +579,17 @@
/**
* cancel_rearming_delayed_work - reliably kill off a delayed keventd
* work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
*/
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
- cancel_rearming_delayed_workqueue(keventd_wq, work);
+ cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
/**
* execute_in_process_context - reliably execute the routine with user context
* @fn: the function to execute
- * @data: data to pass to the function
* @ew: guaranteed storage for the execute work structure (must
* be available when the work executes)
*
@@ -573,15 +599,14 @@
* Returns: 0 - function was executed
* 1 - function was scheduled for execution
*/
-int execute_in_process_context(void (*fn)(void *data), void *data,
- struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
if (!in_interrupt()) {
- fn(data);
+ fn(&ew->work);
return 0;
}
- INIT_WORK(&ew->work, fn, data);
+ INIT_WORK(&ew->work, fn);
schedule_work(&ew->work);
return 1;
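The single management word now carries both the flag bits and the owning queue pointer; because WORK_STRUCT_FLAG_MASK is 3UL this relies on work items being at least 4-byte aligned so the two low bits are free for flags. A short illustration of how the word is read back (this mirrors get_wq_data() and the test_bit() helpers above; it is not new API):

	unsigned long m = work->management;
	struct cpu_workqueue_struct *cwq =
		(void *)(m & WORK_STRUCT_WQ_DATA_MASK);	/* what get_wq_data() returns */
	int pending   = test_bit(WORK_STRUCT_PENDING, &work->management);
	int noautorel = test_bit(WORK_STRUCT_NOAUTOREL, &work->management);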
diff --git a/mm/slab.c b/mm/slab.c
index 3c4a7e3..5de8147 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
int node);
static int enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
+static void cache_reap(struct work_struct *unused);
/*
* This function must be completely optimized away if a constant is passed to
@@ -753,7 +753,7 @@
return g_cpucache_up == FULL;
}
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
@@ -916,16 +916,16 @@
*/
static void __devinit start_cpu_timer(int cpu)
{
- struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+ struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
/*
* When this gets called from do_initcalls via cpucache_init(),
* init_workqueues() has already run, so keventd will be setup
* at that time.
*/
- if (keventd_up() && reap_work->func == NULL) {
+ if (keventd_up() && reap_work->work.func == NULL) {
init_reap_node(cpu);
- INIT_WORK(reap_work, cache_reap, NULL);
+ INIT_DELAYED_WORK(reap_work, cache_reap);
schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
}
}
@@ -3815,7 +3815,7 @@
* If we cannot acquire the cache chain mutex then just give up - we'll try
* again on the next iteration.
*/
-static void cache_reap(void *unused)
+static void cache_reap(struct work_struct *unused)
{
struct kmem_cache *searchp;
struct kmem_list3 *l3;
diff --git a/mm/swap.c b/mm/swap.c
index 2e0e871..d9a3770 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -216,7 +216,7 @@
}
#ifdef CONFIG_NUMA
-static void lru_add_drain_per_cpu(void *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
lru_add_drain();
}
@@ -226,7 +226,7 @@
*/
int lru_add_drain_all(void)
{
- return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+ return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
#else
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5946ec6..3fc0abe 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1454,7 +1454,7 @@
#define LEC_ARP_REFRESH_INTERVAL (3*HZ)
-static void lec_arp_check_expire(void *data);
+static void lec_arp_check_expire(struct work_struct *work);
static void lec_arp_expire_arp(unsigned long data);
/*
@@ -1477,7 +1477,7 @@
INIT_HLIST_HEAD(&priv->lec_no_forward);
INIT_HLIST_HEAD(&priv->mcast_fwds);
spin_lock_init(&priv->lec_arp_lock);
- INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
+ INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
}
@@ -1875,10 +1875,11 @@
* to ESI_FORWARD_DIRECT. This causes the flush period to end
* regardless of the progress of the flush protocol.
*/
-static void lec_arp_check_expire(void *data)
+static void lec_arp_check_expire(struct work_struct *work)
{
unsigned long flags;
- struct lec_priv *priv = data;
+ struct lec_priv *priv =
+ container_of(work, struct lec_priv, lec_arp_work.work);
struct hlist_node *node, *next;
struct lec_arp_table *entry;
unsigned long now;
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 24cc95f..99136ba 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -92,7 +92,7 @@
spinlock_t lec_arp_lock;
struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */
struct atm_vcc *lecd;
- struct work_struct lec_arp_work; /* C10 */
+ struct delayed_work lec_arp_work; /* C10 */
unsigned int maximum_unknown_frame_count;
/*
* Within the period of time defined by this variable, the client will send
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3eeeb7a..d4c9356 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -237,9 +237,9 @@
kfree(data);
}
-static void add_conn(void *data)
+static void add_conn(struct work_struct *work)
{
- struct hci_conn *conn = data;
+ struct hci_conn *conn = container_of(work, struct hci_conn, work);
int i;
if (device_register(&conn->dev) < 0) {
@@ -272,14 +272,14 @@
dev_set_drvdata(&conn->dev, conn);
- INIT_WORK(&conn->work, add_conn, (void *) conn);
+ INIT_WORK(&conn->work, add_conn);
schedule_work(&conn->work);
}
-static void del_conn(void *data)
+static void del_conn(struct work_struct *work)
{
- struct hci_conn *conn = data;
+ struct hci_conn *conn = container_of(work, struct hci_conn, work);
device_del(&conn->dev);
}
@@ -287,7 +287,7 @@
{
BT_DBG("conn %p", conn);
- INIT_WORK(&conn->work, del_conn, (void *) conn);
+ INIT_WORK(&conn->work, del_conn);
schedule_work(&conn->work);
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f753c40..55bb263 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -77,12 +77,16 @@
* Called from work queue to allow for calling functions that
* might sleep (such as speed check), and to debounce.
*/
-static void port_carrier_check(void *arg)
+static void port_carrier_check(struct work_struct *work)
{
- struct net_device *dev = arg;
struct net_bridge_port *p;
+ struct net_device *dev;
struct net_bridge *br;
+ dev = container_of(work, struct net_bridge_port,
+ carrier_check.work)->dev;
+ work_release(work);
+
rtnl_lock();
p = dev->br_port;
if (!p)
@@ -276,7 +280,7 @@
p->port_no = index;
br_init_port(p);
p->state = BR_STATE_DISABLED;
- INIT_WORK(&p->carrier_check, port_carrier_check, dev);
+ INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
br_stp_port_timer_init(p);
kobject_init(&p->kobj);
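port_carrier_check() above is the non-auto-release case described by the work_release() comment in workqueue.h: the handler copies whatever it needs out of the container first, then calls work_release(), after which the item may be requeued or freed. A stripped-down sketch of the same shape (my_port and my_check are hypothetical names):

	struct my_port {
		struct net_device	*dev;
		struct delayed_work	check;
	};

	static void my_check(struct work_struct *work)
	{
		struct my_port *p = container_of(work, struct my_port, check.work);
		struct net_device *dev = p->dev;	/* grab data before releasing */

		work_release(work);	/* clears PENDING; the item may now be reused */
		/* ... 'dev' is still safe to use here ... */
	}

	INIT_DELAYED_WORK_NAR(&p->check, my_check);
	schedule_delayed_work(&p->check, HZ);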
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74258d8..3a534e9 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@
struct timer_list hold_timer;
struct timer_list message_age_timer;
struct kobject kobj;
- struct work_struct carrier_check;
+ struct delayed_work carrier_check;
struct rcu_head rcu;
};
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114..549a2ce 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -34,8 +34,8 @@
static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;
-static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static void linkwatch_event(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
@@ -127,7 +127,7 @@
}
-static void linkwatch_event(void *dummy)
+static void linkwatch_event(struct work_struct *dummy)
{
/* Limit the number of linkwatch events to one
* per second so that a runaway driver does not
@@ -171,10 +171,9 @@
unsigned long delay = linkwatch_nextevent - jiffies;
/* If we wrap around we'll delay it by at most HZ. */
- if (!delay || delay > HZ)
- schedule_work(&linkwatch_work);
- else
- schedule_delayed_work(&linkwatch_work, delay);
+ if (delay > HZ)
+ delay = 0;
+ schedule_delayed_work(&linkwatch_work, delay);
}
}
}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3c58846..b3c559b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -50,9 +50,10 @@
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
{
- struct netpoll_info *npinfo = p;
+ struct netpoll_info *npinfo =
+ container_of(work, struct netpoll_info, tx_work.work);
struct sk_buff *skb;
while ((skb = skb_dequeue(&npinfo->txq))) {
@@ -72,8 +73,6 @@
schedule_delayed_work(&npinfo->tx_work, HZ/10);
return;
}
-
- netif_tx_unlock_bh(dev);
}
}
@@ -263,7 +262,7 @@
if (status != NETDEV_TX_OK) {
skb_queue_tail(&npinfo->txq, skb);
- schedule_work(&npinfo->tx_work);
+ schedule_delayed_work(&npinfo->tx_work, 0);
}
}
@@ -628,7 +627,7 @@
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
skb_queue_head_init(&npinfo->txq);
- INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+ INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
atomic_set(&npinfo->refcnt, 1);
} else {
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 7b52f2a..4c9e267 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -32,8 +32,7 @@
.tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
(unsigned long)&dccp_death_row),
.twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work,
- inet_twdr_twkill_work,
- &dccp_death_row),
+ inet_twdr_twkill_work),
/* Short-time timewait calendar */
.twcal_hand = -1,
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index cf51c87..08386c1 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -58,9 +58,11 @@
}
void
-ieee80211softmac_assoc_timeout(void *d)
+ieee80211softmac_assoc_timeout(struct work_struct *work)
{
- struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+ struct ieee80211softmac_device *mac =
+ container_of(work, struct ieee80211softmac_device,
+ associnfo.timeout.work);
struct ieee80211softmac_network *n;
mutex_lock(&mac->associnfo.mutex);
@@ -186,9 +188,11 @@
/* This function is called to handle userspace requests (asynchronously) */
void
-ieee80211softmac_assoc_work(void *d)
+ieee80211softmac_assoc_work(struct work_struct *work)
{
- struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+ struct ieee80211softmac_device *mac =
+ container_of(work, struct ieee80211softmac_device,
+ associnfo.work.work);
struct ieee80211softmac_network *found = NULL;
struct ieee80211_network *net = NULL, *best = NULL;
int bssvalid;
@@ -412,7 +416,7 @@
network->authenticated = 0;
/* we don't want to do this more than once ... */
network->auth_desynced_once = 1;
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
break;
}
default:
@@ -446,7 +450,7 @@
ieee80211softmac_disassoc(mac);
/* try to reassociate */
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
return 0;
}
@@ -466,7 +470,7 @@
dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
return 0;
}
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
return 0;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 0612015..6012705 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -26,7 +26,7 @@
#include "ieee80211softmac_priv.h"
-static void ieee80211softmac_auth_queue(void *data);
+static void ieee80211softmac_auth_queue(struct work_struct *work);
/* Queues an auth request to the desired AP */
int
@@ -54,14 +54,14 @@
auth->mac = mac;
auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
- INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
+ INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
/* Lock (for list) */
spin_lock_irqsave(&mac->lock, flags);
/* add to list */
list_add_tail(&auth->list, &mac->auth_queue);
- schedule_work(&auth->work);
+ schedule_delayed_work(&auth->work, 0);
spin_unlock_irqrestore(&mac->lock, flags);
return 0;
@@ -70,14 +70,15 @@
/* Sends an auth request to the desired AP and handles timeouts */
static void
-ieee80211softmac_auth_queue(void *data)
+ieee80211softmac_auth_queue(struct work_struct *work)
{
struct ieee80211softmac_device *mac;
struct ieee80211softmac_auth_queue_item *auth;
struct ieee80211softmac_network *net;
unsigned long flags;
- auth = (struct ieee80211softmac_auth_queue_item *)data;
+ auth = container_of(work, struct ieee80211softmac_auth_queue_item,
+ work.work);
net = auth->net;
mac = auth->mac;
@@ -118,9 +119,11 @@
/* Sends a response to an auth challenge (for shared key auth). */
static void
-ieee80211softmac_auth_challenge_response(void *_aq)
+ieee80211softmac_auth_challenge_response(struct work_struct *work)
{
- struct ieee80211softmac_auth_queue_item *aq = _aq;
+ struct ieee80211softmac_auth_queue_item *aq =
+ container_of(work, struct ieee80211softmac_auth_queue_item,
+ work.work);
/* Send our response */
ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@ -234,8 +237,8 @@
* we have obviously already sent the initial auth
* request. */
cancel_delayed_work(&aq->work);
- INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
- schedule_work(&aq->work);
+ INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
+ schedule_delayed_work(&aq->work, 0);
spin_unlock_irqrestore(&mac->lock, flags);
return 0;
case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -398,6 +401,6 @@
ieee80211softmac_deauth_from_net(mac, net);
/* let's try to re-associate */
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
return 0;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index f34fa2e..b901565 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -73,10 +73,12 @@
static void
-ieee80211softmac_notify_callback(void *d)
+ieee80211softmac_notify_callback(struct work_struct *work)
{
- struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
- kfree(d);
+ struct ieee80211softmac_event *pevent =
+ container_of(work, struct ieee80211softmac_event, work.work);
+ struct ieee80211softmac_event event = *pevent;
+ kfree(pevent);
event.fun(event.mac->dev, event.event_type, event.context);
}
@@ -99,7 +101,7 @@
return -ENOMEM;
eventptr->event_type = event;
- INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr);
+ INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
eventptr->fun = fun;
eventptr->context = context;
eventptr->mac = mac;
@@ -170,7 +172,7 @@
/* User may have subscribed to ANY event, so
* we tell them which event triggered it. */
eventptr->event_type = event;
- schedule_work(&eventptr->work);
+ schedule_delayed_work(&eventptr->work, 0);
}
}
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 33aff4f..256207b 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -58,8 +58,8 @@
INIT_LIST_HEAD(&softmac->events);
mutex_init(&softmac->associnfo.mutex);
- INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac);
- INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac);
+ INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
+ INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
softmac->start_scan = ieee80211softmac_start_scan_implementation;
softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 0642e09..c0dbe07 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -78,7 +78,7 @@
/* private definitions and prototypes */
/*** prototypes from _scan.c */
-void ieee80211softmac_scan(void *sm);
+void ieee80211softmac_scan(struct work_struct *work);
/* for internal use if scanning is needed */
int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
@@ -149,7 +149,7 @@
int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
/*** prototypes from _assoc.c */
-void ieee80211softmac_assoc_work(void *d);
+void ieee80211softmac_assoc_work(struct work_struct *work);
int ieee80211softmac_handle_assoc_response(struct net_device * dev,
struct ieee80211_assoc_response * resp,
struct ieee80211_network * network);
@@ -157,7 +157,7 @@
struct ieee80211_disassoc * disassoc);
int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
struct ieee80211_reassoc_request * reassoc);
-void ieee80211softmac_assoc_timeout(void *d);
+void ieee80211softmac_assoc_timeout(struct work_struct *work);
void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
@@ -207,7 +207,7 @@
struct ieee80211softmac_device *mac; /* SoftMAC device */
u8 retry; /* Retry limit */
u8 state; /* Auth State */
- struct work_struct work; /* Work queue */
+ struct delayed_work work; /* Work queue */
};
/* scanning information */
@@ -219,7 +219,8 @@
stop:1;
u8 skip_flags;
struct completion finished;
- struct work_struct softmac_scan;
+ struct delayed_work softmac_scan;
+ struct ieee80211softmac_device *mac;
};
/* private event struct */
@@ -227,7 +228,7 @@
struct list_head list;
int event_type;
void *event_context;
- struct work_struct work;
+ struct delayed_work work;
notify_function_ptr fun;
void *context;
struct ieee80211softmac_device *mac;
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index 5507fea..0c85d6c 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -90,12 +90,14 @@
/* internal scanning implementation follows */
-void ieee80211softmac_scan(void *d)
+void ieee80211softmac_scan(struct work_struct *work)
{
int invalid_channel;
u8 current_channel_idx;
- struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
- struct ieee80211softmac_scaninfo *si = sm->scaninfo;
+ struct ieee80211softmac_scaninfo *si =
+ container_of(work, struct ieee80211softmac_scaninfo,
+ softmac_scan.work);
+ struct ieee80211softmac_device *sm = si->mac;
unsigned long flags;
while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
@@ -146,7 +148,8 @@
struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
if (unlikely(!info))
return NULL;
- INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
+ INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
+ info->mac = mac;
init_completion(&info->finished);
return info;
}
@@ -187,7 +190,7 @@
sm->scaninfo->started = 1;
sm->scaninfo->stop = 0;
INIT_COMPLETION(sm->scaninfo->finished);
- schedule_work(&sm->scaninfo->softmac_scan);
+ schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
spin_unlock_irqrestore(&sm->lock, flags);
return 0;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 23068a8..2ffaebd 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -122,7 +122,7 @@
sm->associnfo.associating = 1;
/* queue lower level code to do work (if necessary) */
- schedule_work(&sm->associnfo.work);
+ schedule_delayed_work(&sm->associnfo.work, 0);
out:
mutex_unlock(&sm->associnfo.mutex);
@@ -356,7 +356,7 @@
/* force reassociation */
mac->associnfo.bssvalid = 0;
if (mac->associnfo.associated)
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
} else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
/* the bssid we have is no longer fixed */
mac->associnfo.bssfixed = 0;
@@ -373,7 +373,7 @@
/* tell the other code that this bssid should be used no matter what */
mac->associnfo.bssfixed = 1;
/* queue associate if new bssid or (old one again and not associated) */
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
}
out:
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index cdd8053..8c74f91 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -197,9 +197,10 @@
extern void twkill_slots_invalid(void);
-void inet_twdr_twkill_work(void *data)
+void inet_twdr_twkill_work(struct work_struct *work)
{
- struct inet_timewait_death_row *twdr = data;
+ struct inet_timewait_death_row *twdr =
+ container_of(work, struct inet_timewait_death_row, twkill_work);
int i;
if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f261616..9b93338 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -221,10 +221,10 @@
* Timer for checking the defense
*/
#define DEFENSE_TIMER_PERIOD 1*HZ
-static void defense_work_handler(void *data);
-static DECLARE_WORK(defense_work, defense_work_handler, NULL);
+static void defense_work_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
-static void defense_work_handler(void *data)
+static void defense_work_handler(struct work_struct *work)
{
update_defense_level();
if (atomic_read(&ip_vs_dropentry))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6dddf59..4a3889d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -45,8 +45,7 @@
.tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
(unsigned long)&tcp_death_row),
.twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
- inet_twdr_twkill_work,
- &tcp_death_row),
+ inet_twdr_twkill_work),
/* Short-time timewait calendar */
.twcal_hand = -1,
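Statically embedded work items use the initializer macro rather than INIT_WORK(); __WORK_INITIALIZER() now takes only the work item and its handler. A hedged sketch, analogous to tcp_death_row above but using an invented bar_reaper object:

#include <linux/workqueue.h>

struct bar_reaper {                     /* invented example struct */
        int slots;
        struct work_struct reap_work;
};

static void bar_reap(struct work_struct *work)
{
        struct bar_reaper *r =
                container_of(work, struct bar_reaper, reap_work);

        r->slots = 0;                   /* placeholder for real reaping */
}

static struct bar_reaper bar = {
        .slots     = 8,
        /* two-argument form: the old third (data) argument is gone */
        .reap_work = __WORK_INITIALIZER(bar.reap_work, bar_reap),
};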
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index d50a020..262bda8 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -61,7 +61,7 @@
static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
static void ircomm_tty_hangup(struct tty_struct *tty);
-static void ircomm_tty_do_softint(void *private_);
+static void ircomm_tty_do_softint(struct work_struct *work);
static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
static void ircomm_tty_stop(struct tty_struct *tty);
@@ -389,7 +389,7 @@
self->flow = FLOW_STOP;
self->line = line;
- INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self);
+ INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
self->close_delay = 5*HZ/10;
@@ -594,15 +594,16 @@
}
/*
- * Function ircomm_tty_do_softint (private_)
+ * Function ircomm_tty_do_softint (work)
*
* We use this routine to give the write wakeup to the user at a
* safe time (as fast as possible after the write has completed). This
* can be compared to the Tx interrupt.
*/
-static void ircomm_tty_do_softint(void *private_)
+static void ircomm_tty_do_softint(struct work_struct *work)
{
- struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_;
+ struct ircomm_tty_cb *self =
+ container_of(work, struct ircomm_tty_cb, tqueue);
struct tty_struct *tty;
unsigned long flags;
struct sk_buff *skb, *ctrl_skb;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 39471d3..ad0057d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -61,7 +61,7 @@
#include <net/sctp/sm.h>
/* Forward declarations for internal functions. */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
/* 1st Level Abstractions. */
@@ -269,9 +269,7 @@
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
- sctp_inq_set_th_handler(&asoc->base.inqueue,
- (void (*)(void *))sctp_assoc_bh_rcv,
- asoc);
+ sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
/* Create an output queue. */
sctp_outq_init(asoc, &asoc->outqueue);
@@ -946,8 +944,11 @@
}
/* Do delayed input processing. This is scheduled by sctp_rcv(). */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
+static void sctp_assoc_bh_rcv(struct work_struct *work)
{
+ struct sctp_association *asoc =
+ container_of(work, struct sctp_association,
+ base.inqueue.immediate);
struct sctp_endpoint *ep;
struct sctp_chunk *chunk;
struct sock *sk;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 33a42e9..1297569 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -61,7 +61,7 @@
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);
+static void sctp_endpoint_bh_rcv(struct work_struct *work);
/*
* Initialize the base fields of the endpoint structure.
@@ -89,8 +89,7 @@
sctp_inq_init(&ep->base.inqueue);
/* Set its top-half handler */
- sctp_inq_set_th_handler(&ep->base.inqueue,
- (void (*)(void *))sctp_endpoint_bh_rcv, ep);
+ sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
/* Initialize the bind addr area */
sctp_bind_addr_init(&ep->base.bind_addr, 0);
@@ -318,8 +317,11 @@
/* Do delayed input processing. This is scheduled by sctp_rcv().
* This may be called on BH or task time.
*/
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
+static void sctp_endpoint_bh_rcv(struct work_struct *work)
{
+ struct sctp_endpoint *ep =
+ container_of(work, struct sctp_endpoint,
+ base.inqueue.immediate);
struct sctp_association *asoc;
struct sock *sk;
struct sctp_transport *transport;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cf6deed..71b0746 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -54,7 +54,7 @@
queue->in_progress = NULL;
/* Create a task for delivering data. */
- INIT_WORK(&queue->immediate, NULL, NULL);
+ INIT_WORK(&queue->immediate, NULL);
queue->malloced = 0;
}
@@ -97,7 +97,7 @@
* on the BH related data structures.
*/
list_add_tail(&chunk->list, &q->in_chunk_list);
- q->immediate.func(q->immediate.data);
+ q->immediate.func(&q->immediate);
}
/* Extract a chunk from an SCTP inqueue.
@@ -205,9 +205,8 @@
* The intent is that this routine will pull stuff out of the
* inqueue and process it.
*/
-void sctp_inq_set_th_handler(struct sctp_inq *q,
- void (*callback)(void *), void *arg)
+void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
- INIT_WORK(&q->immediate, callback, arg);
+ INIT_WORK(&q->immediate, callback);
}
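With the handler prototype fixed to work_func_t, a setter can install arbitrary handlers, and the owner may invoke them synchronously through the work item's ->func pointer, as sctp_inq_push() does above. An illustrative sketch under invented names (demo_inq and its helpers are not part of this patch):

#include <linux/workqueue.h>

struct demo_inq {                       /* invented example struct */
        int pending;
        struct work_struct immediate;
};

/* install a work_func_t, i.e. void (*)(struct work_struct *) */
static void demo_inq_set_handler(struct demo_inq *q, work_func_t callback)
{
        INIT_WORK(&q->immediate, callback);
}

static void demo_rcv(struct work_struct *work)
{
        struct demo_inq *q = container_of(work, struct demo_inq, immediate);

        q->pending = 0;                 /* placeholder for real processing */
}

static void demo_push(struct demo_inq *q)
{
        q->pending++;
        /* run the top-half handler in the caller's context, as sctp does */
        q->immediate.func(&q->immediate);
}

/* typical usage: demo_inq_set_handler(&my_q, demo_rcv); then demo_push(). */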
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388..d96fd46 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -284,8 +284,8 @@
static struct file_operations content_file_operations;
static struct file_operations cache_flush_operations;
-static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static void do_cache_clean(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
void cache_register(struct cache_detail *cd)
{
@@ -337,7 +337,7 @@
spin_unlock(&cache_list_lock);
/* start the cleaning process */
- schedule_work(&cache_cleaner);
+ schedule_delayed_work(&cache_cleaner, 0);
}
int cache_unregister(struct cache_detail *cd)
@@ -461,7 +461,7 @@
/*
* We want to regularly clean the cache, so we need to schedule some work ...
*/
-static void do_cache_clean(void *data)
+static void do_cache_clean(struct work_struct *work)
{
int delay = 5;
if (cache_clean() == -1)
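A statically declared delayed work item can also reschedule itself with a computed delay, which is how do_cache_clean() above keeps the cleaner running; a zero delay behaves like the old schedule_work(). A minimal sketch with invented names (demo_cleaner, demo_clean):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void demo_clean(struct work_struct *work);
/* DECLARE_DELAYED_WORK() now takes only the name and the handler */
static DECLARE_DELAYED_WORK(demo_cleaner, demo_clean);

static void demo_clean(struct work_struct *work)
{
        int delay = 30;                 /* seconds; placeholder policy */

        /* ... scan and expire entries here ... */

        schedule_delayed_work(&demo_cleaner, delay * HZ);
}

static void demo_start(void)
{
        /* first run as soon as possible, like the old schedule_work() */
        schedule_delayed_work(&demo_cleaner, 0);
}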
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a..49dba5f 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -54,10 +54,11 @@
}
static void
-rpc_timeout_upcall_queue(void *data)
+rpc_timeout_upcall_queue(struct work_struct *work)
{
LIST_HEAD(free_list);
- struct rpc_inode *rpci = (struct rpc_inode *)data;
+ struct rpc_inode *rpci =
+ container_of(work, struct rpc_inode, queue_timeout.work);
struct inode *inode = &rpci->vfs_inode;
void (*destroy_msg)(struct rpc_pipe_msg *);
@@ -837,7 +838,8 @@
INIT_LIST_HEAD(&rpci->pipe);
rpci->pipelen = 0;
init_waitqueue_head(&rpci->waitq);
- INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+ INIT_DELAYED_WORK(&rpci->queue_timeout,
+ rpc_timeout_upcall_queue);
rpci->ops = NULL;
}
}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a1ab4ee..eff44bc 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -41,7 +41,7 @@
static void __rpc_default_timer(struct rpc_task *task);
static void rpciod_killall(void);
-static void rpc_async_schedule(void *);
+static void rpc_async_schedule(struct work_struct *);
/*
* RPC tasks sit here while waiting for conditions to improve.
@@ -305,7 +305,7 @@
if (RPC_IS_ASYNC(task)) {
int status;
- INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
+ INIT_WORK(&task->u.tk_work, rpc_async_schedule);
status = queue_work(task->tk_workqueue, &task->u.tk_work);
if (status < 0) {
printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
@@ -695,9 +695,9 @@
return __rpc_execute(task);
}
-static void rpc_async_schedule(void *arg)
+static void rpc_async_schedule(struct work_struct *work)
{
- __rpc_execute((struct rpc_task *)arg);
+ __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}
/**
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 8085747..4f9a5d9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -479,9 +479,10 @@
return status;
}
-static void xprt_autoclose(void *args)
+static void xprt_autoclose(struct work_struct *work)
{
- struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+ struct rpc_xprt *xprt =
+ container_of(work, struct rpc_xprt, task_cleanup);
xprt_disconnect(xprt);
xprt->ops->close(xprt);
@@ -932,7 +933,7 @@
INIT_LIST_HEAD(&xprt->free);
INIT_LIST_HEAD(&xprt->recv);
- INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
+ INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
init_timer(&xprt->timer);
xprt->timer.function = xprt_init_autodisconnect;
xprt->timer.data = (unsigned long) xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 757fc91..cfe3c15 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1060,13 +1060,14 @@
/**
* xs_udp_connect_worker - set up a UDP socket
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
*
* Invoked by a work queue tasklet.
*/
-static void xs_udp_connect_worker(void *args)
+static void xs_udp_connect_worker(struct work_struct *work)
{
- struct rpc_xprt *xprt = (struct rpc_xprt *) args;
+ struct rpc_xprt *xprt =
+ container_of(work, struct rpc_xprt, connect_worker.work);
struct socket *sock = xprt->sock;
int err, status = -EIO;
@@ -1144,13 +1145,14 @@
/**
* xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
*
* Invoked by a work queue tasklet.
*/
-static void xs_tcp_connect_worker(void *args)
+static void xs_tcp_connect_worker(struct work_struct *work)
{
- struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+ struct rpc_xprt *xprt =
+ container_of(work, struct rpc_xprt, connect_worker.work);
struct socket *sock = xprt->sock;
int err, status = -EIO;
@@ -1262,7 +1264,7 @@
xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
} else {
dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
- schedule_work(&xprt->connect_worker);
+ schedule_delayed_work(&xprt->connect_worker, 0);
/* flush_scheduled_work can sleep... */
if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1377,7 @@
/* XXX: header size can vary due to auth type, IPv6, etc. */
xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
- INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+ INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker);
xprt->bind_timeout = XS_BIND_TO;
xprt->connect_timeout = XS_UDP_CONN_TO;
xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1422,7 @@
xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
- INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+ INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker);
xprt->bind_timeout = XS_BIND_TO;
xprt->connect_timeout = XS_TCP_CONN_TO;
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 64d3938..f6c77bd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -392,7 +392,7 @@
xfrm_pol_put(policy);
}
-static void xfrm_policy_gc_task(void *data)
+static void xfrm_policy_gc_task(struct work_struct *work)
{
struct xfrm_policy *policy;
struct hlist_node *entry, *tmp;
@@ -580,7 +580,7 @@
static DEFINE_MUTEX(hash_resize_mutex);
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
{
int dir, total;
@@ -597,7 +597,7 @@
mutex_unlock(&hash_resize_mutex);
}
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
/* Generate new index... KAME seems to generate them ordered by cost
* of an absolute unpredictability of ordering of rules. This will not pass. */
@@ -2116,7 +2116,7 @@
panic("XFRM: failed to allocate bydst hash\n");
}
- INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
+ INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
register_netdevice_notifier(&xfrm_dev_notifier);
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 864962b..da54a64 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -115,7 +115,7 @@
static DEFINE_MUTEX(hash_resize_mutex);
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
{
struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
unsigned long nsize, osize;
@@ -168,7 +168,7 @@
mutex_unlock(&hash_resize_mutex);
}
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);
@@ -207,7 +207,7 @@
kfree(x);
}
-static void xfrm_state_gc_task(void *data)
+static void xfrm_state_gc_task(struct work_struct *data)
{
struct xfrm_state *x;
struct hlist_node *entry, *tmp;
@@ -1568,6 +1568,6 @@
panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
- INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
+ INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}
diff --git a/security/keys/key.c b/security/keys/key.c
index 80de8c3..70eacbe 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -30,8 +30,8 @@
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);
-static void key_cleanup(void *data);
-static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);
+static void key_cleanup(struct work_struct *work);
+static DECLARE_WORK(key_cleanup_task, key_cleanup);
/* we serialise key instantiation and link */
DECLARE_RWSEM(key_construction_sem);
@@ -552,7 +552,7 @@
* do cleaning up in process context so that we don't have to disable
* interrupts all over the place
*/
-static void key_cleanup(void *data)
+static void key_cleanup(struct work_struct *work)
{
struct rb_node *_n;
struct key *key;
diff --git a/sound/aoa/aoa-gpio.h b/sound/aoa/aoa-gpio.h
index 3a61f31..ee64f5d 100644
--- a/sound/aoa/aoa-gpio.h
+++ b/sound/aoa/aoa-gpio.h
@@ -59,10 +59,10 @@
};
struct gpio_notification {
+ struct delayed_work work;
notify_func_t notify;
void *data;
void *gpio_private;
- struct work_struct work;
struct mutex mutex;
};
diff --git a/sound/aoa/core/snd-aoa-gpio-feature.c b/sound/aoa/core/snd-aoa-gpio-feature.c
index 40eb47e..2b03bc7 100644
--- a/sound/aoa/core/snd-aoa-gpio-feature.c
+++ b/sound/aoa/core/snd-aoa-gpio-feature.c
@@ -195,9 +195,10 @@
ftr_gpio_set_lineout(rt, (s>>2)&1);
}
-static void ftr_handle_notify(void *data)
+static void ftr_handle_notify(struct work_struct *work)
{
- struct gpio_notification *notif = data;
+ struct gpio_notification *notif =
+ container_of(work, struct gpio_notification, work.work);
mutex_lock(&notif->mutex);
if (notif->notify)
@@ -253,12 +254,9 @@
ftr_gpio_all_amps_off(rt);
rt->implementation_private = 0;
- INIT_WORK(&rt->headphone_notify.work, ftr_handle_notify,
- &rt->headphone_notify);
- INIT_WORK(&rt->line_in_notify.work, ftr_handle_notify,
- &rt->line_in_notify);
- INIT_WORK(&rt->line_out_notify.work, ftr_handle_notify,
- &rt->line_out_notify);
+ INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify);
+ INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify);
+ INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify);
mutex_init(&rt->headphone_notify.mutex);
mutex_init(&rt->line_in_notify.mutex);
mutex_init(&rt->line_out_notify.mutex);
@@ -287,7 +285,7 @@
{
struct gpio_notification *notif = data;
- schedule_work(&notif->work);
+ schedule_delayed_work(&notif->work, 0);
return IRQ_HANDLED;
}
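Interrupt handlers keep queuing the notification work; only the queuing call and the way the handler recovers its object change. A hypothetical sketch mirroring the gpio_notification pattern above (demo_notif and friends are invented):

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_notif {                     /* invented example struct */
        struct delayed_work work;
        int count;
};

static void demo_handle_notify(struct work_struct *work)
{
        /* the delayed_work's .work member is what gets passed in */
        struct demo_notif *notif =
                container_of(work, struct demo_notif, work.work);

        notif->count++;                 /* placeholder for the real notify */
}

static void demo_notif_init(struct demo_notif *notif)
{
        INIT_DELAYED_WORK(&notif->work, demo_handle_notify);
}

static irqreturn_t demo_irq(int irq, void *data)
{
        struct demo_notif *notif = data;

        /* safe from hard-irq context; zero delay queues it immediately */
        schedule_delayed_work(&notif->work, 0);
        return IRQ_HANDLED;
}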
diff --git a/sound/aoa/core/snd-aoa-gpio-pmf.c b/sound/aoa/core/snd-aoa-gpio-pmf.c
index 2836c32..5ca2220 100644
--- a/sound/aoa/core/snd-aoa-gpio-pmf.c
+++ b/sound/aoa/core/snd-aoa-gpio-pmf.c
@@ -69,9 +69,10 @@
pmf_gpio_set_lineout(rt, (s>>2)&1);
}
-static void pmf_handle_notify(void *data)
+static void pmf_handle_notify(struct work_struct *work)
{
- struct gpio_notification *notif = data;
+ struct gpio_notification *notif =
+ container_of(work, struct gpio_notification, work.work);
mutex_lock(&notif->mutex);
if (notif->notify)
@@ -83,12 +84,9 @@
{
pmf_gpio_all_amps_off(rt);
rt->implementation_private = 0;
- INIT_WORK(&rt->headphone_notify.work, pmf_handle_notify,
- &rt->headphone_notify);
- INIT_WORK(&rt->line_in_notify.work, pmf_handle_notify,
- &rt->line_in_notify);
- INIT_WORK(&rt->line_out_notify.work, pmf_handle_notify,
- &rt->line_out_notify);
+ INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify);
+ INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify);
+ INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify);
mutex_init(&rt->headphone_notify.mutex);
mutex_init(&rt->line_in_notify.mutex);
mutex_init(&rt->line_out_notify.mutex);
@@ -129,7 +127,7 @@
{
struct gpio_notification *notif = data;
- schedule_work(&notif->work);
+ schedule_delayed_work(&notif->work, 0);
}
static int pmf_set_notify(struct gpio_runtime *rt,
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 12ffffc..d2f2c50 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -35,7 +35,7 @@
#define AK4114_ADDR 0x00 /* fixed address */
-static void ak4114_stats(void *);
+static void ak4114_stats(struct work_struct *work);
static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val)
{
@@ -158,7 +158,7 @@
reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN);
/* bring up statistics / event queing */
chip->init = 0;
- INIT_WORK(&chip->work, ak4114_stats, chip);
+ INIT_DELAYED_WORK(&chip->work, ak4114_stats);
queue_delayed_work(chip->workqueue, &chip->work, HZ / 10);
}
@@ -561,9 +561,9 @@
return res;
}
-static void ak4114_stats(void *data)
+static void ak4114_stats(struct work_struct *work)
{
- struct ak4114 *chip = (struct ak4114 *)data;
+ struct ak4114 *chip = container_of(work, struct ak4114, work.work);
if (chip->init)
return;
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 6577b23..7abcb10 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1927,9 +1927,10 @@
static struct snd_ac97_build_ops null_build_ops;
#ifdef CONFIG_SND_AC97_POWER_SAVE
-static void do_update_power(void *data)
+static void do_update_power(struct work_struct *work)
{
- update_power_regs(data);
+ update_power_regs(
+ container_of(work, struct snd_ac97, power_work.work));
}
#endif
@@ -1989,7 +1990,7 @@
mutex_init(&ac97->page_mutex);
#ifdef CONFIG_SND_AC97_POWER_SAVE
ac97->power_workq = create_workqueue("ac97");
- INIT_WORK(&ac97->power_work, do_update_power, ac97);
+ INIT_DELAYED_WORK(&ac97->power_work, do_update_power);
#endif
#ifdef CONFIG_PCI
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 9c3d7ac..71482c1 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -272,10 +272,11 @@
/*
* process queued unsolicited events
*/
-static void process_unsol_events(void *data)
+static void process_unsol_events(struct work_struct *work)
{
- struct hda_bus *bus = data;
- struct hda_bus_unsolicited *unsol = bus->unsol;
+ struct hda_bus_unsolicited *unsol =
+ container_of(work, struct hda_bus_unsolicited, work);
+ struct hda_bus *bus = unsol->bus;
struct hda_codec *codec;
unsigned int rp, caddr, res;
@@ -314,7 +315,8 @@
kfree(unsol);
return -ENOMEM;
}
- INIT_WORK(&unsol->work, process_unsol_events, bus);
+ INIT_WORK(&unsol->work, process_unsol_events);
+ unsol->bus = bus;
bus->unsol = unsol;
return 0;
}
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index f9416c3..9ca1baf 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -206,6 +206,7 @@
/* workqueue */
struct workqueue_struct *workq;
struct work_struct work;
+ struct hda_bus *bus;
};
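Where the structure embedding the work item cannot reach the context the handler needs, the patch stores an explicit back-pointer at init time (unsol->bus above). A hedged sketch of that pattern with invented demo_bus/demo_unsol types:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_bus;

struct demo_unsol {                     /* invented example struct */
        struct work_struct work;
        struct demo_bus *bus;           /* back-pointer, set at init */
};

struct demo_bus {
        struct demo_unsol *unsol;
        int events;
};

static void demo_process_events(struct work_struct *work)
{
        struct demo_unsol *unsol =
                container_of(work, struct demo_unsol, work);
        struct demo_bus *bus = unsol->bus;

        bus->events++;                  /* placeholder for real processing */
}

static int demo_bus_init_unsol(struct demo_bus *bus)
{
        struct demo_unsol *unsol = kzalloc(sizeof(*unsol), GFP_KERNEL);

        if (!unsol)
                return -ENOMEM;
        INIT_WORK(&unsol->work, demo_process_events);
        unsol->bus = bus;               /* old API passed bus as the data arg */
        bus->unsol = unsol;
        return 0;
}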
/*
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 2fbe1d1..8f074c79 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -942,10 +942,11 @@
}
static struct work_struct device_change;
+static struct snd_pmac *device_change_chip;
-static void device_change_handler(void *self)
+static void device_change_handler(struct work_struct *work)
{
- struct snd_pmac *chip = self;
+ struct snd_pmac *chip = device_change_chip;
struct pmac_tumbler *mix;
int headphone, lineout;
@@ -1417,7 +1418,8 @@
chip->resume = tumbler_resume;
#endif
- INIT_WORK(&device_change, device_change_handler, (void *)chip);
+ INIT_WORK(&device_change, device_change_handler);
+ device_change_chip = chip;
#ifdef PMAC_SUPPORT_AUTOMUTE
if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0)