[PATCH] x86-64: Some cleanup and optimization to the processor data area.

- Remove the unused irqrsp field
- Remove pda->me: its only user was the __IRQ_STAT() lvalue hack in
  asm-x86_64/hardirq.h, which is replaced by direct PDA accessors for
  __softirq_pending
- Optimize set_softirq_pending slightly: with __ARCH_SET_SOFTIRQ_PENDING,
  updates to the pending softirq mask become a single gs-relative mov/or
  via write_pda()/or_pda() instead of a read-modify-write through the
  pda->me pointer (see the sketch below)
- Let pda_to_op() encode constant operands as immediates by using the
  "ri" constraint and casting the value to the field's type
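To illustrate the set_softirq_pending point concretely, here is a minimal
userspace sketch (not part of this patch) of the difference between the old
path, which chased the pda->me self-pointer before a read-modify-write, and
the new one, which or's straight into the known slot the way or_pda() does
with "or %0,%%gs:%P1".  It stands in for the PDA with an ordinary global so
it builds with plain GCC on x86; the names fake_pda, or_pending_old and
or_pending_new are invented for the demo.

	#include <stdio.h>

	struct fake_pda {
		struct fake_pda *me;		/* self pointer, as removed by the patch */
		unsigned int __softirq_pending;
	};

	static struct fake_pda fake_pda = { .me = &fake_pda };

	/* Old shape: load the self pointer, then read-modify-write through it. */
	static void or_pending_old(unsigned int mask)
	{
		fake_pda.me->__softirq_pending |= mask;
	}

	/* New shape: one "or" straight into the known slot, mirroring or_pda(). */
	static void or_pending_new(unsigned int mask)
	{
		asm volatile("orl %1,%0"
			     : "+m" (fake_pda.__softirq_pending)
			     : "ri" (mask));
	}

	int main(void)
	{
		or_pending_old(1U << 3);	/* old path: pointer chase + RMW */
		or_pending_new(1U << 5);	/* new path: single or into the slot */
		printf("pending = %#x\n", fake_pda.__softirq_pending);	/* 0x28 */
		return 0;
	}

In the kernel the global is replaced by %gs-relative addressing at a fixed
offset, so the new form needs no pointer load at all, and the "ri" constraint
lets write_pda()/or_pda() encode constant masks as immediates.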
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/arch/x86_64/kernel/asm-offsets.c b/arch/x86_64/kernel/asm-offsets.c
index 35b4c3f..aaa6d38 100644
--- a/arch/x86_64/kernel/asm-offsets.c
+++ b/arch/x86_64/kernel/asm-offsets.c
@@ -39,7 +39,6 @@
ENTRY(kernelstack);
ENTRY(oldrsp);
ENTRY(pcurrent);
- ENTRY(irqrsp);
ENTRY(irqcount);
ENTRY(cpunumber);
ENTRY(irqstackptr);
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index e3ffcac..e8b54dc 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -119,7 +119,6 @@
asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
wrmsrl(MSR_GS_BASE, cpu_pda + cpu);
- pda->me = pda;
pda->cpunumber = cpu;
pda->irqcount = -1;
pda->kernelstack =
diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h
index 27c381f..8661b47 100644
--- a/include/asm-x86_64/hardirq.h
+++ b/include/asm-x86_64/hardirq.h
@@ -9,11 +9,12 @@
#define __ARCH_IRQ_STAT 1
-/* Generate a lvalue for a pda member. Should fix softirq.c instead to use
- special access macros. This would generate better code. */
-#define __IRQ_STAT(cpu,member) (read_pda(me)->member)
+#define local_softirq_pending() read_pda(__softirq_pending)
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#define __ARCH_SET_SOFTIRQ_PENDING 1
+
+#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
+#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index e16d76c..bbf89aa 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -10,10 +10,8 @@
struct x8664_pda {
struct task_struct *pcurrent; /* Current process */
unsigned long data_offset; /* Per cpu data offset from linker address */
- struct x8664_pda *me; /* Pointer to itself */
unsigned long kernelstack; /* top of kernel stack for current */
unsigned long oldrsp; /* user rsp for system call */
- unsigned long irqrsp; /* Old rsp for interrupts. */
int irqcount; /* Irq nesting counter. Starts with -1 */
int cpunumber; /* Logical CPU number */
char *irqstackptr; /* top of irqstack */
@@ -42,13 +40,14 @@
#define pda_offset(field) offsetof(struct x8664_pda, field)
#define pda_to_op(op,field,val) do { \
+ typedef typeof_field(struct x8664_pda, field) T__; \
switch (sizeof_field(struct x8664_pda, field)) { \
case 2: \
-asm volatile(op "w %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
case 4: \
-asm volatile(op "l %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
case 8: \
-asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
default: __bad_pda_field(); \
} \
} while (0)
@@ -58,7 +57,7 @@
* Unfortunately removing them causes all hell to break lose currently.
*/
#define pda_from_op(op,field) ({ \
- typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
+ typeof_field(struct x8664_pda, field) ret__; \
switch (sizeof_field(struct x8664_pda, field)) { \
case 2: \
asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
@@ -75,6 +74,7 @@
#define write_pda(field,val) pda_to_op("mov",field,val)
#define add_pda(field,val) pda_to_op("add",field,val)
#define sub_pda(field,val) pda_to_op("sub",field,val)
+#define or_pda(field,val) pda_to_op("or",field,val)
#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index d99e7ae..0a90205 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -57,6 +57,11 @@
extern void enable_irq(unsigned int irq);
#endif
+#ifndef __ARCH_SET_SOFTIRQ_PENDING
+#define set_softirq_pending(x) (local_softirq_pending() = (x))
+#define or_softirq_pending(x) (local_softirq_pending() |= (x))
+#endif
+
/*
* Temporary defines for UP kernels, until all code gets fixed.
*/
@@ -123,7 +128,7 @@
asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
+#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b4ab6af..f766b2f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -84,7 +84,7 @@
cpu = smp_processor_id();
restart:
/* Reset the pending bitmask before enabling irqs */
- local_softirq_pending() = 0;
+ set_softirq_pending(0);
local_irq_enable();