[PATCH] powerpc: Fix handling of fpscr on 64-bit

The recent merge of fpu.S broke the handling of fpscr for
ARCH=powerpc and CONFIG_PPC64=y.  FP registers could be corrupted,
leading to strange random application crashes.

The confusion arises because the thread_struct has (and requires) a
64-bit area to save the fpscr, since we use load/store double
instructions to get it into and out of the FPU.  However, only the
low 32 bits are actually used, so we want to treat it as a 32-bit
quantity when manipulating its bits, to avoid extra load/stores on
32-bit.  This patch replaces the current definition with a structure
of two 32-bit quantities (pad and val), to make the layout as clear
as possible.  The 'val' field is used when manipulating bits; the
structure itself is used when obtaining the address for loading and
storing the value to/from the FPU.
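
For reference, the thread_struct side of the change (which is not
part of this excerpt) ends up looking roughly like the sketch below;
the comments are annotations, not the verbatim source:

	struct {
		unsigned int pad;	/* high word of the 64-bit save
					 * area; exists only so lfd/stfd
					 * have a full doubleword */
		unsigned int val;	/* low word: the architected FPSCR
					 * bits (big-endian layout) */
	} fpscr;

The THREAD_FPSCR offset used in fpu.S points at the structure as a
whole (for the doubleword loads/stores), while C code touches
fpscr.val.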

While we're at it, consolidate the 4 (!) almost identical versions of
cvt_fd() and cvt_df() (arch/ppc/kernel/misc.S,
arch/ppc64/kernel/misc.S, arch/powerpc/kernel/misc_32.S,
arch/powerpc/kernel/misc_64.S) into a single version in fpu.S.  The
new version takes a pointer to the thread_struct and applies the
correct offset itself, rather than taking a pointer to the fpscr
field directly, again to avoid confusion as to which field to use.
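
In C terms, the consolidated helpers end up with prototypes along
these lines (a sketch inferred from the align.c call sites below;
the actual declarations live in the arch headers, which this patch
excerpt does not show):

	/* Third argument is now the whole thread_struct, not
	 * &thread.fpscr; the asm applies THREAD_FPSCR itself. */
	extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
	extern void cvt_df(double *from, float *to, struct thread_struct *thread);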

Finally, this patch makes ARCH=ppc64 also use the consolidated fpu.S
code, which it previously did not.

Built for G5 (ARCH=ppc64 and ARCH=powerpc), 32-bit powermac (ARCH=ppc
and ARCH=powerpc) and Walnut (ARCH=ppc, CONFIG_MATH_EMULATION=y).
Booted on G5 (ARCH=powerpc), and things that previously fell over
no longer do.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a733347..94cf917 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -29,7 +29,6 @@
 extra-$(CONFIG_FSL_BOOKE)	:= head_fsl_booke.o
 extra-$(CONFIG_8xx)		:= head_8xx.o
 extra-$(CONFIG_PPC64)		+= entry_64.o
-extra-$(CONFIG_PPC_FPU)		+= fpu.o
 extra-y				+= vmlinux.lds
 
 obj-y				+= process.o init_task.o time.o \
@@ -49,7 +48,7 @@
 # stuff used from here for ARCH=ppc or ARCH=ppc64
 obj-$(CONFIG_PPC64)		+= traps.o process.o init_task.o time.o
 
-fpux-$(CONFIG_PPC32)		+= fpu.o
-extra-$(CONFIG_PPC_FPU)		+= $(fpux-y)
 
 endif
+
+extra-$(CONFIG_PPC_FPU)		+= fpu.o
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 563d445..51fd78d 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -48,7 +48,7 @@
 	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
 	SAVE_32FPRS(0, r4)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r4)
+	stfd	fr0,THREAD_FPSCR(r4)
 	LDL	r5,PT_REGS(r4)
 	tophys(r5,r5)
 	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -71,7 +71,7 @@
 	or	r12,r12,r4
 	std	r12,_MSR(r1)
 #endif
-	lfd	fr0,THREAD_FPSCR-4(r5)
+	lfd	fr0,THREAD_FPSCR(r5)
 	mtfsf	0xff,fr0
 	REST_32FPRS(0, r5)
 #ifndef CONFIG_SMP
@@ -104,7 +104,7 @@
 	CMPI	0,r5,0
 	SAVE_32FPRS(0, r3)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r3)
+	stfd	fr0,THREAD_FPSCR(r3)
 	beq	1f
 	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	li	r3,MSR_FP|MSR_FE0|MSR_FE1
@@ -117,3 +117,28 @@
 	STL	r5,OFF(last_task_used_math)(r4)
 #endif /* CONFIG_SMP */
 	blr
+
+/*
+ * These are used in the alignment trap handler when emulating
+ * single-precision loads and stores.
+ * We restore and save the fpscr so the task gets the same result
+ * and exceptions as if the cpu had performed the load or store.
+ */
+
+_GLOBAL(cvt_fd)
+	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
+	mtfsf	0xff,0
+	lfs	0,0(r3)
+	stfd	0,0(r4)
+	mffs	0
+	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
+	blr
+
+_GLOBAL(cvt_df)
+	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
+	mtfsf	0xff,0
+	lfd	0,0(r3)
+	stfs	0,0(r4)
+	mffs	0
+	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
+	blr
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 303229b..3bedb53 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -993,33 +993,6 @@
 	blr
 
 /*
- * These are used in the alignment trap handler when emulating
- * single-precision loads and stores.
- * We restore and save the fpscr so the task gets the same result
- * and exceptions as if the cpu had performed the load or store.
- */
-
-#ifdef CONFIG_PPC_FPU
-_GLOBAL(cvt_fd)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-#endif
-
-/*
  * Create a kernel thread
  *   kernel_thread(fn, arg, flags)
  */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 4775bed..b3e95ff 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -462,25 +462,6 @@
 	sync
 	blr	
 
-
-_GLOBAL(cvt_fd)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
 /*
  * identify_cpu and calls setup_cpu
  * In:	r3 = base of the cpu_specs array
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 047da1a..8f85dab 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -665,7 +665,7 @@
 #endif
 #endif /* CONFIG_SMP */
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
 	memset(current->thread.vr, 0, sizeof(current->thread.vr));
 	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 92452b2..444c3e8 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -403,7 +403,7 @@
 		    ELF_NFPREG * sizeof(double)))
 		return 1;
 
-	current->thread.fpscr = 0;	/* turn off all fp exceptions */
+	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
 
 #ifdef CONFIG_ALTIVEC
 	/* save altivec registers */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f875803..5d638ec 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -549,7 +549,7 @@
 
 	flush_fp_to_thread(current);
 
-	fpscr = current->thread.fpscr;
+	fpscr = current->thread.fpscr.val;
 
 	/* Invalid operation */
 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c
index ff81da9..ab398c4 100644
--- a/arch/ppc/kernel/align.c
+++ b/arch/ppc/kernel/align.c
@@ -375,7 +375,7 @@
 #ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_fd(&data.f, &data.d, &current->thread.fpscr);
+		cvt_fd(&data.f, &data.d, &current->thread);
 		preempt_enable();
 #else
 		return 0;
@@ -385,7 +385,7 @@
 #ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_df(&data.d, &data.f, &current->thread.fpscr);
+		cvt_df(&data.d, &data.f, &current->thread);
 		preempt_enable();
 #else
 		return 0;
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 2350f3e..3056ede 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -968,33 +968,6 @@
 	blr
 
 /*
- * These are used in the alignment trap handler when emulating
- * single-precision loads and stores.
- * We restore and save the fpscr so the task gets the same result
- * and exceptions as if the cpu had performed the load or store.
- */
-
-#ifdef CONFIG_PPC_FPU
-_GLOBAL(cvt_fd)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-#endif
-
-/*
  * Create a kernel thread
  *   kernel_thread(fn, arg, flags)
  */
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index 6d60c40..78ea101 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -542,7 +542,7 @@
 		last_task_used_spe = NULL;
 #endif
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
 	memset(current->thread.vr, 0, sizeof(current->thread.vr));
 	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 5e4bf88..f265b81 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -659,7 +659,7 @@
 			giveup_fpu(current);
 		preempt_enable();
 
-		fpscr = current->thread.fpscr;
+		fpscr = current->thread.fpscr.val;
 		fpscr &= fpscr << 22;	/* mask summary bits with enables */
 		if (fpscr & FPSCR_VX)
 			code = FPE_FLTINV;
diff --git a/arch/ppc/math-emu/sfp-machine.h b/arch/ppc/math-emu/sfp-machine.h
index 686e06d..4b17d83 100644
--- a/arch/ppc/math-emu/sfp-machine.h
+++ b/arch/ppc/math-emu/sfp-machine.h
@@ -166,7 +166,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
-#define __FPU_FPSCR	(current->thread.fpscr)
+#define __FPU_FPSCR	(current->thread.fpscr.val)
 
 /* We only actually write to the destination register
  * if exceptions signalled (if any) will not trap.
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 8cc73cc..b889277 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -197,6 +197,9 @@
 config POWER4
 	def_bool y
 
+config PPC_FPU
+	def_bool y
+
 config POWER4_ONLY
 	bool "Optimize for POWER4"
 	default n
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 4d18bdb..ba59225 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -80,6 +80,7 @@
 CFLAGS += $(call cc-option,-funit-at-a-time)
 
 head-y := arch/ppc64/kernel/head.o
+head-y += arch/powerpc/kernel/fpu.o
 
 libs-y				+= arch/ppc64/lib/
 core-y				+= arch/ppc64/kernel/ arch/powerpc/kernel/
diff --git a/arch/ppc64/kernel/align.c b/arch/ppc64/kernel/align.c
index 330e7ef..256d5b5 100644
--- a/arch/ppc64/kernel/align.c
+++ b/arch/ppc64/kernel/align.c
@@ -313,7 +313,7 @@
 				/* Doing stfs, have to convert to single */
 				preempt_disable();
 				enable_kernel_fp();
-				cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread.fpscr);
+				cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread);
 				disable_kernel_fp();
 				preempt_enable();
 			}
@@ -349,7 +349,7 @@
 				/* Doing lfs, have to convert to double */
 				preempt_disable();
 				enable_kernel_fp();
-				cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread.fpscr);
+				cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread);
 				disable_kernel_fp();
 				preempt_enable();
 			}
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index f58af9c..929f9f4 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -81,7 +81,7 @@
 _GLOBAL(__start)
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
-	b .__start_initialization_multiplatform
+	b	.__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 
@@ -747,6 +747,7 @@
  * any task or sent any task a signal, you should use
  * ret_from_except or ret_from_except_lite instead of this.
  */
+	.globl	fast_exception_return
 fast_exception_return:
 	ld	r12,_MSR(r1)
 	ld	r11,_NIP(r1)
@@ -858,62 +859,6 @@
 	bl	.kernel_fp_unavailable_exception
 	BUG_OPCODE
 
-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-	mfmsr	r5			/* grab the current MSR */
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_math@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save FP state to last_task_used_math's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32FPRS(0, r4)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r4)
-	/* Disable FP for last_task_used_math */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r6,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	ld	r4,THREAD_FPEXC_MODE(r5)
-	ori	r12,r12,MSR_FP
-	or	r12,r12,r4
-	std	r12,_MSR(r1)
-	lfd	fr0,THREAD_FPSCR(r5)
-	mtfsf	0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi	r4,r5,THREAD		/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	b	fast_exception_return
-
 	.align	7
 	.globl altivec_unavailable_common
 altivec_unavailable_common:
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index a33448c..9cae3d5 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -451,25 +451,6 @@
 	sync
 	blr	
 
-
-_GLOBAL(cvt_fd)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
 /*
  * identify_cpu and calls setup_cpu
  * In:	r3 = base of the cpu_specs array
@@ -655,38 +636,6 @@
 	isync
 	blr
 
-/*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
-_GLOBAL(giveup_fpu)
-	mfmsr	r5
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-	cmpdi	0,r3,0
-	beqlr-				/* if no previous owner, done */
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	ld	r5,PT_REGS(r3)
-	cmpdi	0,r5,0
-	SAVE_32FPRS(0, r3)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r3)
-	beq	1f
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r3,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r3		/* disable FP for previous task */
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	ld	r4,last_task_used_math@got(r2)
-	std	r5,0(r4)
-#endif /* CONFIG_SMP */
-	blr
-
 #ifdef CONFIG_ALTIVEC
 
 #if 0 /* this has no callers for now */
diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c
index 347112c..ec9d098 100644
--- a/arch/ppc64/kernel/signal.c
+++ b/arch/ppc64/kernel/signal.c
@@ -133,7 +133,7 @@
 	flush_fp_to_thread(current);
 
 	/* Make sure signal doesn't get spurrious FP exceptions */
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 
 #ifdef CONFIG_ALTIVEC
 	err |= __put_user(v_regs, &sc->v_regs);