[PATCH 2/2 v2] UBUNTU: Fix ARM VFP state corruption due to preemption

Brad Figg brad.figg at canonical.com
Wed Mar 11 18:42:30 UTC 2009


On Wed, Jan 28, 2009 at 01:09:37PM +0000, Catalin Marinas wrote:
 > > BTW, the VFP_bounce() function isn't preemption safe (problems and
 > > suggested fix reported by Lineo in private e-mails but I didn't have
 > > time to post them to the list yet).

 We've also observed that ARM VFP state can be corrupted during VFP
 exception
 handling when PREEMPT is enabled.  The exact conditions are difficult
 to reproduce but appear to occur during VFP exception handling when a
 task causes a VFP exception which is handled via VFP_bounce and is then
 preempted by another task which in turn causes a second VFP
 exception.  Since the VFP_bounce code is not preempt safe, VFP state
 then becomes corrupt.  In order to prevent preemption from occurring
 while handling a VFP exception, this patch disables preemption while
 handling VFP exceptions.

 Signed-off-by: George G. Davis <gdavis at mvista.com>
 Signed-off-by: Brad Figg <brad.figg at canonical.com>

Signed-off-by: Brad Figg <brad.figg at canonical.com>
---
 arch/arm/vfp/entry.S     |   21 +++++++++++++++++++--
 arch/arm/vfp/vfphw.S     |    6 ++++++
 arch/arm/vfp/vfpmodule.c |    6 ++++--
 3 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index ba592a9..365aa37 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -18,18 +18,29 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/asm-offsets.h>
-#include <asm/assembler.h>
+#include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
+#include "../kernel/entry-header.S"

 ENTRY(do_vfp)
+#ifdef CONFIG_PREEMPT
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	add	r11, r4, #1		@ increment it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 	enable_irq
  	ldr	r4, .LCvfp
 	ldr	r11, [r10, #TI_CPU]	@ CPU number
-	add	r10, r10, #TI_VFPSTATE	@ r10 = workspace
 	ldr	pc, [r4]		@ call VFP entry point
 ENDPROC(do_vfp)

 ENTRY(vfp_null_entry)
+#ifdef CONFIG_PREEMPT
+	get_thread_info	r10
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	sub	r11, r4, #1		@ decrement it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 	mov	pc, lr
 ENDPROC(vfp_null_entry)

@@ -41,6 +52,12 @@ ENDPROC(vfp_null_entry)

 	__INIT
 ENTRY(vfp_testing_entry)
+#ifdef CONFIG_PREEMPT
+	get_thread_info	r10
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	sub	r11, r4, #1		@ decrement it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 	ldr	r0, VFP_arch_address
 	str	r5, [r0]		@ known non-zero value
 	mov	pc, r9			@ we have handled the fault
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 2da59a3..565a5b7 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -153,6 +153,12 @@ look_for_VFP_exceptions:
 	@ not recognised by VFP

 	DBGSTR	"not VFP"
+#ifdef CONFIG_PREEMPT
+	get_thread_info	r10
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	sub	r11, r4, #1		@ decrement it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 	mov	pc, lr

 process_exception:
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 606283f..385f2fb 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -268,7 +268,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 		 * on VFP subarch 1.
 		 */
 		 vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
-		 return;
+		 goto exit;
 	}

 	/*
@@ -299,7 +299,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
 	 */
 	if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
-		return;
+		goto exit;

 	/*
 	 * The barrier() here prevents fpinst2 being read
@@ -312,6 +312,8 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
 	if (exceptions)
 		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ exit:
+	preempt_enable();
 }

 static void vfp_enable(void *unused)
-- 
1.6.1.3




More information about the kernel-team mailing list