[ 3.8.y.z extended stable ] Patch "ARM: move vector stubs" has been added to staging queue

Kamal Mostafa kamal at canonical.com
Thu Aug 15 01:05:28 UTC 2013


This is a note to let you know that I have just added a patch titled

    ARM: move vector stubs

to the linux-3.8.y-queue branch of the 3.8.y.z extended stable tree 
which can be found at:

 http://kernel.ubuntu.com/git?p=ubuntu/linux.git;a=shortlog;h=refs/heads/linux-3.8.y-queue

This patch is scheduled to be released in version 3.8.13.7.

If you, or anyone else, feels it should not be added to this tree, please 
reply to this email.

For more information about the 3.8.y.z tree, see
https://wiki.ubuntu.com/Kernel/Dev/ExtendedStable

Thanks.
-Kamal

------

From 9f014879fa17c7ad320e18723dbe3a8538ef7767 Mon Sep 17 00:00:00 2001
From: Russell King <rmk+kernel at arm.linux.org.uk>
Date: Thu, 4 Jul 2013 11:40:32 +0100
Subject: ARM: move vector stubs

commit 19accfd373847ac3d10623c5d20f948846299741 upstream.

Move the machine vector stubs into the page above the vector page,
which we can prevent from being visible to userspace.  Also move
the reset stub, and place the swi vector at a location that the
'ldr' in the vector table can reach.

This hides pointers into the kernel which could give valuable
information to attackers, and reduces the number of exploitable
instructions at a fixed address.
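
As a rough illustration (not part of the commit), and assuming the usual
high-vectors base of 0xffff0000, the sketch below checks why the vector
table's pc-relative 'ldr' can still reach the swi literal once that literal
becomes the first word of the stubs page.  The macro names are invented for
this example; the addresses themselves come from the diff below.

#include <stdio.h>

/* Invented macro names, for illustration only. */
#define VECTORS_HIGH_BASE  0xffff0000UL  /* vector table + kuser helpers (user-visible page) */
#define STUBS_HIGH_BASE    0xffff1000UL  /* vector stubs (kernel-only page after this change) */
#define SWI_VECTOR_ADDR    (VECTORS_HIGH_BASE + 0x8)  /* third entry of the 8-word table */

int main(void)
{
	/* In ARM mode the pc reads as the current instruction address + 8.
	 * The swi entry is a pc-relative ldr, and its literal (the
	 * ".word vector_swi" that must be the first word of the stubs)
	 * now sits at the start of the second page.                      */
	unsigned long pc      = SWI_VECTOR_ADDR + 8;
	unsigned long literal = STUBS_HIGH_BASE;

	printf("ldr literal offset: %lu bytes (an ARM ldr reaches at most 4095)\n",
	       literal - pc);
	return 0;
}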

Acked-by: Nicolas Pitre <nico at linaro.org>
Signed-off-by: Russell King <rmk+kernel at arm.linux.org.uk>
Signed-off-by: Kamal Mostafa <kamal at canonical.com>
---
 arch/arm/Kconfig             |  3 ++-
 arch/arm/kernel/entry-armv.S | 50 +++++++++++++++++++++-----------------------
 arch/arm/kernel/traps.c      |  4 ++--
 arch/arm/mm/mmu.c            | 10 ++++++++-
 4 files changed, 37 insertions(+), 30 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 6dd9069..5c037b9 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -189,7 +189,8 @@ config VECTORS_BASE
 	default DRAM_BASE if REMAP_VECTORS_TO_RAM
 	default 0x00000000
 	help
-	  The base address of exception vectors.
+	  The base address of exception vectors.  This must be two pages
+	  in size.

 config ARM_PATCH_PHYS_VIRT
 	bool "Patch physical to virtual translations at runtime" if EMBEDDED
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 9c80c1d..a4af8e8 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -986,9 +986,9 @@ __kuser_helper_end:
 /*
  * Vector stubs.
  *
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's.  Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's.  Note that this code must not exceed
+ * a page size.
  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -1037,6 +1037,15 @@ ENDPROC(vector_\name)

 	.globl	__stubs_start
 __stubs_start:
+	@ This must be the first word
+	.word	vector_swi
+
+vector_rst:
+ ARM(	swi	SYS_ERROR0	)
+ THUMB(	svc	#0		)
+ THUMB(	nop			)
+	b	vector_und
+
 /*
  * Interrupt dispatcher
  */
@@ -1131,6 +1140,16 @@ __stubs_start:
 	.align	5

 /*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+	b	vector_addrexcptn
+
+/*=============================================================================
  * Undefined FIQs
  *-----------------------------------------------------------------------------
  * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1143,35 +1162,14 @@ __stubs_start:
 vector_fiq:
 	subs	pc, lr, #4

-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
-
-vector_addrexcptn:
-	b	vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
-	.align	5
-
-.LCvswi:
-	.word	vector_swi
-
 	.globl	__stubs_end
 __stubs_end:

-	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
+	.equ	stubs_offset, __vectors_start + 0x1000 - __stubs_start

 	.globl	__vectors_start
 __vectors_start:
- ARM(	swi	SYS_ERROR0	)
- THUMB(	svc	#0		)
- THUMB(	nop			)
+	W(b)	vector_rst + stubs_offset
 	W(b)	vector_und + stubs_offset
 	W(ldr)	pc, .LCvswi + stubs_offset
 	W(b)	vector_pabt + stubs_offset
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index a4f84b0..366aa93 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -843,7 +843,7 @@ void __init early_trap_init(void *vectors_base)
 	 * are visible to the instruction stream.
 	 */
 	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
-	memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
+	memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
 	memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

 	/*
@@ -858,6 +858,6 @@ void __init early_trap_init(void *vectors_base)
 	memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
 	       sigreturn_codes, sizeof(sigreturn_codes));

-	flush_icache_range(vectors, vectors + PAGE_SIZE);
+	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 }
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ce328c7..458b6f4 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1114,7 +1114,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors = early_alloc(PAGE_SIZE * 2);

 	early_trap_init(vectors);

@@ -1164,10 +1164,18 @@ static void __init devicemaps_init(struct machine_desc *mdesc)

 	if (!vectors_high()) {
 		map.virtual = 0;
+		map.length = PAGE_SIZE * 2;
 		map.type = MT_LOW_VECTORS;
 		create_mapping(&map);
 	}

+	/* Now create a kernel read-only mapping */
+	map.pfn += 1;
+	map.virtual = 0xffff0000 + PAGE_SIZE;
+	map.length = PAGE_SIZE;
+	map.type = MT_LOW_VECTORS;
+	create_mapping(&map);
+
 	/*
 	 * Ask the machine support to map in the statically mapped devices.
 	 */
--
1.8.1.2
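
As a rough, non-authoritative sketch of the net effect of the traps.c and
mmu.c hunks above: devicemaps_init() now allocates two pages, and
early_trap_init() copies the vector table and kuser helpers into page 0 and
the stubs into page 1.  The function below and the fake section sizes are
invented stand-ins for illustration only.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 0x1000UL

/* Invented stand-ins for the linker symbols used by arch/arm/kernel/traps.c
 * (__vectors_start/__vectors_end, __stubs_start/__stubs_end,
 * __kuser_helper_start/__kuser_helper_end); sizes are arbitrary examples. */
static char fake_vectors[0x20];   /* the 8-entry, 4-bytes-per-entry table */
static char fake_stubs[0x300];
static char fake_kuser[0x1e0];

/* devicemaps_init() now does early_alloc(PAGE_SIZE * 2) for this region */
static char vectors_pages[PAGE_SIZE * 2];

int main(void)
{
	char *vectors = vectors_pages;
	size_t kuser_sz = sizeof(fake_kuser);

	/* page 0: the vector table, mapped user-visible at 0xffff0000 */
	memcpy(vectors, fake_vectors, sizeof(fake_vectors));
	/* page 1: the stubs, now a full page above the vectors (was +0x200) */
	memcpy(vectors + 0x1000, fake_stubs, sizeof(fake_stubs));
	/* kuser helpers still end exactly at the top of page 0 */
	memcpy(vectors + 0x1000 - kuser_sz, fake_kuser, kuser_sz);

	/* the real early_trap_init() then flushes both pages:
	 * flush_icache_range(vectors, vectors + PAGE_SIZE * 2); */
	printf("stubs land at offset 0x1000 of a %lu-byte region\n",
	       (unsigned long)sizeof(vectors_pages));
	return 0;
}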
