

Make sure we never access anything in the kernel mapping while
doing the prefetch workaround checks on x86-64.  If the fault came
from user mode (CS RPL != 0) but the reported RIP lies at or above
TASK_SIZE, bail out of is_prefetch() before the instruction scan
dereferences a kernel address.

Originally suggested by Jamie Lokier.
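
For illustration only, a minimal user-space sketch of the guard's
logic (the rip_safe_to_scan() helper, its task_size parameter, and
the split value used in main() are assumptions invented for this
example; the kernel checks regs->cs, regs->rip and TASK_SIZE
directly, as in the hunk below):

	/*
	 * Sketch only: mirrors the guard added below.  A fault taken
	 * in user mode (CS RPL != 0) whose RIP is at or above the
	 * user/kernel boundary must not be instruction-scanned,
	 * because fetching the opcodes would read kernel memory on
	 * the user's behalf.
	 */
	#include <stdio.h>

	static int rip_safe_to_scan(unsigned long cs, unsigned long rip,
				    unsigned long task_size)
	{
		if ((cs & 3) != 0 && rip >= task_size)
			return 0;	/* user-mode fault, kernel RIP: refuse */
		return 1;
	}

	int main(void)
	{
		unsigned long task_size = 0x800000000000UL; /* assumed split */

		/* user CS (RPL 3), user RIP: scanning is fine */
		printf("%d\n", rip_safe_to_scan(0x33, 0x400000UL, task_size));
		/* user CS, kernel RIP: refused by the guard */
		printf("%d\n", rip_safe_to_scan(0x33, ~0UL, task_size));
		/* kernel CS (RPL 0): kernel faults may scan kernel text */
		printf("%d\n", rip_safe_to_scan(0x10, ~0UL, task_size));
		return 0;
	}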



 arch/x86_64/mm/fault.c |    3 +++
 1 file changed, 3 insertions(+)

diff -puN arch/x86_64/mm/fault.c~x86_64-05 arch/x86_64/mm/fault.c
--- 25/arch/x86_64/mm/fault.c~x86_64-05	2003-12-23 23:55:40.000000000 -0800
+++ 25-akpm/arch/x86_64/mm/fault.c	2003-12-23 23:55:40.000000000 -0800
@@ -73,6 +73,9 @@ static int is_prefetch(struct pt_regs *r
 	if (regs->cs & (1<<2))
 		return 0;
 
+	if ((regs->cs & 3) != 0 && regs->rip >= TASK_SIZE)
+		return 0;
+
 	while (scan_more && instr < max_instr) { 
 		unsigned char opcode;
 		unsigned char instr_hi;

_
