

Patch from: Zwane Mwaikambo <zwane@holomorphy.com>

smp_call_function() may not be called with locks held, and it takes and
releases call_lock internally; the spin_unlock() is a preemption point.
Therefore we cannot guarantee that we will still be on the same processor
when we reach do_flush_tlb_all_local().

void flush_tlb_all(void)
{
	smp_call_function (flush_tlb_all_ipi,0,1,1);

	do_flush_tlb_all_local();
}

...

smp_call_function()
{
	spin_lock(call_lock);
	...
	spin_unlock(call_lock);
	<preemption point>
}

...

do_flush_tlb_all_local() - possibly no longer executing on the same
processor.
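
The patch below simply brackets this sequence with a
preempt_disable()/preempt_enable() pair so the task cannot migrate between
the IPI broadcast and the local flush.  The resulting code, annotated:

void flush_tlb_all(void)
{
	preempt_disable();		/* pin ourselves to this CPU */

	/* ask all other online CPUs to flush their TLBs */
	smp_call_function (flush_tlb_all_ipi,0,1,1);

	/* still on the same CPU, so this flushes the right TLB */
	do_flush_tlb_all_local();

	preempt_enable();
}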




 i386/kernel/smp.c |    2 ++
 1 files changed, 2 insertions(+)

diff -puN arch/i386/kernel/smp.c~flush_tlb_all-preempt-safety arch/i386/kernel/smp.c
--- 25/arch/i386/kernel/smp.c~flush_tlb_all-preempt-safety	2003-02-14 18:23:54.000000000 -0800
+++ 25-akpm/arch/i386/kernel/smp.c	2003-02-14 18:23:54.000000000 -0800
@@ -452,9 +452,11 @@ static void flush_tlb_all_ipi(void* info
 
 void flush_tlb_all(void)
 {
+	preempt_disable();
 	smp_call_function (flush_tlb_all_ipi,0,1,1);
 
 	do_flush_tlb_all_local();
+	preempt_enable();
 }
 
 /*

_
