
From: Ingo Molnar <mingo@elte.hu>

add more empty sched_cacheflush() definitions. Architectures should
fill them in.
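
For reference, a minimal sketch of what a filled-in version could look
like on a CPU that has a whole-cache write-back-and-invalidate
instruction (wbinvd on x86 is one such instruction; whether it is the
right tool on a given architecture is an assumption here, not something
this patch decides):

	/*
	 * Sketch only: flush as much of the local CPU's caches as the
	 * architecture allows.  wbinvd writes back and invalidates all
	 * cache levels on x86-style CPUs.
	 */
	static inline void sched_cacheflush(void)
	{
		__asm__ __volatile__ ("wbinvd": : :"memory");
	}

Architectures without such an instruction can approximate the effect,
e.g. by reading a buffer larger than the last-level cache.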

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 include/asm-alpha/system.h   |   10 ++++++++++
 include/asm-arm/system.h     |   10 ++++++++++
 include/asm-arm26/system.h   |   10 ++++++++++
 include/asm-m32r/system.h    |   10 ++++++++++
 include/asm-mips/system.h    |   10 ++++++++++
 include/asm-parisc/system.h  |    9 +++++++++
 include/asm-ppc/system.h     |   10 ++++++++++
 include/asm-s390/system.h    |   10 ++++++++++
 include/asm-sh/system.h      |   10 ++++++++++
 include/asm-sparc/system.h   |   10 ++++++++++
 include/asm-sparc64/system.h |   10 ++++++++++
 11 files changed, 109 insertions(+)
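
For context, the scheduler's migration-cost autodetection (not part of
this patch) calls sched_cacheflush() so that each timed measurement
starts cache-cold.  A simplified, hypothetical sketch of that usage:
the function and variable names below are illustrative, not the actual
kernel/sched.c code.

	/*
	 * Illustrative only: populate the cache with a working set,
	 * flush it via sched_cacheflush(), then time how long
	 * re-touching the working set takes.
	 */
	static unsigned long long measure_refill_cost(void *buf, unsigned long size)
	{
		volatile char *p = buf;
		unsigned long long t0, t1;
		unsigned long i;

		for (i = 0; i < size; i++)	/* warm up the cache */
			(void)p[i];

		sched_cacheflush();		/* start the timed run cache-cold */

		t0 = sched_clock();
		for (i = 0; i < size; i++)	/* timed cache refill */
			(void)p[i];
		t1 = sched_clock();

		return t1 - t0;
	}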

diff -puN include/asm-alpha/system.h~sched-add-cacheflush-asm-2 include/asm-alpha/system.h
--- 25/include/asm-alpha/system.h~sched-add-cacheflush-asm-2	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-alpha/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -139,6 +139,16 @@ extern void halt(void) __attribute__((no
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define mb() \
 __asm__ __volatile__("mb": : :"memory")
 
diff -puN include/asm-arm26/system.h~sched-add-cacheflush-asm-2 include/asm-arm26/system.h
--- 25/include/asm-arm26/system.h~sched-add-cacheflush-asm-2	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-arm26/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -115,6 +115,16 @@ do {									\
 } while (0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * Save the current interrupt enable state & disable IRQs
  */
 #define local_irq_save(x)                               \
diff -puN include/asm-arm/system.h~sched-add-cacheflush-asm-2 include/asm-arm/system.h
--- 25/include/asm-arm/system.h~sched-add-cacheflush-asm-2	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-arm/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -165,6 +165,16 @@ do {									\
 } while (0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * CPU interrupt mask handling.
  */
 #if __LINUX_ARM_ARCH__ >= 6
diff -puN include/asm-m32r/system.h~sched-add-cacheflush-asm-2 include/asm-m32r/system.h
--- 25/include/asm-m32r/system.h~sched-add-cacheflush-asm-2	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-m32r/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -67,6 +67,16 @@
 	last = __last; \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102)
 #define local_irq_enable() \
diff -puN include/asm-mips/system.h~sched-add-cacheflush-asm-2 include/asm-mips/system.h
--- 25/include/asm-mips/system.h~sched-add-cacheflush-asm-2	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-mips/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -159,6 +159,16 @@ do { \
 	(last) = resume(prev, next, next->thread_info); \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define ROT_IN_PIECES							\
 	"	.set	noreorder	\n"				\
 	"	.set	reorder		\n"
diff -puN include/asm-parisc/system.h~sched-add-cacheflush-asm-2 include/asm-parisc/system.h
--- 25/include/asm-parisc/system.h~sched-add-cacheflush-asm-2	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-parisc/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(st
 	(last) = _switch_to(prev, next);			\
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 
 /* interrupt control */
diff -puN include/asm-ppc/system.h~sched-add-cacheflush-asm-2 include/asm-ppc/system.h
--- 25/include/asm-ppc/system.h~sched-add-cacheflush-asm-2	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-ppc/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -95,6 +95,16 @@ extern struct task_struct *__switch_to(s
 	struct task_struct *);
 #define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
diff -puN include/asm-s390/system.h~sched-add-cacheflush-asm-2 include/asm-s390/system.h
--- 25/include/asm-s390/system.h~sched-add-cacheflush-asm-2	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-s390/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -104,6 +104,16 @@ static inline void restore_access_regs(u
 	prev = __switch_to(prev,next);					     \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_user_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
diff -puN include/asm-sh/system.h~sched-add-cacheflush-asm-2 include/asm-sh/system.h
--- 25/include/asm-sh/system.h~sched-add-cacheflush-asm-2	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-sh/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -57,6 +57,16 @@
 	last = __last;							\
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define nop() __asm__ __volatile__ ("nop")
 
 
diff -puN include/asm-sparc64/system.h~sched-add-cacheflush-asm-2 include/asm-sparc64/system.h
--- 25/include/asm-sparc64/system.h~sched-add-cacheflush-asm-2	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-sparc64/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -220,6 +220,16 @@ do {	if (test_thread_flag(TIF_PERFCTR)) 
 	}								\
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
diff -puN include/asm-sparc/system.h~sched-add-cacheflush-asm-2 include/asm-sparc/system.h
--- 25/include/asm-sparc/system.h~sched-add-cacheflush-asm-2	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-sparc/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -167,6 +167,16 @@ extern void fpsave(unsigned long *fpregs
 	} while(0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * Changing the IRQ level on the Sparc.
  */
 extern void local_irq_restore(unsigned long);
_
