
From: Christoph Lameter <clameter@sgi.com>

Changelog
        * Provide atomic pte operations for s390: add ptep_cmpxchg and
          ptep_xchg_flush, add pgd_test_and_populate and
          pmd_test_and_populate, and define __HAVE_ARCH_ATOMIC_TABLE_OPS
          so that the generic page fault code can use them.
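As a rough sketch of the intended use (illustrative only, not part of
this patch): with __HAVE_ARCH_ATOMIC_TABLE_OPS defined, a fault handler
can update a pte without holding the page_table_lock by reading the old
value and letting ptep_cmpxchg detect a racing update.  The helper name
install_pte below is made up for illustration:

	#include <linux/mm.h>
	#include <asm/pgtable.h>

	/*
	 * Illustrative sketch, not part of this patch: try to install
	 * a pte without taking the page_table_lock.  ptep_cmpxchg
	 * succeeds (returns nonzero) only if the pte still holds the
	 * value we read earlier.
	 */
	static int install_pte(struct vm_area_struct *vma,
			       unsigned long address, pte_t *ptep,
			       pte_t entry)
	{
		pte_t old = *ptep;

		if (!pte_none(old))
			return 0;	/* somebody else mapped it first */

		return ptep_cmpxchg(vma, address, ptep, old, entry);
	}

On failure the caller simply returns; the fault is retried and sees the
pte installed by the other processor.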

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/include/asm-s390/pgalloc.h |   16 ++++++++++++++++
 25-akpm/include/asm-s390/pgtable.h |   18 ++++++++++++++++++
 2 files changed, 34 insertions(+)

diff -puN include/asm-s390/pgalloc.h~page-fault-scalability-patch-v11-s390-atomic-pte-operations include/asm-s390/pgalloc.h
--- 25/include/asm-s390/pgalloc.h~page-fault-scalability-patch-v11-s390-atomic-pte-operations	Thu Dec  2 12:41:02 2004
+++ 25-akpm/include/asm-s390/pgalloc.h	Thu Dec  2 12:41:02 2004
@@ -78,6 +78,10 @@ static inline void pgd_populate(struct m
 	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
 }
 
+static inline int pgd_test_and_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+	return cmpxchg(pgd, _PAGE_TABLE_INV, _PGD_ENTRY | __pa(pmd)) == _PAGE_TABLE_INV;
+}
 #endif /* __s390x__ */
 
 static inline void 
@@ -100,6 +104,18 @@ pmd_populate(struct mm_struct *mm, pmd_t
 	pmd_populate_kernel(mm, pmd, (pte_t *)((page-mem_map) << PAGE_SHIFT));
 }
 
+static inline int
+pmd_test_and_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
+{
+	int rc;
+	spin_lock(&mm->page_table_lock);
+
+	rc = (pmd_val(*pmd) == _PAGE_INVALID_EMPTY);
+	if (rc) pmd_populate(mm, pmd, page);
+	spin_unlock(&mm->page_table_lock);
+	return rc;
+}
+
 /*
  * page table entry allocation/free routines.
  */
diff -puN include/asm-s390/pgtable.h~page-fault-scalability-patch-v11-s390-atomic-pte-operations include/asm-s390/pgtable.h
--- 25/include/asm-s390/pgtable.h~page-fault-scalability-patch-v11-s390-atomic-pte-operations	Thu Dec  2 12:41:02 2004
+++ 25-akpm/include/asm-s390/pgtable.h	Thu Dec  2 12:41:02 2004
@@ -574,6 +574,15 @@ ptep_clear_flush(struct vm_area_struct *
 	return pte;
 }
 
+#define ptep_xchg_flush(__vma, __address, __ptep, __pteval)            \
+({                                                                     \
+	/* Clear and flush the old pte, then install the new one. */   \
+	pte_t __pte;                                                    \
+	__pte = ptep_clear_flush(__vma, __address, __ptep);             \
+	set_pte(__ptep, __pteval);                                      \
+	__pte;                                                          \
+})
+
 static inline void ptep_set_wrprotect(pte_t *ptep)
 {
 	pte_t old_pte = *ptep;
@@ -782,6 +791,14 @@ extern inline pte_t mk_swap_pte(unsigned
 
 #define kern_addr_valid(addr)   (1)
 
+/* Atomic PTE operations */
+#define __HAVE_ARCH_ATOMIC_TABLE_OPS
+
+static inline int ptep_cmpxchg(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t oldval, pte_t newval)
+{
+	return cmpxchg(ptep, pte_val(oldval), pte_val(newval)) == pte_val(oldval);
+}
+
 /*
  * No page table caches to initialise
  */
@@ -795,6 +812,7 @@ extern inline pte_t mk_swap_pte(unsigned
 #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+#define __HAVE_ARCH_PTEP_XCHG_FLUSH
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTEP_MKDIRTY
 #define __HAVE_ARCH_PTE_SAME
_
