
From: David Gibson <david@gibson.dropbear.id.au>

On PowerPC64 the "low" hugepage range (at 2-3G, for use by 32-bit processes)
needs to be activated before it can be used.  hugetlb_get_unmapped_area()
automatically activates the range for non-MAP_FIXED hugepage mappings in
32-bit processes.  However, a MAP_FIXED mmap(), even at a suitable address,
will fail if the range is not already activated, because there is no
suitable callback from the generic MAP_FIXED code path into the arch
code.

This patch fixes the problem by adding a prepare_hugepage_range() hook,
which get_unmapped_area() calls for MAP_FIXED hugepage requests in place of
is_aligned_hugepage_range().  The PPC64 implementation activates the low
range when needed, allowing MAP_FIXED hugepage mappings in the 2-3G range.
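
For illustration only (not part of the patch): a minimal sketch of the
userspace case this enables, built as a 32-bit binary.  The hugetlbfs
mount point, file name, and 16MB hugepage size are assumptions made for
the example.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(16UL * 1024 * 1024)	/* assumed PPC64 hugepage size */
#define LOW_HPAGE_ADDR	((void *)0x80000000UL)	/* 2G, start of the low range */

int main(void)
{
	/* /mnt/huge is an assumed hugetlbfs mount point */
	int fd = open("/mnt/huge/testfile", O_CREAT | O_RDWR, 0600);
	char *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Without this patch, the MAP_FIXED request below fails with
	 * EINVAL unless a non-MAP_FIXED hugepage mapping has already
	 * activated the 2-3G range for this process.
	 */
	p = mmap(LOW_HPAGE_ADDR, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	p[0] = 1;			/* touch to instantiate the hugepage */
	munmap(p, HPAGE_SIZE);
	close(fd);
	return 0;
}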


---

 25-akpm/arch/ppc64/mm/hugetlbpage.c |   10 ++++++++++
 25-akpm/include/asm-ppc64/page.h    |   13 ++++++++++---
 25-akpm/include/linux/hugetlb.h     |    8 ++++++++
 25-akpm/mm/mmap.c                   |    5 +++--
 4 files changed, 31 insertions(+), 5 deletions(-)

diff -puN arch/ppc64/mm/hugetlbpage.c~ppc64-MAP_FIXED-hugetlb-mappings arch/ppc64/mm/hugetlbpage.c
--- 25/arch/ppc64/mm/hugetlbpage.c~ppc64-MAP_FIXED-hugetlb-mappings	2004-03-31 22:55:03.030103568 -0800
+++ 25-akpm/arch/ppc64/mm/hugetlbpage.c	2004-03-31 22:55:03.041101896 -0800
@@ -295,6 +295,16 @@ static int open_32bit_htlbpage_range(str
 	return 0;
 }
 
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (is_hugepage_high_range(addr, len))
+		return 0;
+	else if (is_hugepage_low_range(addr, len))
+		return open_32bit_htlbpage_range(current->mm);
+
+	return -EINVAL;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma)
 {
diff -puN include/asm-ppc64/page.h~ppc64-MAP_FIXED-hugetlb-mappings include/asm-ppc64/page.h
--- 25/include/asm-ppc64/page.h~ppc64-MAP_FIXED-hugetlb-mappings	2004-03-31 22:55:03.031103416 -0800
+++ 25-akpm/include/asm-ppc64/page.h	2004-03-31 22:55:03.038102352 -0800
@@ -38,10 +38,17 @@
 #define TASK_HPAGE_END_32	(0xc0000000UL)
 
 #define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+
+#define is_hugepage_low_range(addr, len) \
+	(((addr) > (TASK_HPAGE_BASE_32-(len))) && ((addr) < TASK_HPAGE_END_32))
+#define is_hugepage_high_range(addr, len) \
+	(((addr) > (TASK_HPAGE_BASE-(len))) && ((addr) < TASK_HPAGE_END))
+
 #define is_hugepage_only_range(addr, len) \
-	( ((addr > (TASK_HPAGE_BASE-len)) && (addr < TASK_HPAGE_END)) || \
-	  (current->mm->context.low_hpages && \
-	   (addr > (TASK_HPAGE_BASE_32-len)) && (addr < TASK_HPAGE_END_32)) )
+	(is_hugepage_high_range((addr), (len)) || \
+	 (current->mm->context.low_hpages \
+	  && is_hugepage_low_range((addr), (len))))
 #define hugetlb_free_pgtables free_pgtables
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 
diff -puN include/linux/hugetlb.h~ppc64-MAP_FIXED-hugetlb-mappings include/linux/hugetlb.h
--- 25/include/linux/hugetlb.h~ppc64-MAP_FIXED-hugetlb-mappings	2004-03-31 22:55:03.032103264 -0800
+++ 25-akpm/include/linux/hugetlb.h	2004-03-31 22:55:03.039102200 -0800
@@ -42,6 +42,13 @@ mark_mm_hugetlb(struct mm_struct *mm, st
 #define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
 #endif
 
+#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+#define prepare_hugepage_range(addr, len)	\
+	is_aligned_hugepage_range(addr, len)
+#else
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
+#endif
+
 #else /* !CONFIG_HUGETLB_PAGE */
 
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
@@ -62,6 +69,7 @@ static inline int is_vm_hugetlb_page(str
 #define mark_mm_hugetlb(mm, vma)		do { } while (0)
 #define follow_huge_pmd(mm, addr, pmd, write)	0
 #define is_aligned_hugepage_range(addr, len)	0
+#define prepare_hugepage_range(addr, len)	(-EINVAL)
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(addr, len)	0
 #define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
diff -puN mm/mmap.c~ppc64-MAP_FIXED-hugetlb-mappings mm/mmap.c
--- 25/mm/mmap.c~ppc64-MAP_FIXED-hugetlb-mappings	2004-03-31 22:55:03.034102960 -0800
+++ 25-akpm/mm/mmap.c	2004-03-31 22:55:03.040102048 -0800
@@ -807,9 +807,10 @@ get_unmapped_area(struct file *file, uns
 			return -EINVAL;
 		if (file && is_file_hugepages(file))  {
 			/*
-			 * Make sure that addr and length are properly aligned.
+			 * Check if the given range is hugepage aligned, and
+			 * can be made suitable for hugepages.
 			 */
-			ret = is_aligned_hugepage_range(addr, len);
+			ret = prepare_hugepage_range(addr, len);
 		} else {
 			/*
 			 * Ensure that a normal request is not falling in a

