
From: Rajesh Venkatasubramanian <vrajesh@eecs.umich.edu>

Don't dereference the pte pointer after pte_unmap() has kunmapped the memory it
points at: install_page() and install_file_pte() were still reading *pte for
update_mmu_cache() after the unmap.
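
The failing pattern, roughly (a minimal sketch, not the literal code; with
CONFIG_HIGHPTE the page-table page is kmapped by pte_alloc_map()/pte_offset_map()
and kunmapped again by pte_unmap(), so the pointer is dead afterwards):

	pte = pte_alloc_map(mm, pmd, addr);	/* kmaps the pte page */
	...
	set_pte(pte, mk_pte(page, prot));
	pte_unmap(pte);				/* kunmaps it: pte now dangles */
	update_mmu_cache(vma, addr, *pte);	/* deref of unmapped memory */

The fix below caches the pte value in a local pte_t before the pte_unmap() and
passes that copy to update_mmu_cache().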



 25-akpm/mm/fremap.c |    8 ++++++--
 1 files changed, 6 insertions(+), 2 deletions(-)

diff -puN mm/fremap.c~install_page-use-after-unmap-fix mm/fremap.c
--- 25/mm/fremap.c~install_page-use-after-unmap-fix	Fri Sep  5 11:35:20 2003
+++ 25-akpm/mm/fremap.c	Fri Sep  5 11:35:20 2003
@@ -61,6 +61,7 @@ int install_page(struct mm_struct *mm, s
 	pte_t *pte;
 	pgd_t *pgd;
 	pmd_t *pmd;
+	pte_t pte_val;
 	struct pte_chain *pte_chain;
 
 	pte_chain = pte_chain_alloc(GFP_KERNEL);
@@ -83,10 +84,11 @@ int install_page(struct mm_struct *mm, s
 	flush_icache_page(vma, page);
 	set_pte(pte, mk_pte(page, prot));
 	pte_chain = page_add_rmap(page, pte, pte_chain);
+	pte_val = *pte;
 	pte_unmap(pte);
 	if (flush)
 		flush_tlb_page(vma, addr);
-	update_mmu_cache(vma, addr, *pte);
+	update_mmu_cache(vma, addr, pte_val);
 	spin_unlock(&mm->page_table_lock);
 	pte_chain_free(pte_chain);
 	return 0;
@@ -111,6 +113,7 @@ int install_file_pte(struct mm_struct *m
 	pte_t *pte;
 	pgd_t *pgd;
 	pmd_t *pmd;
+	pte_t pte_val;
 
 	pgd = pgd_offset(mm, addr);
 	spin_lock(&mm->page_table_lock);
@@ -126,10 +129,11 @@ int install_file_pte(struct mm_struct *m
 	flush = zap_pte(mm, vma, addr, pte);
 
 	set_pte(pte, pgoff_to_pte(pgoff));
+	pte_val = *pte;
 	pte_unmap(pte);
 	if (flush)
 		flush_tlb_page(vma, addr);
-	update_mmu_cache(vma, addr, *pte);
+	update_mmu_cache(vma, addr, pte_val);
 	spin_unlock(&mm->page_table_lock);
 	return 0;
 

_
