

Patch from Christoph Hellwig <hch@sgi.com>

A bunch of minor fixes are needed to allow the swap code to be disabled on
systems with an MMU.
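
The interesting part is in include/linux/swap.h: when CONFIG_SWAP is off,
the swap-cache entry points collapse into no-op macros, so the rest of mm/
builds unchanged.  A condensed sketch of the !CONFIG_SWAP branch (only a
few of the stubs, written here as a standalone #ifndef for illustration;
names as in the diff below):

	#ifndef CONFIG_SWAP
	/* no swap: nothing to report, nothing to free */
	#define show_swap_cache_info()		/*NOTHING*/
	#define free_swap_and_cache(swp)	/*NOTHING*/
	/* dropping a page reduces to dropping its pagecache reference */
	#define free_page_and_swap_cache(page) \
		page_cache_release(page)
	#endif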



 arch/i386/Kconfig      |    7 ++++++-
 arch/i386/mm/pgtable.c |    1 +
 include/linux/swap.h   |   24 +++++++++++++-----------
 mm/swap.c              |    1 -
 mm/swap_state.c        |   27 ++++++++++++++-------------
 5 files changed, 34 insertions(+), 26 deletions(-)
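
One hunk deserves a note: the page_referenced() fallback used to live in
the !CONFIG_SWAP block, but the real implementation (the pte_chain walk in
mm/rmap.c) depends on CONFIG_MMU, not CONFIG_SWAP.  The patch keys the
fallback to the right option; without an MMU there are no reverse mappings
to scan, so only the per-page software bit is left.  The two forms,
condensed from the swap.h hunks below:

	#ifdef CONFIG_MMU
	/* MMU: walk the page's reverse mappings via its pte_chain */
	int FASTCALL(page_referenced(struct page *));
	#else
	/* no MMU: just test-and-clear the software referenced bit */
	#define page_referenced(page) \
		TestClearPageReferenced(page)
	#endif /* CONFIG_MMU */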

diff -puN arch/i386/Kconfig~i386-no-swap-fix arch/i386/Kconfig
--- 25/arch/i386/Kconfig~i386-no-swap-fix	2003-02-26 14:47:11.000000000 -0800
+++ 25-akpm/arch/i386/Kconfig	2003-02-26 14:47:11.000000000 -0800
@@ -19,8 +19,13 @@ config MMU
 	default y
 
 config SWAP
-	bool
+	bool "Support for paging of anonymous memory"
 	default y
+	help
+	  This option allows you to choose whether you want to have support
+	  for so-called swap devices or swap files in your kernel that are
+	  used to provide more virtual memory than the actual RAM present
+	  in your computer.  If unsure, say Y.
 
 config SBUS
 	bool
diff -puN arch/i386/mm/pgtable.c~i386-no-swap-fix arch/i386/mm/pgtable.c
--- 25/arch/i386/mm/pgtable.c~i386-no-swap-fix	2003-02-26 14:47:11.000000000 -0800
+++ 25-akpm/arch/i386/mm/pgtable.c	2003-02-26 14:47:11.000000000 -0800
@@ -11,6 +11,7 @@
 #include <linux/smp.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/pagemap.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
diff -puN include/linux/swap.h~i386-no-swap-fix include/linux/swap.h
--- 25/include/linux/swap.h~i386-no-swap-fix	2003-02-26 14:47:11.000000000 -0800
+++ 25-akpm/include/linux/swap.h	2003-02-26 14:47:11.000000000 -0800
@@ -68,10 +68,11 @@ typedef struct {
 
 #ifdef __KERNEL__
 
-struct sysinfo;
 struct address_space;
-struct zone;
+struct pte_chain;
+struct sysinfo;
 struct writeback_control;
+struct zone;
 
 /*
  * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
@@ -140,6 +141,9 @@ struct swap_list_t {
 /* Swap 50% full? Release swapcache more aggressively.. */
 #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
 
+/* linux/mm/oom_kill.c */
+extern void out_of_memory(void);
+
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalhigh_pages;
@@ -149,13 +153,11 @@ extern unsigned int nr_free_pages_pgdat(
 extern unsigned int nr_free_buffer_pages(void);
 extern unsigned int nr_free_pagecache_pages(void);
 
-/* linux/mm/filemap.c */
-extern void FASTCALL(mark_page_accessed(struct page *));
-
 /* linux/mm/swap.c */
 extern void FASTCALL(lru_cache_add(struct page *));
 extern void FASTCALL(lru_cache_add_active(struct page *));
 extern void FASTCALL(activate_page(struct page *));
+extern void FASTCALL(mark_page_accessed(struct page *));
 extern void lru_add_drain(void);
 extern int rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
@@ -165,11 +167,8 @@ extern int try_to_free_pages(struct zone
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
-/* linux/mm/oom_kill.c */
-extern void out_of_memory(void);
-
 /* linux/mm/rmap.c */
-struct pte_chain;
+#ifdef CONFIG_MMU
 int FASTCALL(page_referenced(struct page *));
 struct pte_chain *FASTCALL(page_add_rmap(struct page *, pte_t *,
 					struct pte_chain *));
@@ -186,6 +185,11 @@ int FASTCALL(page_over_rsslimit(struct p
 /* linux/mm/shmem.c */
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
 
+#else
+#define page_referenced(page) \
+	TestClearPageReferenced(page)
+#endif /* CONFIG_MMU */
+
 #ifdef CONFIG_SWAP
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct file *, struct page *);
@@ -242,8 +246,6 @@ extern spinlock_t swaplock;
 	page_cache_release(page)
 #define free_pages_and_swap_cache(pages, nr) \
 	release_pages((pages), (nr), 0);
-#define page_referenced(page) \
-	TestClearPageReferenced(page)
 
 #define show_swap_cache_info()			/*NOTHING*/
 #define free_swap_and_cache(swp)		/*NOTHING*/
diff -puN mm/swap.c~i386-no-swap-fix mm/swap.c
--- 25/mm/swap.c~i386-no-swap-fix	2003-02-26 14:47:11.000000000 -0800
+++ 25-akpm/mm/swap.c	2003-02-26 14:47:11.000000000 -0800
@@ -363,5 +363,4 @@ void __init swap_setup(void)
 	 * Right now other parts of the system means that we
 	 * _really_ don't want to cluster much more
 	 */
-	init_MUTEX(&swapper_space.i_shared_sem);
 }
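
A note on the hunk above: swap_setup() used to initialize
swapper_space.i_shared_sem at runtime; the swap_state.c change below
replaces that with a static initializer, presumably so the semaphore is
valid without relying on swap_setup() having run.  Side by side (both
lines taken from the hunks in this patch):

	/* before: runtime initialization in swap_setup() */
	init_MUTEX(&swapper_space.i_shared_sem);

	/* after: static initialization in the swapper_space definition */
	.i_shared_sem	= __MUTEX_INITIALIZER(swapper_space.i_shared_sem),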
diff -puN mm/swap_state.c~i386-no-swap-fix mm/swap_state.c
--- 25/mm/swap_state.c~i386-no-swap-fix	2003-02-26 14:47:11.000000000 -0800
+++ 25-akpm/mm/swap_state.c	2003-02-26 15:07:19.000000000 -0800
@@ -33,19 +33,20 @@ static struct backing_dev_info swap_back
 extern struct address_space_operations swap_aops;
 
 struct address_space swapper_space = {
-	.page_tree		= RADIX_TREE_INIT(GFP_ATOMIC),
-	.page_lock		= RW_LOCK_UNLOCKED,
-	.clean_pages		= LIST_HEAD_INIT(swapper_space.clean_pages),
-	.dirty_pages		= LIST_HEAD_INIT(swapper_space.dirty_pages),
-	.io_pages		= LIST_HEAD_INIT(swapper_space.io_pages),
-	.locked_pages		= LIST_HEAD_INIT(swapper_space.locked_pages),
-	.host			= &swapper_inode,
-	.a_ops			= &swap_aops,
-	.backing_dev_info	= &swap_backing_dev_info,
-	.i_mmap			= LIST_HEAD_INIT(swapper_space.i_mmap),
-	.i_mmap_shared		= LIST_HEAD_INIT(swapper_space.i_mmap_shared),
-	.private_lock		= SPIN_LOCK_UNLOCKED,
-	.private_list		= LIST_HEAD_INIT(swapper_space.private_list),
+	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC),
+	.page_lock	= RW_LOCK_UNLOCKED,
+	.clean_pages	= LIST_HEAD_INIT(swapper_space.clean_pages),
+	.dirty_pages	= LIST_HEAD_INIT(swapper_space.dirty_pages),
+	.io_pages	= LIST_HEAD_INIT(swapper_space.io_pages),
+	.locked_pages	= LIST_HEAD_INIT(swapper_space.locked_pages),
+	.host		= &swapper_inode,
+	.a_ops		= &swap_aops,
+	.backing_dev_info = &swap_backing_dev_info,
+	.i_mmap		= LIST_HEAD_INIT(swapper_space.i_mmap),
+	.i_mmap_shared	= LIST_HEAD_INIT(swapper_space.i_mmap_shared),
+	.i_shared_sem	= __MUTEX_INITIALIZER(swapper_space.i_shared_sem),
+	.private_lock	= SPIN_LOCK_UNLOCKED,
+	.private_list	= LIST_HEAD_INIT(swapper_space.private_list),
 };
 
 #define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

_
