
From: Ingo Molnar <mingo@elte.hu>

The attached patch fixes long scheduling latencies in invalidate_inodes(). 
The lock-break is a bit tricky to do without getting into a livelock
scenario: we use a dummy inode as a marker, from which point we can resume
the scan after rescheduling.

This patch has been tested as part of the -VP patchset for weeks.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/fs/inode.c |   28 ++++++++++++++++++++++++++--
 1 files changed, 26 insertions(+), 2 deletions(-)

diff -puN fs/inode.c~sched-vfs-fix-scheduling-latencies-in-invalidate_inodes fs/inode.c
--- 25/fs/inode.c~sched-vfs-fix-scheduling-latencies-in-invalidate_inodes	2004-09-15 03:07:25.867787200 -0700
+++ 25-akpm/fs/inode.c	2004-09-15 03:07:25.871786592 -0700
@@ -296,7 +296,7 @@ static void dispose_list(struct list_hea
 /*
  * Invalidate all inodes for a device.
  */
-static int invalidate_list(struct list_head *head, struct list_head *dispose)
+static int invalidate_list(struct list_head *head, struct list_head *dispose, struct list_head *mark)
 {
 	struct list_head *next;
 	int busy = 0, count = 0;
@@ -306,6 +306,20 @@ static int invalidate_list(struct list_h
 		struct list_head * tmp = next;
 		struct inode * inode;
 
+		/*
+		 * Preempt if necessary. To make this safe we use a dummy
+		 * inode as a marker - we can continue off that point.
+		 * We rely on this sb's inodes (including the marker) not
+		 * getting reordered within the list during umount. Other
+		 * inodes might get reordered.
+		 */
+		if (lock_need_resched(&inode_lock)) {
+			list_add_tail(mark, next);
+			BUG_ON(mark->next != next);
+			cond_resched_lock(&inode_lock);
+			tmp = next = mark->next;
+			list_del(mark);
+		}
 		next = next->next;
 		if (tmp == head)
 			break;
@@ -346,15 +360,23 @@ int invalidate_inodes(struct super_block
 {
 	int busy;
 	LIST_HEAD(throw_away);
+	struct inode *marker;
+	struct list_head *mark;
+
+	marker = kmalloc(sizeof(*marker), SLAB_KERNEL|__GFP_REPEAT);
+	memset(marker, 0, sizeof(*marker));
+	mark = &marker->i_list;
 
 	down(&iprune_sem);
 	spin_lock(&inode_lock);
-	busy = invalidate_list(&sb->s_inodes, &throw_away);
+	busy = invalidate_list(&sb->s_inodes, &throw_away, mark);
 	spin_unlock(&inode_lock);
 
 	dispose_list(&throw_away);
 	up(&iprune_sem);
 
+	kfree(marker);
+
 	return busy;
 }
 
@@ -425,6 +447,8 @@ static void prune_icache(int nr_to_scan)
 	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
 		struct inode *inode;
 
+		cond_resched_lock(&inode_lock);
+
 		if (list_empty(&inode_unused))
 			break;
 
_
