Patch from Nick Piggin <piggin@cyberone.com.au>

This patch finishes up the name changeover from the deadline-derived
identifiers to anticipatory-scheduler ones: struct as_data pointers are
renamed dd -> ad, struct as_rq pointers drq -> arq (along with derived
names such as next_drq and drq_pool), and the DL_* hash macros become
AS_*. It is a pure rename; no functional change.
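
One of the pieces being renamed, AS_INVALIDATE_HASH (formerly
DL_INVALIDATE_HASH), may deserve a note for reviewers: the merge hash is
flushed by bumping a generation counter (hash_valid_count) rather than by
unlinking every entry, and stale entries are dropped lazily on the next
lookup (see as_find_arq_hash). Below is a standalone userspace sketch of
that pattern; the names (table, entry, entry_valid) are illustrative only,
not kernel code:

#include <stdio.h>

struct table {
	unsigned long generation;	/* plays the role of hash_valid_count */
};

struct entry {
	unsigned long generation;	/* 0 means "not hashed", as in ON_HASH() */
};

static void table_invalidate(struct table *t)
{
	/* same wraparound guard as AS_INVALIDATE_HASH: 0 stays reserved */
	if (!++t->generation)
		t->generation = 1;
}

static void entry_insert(struct table *t, struct entry *e)
{
	e->generation = t->generation;
}

static int entry_valid(struct table *t, struct entry *e)
{
	/* a lookup treats a stale generation as "not present" */
	return e->generation && e->generation == t->generation;
}

int main(void)
{
	struct table t = { .generation = 1 };
	struct entry e;

	entry_insert(&t, &e);
	printf("after insert: %d\n", entry_valid(&t, &e));	/* prints 1 */
	table_invalidate(&t);					/* O(1) flush */
	printf("after flush:  %d\n", entry_valid(&t, &e));	/* prints 0 */
	return 0;
}

This keeps the barrier-request flush in as_insert_request O(1), at the
cost of one extra compare per hash lookup.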

 drivers/block/as-iosched.c |  660 ++++++++++++++++++++++-----------------------
 1 files changed, 330 insertions(+), 330 deletions(-)

diff -puN drivers/block/as-iosched.c~as-tidy-up-rename drivers/block/as-iosched.c
--- 25/drivers/block/as-iosched.c~as-tidy-up-rename	Fri Feb 21 11:52:16 2003
+++ 25-akpm/drivers/block/as-iosched.c	Fri Feb 21 11:53:17 2003
@@ -76,17 +76,17 @@ static unsigned long write_batch_expire 
 static unsigned long antic_expire = HZ / 50;
 
 static const int as_hash_shift = 10;
-#define DL_HASH_BLOCK(sec)	((sec) >> 3)
-#define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), as_hash_shift))
-#define DL_HASH_ENTRIES		(1 << as_hash_shift)
+#define AS_HASH_BLOCK(sec)	((sec) >> 3)
+#define AS_HASH_FN(sec)		(hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
+#define AS_HASH_ENTRIES		(1 << as_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
 #define list_entry_hash(ptr)	list_entry((ptr), struct as_rq, hash)
-#define ON_HASH(drq)		(drq)->hash_valid_count
+#define ON_HASH(arq)		(arq)->hash_valid_count
 
-#define DL_INVALIDATE_HASH(dd)				\
+#define AS_INVALIDATE_HASH(ad)				\
 	do {						\
-		if (!++(dd)->hash_valid_count)		\
-			(dd)->hash_valid_count = 1;	\
+		if (!++(ad)->hash_valid_count)		\
+			(ad)->hash_valid_count = 1;	\
 	} while (0)
 
 #define ANTIC_OFF	0	/* Not anticipating (normal operation)	*/
@@ -105,7 +105,7 @@ struct as_data {
 	struct rb_root sort_list[2];	
 	struct list_head fifo_list[2];
 	
-	struct as_rq *next_drq[2];/* next in sort order */
+	struct as_rq *next_arq[2];/* next in sort order */
 	sector_t last_sector[2];	/* last READ and WRITE sectors */
 	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
@@ -157,63 +157,63 @@ struct as_rq {
 };
 
 static void
-as_move_request(struct as_data *dd, struct as_rq *drq);
+as_move_request(struct as_data *ad, struct as_rq *arq);
 
 /*
- * as_update_drq must be called whenever a request (drq) is added to
+ * as_update_arq must be called whenever a request (arq) is added to
  * the sort_list. This function keeps caches up to date, and checks if the
  * request might be one we are "anticipating"
  */
 static void
-as_update_drq(struct as_data *dd, struct as_rq *drq);
+as_update_arq(struct as_data *ad, struct as_rq *arq);
 
-static kmem_cache_t *drq_pool;
+static kmem_cache_t *arq_pool;
 
 #define RQ_DATA(rq)	((struct as_rq *) (rq)->elevator_private)
 
 /*
  * the back merge hash support functions
  */
-static inline void __as_del_drq_hash(struct as_rq *drq)
+static inline void __as_del_arq_hash(struct as_rq *arq)
 {
-	drq->hash_valid_count = 0;
-	list_del_init(&drq->hash);
+	arq->hash_valid_count = 0;
+	list_del_init(&arq->hash);
 }
 
-static inline void as_del_drq_hash(struct as_rq *drq)
+static inline void as_del_arq_hash(struct as_rq *arq)
 {
-	if (ON_HASH(drq))
-		__as_del_drq_hash(drq);
+	if (ON_HASH(arq))
+		__as_del_arq_hash(arq);
 }
 
 static void
-as_add_drq_hash(struct as_data *dd, struct as_rq *drq)
+as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
 {
-	struct request *rq = drq->request;
+	struct request *rq = arq->request;
 
-	BUG_ON(ON_HASH(drq));
+	BUG_ON(ON_HASH(arq));
 
-	drq->hash_valid_count = dd->hash_valid_count;
-	list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
+	arq->hash_valid_count = ad->hash_valid_count;
+	list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
 }
 
 static struct request *
-as_find_drq_hash(struct as_data *dd, sector_t offset)
+as_find_arq_hash(struct as_data *ad, sector_t offset)
 {
-	struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
+	struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
 	struct list_head *entry, *next = hash_list->next;
 
 	while ((entry = next) != hash_list) {
-		struct as_rq *drq = list_entry_hash(entry);
-		struct request *__rq = drq->request;
+		struct as_rq *arq = list_entry_hash(entry);
+		struct request *__rq = arq->request;
 
 		next = entry->next;
 		
-		BUG_ON(!ON_HASH(drq));
+		BUG_ON(!ON_HASH(arq));
 
 		if (!rq_mergeable(__rq)
-		    || drq->hash_valid_count != dd->hash_valid_count) {
-			__as_del_drq_hash(drq);
+		    || arq->hash_valid_count != ad->hash_valid_count) {
+			__as_del_arq_hash(arq);
 			continue;
 		}
 
@@ -231,143 +231,143 @@ as_find_drq_hash(struct as_data *dd, sec
 #define RB_EMPTY(root)	((root)->rb_node == NULL)
 #define ON_RB(node)	((node)->rb_color != RB_NONE)
 #define RB_CLEAR(node)	((node)->rb_color = RB_NONE)
-#define rb_entry_drq(node)	rb_entry((node), struct as_rq, rb_node)
-#define DRQ_RB_ROOT(dd, drq)	(&(dd)->sort_list[rq_data_dir((drq)->request)])
+#define rb_entry_arq(node)	rb_entry((node), struct as_rq, rb_node)
+#define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[rq_data_dir((arq)->request)])
 #define rq_rb_key(rq)		(rq)->sector
 
 /*
- * as_find_first_drq finds the first (lowest sector numbered) request
+ * as_find_first_arq finds the first (lowest sector numbered) request
  * for the specified data_dir. Used to sweep back to the start of the disk
  * (1-way elevator) after we process the last (highest sector) request.
  */
 static struct as_rq *
-as_find_first_drq(struct as_data *dd, int data_dir)
+as_find_first_arq(struct as_data *ad, int data_dir)
 {
-	struct rb_node *n = dd->sort_list[data_dir].rb_node;
+	struct rb_node *n = ad->sort_list[data_dir].rb_node;
 
 	if (n == NULL)
 		return NULL;
 
 	for (;;) {
 		if (n->rb_left == NULL)
-			return rb_entry_drq(n);
+			return rb_entry_arq(n);
 		
 		n = n->rb_left;
 	}
 }
 
 static struct as_rq *
-__as_add_drq_rb(struct as_data *dd, struct as_rq *drq)
+__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
 {
-	struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
+	struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
 	struct rb_node *parent = NULL;
-	struct as_rq *__drq;
+	struct as_rq *__arq;
 
 	while (*p) {
 		parent = *p;
-		__drq = rb_entry_drq(parent);
+		__arq = rb_entry_arq(parent);
 
-		if (drq->rb_key < __drq->rb_key)
+		if (arq->rb_key < __arq->rb_key)
 			p = &(*p)->rb_left;
-		else if (drq->rb_key > __drq->rb_key)
+		else if (arq->rb_key > __arq->rb_key)
 			p = &(*p)->rb_right;
 		else
-			return __drq;
+			return __arq;
 	}
 
-	rb_link_node(&drq->rb_node, parent, p);
+	rb_link_node(&arq->rb_node, parent, p);
 	return 0;
 }
 
 /*
 * Add the request to the rb tree if it is unique.  If there is an alias (an
  * existing request against the same sector), which can happen when using
  * direct IO, then move the alias to the dispatch list and then add the
  * request.
  */
 static void
-as_add_drq_rb(struct as_data *dd, struct as_rq *drq)
+as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
 {
 	struct as_rq *alias;
-	struct request *rq = drq->request;
+	struct request *rq = arq->request;
 	 
-	drq->rb_key = rq_rb_key(rq);
+	arq->rb_key = rq_rb_key(rq);
 
-	while ((alias = __as_add_drq_rb(dd, drq)))
-		as_move_request(dd, alias);
+	while ((alias = __as_add_arq_rb(ad, arq)))
+		as_move_request(ad, alias);
 	
-	rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-	as_update_drq(dd, drq);
+	rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
+	as_update_arq(ad, arq);
 }
 
 static struct as_rq *
-as_choose_req(struct as_data *dd,
-	struct as_rq *drq1, struct as_rq *drq2);
+as_choose_req(struct as_data *ad,
+	struct as_rq *arq1, struct as_rq *arq2);
 
 static inline void
-as_del_drq_rb(struct as_data *dd, struct as_rq *drq)
+as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
 {
-	const int data_dir = rq_data_dir(drq->request);
+	const int data_dir = rq_data_dir(arq->request);
 
-	if (dd->next_drq[data_dir] == drq) {
-		struct rb_node *rbnext = rb_next(&drq->rb_node);
-		struct rb_node *rbprev = rb_prev(&drq->rb_node);
-		struct as_rq *drq_next, *drq_prev;
+	if (ad->next_arq[data_dir] == arq) {
+		struct rb_node *rbnext = rb_next(&arq->rb_node);
+		struct rb_node *rbprev = rb_prev(&arq->rb_node);
+		struct as_rq *arq_next, *arq_prev;
 
 		if (rbprev)
-			drq_prev = rb_entry_drq(rbprev);
+			arq_prev = rb_entry_arq(rbprev);
 		else
-			drq_prev = NULL;
+			arq_prev = NULL;
 
 		if (rbnext)
-			drq_next = rb_entry_drq(rbnext);
+			arq_next = rb_entry_arq(rbnext);
 		else
-			drq_next = as_find_first_drq(dd, data_dir);
+			arq_next = as_find_first_arq(ad, data_dir);
 
-		dd->next_drq[data_dir] = as_choose_req(dd,
-							drq_next, drq_prev);
+		ad->next_arq[data_dir] = as_choose_req(ad,
+							arq_next, arq_prev);
 	}
 
-	if (ON_RB(&drq->rb_node)) {
-		rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-		RB_CLEAR(&drq->rb_node);
+	if (ON_RB(&arq->rb_node)) {
+		rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
+		RB_CLEAR(&arq->rb_node);
 	}
 }
 
 static struct request *
-as_find_drq_rb(struct as_data *dd, sector_t sector, int data_dir)
+as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
 {
-	struct rb_node *n = dd->sort_list[data_dir].rb_node;
-	struct as_rq *drq;
+	struct rb_node *n = ad->sort_list[data_dir].rb_node;
+	struct as_rq *arq;
 
 	while (n) {
-		drq = rb_entry_drq(n);
+		arq = rb_entry_arq(n);
 
-		if (sector < drq->rb_key)
+		if (sector < arq->rb_key)
 			n = n->rb_left;
-		else if (sector > drq->rb_key)
+		else if (sector > arq->rb_key)
 			n = n->rb_right;
 		else
-			return drq->request;
+			return arq->request;
 	}
 
 	return NULL;
 }
 
 /*
- * add drq to rbtree and fifo
+ * add arq to rbtree and fifo
  */
 static void
-as_add_request(struct as_data *dd, struct as_rq *drq)
+as_add_request(struct as_data *ad, struct as_rq *arq)
 {
-	const int data_dir = rq_data_dir(drq->request);
+	const int data_dir = rq_data_dir(arq->request);
 
-	as_add_drq_rb(dd, drq);
+	as_add_arq_rb(ad, arq);
 	/*
 	 * set expire time (only used for reads) and add to fifo list
 	 */
-	drq->expires = jiffies + dd->fifo_expire[data_dir];
-	list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
+	arq->expires = jiffies + ad->fifo_expire[data_dir];
+	list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
 }
 
 /*
@@ -375,14 +375,14 @@ as_add_request(struct as_data *dd, struc
  */
 static void as_remove_request(request_queue_t *q, struct request *rq)
 {
-	struct as_rq *drq = RQ_DATA(rq);
+	struct as_rq *arq = RQ_DATA(rq);
 
-	if (drq) {
-		struct as_data *dd = q->elevator.elevator_data;
+	if (arq) {
+		struct as_data *ad = q->elevator.elevator_data;
 
-		list_del_init(&drq->fifo);
-		as_del_drq_hash(drq);
-		as_del_drq_rb(dd, drq);
+		list_del_init(&arq->fifo);
+		as_del_arq_hash(arq);
+		as_del_arq_rb(ad, arq);
 	}
 
 	if (q->last_merge == &rq->queuelist)
@@ -394,7 +394,7 @@ static void as_remove_request(request_qu
 static int
 as_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
 {
-	struct as_data *dd = q->elevator.elevator_data;
+	struct as_data *ad = q->elevator.elevator_data;
 	struct request *__rq;
 	int ret;
 
@@ -410,7 +410,7 @@ as_merge(request_queue_t *q, struct list
 	/*
 	 * see if the merge hash can satisfy a back merge
 	 */
-	__rq = as_find_drq_hash(dd, bio->bi_sector);
+	__rq = as_find_arq_hash(ad, bio->bi_sector);
 	if (__rq) {
 		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
 
@@ -423,10 +423,10 @@ as_merge(request_queue_t *q, struct list
 	/*
 	 * check for front merge
 	 */
-	if (dd->front_merges) {
+	if (ad->front_merges) {
 		sector_t rb_key = bio->bi_sector + bio_sectors(bio);
 
-		__rq = as_find_drq_rb(dd, rb_key, bio_data_dir(bio));
+		__rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
 		if (__rq) {
 			BUG_ON(rb_key != rq_rb_key(__rq));
 
@@ -447,21 +447,21 @@ out_insert:
 
 static void as_merged_request(request_queue_t *q, struct request *req)
 {
-	struct as_data *dd = q->elevator.elevator_data;
-	struct as_rq *drq = RQ_DATA(req);
+	struct as_data *ad = q->elevator.elevator_data;
+	struct as_rq *arq = RQ_DATA(req);
 
 	/*
 	 * hash always needs to be repositioned, key is end sector
 	 */
-	as_del_drq_hash(drq);
-	as_add_drq_hash(dd, drq);
+	as_del_arq_hash(arq);
+	as_add_arq_hash(ad, arq);
 
 	/*
 	 * if the merge was a front merge, we need to reposition request
 	 */
-	if (rq_rb_key(req) != drq->rb_key) {
-		as_del_drq_rb(dd, drq);
-		as_add_drq_rb(dd, drq);
+	if (rq_rb_key(req) != arq->rb_key) {
+		as_del_arq_rb(ad, arq);
+		as_add_arq_rb(ad, arq);
 	}
 
 	q->last_merge = &req->queuelist;
@@ -471,33 +471,33 @@ static void
 as_merged_requests(request_queue_t *q, struct request *req,
 			 struct request *next)
 {
-	struct as_data *dd = q->elevator.elevator_data;
-	struct as_rq *drq = RQ_DATA(req);
+	struct as_data *ad = q->elevator.elevator_data;
+	struct as_rq *arq = RQ_DATA(req);
 	struct as_rq *dnext = RQ_DATA(next);
 
-	BUG_ON(!drq);
+	BUG_ON(!arq);
 	BUG_ON(!dnext);
 
 	/*
-	 * reposition drq (this is the merged request) in hash, and in rbtree
+	 * reposition arq (this is the merged request) in hash, and in rbtree
 	 * in case of a front merge
 	 */
-	as_del_drq_hash(drq);
-	as_add_drq_hash(dd, drq);
+	as_del_arq_hash(arq);
+	as_add_arq_hash(ad, arq);
 
-	if (rq_rb_key(req) != drq->rb_key) {
-		as_del_drq_rb(dd, drq);
-		as_add_drq_rb(dd, drq);
+	if (rq_rb_key(req) != arq->rb_key) {
+		as_del_arq_rb(ad, arq);
+		as_add_arq_rb(ad, arq);
 	}
 
 	/*
-	 * if dnext expires before drq, assign its expire time to drq
+	 * if dnext expires before arq, assign its expire time to arq
 	 * and move into dnext position (dnext will be deleted) in fifo
 	 */
-	if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
-		if (time_before(dnext->expires, drq->expires)) {
-			list_move(&drq->fifo, &dnext->fifo);
-			drq->expires = dnext->expires;
+	if (!list_empty(&arq->fifo) && !list_empty(&dnext->fifo)) {
+		if (time_before(dnext->expires, arq->expires)) {
+			list_move(&arq->fifo, &dnext->fifo);
+			arq->expires = dnext->expires;
 		}
 	}
 
@@ -511,12 +511,12 @@ as_merged_requests(request_queue_t *q, s
  * move request from sort list to dispatch queue.
  */
 static void
-as_move_to_dispatch(struct as_data *dd, struct as_rq *drq)
+as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 {
-	request_queue_t *q = drq->request->q;
+	request_queue_t *q = arq->request->q;
 
-	as_remove_request(q, drq->request);
-	list_add_tail(&drq->request->queuelist, dd->dispatch);
+	as_remove_request(q, arq->request);
+	list_add_tail(&arq->request->queuelist, ad->dispatch);
 }
 
 
@@ -524,37 +524,37 @@ as_move_to_dispatch(struct as_data *dd, 
  * move an entry to dispatch queue
  */
 static void
-as_move_request(struct as_data *dd, struct as_rq *drq)
+as_move_request(struct as_data *ad, struct as_rq *arq)
 {
-	const int data_dir = rq_data_dir(drq->request);
-	struct rb_node *rbnext = rb_next(&drq->rb_node);
-	struct rb_node *rbprev = rb_prev(&drq->rb_node);
-	struct as_rq *drq_next, *drq_prev;
+	const int data_dir = rq_data_dir(arq->request);
+	struct rb_node *rbnext = rb_next(&arq->rb_node);
+	struct rb_node *rbprev = rb_prev(&arq->rb_node);
+	struct as_rq *arq_next, *arq_prev;
 
-	BUG_ON(!ON_RB(&drq->rb_node));
+	BUG_ON(!ON_RB(&arq->rb_node));
 
 	if (rbprev)
-		drq_prev = rb_entry_drq(rbprev);
+		arq_prev = rb_entry_arq(rbprev);
 	else
-		drq_prev = NULL;
+		arq_prev = NULL;
 	
 	if (rbnext) 
-		drq_next = rb_entry_drq(rbnext);
+		arq_next = rb_entry_arq(rbnext);
 	else
-		drq_next = as_find_first_drq(dd, data_dir);
-	dd->next_drq[data_dir] = as_choose_req(dd, drq_next, drq_prev);
+		arq_next = as_find_first_arq(ad, data_dir);
+	ad->next_arq[data_dir] = as_choose_req(ad, arq_next, arq_prev);
 
-	dd->last_sector[data_dir] = drq->request->sector + drq->request->nr_sectors;
+	ad->last_sector[data_dir] = arq->request->sector + arq->request->nr_sectors;
 
 	if (data_dir == READ)
 		/* In case we have to anticipate after this */
-		dd->current_id = drq->request_id;
+		ad->current_id = arq->request_id;
 
 	/*
 	 * take it off the sort and fifo list, move
 	 * to dispatch queue
 	 */
-	as_move_to_dispatch(dd, drq);
+	as_move_to_dispatch(ad, arq);
 }
 
 #define list_entry_fifo(ptr)	list_entry((ptr), struct as_rq, fifo)
@@ -568,35 +568,35 @@ as_move_request(struct as_data *dd, stru
  * The funny "absolute difference" math on the elapsed time is to handle
  * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
  */ 
-static int as_fifo_expired(struct as_data *dd, int ddir)
+static int as_fifo_expired(struct as_data *ad, int adir)
 {
-	struct as_rq *drq;
+	struct as_rq *arq;
 	long delta_jif;
 
-	delta_jif = jiffies - dd->last_check_fifo[ddir];
+	delta_jif = jiffies - ad->last_check_fifo[adir];
 	if (unlikely(delta_jif < 0))
 		delta_jif = -delta_jif;
-	if (delta_jif < dd->fifo_expire[ddir])
+	if (delta_jif < ad->fifo_expire[adir])
 		return 0;
 
-	dd->last_check_fifo[ddir] = jiffies;
+	ad->last_check_fifo[adir] = jiffies;
 
-	if (list_empty(&dd->fifo_list[ddir]))
+	if (list_empty(&ad->fifo_list[adir]))
 		return 0;
 
-	drq = list_entry_fifo(dd->fifo_list[ddir].next);
+	arq = list_entry_fifo(ad->fifo_list[adir].next);
 	
-	return time_after(jiffies, drq->expires);
+	return time_after(jiffies, arq->expires);
 }
 
-static int as_antic_expired(struct as_data *dd)
+static int as_antic_expired(struct as_data *ad)
 {
 	long delta_jif;
 
-	delta_jif = jiffies - dd->antic_start;
+	delta_jif = jiffies - ad->antic_start;
 	if (unlikely(delta_jif < 0))
 		delta_jif = -delta_jif;
-	if (delta_jif < dd->antic_expire)
+	if (delta_jif < ad->antic_expire)
 		return 0;
 
 	return 1;
@@ -605,9 +605,9 @@ static int as_antic_expired(struct as_da
 /*
  * as_batch_expired returns true if the current batch has expired.
  */
-static inline int as_batch_expired(struct as_data *dd)
+static inline int as_batch_expired(struct as_data *ad)
 {
-	return time_after(jiffies, dd->current_batch_expires);
+	return time_after(jiffies, ad->current_batch_expires);
 }
 
 /*
@@ -643,15 +643,15 @@ static void as_anticipate_work(void *dat
 static void as_anticipate_timeout(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *)data;
-	struct as_data *dd = q->elevator.elevator_data;
+	struct as_data *ad = q->elevator.elevator_data;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
-	dd->antic_status = ANTIC_FINISHED;
+	ad->antic_status = ANTIC_FINISHED;
 
 	blk_remove_plug(q);
-	schedule_work(&dd->antic_work);
+	schedule_work(&ad->antic_work);
 	ant_stats.timeouts++;
 	
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -662,17 +662,17 @@ static void as_anticipate_timeout(unsign
  * previous one issued.
  */
 static int
-as_close_req(struct as_data *dd, struct as_rq *drq)
+as_close_req(struct as_data *ad, struct as_rq *arq)
 {
 	unsigned long delay;	/* milliseconds */
-	sector_t last = dd->last_sector[dd->batch_data_dir];
-	sector_t next = drq->request->sector;
+	sector_t last = ad->last_sector[ad->batch_data_dir];
+	sector_t next = arq->request->sector;
 	sector_t delta;	/* acceptable close offset (in sectors) */
 
-	delay = ((jiffies - dd->antic_start) * 1000) / HZ;
-	if (dd->antic_status == ANTIC_OFF || delay <= 1)
+	delay = ((jiffies - ad->antic_start) * 1000) / HZ;
+	if (ad->antic_status == ANTIC_OFF || delay <= 1)
 		delta = 32;
-	else if (delay <= 20 && delay <= dd->antic_expire / 2)
+	else if (delay <= 20 && delay <= ad->antic_expire / 2)
 		delta = 32 << (delay-1);
 	else
 		return 1;
@@ -683,24 +683,24 @@ as_close_req(struct as_data *dd, struct 
 #define MAXBACK (512 * 1024)
 
 static struct as_rq *
-as_choose_req(struct as_data *dd,
-		struct as_rq *drq1, struct as_rq *drq2)
+as_choose_req(struct as_data *ad,
+		struct as_rq *arq1, struct as_rq *arq2)
 {
 	int data_dir;
 	sector_t last, s1, s2, d1, d2;
 	const sector_t maxback = MAXBACK;
 
-	if (drq1 == NULL)
-		return drq2;
-	if (drq2 == NULL)
-		return drq1;
-
-	data_dir = rq_data_dir(drq1->request);
-	last = dd->last_sector[data_dir];
-	s1 = drq1->request->sector;
-	s2 = drq2->request->sector;
+	if (arq1 == NULL)
+		return arq2;
+	if (arq2 == NULL)
+		return arq1;
+
+	data_dir = rq_data_dir(arq1->request);
+	last = ad->last_sector[data_dir];
+	s1 = arq1->request->sector;
+	s2 = arq2->request->sector;
 
-	BUG_ON(data_dir != rq_data_dir(drq2->request));
+	BUG_ON(data_dir != rq_data_dir(arq2->request));
 
 	/*
 	 * Strict one way elevator _except_ in the case where we allow
@@ -711,7 +711,7 @@ as_choose_req(struct as_data *dd,
 	if (s1 >= last)
 		d1 = s1 - last;
 	else if (data_dir == READ
-			&& dd->current_id == drq1->request_id
+			&& ad->current_id == arq1->request_id
 			&& s1+maxback >= last)
 				d1 = (last - s1)*2;
 	else
@@ -720,7 +720,7 @@ as_choose_req(struct as_data *dd,
 	if (s2 >= last)
 		d2 = s2 - last;
 	else if (data_dir == READ
-			&& dd->current_id == drq2->request_id
+			&& ad->current_id == arq2->request_id
 			&& s2+maxback >= last)
 				d2 = (last - s2)*2;
 	else
@@ -728,14 +728,14 @@ as_choose_req(struct as_data *dd,
 
 	/* Found the deltas */
 	if (d1 < d2)
-		return drq1;
+		return arq1;
 	else if (d2 < d1)
-		return drq2;
+		return arq2;
 	else {
 		if (s1 >= s2)
-			return drq1;
+			return arq1;
 		else
-			return drq2;
+			return arq2;
 	}
 	
 elevator_wrap:
@@ -746,29 +746,29 @@ elevator_wrap:
 	 * is favourable
 	 */
 	if (s1 >= last && s2 < last)
-		return drq1;
+		return arq1;
 	else if (s2 >= last && s1 < last)
-		return drq2;
+		return arq2;
 	else {
 		/* both behind the head */
 		if (s1 <= s2)
-			return drq1;
+			return arq1;
 		else
-			return drq2;
+			return arq2;
 	}
 }
 
 /*
- * as_antic_req, has @dd been anticipating this @drq?
+ * as_antic_req, has @ad been anticipating this @arq?
  */
 static int
-as_antic_req(struct as_data *dd, struct as_rq *drq)
+as_antic_req(struct as_data *ad, struct as_rq *arq)
 {
-	if (as_close_req(dd, drq)) {
+	if (as_close_req(ad, arq)) {
 		ant_stats.close_requests++;
 		return 1;
 	}
-	if (dd->current_id == drq->request_id) {
+	if (ad->current_id == arq->request_id) {
 		ant_stats.matching_ids++;
 		return 1;
 	}
@@ -776,31 +776,31 @@ as_antic_req(struct as_data *dd, struct 
 }
 
 /*
- * as_update_drq must be called whenever a request (drq) is added to
+ * as_update_arq must be called whenever a request (arq) is added to
  * the sort_list. This function keeps caches up to date, and checks if the
  * request might be one we are "anticipating"
  */
 static void
-as_update_drq(struct as_data *dd, struct as_rq *drq)
+as_update_arq(struct as_data *ad, struct as_rq *arq)
 {
-	const int data_dir = rq_data_dir(drq->request);
-	sector_t last = dd->last_sector[data_dir];
-	sector_t this = drq->request->sector;
-	unsigned long delay = jiffies - dd->antic_start;
+	const int data_dir = rq_data_dir(arq->request);
+	sector_t last = ad->last_sector[data_dir];
+	sector_t this = arq->request->sector;
+	unsigned long delay = jiffies - ad->antic_start;
 
-	drq->request_id = request_id();
+	arq->request_id = request_id();
 
 	if (data_dir == READ)
 		ant_stats.reads++;
 	else
 		ant_stats.writes++;
 
-	/* keep the next_drq cache up to date */
-	dd->next_drq[data_dir] = as_choose_req(dd, drq, dd->next_drq[data_dir]);
+	/* keep the next_arq cache up to date */
+	ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);
 
 	/* have we been anticipating this request? */
-	if (dd->antic_status != ANTIC_OFF && data_dir == READ
-			&& as_antic_req(dd, drq)) {
+	if (ad->antic_status != ANTIC_OFF && data_dir == READ
+			&& as_antic_req(ad, arq)) {
 		long lba_offset;
 		int neg;
 		int log2;
@@ -823,34 +823,34 @@ as_update_drq(struct as_data *dd, struct
 		else
 			ant_stats.lba_forward_offsets[log2]++;
 
-		del_timer(&dd->antic_timer);
-		dd->antic_status = ANTIC_FINISHED;
-		blk_remove_plug(drq->request->q);
-		schedule_work(&dd->antic_work);
+		del_timer(&ad->antic_timer);
+		ad->antic_status = ANTIC_FINISHED;
+		blk_remove_plug(arq->request->q);
+		schedule_work(&ad->antic_work);
 	}
 }
 
 /*
- * can_start_anticipation indicates weather we should either run drq
+ * can_start_anticipation indicates whether we should either run arq
  * or keep anticipating a better request.
  */
 static int
-can_start_anticipation(struct as_data *dd, struct as_rq *drq)
+can_start_anticipation(struct as_data *ad, struct as_rq *arq)
 {
-	if (dd->antic_status == ANTIC_FINISHED)
+	if (ad->antic_status == ANTIC_FINISHED)
 		/*
 		 * Don't restart if we have just finished. Run the next request
 		 */
 		return 0;
 	
-	if (dd->antic_status == ANTIC_WAIT && as_antic_expired(dd))
+	if (ad->antic_status == ANTIC_WAIT && as_antic_expired(ad))
 		/*
 		 * In this situation status should really be FINISHED, however
 		 * the timer hasn't had the chance to run yet.
 		 */
 		return 0;
 
-	if (drq && as_antic_req(dd, drq))
+	if (arq && as_antic_req(ad, arq))
 		/*
 		 * This request is a good candidate. Don't keep anticipating,
 		 * run it.
@@ -874,49 +874,49 @@ can_start_anticipation(struct as_data *d
  */
 static int as_dispatch_request(struct request_queue *q)
 {
-	struct as_data *dd = q->elevator.elevator_data;
-	struct as_rq *drq;
-	const int reads = !list_empty(&dd->fifo_list[READ]);
-	const int writes = !list_empty(&dd->fifo_list[WRITE]);
+	struct as_data *ad = q->elevator.elevator_data;
+	struct as_rq *arq;
+	const int reads = !list_empty(&ad->fifo_list[READ]);
+	const int writes = !list_empty(&ad->fifo_list[WRITE]);
 	static unsigned long last_read_id;
 	static unsigned long last_finished;
 
 	if (!(reads || writes))
 		return 0;
 
-	if (as_batch_expired(dd)) {
-		if (dd->batch_data_dir == READ)
+	if (as_batch_expired(ad)) {
+		if (ad->batch_data_dir == READ)
 			ant_stats.expired_read_batches++;
 		else
 			ant_stats.expired_write_batches++;
 	}
 
-	if (!(reads && writes && as_batch_expired(dd))) {
+	if (!(reads && writes && as_batch_expired(ad))) {
 		/*
 		 * batch is still running or no reads or no writes
 		 */
-		drq = dd->next_drq[dd->batch_data_dir];
+		arq = ad->next_arq[ad->batch_data_dir];
 
-		if (dd->batch_data_dir == READ && dd->antic_expire) {
-			if (as_fifo_expired(dd, READ))
+		if (ad->batch_data_dir == READ && ad->antic_expire) {
+			if (as_fifo_expired(ad, READ))
 				goto fifo_expired;
 
-			if (can_start_anticipation(dd, drq)) {
+			if (can_start_anticipation(ad, arq)) {
 				unsigned long timeout;
 
-				if (dd->antic_status == ANTIC_OFF) {
+				if (ad->antic_status == ANTIC_OFF) {
 					ant_stats.anticipate_starts++;
-					dd->antic_start = jiffies;
+					ad->antic_start = jiffies;
 				}
-				timeout = dd->antic_start + dd->antic_expire;
+				timeout = ad->antic_start + ad->antic_expire;
 #if 0
 				/* FIX THIS!!! */
 				timeout = min(timeout,
-						dd->current_batch_expires);
+						ad->current_batch_expires);
 #endif
-				mod_timer(&dd->antic_timer, timeout);
+				mod_timer(&ad->antic_timer, timeout);
 				
-				dd->antic_status = ANTIC_WAIT;
+				ad->antic_status = ANTIC_WAIT;
 				blk_plug_device(q);
 
 				return 0;
@@ -924,11 +924,11 @@ static int as_dispatch_request(struct re
 
 		}
 
-		if (drq) {
+		if (arq) {
 			/* we have a "next request" */
 			if (reads && !writes)
-				dd->current_batch_expires =
-					jiffies + dd->batch_expire[READ];
+				ad->current_batch_expires =
+					jiffies + ad->batch_expire[READ];
 			goto dispatch_request;
 		}
 	}
@@ -939,18 +939,18 @@ static int as_dispatch_request(struct re
 	 */
 
 	if (reads) {
-		BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
+		BUG_ON(RB_EMPTY(&ad->sort_list[READ]));
 
-		if (writes && dd->batch_data_dir == READ)
+		if (writes && ad->batch_data_dir == READ)
 			/*
 			 * Last batch was a read, switch to writes
 			 */
 			goto dispatch_writes;
 
-		dd->batch_data_dir = READ;
-		drq = dd->next_drq[dd->batch_data_dir];
-		dd->current_batch_expires = jiffies +
-			dd->batch_expire[dd->batch_data_dir];
+		ad->batch_data_dir = READ;
+		arq = ad->next_arq[ad->batch_data_dir];
+		ad->current_batch_expires = jiffies +
+			ad->batch_expire[ad->batch_data_dir];
 		goto dispatch_request;
 	}
 
@@ -960,12 +960,12 @@ static int as_dispatch_request(struct re
 
 	if (writes) {
 dispatch_writes:
-		BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));
+		BUG_ON(RB_EMPTY(&ad->sort_list[WRITE]));
 
-		dd->batch_data_dir = WRITE;
-		drq = dd->next_drq[dd->batch_data_dir];
-		dd->current_batch_expires = jiffies +
-			dd->batch_expire[dd->batch_data_dir];
+		ad->batch_data_dir = WRITE;
+		arq = ad->next_arq[ad->batch_data_dir];
+		ad->current_batch_expires = jiffies +
+			ad->batch_expire[ad->batch_data_dir];
 		goto dispatch_request;
 	}
 
@@ -978,27 +978,27 @@ dispatch_request:
 	 * If a request has expired, service it.
 	 */
 
-	if (as_fifo_expired(dd, dd->batch_data_dir)) {
+	if (as_fifo_expired(ad, ad->batch_data_dir)) {
 fifo_expired:
-		if (dd->batch_data_dir == WRITE)
+		if (ad->batch_data_dir == WRITE)
 			ant_stats.expired_fifo_writes++;
 		else
 			ant_stats.expired_fifo_reads++;
-		drq = list_entry_fifo(dd->fifo_list[dd->batch_data_dir].next);
-		BUG_ON(drq == NULL);
+		arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
+		BUG_ON(arq == NULL);
 	}
 
 	/*
-	 * drq is the selected appropriate request.
+	 * arq is the selected appropriate request.
 	 */
-	dd->antic_status = ANTIC_OFF;
-	as_move_request(dd, drq);
+	ad->antic_status = ANTIC_OFF;
+	as_move_request(ad, arq);
 
-	if (dd->batch_data_dir == READ) {
-		if (last_read_id != drq->request_id) {
-			last_read_id = drq->request_id;
+	if (ad->batch_data_dir == READ) {
+		if (last_read_id != arq->request_id) {
+			last_read_id = arq->request_id;
 		}
-		last_finished = drq->request->sector + drq->request->nr_sectors;
+		last_finished = arq->request->sector + arq->request->nr_sectors;
 	}
 	
 	return 1;
@@ -1006,14 +1006,14 @@ fifo_expired:
 
 static struct request *as_next_request(request_queue_t *q)
 {
-	struct as_data *dd = q->elevator.elevator_data;
+	struct as_data *ad = q->elevator.elevator_data;
 	struct request *rq = NULL;
 
 	/*
 	 * if there are still requests on the dispatch queue, grab the first one
 	 */
-	if (!list_empty(dd->dispatch) || as_dispatch_request(q))
-		rq = list_entry_rq(dd->dispatch->next);
+	if (!list_empty(ad->dispatch) || as_dispatch_request(q))
+		rq = list_entry_rq(ad->dispatch->next);
 	return rq;
 }
 
@@ -1021,47 +1021,47 @@ static void
 as_insert_request(request_queue_t *q, struct request *rq,
 			struct list_head *insert_here)
 {
-	struct as_data *dd = q->elevator.elevator_data;
-	struct as_rq *drq = RQ_DATA(rq);
+	struct as_data *ad = q->elevator.elevator_data;
+	struct as_rq *arq = RQ_DATA(rq);
 
 	if (unlikely(rq->flags & REQ_HARDBARRIER)) {
-		DL_INVALIDATE_HASH(dd);
+		AS_INVALIDATE_HASH(ad);
 		q->last_merge = NULL;
 	}
 
 	if (unlikely(!blk_fs_request(rq))) {
 		if (!insert_here)
-			insert_here = dd->dispatch->prev;
+			insert_here = ad->dispatch->prev;
 
 		list_add(&rq->queuelist, insert_here);
 		
-		if (rq_data_dir(rq) == READ && dd->antic_status != ANTIC_OFF) {
-			del_timer(&dd->antic_timer);
-			dd->antic_status = ANTIC_FINISHED;
+		if (rq_data_dir(rq) == READ && ad->antic_status != ANTIC_OFF) {
+			del_timer(&ad->antic_timer);
+			ad->antic_status = ANTIC_FINISHED;
 			blk_remove_plug(q);
-			schedule_work(&dd->antic_work);
+			schedule_work(&ad->antic_work);
 		}
 		
 		return;
 	}
 
 	if (rq_mergeable(rq)) {
-		as_add_drq_hash(dd, drq);
+		as_add_arq_hash(ad, arq);
 
 		if (!q->last_merge)
 			q->last_merge = &rq->queuelist;
 	}
 
-	as_add_request(dd, drq);
+	as_add_request(ad, arq);
 }
 
 static int as_queue_empty(request_queue_t *q)
 {
-	struct as_data *dd = q->elevator.elevator_data;
+	struct as_data *ad = q->elevator.elevator_data;
 
-	if (!list_empty(&dd->fifo_list[WRITE])
-		|| !list_empty(&dd->fifo_list[READ])
-		|| !list_empty(dd->dispatch) )
+	if (!list_empty(&ad->fifo_list[WRITE])
+		|| !list_empty(&ad->fifo_list[READ])
+		|| !list_empty(ad->dispatch) )
 			return 0;
 
 	return 1;
@@ -1076,12 +1076,12 @@ static int as_queue_empty(request_queue_
  */
 static int as_queue_notready(request_queue_t *q)
 {
-	struct as_data *dd = q->elevator.elevator_data;
+	struct as_data *ad = q->elevator.elevator_data;
 
-	if (!list_empty(dd->dispatch))
+	if (!list_empty(ad->dispatch))
 		return 0;
 	
-	if (dd->antic_status == ANTIC_WAIT)
+	if (ad->antic_status == ANTIC_WAIT)
 		return 1;
 				
 	if (!as_dispatch_request(q))
@@ -1093,11 +1093,11 @@ static int as_queue_notready(request_que
 static struct request *
 as_former_request(request_queue_t *q, struct request *rq)
 {
-	struct as_rq *drq = RQ_DATA(rq);
-	struct rb_node *rbprev = rb_prev(&drq->rb_node);
+	struct as_rq *arq = RQ_DATA(rq);
+	struct rb_node *rbprev = rb_prev(&arq->rb_node);
 
 	if (rbprev)
-		return rb_entry_drq(rbprev)->request;
+		return rb_entry_arq(rbprev)->request;
 
 	return NULL;
 }
@@ -1105,24 +1105,24 @@ as_former_request(request_queue_t *q, st
 static struct request *
 as_latter_request(request_queue_t *q, struct request *rq)
 {
-	struct as_rq *drq = RQ_DATA(rq);
-	struct rb_node *rbnext = rb_next(&drq->rb_node);
+	struct as_rq *arq = RQ_DATA(rq);
+	struct rb_node *rbnext = rb_next(&arq->rb_node);
 
 	if (rbnext)
-		return rb_entry_drq(rbnext)->request;
+		return rb_entry_arq(rbnext)->request;
 
 	return NULL;
 }
 
 static void as_exit(request_queue_t *q, elevator_t *e)
 {
-	struct as_data *dd = e->elevator_data;
-	struct as_rq *drq;
+	struct as_data *ad = e->elevator_data;
+	struct as_rq *arq;
 	struct request *rq;
 	int i;
 
-	BUG_ON(!list_empty(&dd->fifo_list[READ]));
-	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
+	BUG_ON(!list_empty(&ad->fifo_list[READ]));
+	BUG_ON(!list_empty(&ad->fifo_list[WRITE]));
 
 	for (i = READ; i <= WRITE; i++) {
 		struct request_list *rl = &q->rq[i];
@@ -1131,65 +1131,65 @@ static void as_exit(request_queue_t *q, 
 		list_for_each(entry, &rl->free) {
 			rq = list_entry_rq(entry);
 
-			if ((drq = RQ_DATA(rq)) == NULL)
+			if ((arq = RQ_DATA(rq)) == NULL)
 				continue;
 
 			rq->elevator_private = NULL;
-			kmem_cache_free(drq_pool, drq);
+			kmem_cache_free(arq_pool, arq);
 		}
 	}
 
-	kfree(dd->hash);
-	kfree(dd);
+	kfree(ad->hash);
+	kfree(ad);
 }
 
 /*
- * initialize elevator private data (as_data), and alloc a drq for
+ * initialize elevator private data (as_data), and alloc an arq for
  * each request on the free lists
  */
 static int as_init(request_queue_t *q, elevator_t *e)
 {
-	struct as_data *dd;
-	struct as_rq *drq;
+	struct as_data *ad;
+	struct as_rq *arq;
 	struct request *rq;
 	int i, ret = 0;
 
-	if (!drq_pool)
+	if (!arq_pool)
 		return -ENOMEM;
 
-	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
-	if (!dd)
+	ad = kmalloc(sizeof(*ad), GFP_KERNEL);
+	if (!ad)
 		return -ENOMEM;
-	memset(dd, 0, sizeof(*dd));
+	memset(ad, 0, sizeof(*ad));
 
-	dd->hash = kmalloc(sizeof(struct list_head)*DL_HASH_ENTRIES,GFP_KERNEL);
-	if (!dd->hash) {
-		kfree(dd);
+	ad->hash = kmalloc(sizeof(struct list_head)*AS_HASH_ENTRIES,GFP_KERNEL);
+	if (!ad->hash) {
+		kfree(ad);
 		return -ENOMEM;
 	}
 
 	/* anticipatory scheduling helpers */
-	dd->antic_timer.function = as_anticipate_timeout;
-	dd->antic_timer.data = (unsigned long)q;
-	init_timer(&dd->antic_timer);
-	INIT_WORK(&dd->antic_work, as_anticipate_work, q);
-
-	for (i = 0; i < DL_HASH_ENTRIES; i++)
-		INIT_LIST_HEAD(&dd->hash[i]);
-
-	INIT_LIST_HEAD(&dd->fifo_list[READ]);
-	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
-	dd->sort_list[READ] = RB_ROOT;
-	dd->sort_list[WRITE] = RB_ROOT;
-	dd->dispatch = &q->queue_head;
-	dd->fifo_expire[READ] = read_expire;
-	dd->fifo_expire[WRITE] = write_expire;
-	dd->hash_valid_count = 1;
-	dd->front_merges = 1;
-	dd->antic_expire = antic_expire;
-	dd->batch_expire[READ] = read_batch_expire;
-	dd->batch_expire[WRITE] = write_batch_expire;
-	e->elevator_data = dd;
+	ad->antic_timer.function = as_anticipate_timeout;
+	ad->antic_timer.data = (unsigned long)q;
+	init_timer(&ad->antic_timer);
+	INIT_WORK(&ad->antic_work, as_anticipate_work, q);
+
+	for (i = 0; i < AS_HASH_ENTRIES; i++)
+		INIT_LIST_HEAD(&ad->hash[i]);
+
+	INIT_LIST_HEAD(&ad->fifo_list[READ]);
+	INIT_LIST_HEAD(&ad->fifo_list[WRITE]);
+	ad->sort_list[READ] = RB_ROOT;
+	ad->sort_list[WRITE] = RB_ROOT;
+	ad->dispatch = &q->queue_head;
+	ad->fifo_expire[READ] = read_expire;
+	ad->fifo_expire[WRITE] = write_expire;
+	ad->hash_valid_count = 1;
+	ad->front_merges = 1;
+	ad->antic_expire = antic_expire;
+	ad->batch_expire[READ] = read_batch_expire;
+	ad->batch_expire[WRITE] = write_batch_expire;
+	e->elevator_data = ad;
 
 	for (i = READ; i <= WRITE; i++) {
 		struct request_list *rl = &q->rq[i];
@@ -1198,18 +1198,18 @@ static int as_init(request_queue_t *q, e
 		list_for_each(entry, &rl->free) {
 			rq = list_entry_rq(entry);
 
-			drq = kmem_cache_alloc(drq_pool, GFP_KERNEL);
-			if (!drq) {
+			arq = kmem_cache_alloc(arq_pool, GFP_KERNEL);
+			if (!arq) {
 				ret = -ENOMEM;
 				break;
 			}
 
-			memset(drq, 0, sizeof(*drq));
-			INIT_LIST_HEAD(&drq->fifo);
-			INIT_LIST_HEAD(&drq->hash);
-			RB_CLEAR(&drq->rb_node);
-			drq->request = rq;
-			rq->elevator_private = drq;
+			memset(arq, 0, sizeof(*arq));
+			INIT_LIST_HEAD(&arq->fifo);
+			INIT_LIST_HEAD(&arq->hash);
+			RB_CLEAR(&arq->rb_node);
+			arq->request = rq;
+			rq->elevator_private = arq;
 		}
 	}
 
@@ -1244,20 +1244,20 @@ as_var_store(unsigned long *var, const c
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR)					\
-static ssize_t __FUNC(struct as_data *dd, char *page)		\
+static ssize_t __FUNC(struct as_data *ad, char *page)		\
 {									\
 	return as_var_show(__VAR, (page));			\
 }
-SHOW_FUNCTION(as_readexpire_show, dd->fifo_expire[READ]);
-SHOW_FUNCTION(as_writeexpire_show, dd->fifo_expire[WRITE]);
-SHOW_FUNCTION(as_frontmerges_show, dd->front_merges);
-SHOW_FUNCTION(as_anticexpire_show, dd->antic_expire);
-SHOW_FUNCTION(as_read_batchexpire_show, dd->batch_expire[READ]);
-SHOW_FUNCTION(as_write_batchexpire_show, dd->batch_expire[WRITE]);
+SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[READ]);
+SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[WRITE]);
+SHOW_FUNCTION(as_frontmerges_show, ad->front_merges);
+SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
+SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[READ]);
+SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[WRITE]);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
-static ssize_t __FUNC(struct as_data *dd, const char *page, size_t count)	\
+static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count)	\
 {									\
 	int ret = as_var_store(__PTR, (page), count);		\
 	if (*(__PTR) < (MIN))						\
@@ -1266,14 +1266,14 @@ static ssize_t __FUNC(struct as_data *dd
 		*(__PTR) = (MAX);					\
 	return ret;							\
 }
-STORE_FUNCTION(as_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX);
-STORE_FUNCTION(as_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX);
-STORE_FUNCTION(as_frontmerges_store, &dd->front_merges, 0, 1);
-STORE_FUNCTION(as_anticexpire_store, &dd->antic_expire, 0, INT_MAX);
+STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[READ], 0, INT_MAX);
+STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[WRITE], 0, INT_MAX);
+STORE_FUNCTION(as_frontmerges_store, &ad->front_merges, 0, 1);
+STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
 STORE_FUNCTION(as_read_batchexpire_store,
-			&dd->batch_expire[READ], 0, INT_MAX);
+			&ad->batch_expire[READ], 0, INT_MAX);
 STORE_FUNCTION(as_write_batchexpire_store,
-			&dd->batch_expire[WRITE], 0, INT_MAX);
+			&ad->batch_expire[WRITE], 0, INT_MAX);
 #undef STORE_FUNCTION
 
 static struct as_fs_entry as_readexpire_entry = {
@@ -1356,10 +1356,10 @@ struct kobj_type as_ktype = {
 
 static int __init as_slab_setup(void)
 {
-	drq_pool = kmem_cache_create("as_drq", sizeof(struct as_rq),
+	arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
 				     0, 0, NULL, NULL);
 
-	if (!drq_pool)
+	if (!arq_pool)
 		panic("as: can't init slab pool\n");
 
 	return 0;

_
