arq->state = AS_RQ_NEW;
 
        if (rq_data_dir(arq->request) == READ
-                       || (arq->request->flags & REQ_RW_SYNC))
+                       || (arq->request->cmd_flags & REQ_RW_SYNC))
                arq->is_sync = 1;
        else
                arq->is_sync = 0;
 
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);
 
-               if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+               if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }
 
-       rq->flags &= ~REQ_STARTED;
+       rq->cmd_flags &= ~REQ_STARTED;
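+       /* the request will be dispatched again, so it must no longer look started */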
 
        elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
        switch (where) {
        case ELEVATOR_INSERT_FRONT:
-               rq->flags |= REQ_SOFTBARRIER;
+               rq->cmd_flags |= REQ_SOFTBARRIER;
 
                list_add(&rq->queuelist, &q->queue_head);
                break;
 
        case ELEVATOR_INSERT_BACK:
-               rq->flags |= REQ_SOFTBARRIER;
+               rq->cmd_flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
 
        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
-               rq->flags |= REQ_SORTED;
+               rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (q->last_merge == NULL && rq_mergeable(rq))
                        q->last_merge = rq;
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
-               rq->flags |= REQ_SOFTBARRIER;
+               rq->cmd_flags |= REQ_SOFTBARRIER;
 
                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                       int plug)
 {
        if (q->ordcolor)
-               rq->flags |= REQ_ORDERED_COLOR;
+               rq->cmd_flags |= REQ_ORDERED_COLOR;
 
-       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+       if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
-       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+       } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;
 
        if (plug)
        int ret;
 
        while ((rq = __elv_next_request(q)) != NULL) {
-               if (!(rq->flags & REQ_STARTED)) {
+               if (!(rq->cmd_flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;
 
                        /*
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
-                       rq->flags |= REQ_STARTED;
+                       rq->cmd_flags |= REQ_STARTED;
                        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
                }
 
                        q->boundary_rq = NULL;
                }
 
-               if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
+               if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;
 
                ret = q->prep_rq_fn(q, rq);
                                nr_bytes = rq->data_len;
 
                        blkdev_dequeue_request(rq);
-                       rq->flags |= REQ_QUIET;
+                       rq->cmd_flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq, 0);
                } else {
 
        if (rq == &q->post_flush_rq)
                return QUEUE_ORDSEQ_POSTFLUSH;
 
-       if ((rq->flags & REQ_ORDERED_COLOR) ==
-           (q->orig_bar_rq->flags & REQ_ORDERED_COLOR))
+       if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
+           (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
                return QUEUE_ORDSEQ_DRAIN;
        else
                return QUEUE_ORDSEQ_DONE;
                end_io = post_flush_end_io;
        }
 
+       rq->cmd_flags = REQ_HARDBARRIER;
        rq_init(q, rq);
-       rq->flags = REQ_HARDBARRIER;
        rq->elevator_private = NULL;
        rq->rq_disk = q->bar_rq.rq_disk;
        rq->rl = NULL;
        blkdev_dequeue_request(rq);
        q->orig_bar_rq = rq;
        rq = &q->bar_rq;
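+       /*
+        * bar_rq is reused for every barrier, so start from a clean flag set
+        * and rebuild the data direction and FUA bits from the original rq
+        */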
+       rq->cmd_flags = 0;
        rq_init(q, rq);
-       rq->flags = bio_data_dir(q->orig_bar_rq->bio);
-       rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+       if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+               rq->cmd_flags |= REQ_RW;
+       rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
        rq->elevator_private = NULL;
        rq->rl = NULL;
        init_request_from_bio(rq, q->orig_bar_rq->bio);
        }
 
        list_del_init(&rq->queuelist);
-       rq->flags &= ~REQ_QUEUED;
+       rq->cmd_flags &= ~REQ_QUEUED;
        rq->tag = -1;
 
        if (unlikely(bqt->tag_index[tag] == NULL))
        struct blk_queue_tag *bqt = q->queue_tags;
        int tag;
 
-       if (unlikely((rq->flags & REQ_QUEUED))) {
+       if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
                printk(KERN_ERR 
                       "%s: request %p for device [%s] already tagged %d",
                       __FUNCTION__, rq,
 
        __set_bit(tag, bqt->tag_map);
 
-       rq->flags |= REQ_QUEUED;
+       rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blkdev_dequeue_request(rq);
                        printk(KERN_ERR
                               "%s: bad tag found on list\n", __FUNCTION__);
                        list_del_init(&rq->queuelist);
-                       rq->flags &= ~REQ_QUEUED;
+                       rq->cmd_flags &= ~REQ_QUEUED;
                } else
                        blk_queue_end_tag(q, rq);
 
-               rq->flags &= ~REQ_STARTED;
+               rq->cmd_flags &= ~REQ_STARTED;
                __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
        }
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
 
-static const char * const rq_flags[] = {
-       "REQ_RW",
-       "REQ_FAILFAST",
-       "REQ_SORTED",
-       "REQ_SOFTBARRIER",
-       "REQ_HARDBARRIER",
-       "REQ_FUA",
-       "REQ_CMD",
-       "REQ_NOMERGE",
-       "REQ_STARTED",
-       "REQ_DONTPREP",
-       "REQ_QUEUED",
-       "REQ_ELVPRIV",
-       "REQ_PC",
-       "REQ_BLOCK_PC",
-       "REQ_SENSE",
-       "REQ_FAILED",
-       "REQ_QUIET",
-       "REQ_SPECIAL",
-       "REQ_DRIVE_CMD",
-       "REQ_DRIVE_TASK",
-       "REQ_DRIVE_TASKFILE",
-       "REQ_PREEMPT",
-       "REQ_PM_SUSPEND",
-       "REQ_PM_RESUME",
-       "REQ_PM_SHUTDOWN",
-       "REQ_ORDERED_COLOR",
-};
-
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
        int bit;
 
-       printk("%s: dev %s: flags = ", msg,
-               rq->rq_disk ? rq->rq_disk->disk_name : "?");
-       bit = 0;
-       do {
-               if (rq->flags & (1 << bit))
-                       printk("%s ", rq_flags[bit]);
-               bit++;
-       } while (bit < __REQ_NR_BITS);
+       printk("%s: dev %s: type=%x, flags=%x\n", msg,
+               rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+               rq->cmd_flags);
 
        printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
                                                       rq->nr_sectors,
                                                       rq->current_nr_sectors);
        printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
 
-       if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+       if (blk_pc_request(rq)) {
                printk("cdb: ");
                for (bit = 0; bit < sizeof(rq->cmd); bit++)
                        printk("%02x ", rq->cmd[bit]);
        int nr_phys_segs = bio_phys_segments(q, bio);
 
        if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-               req->flags |= REQ_NOMERGE;
+               req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
 
        if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
            || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-               req->flags |= REQ_NOMERGE;
+               req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
                max_sectors = q->max_sectors;
 
        if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-               req->flags |= REQ_NOMERGE;
+               req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
 
 
        if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-               req->flags |= REQ_NOMERGE;
+               req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
 
 static inline void blk_free_request(request_queue_t *q, struct request *rq)
 {
-       if (rq->flags & REQ_ELVPRIV)
+       if (rq->cmd_flags & REQ_ELVPRIV)
                elv_put_request(q, rq);
        mempool_free(rq, q->rq.rq_pool);
 }
                return NULL;
 
        /*
-        * first three bits are identical in rq->flags and bio->bi_rw,
+        * first three bits are identical in rq->cmd_flags and bio->bi_rw,
         * see bio.h and blkdev.h
         */
-       rq->flags = rw;
+       rq->cmd_flags = rw;
 
        if (priv) {
                if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
                        mempool_free(rq, q->rq.rq_pool);
                        return NULL;
                }
-               rq->flags |= REQ_ELVPRIV;
+               rq->cmd_flags |= REQ_ELVPRIV;
        }
 
        return rq;
         * must not attempt merges on this) and that it acts as a soft
         * barrier
         */
-       rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
+       rq->cmd_type = REQ_TYPE_SPECIAL;
+       rq->cmd_flags |= REQ_SOFTBARRIER;
 
        rq->special = data;
 
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
        rq->rq_disk = bd_disk;
-       rq->flags |= REQ_NOMERGE;
+       rq->cmd_flags |= REQ_NOMERGE;
        rq->end_io = done;
        WARN_ON(irqs_disabled());
        spin_lock_irq(q->queue_lock);
         */
        if (rl) {
                int rw = rq_data_dir(req);
-               int priv = req->flags & REQ_ELVPRIV;
+               int priv = req->cmd_flags & REQ_ELVPRIV;
 
                BUG_ON(!list_empty(&req->queuelist));
 
 
 static void init_request_from_bio(struct request *req, struct bio *bio)
 {
-       req->flags |= REQ_CMD;
+       req->cmd_type = REQ_TYPE_FS;
 
        /*
         * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
         */
        if (bio_rw_ahead(bio) || bio_failfast(bio))
-               req->flags |= REQ_FAILFAST;
+               req->cmd_flags |= REQ_FAILFAST;
 
        /*
         * REQ_BARRIER implies no merging, but lets make it explicit
         */
        if (unlikely(bio_barrier(bio)))
-               req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+               req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
        if (bio_sync(bio))
-               req->flags |= REQ_RW_SYNC;
+               req->cmd_flags |= REQ_RW_SYNC;
 
        req->errors = 0;
        req->hard_sector = req->sector = bio->bi_sector;
                req->errors = 0;
 
        if (!uptodate) {
-               if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
+               if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
                        printk("end_request: I/O error, dev %s, sector %llu\n",
                                req->rq_disk ? req->rq_disk->disk_name : "?",
                                (unsigned long long)req->sector);
 
 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 {
-       /* first two bits are identical in rq->flags and bio->bi_rw */
-       rq->flags |= (bio->bi_rw & 3);
+       /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
+       rq->cmd_flags |= (bio->bi_rw & 3);
 
        rq->nr_phys_segments = bio_phys_segments(q, bio);
        rq->nr_hw_segments = bio_hw_segments(q, bio);
 
        rq->sense = sense;
        rq->sense_len = 0;
 
-       rq->flags |= REQ_BLOCK_PC;
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
        bio = rq->bio;
 
        /*
        memset(sense, 0, sizeof(sense));
        rq->sense = sense;
        rq->sense_len = 0;
-       rq->flags |= REQ_BLOCK_PC;
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
        blk_execute_rq(q, disk, rq, 0);
 
        int err;
 
        rq = blk_get_request(q, WRITE, __GFP_WAIT);
-       rq->flags |= REQ_BLOCK_PC;
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->data = NULL;
        rq->data_len = 0;
        rq->timeout = BLK_DEFAULT_TIMEOUT;
 
        if (usage_count == 0) {
                printk("warning: usage count=0, current_req=%p exiting\n",
                       current_req);
-               printk("sect=%ld flags=%lx\n", (long)current_req->sector,
-                      current_req->flags);
+               printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
+                      current_req->cmd_type, current_req->cmd_flags);
                return;
        }
        if (test_bit(0, &fdc_busy)) {
 
                struct nbd_device *lo;
 
                blkdev_dequeue_request(req);
-               dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%lx)\n",
-                               req->rq_disk->disk_name, req, req->flags);
+               dprintk(DBG_BLKDEV, "%s: request %p: dequeued (type=%x)\n",
+                               req->rq_disk->disk_name, req, req->cmd_type);
 
-               if (!(req->flags & REQ_CMD))
+               if (!blk_fs_request(req))
                        goto error_out;
 
                lo = req->rq_disk->private_data;
        switch (cmd) {
        case NBD_DISCONNECT:
                printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
-               sreq.flags = REQ_SPECIAL;
+               sreq.cmd_type = REQ_TYPE_SPECIAL;
                nbd_cmd(&sreq) = NBD_CMD_DISC;
                /*
                 * Set these to sane values in case server implementation
 
 
 static enum action do_pd_io_start(void)
 {
-       if (pd_req->flags & REQ_SPECIAL) {
+       if (blk_special_request(pd_req)) {
                phase = pd_special;
                return pd_special();
        }
 
        rq->sense = sense;
        memset(sense, 0, sizeof(sense));
        rq->sense_len = 0;
-       rq->flags |= REQ_BLOCK_PC | REQ_HARDBARRIER;
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
+       rq->cmd_flags |= REQ_HARDBARRIER;
        if (cgc->quiet)
-               rq->flags |= REQ_QUIET;
+               rq->cmd_flags |= REQ_QUIET;
        memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
        if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
                memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
        rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
 
        rq->ref_count++;
-       rq->flags |= REQ_NOMERGE;
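+       /* REQ_NOMERGE is redundant here: rq_mergeable() only considers fs requests */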
        rq->waiting = &wait;
        rq->end_io = blk_end_sync_rq;
        elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
 
                int res = 0;
                int retry;
 
-               if (!(req->flags & REQ_CMD)) {
+               if (!blk_fs_request(req)) {
                        end_request(req, 0);
                        continue;
                }
 
                rq->cmd[9] = 0xf8;
 
                rq->cmd_len = 12;
-               rq->flags |= REQ_BLOCK_PC;
+               rq->cmd_type = REQ_TYPE_BLOCK_PC;
                rq->timeout = 60 * HZ;
                bio = rq->bio;
 
 
                }
 
                /* WTF??? */
-               if (!(req->flags & REQ_CMD))
+               if (!blk_fs_request(req)) {
+                       end_request(req, 0);
                        continue;
+               }
                if (rq_data_dir(req) == WRITE) {
                        end_request(req, 0);
                        continue;
 
 {
        int log = 0;
 
-       if (!sense || !rq || (rq->flags & REQ_QUIET))
+       if (!sense || !rq || (rq->cmd_flags & REQ_QUIET))
                return 0;
 
        switch (sense->sense_key) {
        struct cdrom_info *cd = drive->driver_data;
 
        ide_init_drive_cmd(rq);
-       rq->flags = REQ_PC;
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->rq_disk = cd->disk;
 }
 
        rq->cmd[0] = GPCMD_REQUEST_SENSE;
        rq->cmd[4] = rq->data_len = 18;
 
-       rq->flags = REQ_SENSE;
+       rq->cmd_type = REQ_TYPE_SENSE;
 
        /* NOTE! Save the failed command in "rq->buffer" */
        rq->buffer = (void *) failed_command;
        struct request *rq = HWGROUP(drive)->rq;
        int nsectors = rq->hard_cur_sectors;
 
-       if ((rq->flags & REQ_SENSE) && uptodate) {
+       if (blk_sense_request(rq) && uptodate) {
                /*
-                * For REQ_SENSE, "rq->buffer" points to the original failed
-                * request
+                * For REQ_TYPE_SENSE, "rq->buffer" points to the original
+                * failed request
                 */
                struct request *failed = (struct request *) rq->buffer;
                struct cdrom_info *info = drive->driver_data;
                return 1;
        }
 
-       if (rq->flags & REQ_SENSE) {
+       if (blk_sense_request(rq)) {
                /* We got an error trying to get sense info
                   from the drive (probably while trying
                   to recover from a former error).  Just give up. */
 
-               rq->flags |= REQ_FAILED;
+               rq->cmd_flags |= REQ_FAILED;
                cdrom_end_request(drive, 0);
                ide_error(drive, "request sense failure", stat);
                return 1;
 
-       } else if (rq->flags & (REQ_PC | REQ_BLOCK_PC)) {
+       } else if (blk_pc_request(rq)) {
                /* All other functions, except for READ. */
                unsigned long flags;
 
                 * if we have an error, pass back CHECK_CONDITION as the
                 * scsi status byte
                 */
-               if ((rq->flags & REQ_BLOCK_PC) && !rq->errors)
+               if (!rq->errors)
                        rq->errors = SAM_STAT_CHECK_CONDITION;
 
                /* Check for tray open. */
                        cdrom_saw_media_change (drive);
                        /*printk("%s: media changed\n",drive->name);*/
                        return 0;
-               } else if (!(rq->flags & REQ_QUIET)) {
+               } else if (!(rq->cmd_flags & REQ_QUIET)) {
                        /* Otherwise, print an error. */
                        ide_dump_status(drive, "packet command error", stat);
                }
                
-               rq->flags |= REQ_FAILED;
+               rq->cmd_flags |= REQ_FAILED;
 
                /*
                 * instead of playing games with moving completions around,
                        wait = ATAPI_WAIT_PC;
                        break;
                default:
-                       if (!(rq->flags & REQ_QUIET))
+                       if (!(rq->cmd_flags & REQ_QUIET))
                                printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n", rq->cmd[0]);
                        wait = 0;
                        break;
                if (rq->current_nr_sectors > 0) {
                        printk (KERN_ERR "%s: cdrom_read_intr: data underrun (%d blocks)\n",
                                drive->name, rq->current_nr_sectors);
-                       rq->flags |= REQ_FAILED;
+                       rq->cmd_flags |= REQ_FAILED;
                        cdrom_end_request(drive, 0);
                } else
                        cdrom_end_request(drive, 1);
                        printk ("%s: cdrom_pc_intr: data underrun %d\n",
                                drive->name, pc->buflen);
                        */
-                       rq->flags |= REQ_FAILED;
+                       rq->cmd_flags |= REQ_FAILED;
                        cdrom_end_request(drive, 0);
                }
                return ide_stopped;
                rq->data += thislen;
                rq->data_len -= thislen;
 
-               if (rq->flags & REQ_SENSE)
+               if (blk_sense_request(rq))
                        rq->sense_len += thislen;
        } else {
 confused:
                        "appears confused (ireason = 0x%02x). "
                        "Trying to recover by ending request.\n",
                        drive->name, ireason);
-               rq->flags |= REQ_FAILED;
+               rq->cmd_flags |= REQ_FAILED;
                cdrom_end_request(drive, 0);
                return ide_stopped;
        }
        struct cdrom_info *info = drive->driver_data;
 
        info->dma = 0;
-       rq->flags &= ~REQ_FAILED;
+       rq->cmd_flags &= ~REQ_FAILED;
        len = rq->data_len;
 
        /* Start sending the command to the drive. */
 {
        struct request_sense sense;
        int retries = 10;
-       unsigned int flags = rq->flags;
+       unsigned int flags = rq->cmd_flags;
 
        if (rq->sense == NULL)
                rq->sense = &sense;
        do {
                int error;
                unsigned long time = jiffies;
-               rq->flags = flags;
+               rq->cmd_flags = flags;
 
                error = ide_do_drive_cmd(drive, rq, ide_wait);
                time = jiffies - time;
 
                /* FIXME: we should probably abort/retry or something 
                 * in case of failure */
-               if (rq->flags & REQ_FAILED) {
+               if (rq->cmd_flags & REQ_FAILED) {
                        /* The request failed.  Retry if it was due to a unit
                           attention status
                           (usually means media was changed). */
                }
 
                /* End of retry loop. */
-       } while ((rq->flags & REQ_FAILED) && retries >= 0);
+       } while ((rq->cmd_flags & REQ_FAILED) && retries >= 0);
 
        /* Return an error if the command failed. */
-       return (rq->flags & REQ_FAILED) ? -EIO : 0;
+       return (rq->cmd_flags & REQ_FAILED) ? -EIO : 0;
 }
 
 /*
 {
        struct cdrom_info *info = drive->driver_data;
 
-       rq->flags |= REQ_QUIET;
+       rq->cmd_flags |= REQ_QUIET;
 
        info->dma = 0;
 
                }
                info->last_block = block;
                return action;
-       } else if (rq->flags & (REQ_PC | REQ_SENSE)) {
+       } else if (rq->cmd_type == REQ_TYPE_SENSE) {
                return cdrom_do_packet_command(drive);
-       } else if (rq->flags & REQ_BLOCK_PC) {
+       } else if (blk_pc_request(rq)) {
                return cdrom_do_block_pc(drive, rq);
-       } else if (rq->flags & REQ_SPECIAL) {
+       } else if (blk_special_request(rq)) {
                /*
                 * right now this can only be a reset...
                 */
 
        req.sense = sense;
        req.cmd[0] = GPCMD_TEST_UNIT_READY;
-       req.flags |= REQ_QUIET;
+       req.cmd_flags |= REQ_QUIET;
 
 #if ! STANDARD_ATAPI
         /* the Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to 
        req.cmd[0] = GPCMD_READ_CDVD_CAPACITY;
        req.data = (char *)&capbuf;
        req.data_len = sizeof(capbuf);
-       req.flags |= REQ_QUIET;
+       req.cmd_flags |= REQ_QUIET;
 
        stat = cdrom_queue_packet_command(drive, &req);
        if (stat == 0) {
        req.sense = sense;
        req.data =  buf;
        req.data_len = buflen;
-       req.flags |= REQ_QUIET;
+       req.cmd_flags |= REQ_QUIET;
        req.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
        req.cmd[6] = trackno;
        req.cmd[7] = (buflen >> 8);
        req.timeout = cgc->timeout;
 
        if (cgc->quiet)
-               req.flags |= REQ_QUIET;
+               req.cmd_flags |= REQ_QUIET;
 
        req.sense = cgc->sense;
        cgc->stat = cdrom_queue_packet_command(drive, &req);
        int ret;
 
        cdrom_prepare_request(drive, &req);
-       req.flags = REQ_SPECIAL | REQ_QUIET;
+       req.cmd_type = REQ_TYPE_SPECIAL;
+       req.cmd_flags = REQ_QUIET;
        ret = ide_do_drive_cmd(drive, &req, ide_wait);
 
        /*
 
 static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq)
 {
-       if (rq->flags & REQ_CMD)
+       if (blk_fs_request(rq))
                return ide_cdrom_prep_fs(q, rq);
-       else if (rq->flags & REQ_BLOCK_PC)
+       else if (blk_pc_request(rq))
                return ide_cdrom_prep_pc(rq);
 
        return 0;
 
                rq->cmd[0] = WIN_FLUSH_CACHE;
 
 
-       rq->flags |= REQ_DRIVE_TASK;
+       rq->cmd_type = REQ_TYPE_ATA_TASK;
+       rq->cmd_flags |= REQ_SOFTBARRIER;
        rq->buffer = rq->cmd;
 }
 
        if (drive->special.b.set_multmode)
                return -EBUSY;
        ide_init_drive_cmd (&rq);
-       rq.flags = REQ_DRIVE_CMD;
+       rq.cmd_type = REQ_TYPE_ATA_CMD;
        drive->mult_req = arg;
        drive->special.b.set_multmode = 1;
        (void) ide_do_drive_cmd (drive, &rq, ide_wait);
 
        ide_hwif_t *hwif = HWIF(drive);
        struct scatterlist *sg = hwif->sg_table;
 
-       BUG_ON((rq->flags & REQ_DRIVE_TASKFILE) && rq->nr_sectors > 256);
+       BUG_ON((rq->cmd_type == REQ_TYPE_ATA_TASKFILE) && rq->nr_sectors > 256);
 
        ide_map_sg(drive, rq);
 
 
        /* Why does this happen? */
        if (!rq)
                return 0;
-       if (!(rq->flags & REQ_SPECIAL)) { //if (!IDEFLOPPY_RQ_CMD (rq->cmd)) {
+       if (!blk_special_request(rq)) {
                /* our real local end request function */
                ide_end_request(drive, uptodate, nsecs);
                return 0;
 
        ide_init_drive_cmd(rq);
        rq->buffer = (char *) pc;
-       rq->flags = REQ_SPECIAL;        //rq->cmd = IDEFLOPPY_PC_RQ;
+       rq->cmd_type = REQ_TYPE_SPECIAL;
        rq->rq_disk = floppy->disk;
        (void) ide_do_drive_cmd(drive, rq, ide_preempt);
 }
        pc->callback = &idefloppy_rw_callback;
        pc->rq = rq;
        pc->b_count = cmd == READ ? 0 : rq->bio->bi_size;
-       if (rq->flags & REQ_RW)
+       if (rq->cmd_flags & REQ_RW)
                set_bit(PC_WRITING, &pc->flags);
        pc->buffer = NULL;
        pc->request_transfer = pc->buffer_size = blocks * floppy->block_size;
                idefloppy_do_end_request(drive, 0, 0);
                return ide_stopped;
        }
-       if (rq->flags & REQ_CMD) {
+       if (blk_fs_request(rq)) {
                if (((long)rq->sector % floppy->bs_factor) ||
                    (rq->nr_sectors % floppy->bs_factor)) {
                        printk("%s: unsupported r/w request size\n",
                }
                pc = idefloppy_next_pc_storage(drive);
                idefloppy_create_rw_cmd(floppy, pc, rq, block);
-       } else if (rq->flags & REQ_SPECIAL) {
+       } else if (blk_special_request(rq)) {
                pc = (idefloppy_pc_t *) rq->buffer;
-       } else if (rq->flags & REQ_BLOCK_PC) {
+       } else if (blk_pc_request(rq)) {
                pc = idefloppy_next_pc_storage(drive);
                if (idefloppy_blockpc_cmd(floppy, pc, rq)) {
                        idefloppy_do_end_request(drive, 0, 0);
 
        ide_init_drive_cmd (&rq);
        rq.buffer = (char *) pc;
-       rq.flags = REQ_SPECIAL;         //      rq.cmd = IDEFLOPPY_PC_RQ;
+       rq.cmd_type = REQ_TYPE_SPECIAL;
        rq.rq_disk = floppy->disk;
 
        return ide_do_drive_cmd(drive, &rq, ide_wait);
 
 {
        int ret = 1;
 
-       BUG_ON(!(rq->flags & REQ_STARTED));
+       BUG_ON(!blk_rq_started(rq));
 
        /*
         * if failfast is set on a request, override number of sectors and
 
        spin_lock_irqsave(&ide_lock, flags);
 
-       BUG_ON(!(rq->flags & REQ_STARTED));
+       BUG_ON(!blk_rq_started(rq));
 
        /*
         * if failfast is set on a request, override number of sectors and
        rq = HWGROUP(drive)->rq;
        spin_unlock_irqrestore(&ide_lock, flags);
 
-       if (rq->flags & REQ_DRIVE_CMD) {
+       if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
                u8 *args = (u8 *) rq->buffer;
                if (rq->errors == 0)
                        rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
                        args[1] = err;
                        args[2] = hwif->INB(IDE_NSECTOR_REG);
                }
-       } else if (rq->flags & REQ_DRIVE_TASK) {
+       } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
                u8 *args = (u8 *) rq->buffer;
                if (rq->errors == 0)
                        rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
                        args[5] = hwif->INB(IDE_HCYL_REG);
                        args[6] = hwif->INB(IDE_SELECT_REG);
                }
-       } else if (rq->flags & REQ_DRIVE_TASKFILE) {
+       } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                ide_task_t *args = (ide_task_t *) rq->special;
                if (rq->errors == 0)
                        rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
                return ide_stopped;
 
        /* retry only "normal" I/O: */
-       if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) {
+       if (!blk_fs_request(rq)) {
                rq->errors = 1;
                ide_end_drive_cmd(drive, stat, err);
                return ide_stopped;
                return ide_stopped;
 
        /* retry only "normal" I/O: */
-       if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) {
+       if (!blk_fs_request(rq)) {
                rq->errors = 1;
                ide_end_drive_cmd(drive, BUSY_STAT, 0);
                return ide_stopped;
        if (hwif->sg_mapped)    /* needed by ide-scsi */
                return;
 
-       if ((rq->flags & REQ_DRIVE_TASKFILE) == 0) {
+       if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
                hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
        } else {
                sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
                struct request *rq)
 {
        ide_hwif_t *hwif = HWIF(drive);
-       if (rq->flags & REQ_DRIVE_TASKFILE) {
+       if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                ide_task_t *args = rq->special;
  
                if (!args)
                if (args->tf_out_flags.all != 0) 
                        return flagged_taskfile(drive, args);
                return do_rw_taskfile(drive, args);
-       } else if (rq->flags & REQ_DRIVE_TASK) {
+       } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
                u8 *args = rq->buffer;
                u8 sel;
  
                hwif->OUTB(sel, IDE_SELECT_REG);
                ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
                return ide_started;
-       } else if (rq->flags & REQ_DRIVE_CMD) {
+       } else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
                u8 *args = rq->buffer;
 
                if (!args)
        ide_startstop_t startstop;
        sector_t block;
 
-       BUG_ON(!(rq->flags & REQ_STARTED));
+       BUG_ON(!blk_rq_started(rq));
 
 #ifdef DEBUG
        printk("%s: start_request: current=0x%08lx\n",
        if (!drive->special.all) {
                ide_driver_t *drv;
 
-               if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK))
-                       return execute_drive_cmd(drive, rq);
-               else if (rq->flags & REQ_DRIVE_TASKFILE)
+               if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
+                   rq->cmd_type == REQ_TYPE_ATA_TASK ||
+                   rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
                        return execute_drive_cmd(drive, rq);
                else if (blk_pm_request(rq)) {
                        struct request_pm_state *pm = rq->end_io_data;
                 * We count how many times we loop here to make sure we service
                 * all drives in the hwgroup without looping for ever
                 */
-               if (drive->blocked && !blk_pm_request(rq) && !(rq->flags & REQ_PREEMPT)) {
+               if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
                        drive = drive->next ? drive->next : hwgroup->drive;
                        if (loops++ < 4 && !blk_queue_plugged(drive->queue))
                                goto again;
 void ide_init_drive_cmd (struct request *rq)
 {
        memset(rq, 0, sizeof(*rq));
-       rq->flags = REQ_DRIVE_CMD;
+       rq->cmd_type = REQ_TYPE_ATA_CMD;
        rq->ref_count = 1;
 }
 
                hwgroup->rq = NULL;
        if (action == ide_preempt || action == ide_head_wait) {
                where = ELEVATOR_INSERT_FRONT;
-               rq->flags |= REQ_PREEMPT;
+               rq->cmd_flags |= REQ_PREEMPT;
        }
        __elv_add_request(drive->queue, rq, where, 0);
        ide_do_request(hwgroup, IDE_NO_IRQ);
 
        spin_unlock(&ide_lock);
        if (!rq)
                return;
-       if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK)) {
+       if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
+           rq->cmd_type == REQ_TYPE_ATA_TASK) {
                char *args = rq->buffer;
                if (args) {
                        opcode = args[0];
                        found = 1;
                }
-       } else if (rq->flags & REQ_DRIVE_TASKFILE) {
+       } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                ide_task_t *args = rq->special;
                if (args) {
                        task_struct_t *tf = (task_struct_t *) args->tfRegister;
 
 static void idetape_init_rq(struct request *rq, u8 cmd)
 {
        memset(rq, 0, sizeof(*rq));
-       rq->flags = REQ_SPECIAL;
+       rq->cmd_type = REQ_TYPE_SPECIAL;
        rq->cmd[0] = cmd;
 }
 
                        rq->sector, rq->nr_sectors, rq->current_nr_sectors);
 #endif /* IDETAPE_DEBUG_LOG */
 
-       if ((rq->flags & REQ_SPECIAL) == 0) {
+       if (!blk_special_request(rq)) {
                /*
                 * We do not support buffer cache originated requests.
                 */
                printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
-                       "request queue (%ld)\n", drive->name, rq->flags);
+                       "request queue (%d)\n", drive->name, rq->cmd_type);
                ide_end_request(drive, 0, 0);
                return ide_stopped;
        }
        idetape_tape_t *tape = drive->driver_data;
 
 #if IDETAPE_DEBUG_BUGS
-       if (rq == NULL || (rq->flags & REQ_SPECIAL) == 0) {
+       if (rq == NULL || !blk_special_request(rq)) {
                printk (KERN_ERR "ide-tape: bug: Trying to sleep on non-valid request\n");
                return;
        }
 
 
 static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
 {
-       if (rq->flags & REQ_DRIVE_TASKFILE) {
+       if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                ide_task_t *task = rq->special;
 
                if (task->tf_out_flags.all) {
        struct request rq;
 
        memset(&rq, 0, sizeof(rq));
-       rq.flags = REQ_DRIVE_TASKFILE;
+       rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
        rq.buffer = buf;
 
        /*
                rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors;
 
                if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
-                       rq.flags |= REQ_RW;
+                       rq.cmd_flags |= REQ_RW;
        }
 
        rq.special = args;
        struct request rq;
 
        ide_init_drive_cmd(&rq);
-       rq.flags = REQ_DRIVE_TASK;
+       rq.cmd_type = REQ_TYPE_ATA_TASK;
        rq.buffer = buf;
        return ide_do_drive_cmd(drive, &rq, ide_wait);
 }
 
        memset(&rq, 0, sizeof(rq));
        memset(&rqpm, 0, sizeof(rqpm));
        memset(&args, 0, sizeof(args));
-       rq.flags = REQ_PM_SUSPEND;
+       rq.cmd_type = REQ_TYPE_PM_SUSPEND;
        rq.special = &args;
        rq.end_io_data = &rqpm;
        rqpm.pm_step = ide_pm_state_start_suspend;
        memset(&rq, 0, sizeof(rq));
        memset(&rqpm, 0, sizeof(rqpm));
        memset(&args, 0, sizeof(args));
-       rq.flags = REQ_PM_RESUME;
+       rq.cmd_type = REQ_TYPE_PM_RESUME;
        rq.special = &args;
        rq.end_io_data = &rqpm;
        rqpm.pm_step = ide_pm_state_start_resume;
 
                req->rq_disk->disk_name, (req->cmd == READ)?"read":"writ",
                cyl, head, sec, nsect, req->buffer);
 #endif
-       if (req->flags & REQ_CMD) {
+       if (blk_fs_request(req)) {
                switch (rq_data_dir(req)) {
                case READ:
                        hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr);
 
        memset(&rq->cmd, 0, BLK_MAX_CDB);
 
        rq->timeout = EMC_FAILOVER_TIMEOUT;
-       rq->flags |= (REQ_BLOCK_PC | REQ_FAILFAST | REQ_NOMERGE);
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
+       rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
 
        return rq;
 }
 
        }
 
        /* request is already processed by us, so return */
-       if (req->flags & REQ_SPECIAL) {
+       if (blk_special_request(req)) {
                osm_debug("REQ_SPECIAL already set!\n");
-               req->flags |= REQ_DONTPREP;
+               req->cmd_flags |= REQ_DONTPREP;
                return BLKPREP_OK;
        }
 
                ireq = req->special;
 
        /* do not come back here */
-       req->flags |= REQ_DONTPREP | REQ_SPECIAL;
+       req->cmd_type = REQ_TYPE_SPECIAL;
+       req->cmd_flags |= REQ_DONTPREP;
 
        return BLKPREP_OK;
 };
 
        struct mmc_queue *mq = q->queuedata;
        int ret = BLKPREP_KILL;
 
-       if (req->flags & REQ_SPECIAL) {
+       if (blk_special_request(req)) {
                /*
                 * Special commands already have the command
                 * blocks already setup in req->special.
                BUG_ON(!req->special);
 
                ret = BLKPREP_OK;
-       } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
+       } else if (blk_fs_request(req) || blk_pc_request(req)) {
                /*
                 * Block I/O requests need translating according
                 * to the protocol.
        }
 
        if (ret == BLKPREP_OK)
-               req->flags |= REQ_DONTPREP;
+               req->cmd_flags |= REQ_DONTPREP;
 
        return ret;
 }
 
        nsect = req->current_nr_sectors;
        buf = req->buffer;
 
-       if (!(req->flags & REQ_CMD))
+       if (!blk_fs_request(req))
                return 0;
 
        if (block + nsect > get_capacity(req->rq_disk))
 
        }
        cqr->retries = DIAG_MAX_RETRIES;
        cqr->buildclk = get_clock();
-       if (req->flags & REQ_FAILFAST)
+       if (req->cmd_flags & REQ_FAILFAST)
                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
        cqr->device = device;
        cqr->expires = DIAG_TIMEOUT;
 
                        recid++;
                }
        }
-       if (req->flags & REQ_FAILFAST)
+       if (req->cmd_flags & REQ_FAILFAST)
                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
        cqr->device = device;
        cqr->expires = 5 * 60 * HZ;     /* 5 minutes */
 
                        recid++;
                }
        }
-       if (req->flags & REQ_FAILFAST)
+       if (req->cmd_flags & REQ_FAILFAST)
                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
        cqr->device = device;
        cqr->expires = 5 * 60 * HZ;     /* 5 minutes */
 
       aic_dev->r_total++;
       ptr = aic_dev->r_bins;
     }
-    if(cmd->device->simple_tags && cmd->request->flags & REQ_HARDBARRIER)
+    if(cmd->device->simple_tags && cmd->request->cmd_flags & REQ_HARDBARRIER)
     {
       aic_dev->barrier_total++;
       if(scb->tag_action == MSG_ORDERED_Q_TAG)
     /* We always force TEST_UNIT_READY to untagged */
     if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags)
     {
-      if (req->flags & REQ_HARDBARRIER)
+      if (req->cmd_flags & REQ_HARDBARRIER)
       {
        if(sdptr->ordered_tags)
        {
 
        pc->buffer = buf;
        pc->c[0] = REQUEST_SENSE;
        pc->c[4] = pc->request_transfer = pc->buffer_size = SCSI_SENSE_BUFFERSIZE;
-       rq->flags = REQ_SENSE;
+       rq->cmd_type = REQ_TYPE_SENSE;
        pc->timeout = jiffies + WAIT_READY;
        /* NOTE! Save the failed packet command in "rq->buffer" */
        rq->buffer = (void *) failed_command->special;
        int errors = rq->errors;
        unsigned long flags;
 
-       if (!(rq->flags & (REQ_SPECIAL|REQ_SENSE))) {
+       if (!blk_special_request(rq) && !blk_sense_request(rq)) {
                ide_end_request(drive, uptodate, nrsecs);
                return 0;
        }
        ide_end_drive_cmd (drive, 0, 0);
-       if (rq->flags & REQ_SENSE) {
+       if (blk_sense_request(rq)) {
                idescsi_pc_t *opc = (idescsi_pc_t *) rq->buffer;
                if (log) {
                        printk ("ide-scsi: %s: wrap up check %lu, rst = ", drive->name, opc->scsi_cmd->serial_number);
        printk (KERN_INFO "sector: %ld, nr_sectors: %ld, current_nr_sectors: %d\n",rq->sector,rq->nr_sectors,rq->current_nr_sectors);
 #endif /* IDESCSI_DEBUG_LOG */
 
-       if (rq->flags & (REQ_SPECIAL|REQ_SENSE)) {
+       if (blk_sense_request(rq) || blk_special_request(rq)) {
                return idescsi_issue_pc (drive, (idescsi_pc_t *) rq->special);
        }
        blk_dump_rq_flags(rq, "ide-scsi: unsup command");
 
        ide_init_drive_cmd (rq);
        rq->special = (char *) pc;
-       rq->flags = REQ_SPECIAL;
+       rq->cmd_type = REQ_TYPE_SPECIAL;
        spin_unlock_irq(host->host_lock);
        rq->rq_disk = scsi->disk;
        (void) ide_do_drive_cmd (drive, rq, ide_end);
                 */
                printk (KERN_ERR "ide-scsi: cmd aborted!\n");
 
-               if (scsi->pc->rq->flags & REQ_SENSE)
+               if (blk_sense_request(scsi->pc->rq))
                        kfree(scsi->pc->buffer);
                kfree(scsi->pc->rq);
                kfree(scsi->pc);
        /* kill current request */
        blkdev_dequeue_request(req);
        end_that_request_last(req, 0);
-       if (req->flags & REQ_SENSE)
+       if (blk_sense_request(req))
                kfree(scsi->pc->buffer);
        kfree(scsi->pc);
        scsi->pc = NULL;
 
 
 static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt)
 {
-       SCpnt->request->rq_status = RQ_SCSI_DONE;
        PLND(("Detect done %08lx\n", (long)SCpnt))
        if (atomic_dec_and_test (&fcss))
                up(&fc_sem);
                
                SCpnt->cmd_len = COMMAND_SIZE(INQUIRY);
        
-               SCpnt->request->rq_status = RQ_SCSI_BUSY;
+               SCpnt->request->cmd_flags &= ~REQ_STARTED;
                
                SCpnt->done = pluto_detect_done;
                SCpnt->request_bufflen = 256;
        for (retry = 0; retry < 5; retry++) {
                for (i = 0; i < fcscount; i++) {
                        if (!fcs[i].fc) break;
-                       if (fcs[i].cmd.request->rq_status != RQ_SCSI_DONE) {
+                       if (!(fcs[i].cmd.request->cmd_flags & REQ_STARTED)) {
+                               fcs[i].cmd.request->cmd_flags |= REQ_STARTED;
                                disable_irq(fcs[i].fc->irq);
                                PLND(("queuecommand %d %d\n", retry, i))
                                fcp_scsi_queuecommand (&(fcs[i].cmd), 
 
 {
        struct scsi_cmnd *cmd = req->special;
 
-       req->flags &= ~REQ_DONTPREP;
+       req->cmd_flags &= ~REQ_DONTPREP;
        req->special = NULL;
 
        scsi_put_command(cmd);
        req->sense_len = 0;
        req->retries = retries;
        req->timeout = timeout;
-       req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
+       req->cmd_type = REQ_TYPE_BLOCK_PC;
+       req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
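+       /*
+        * REQ_PREEMPT takes over the role the old REQ_SPECIAL played here:
+        * it is what lets this request through while the device is quiesced
+        * (see the specials_only test in the prep function)
+        */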
 
        /*
         * head injection *required* here otherwise quiesce won't work
        req = blk_get_request(sdev->request_queue, write, gfp);
        if (!req)
                goto free_sense;
-       req->flags |= REQ_BLOCK_PC | REQ_QUIET;
+       req->cmd_type = REQ_TYPE_BLOCK_PC;
+       req->cmd_flags |= REQ_QUIET;
 
        if (use_sg)
                err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
                                        break;
                                }
                        }
-                       if (!(req->flags & REQ_QUIET)) {
+                       if (!(req->cmd_flags & REQ_QUIET)) {
                                scmd_printk(KERN_INFO, cmd,
                                            "Device not ready: ");
                                scsi_print_sense_hdr("", &sshdr);
                        scsi_end_request(cmd, 0, this_count, 1);
                        return;
                case VOLUME_OVERFLOW:
-                       if (!(req->flags & REQ_QUIET)) {
+                       if (!(req->cmd_flags & REQ_QUIET)) {
                                scmd_printk(KERN_INFO, cmd,
                                            "Volume overflow, CDB: ");
                                __scsi_print_command(cmd->cmnd);
                return;
        }
        if (result) {
-               if (!(req->flags & REQ_QUIET)) {
+               if (!(req->cmd_flags & REQ_QUIET)) {
                        scmd_printk(KERN_INFO, cmd,
                                    "SCSI error: return code = 0x%08x\n",
                                    result);
        /*
         * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
         */
-       if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
+       if (blk_pc_request(req) && !req->bio) {
                cmd->request_bufflen = req->data_len;
                cmd->request_buffer = req->data;
                req->buffer = req->data;
         * these two cases differently.  We differentiate by looking
         * at request->cmd, as this tells us the real story.
         */
-       if (req->flags & REQ_SPECIAL && req->special) {
+       if (blk_special_request(req) && req->special)
                cmd = req->special;
-       } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
-
-               if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
-                       if(specials_only == SDEV_QUIESCE ||
-                                       specials_only == SDEV_BLOCK)
+       else if (blk_pc_request(req) || blk_fs_request(req)) {
+               if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)) {
+                       if (specials_only == SDEV_QUIESCE ||
+                           specials_only == SDEV_BLOCK)
                                goto defer;
                        
                        sdev_printk(KERN_ERR, sdev,
                        goto kill;
                }
                        
-                       
                /*
                 * Now try and find a command block that we can use.
                 */
         * lock.  We hope REQ_STARTED prevents anything untoward from
         * happening now.
         */
-       if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
+       if (blk_fs_request(req) || blk_pc_request(req)) {
                int ret;
 
                /*
                /*
                 * Initialize the actual SCSI command for this request.
                 */
-               if (req->flags & REQ_BLOCK_PC) {
+               if (blk_pc_request(req)) {
                        scsi_setup_blk_pc_cmnd(cmd);
                } else if (req->rq_disk) {
                        struct scsi_driver *drv;
        /*
         * The request is now prepped, no need to come back here
         */
-       req->flags |= REQ_DONTPREP;
+       req->cmd_flags |= REQ_DONTPREP;
        return BLKPREP_OK;
 
  defer:
                if (unlikely(cmd == NULL)) {
                        printk(KERN_CRIT "impossible request in %s.\n"
                                         "please mail a stack trace to "
-                                        "linux-scsi@vger.kernel.org",
+                                        "linux-scsi@vger.kernel.org\n",
                                         __FUNCTION__);
+                       blk_dump_rq_flags(req, "impossible request");
                        BUG();
                }
                spin_lock(shost->host_lock);
 
                SCpnt->cmnd[0] = READ_6;
                SCpnt->sc_data_direction = DMA_FROM_DEVICE;
        } else {
-               printk(KERN_ERR "sd: Unknown command %lx\n", rq->flags);
-/* overkill    panic("Unknown sd command %lx\n", rq->flags); */
+               printk(KERN_ERR "sd: Unknown command %x\n", rq->cmd_flags);
                return 0;
        }
 
 static void sd_prepare_flush(request_queue_t *q, struct request *rq)
 {
        memset(rq->cmd, 0, sizeof(rq->cmd));
-       rq->flags |= REQ_BLOCK_PC;
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->timeout = SD_TIMEOUT;
        rq->cmd[0] = SYNCHRONIZE_CACHE;
        rq->cmd_len = 10;
 
                if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done
                                                  != cmd))
                {
-                       if(cmd->request->flags & REQ_CMD) {
+                       if(blk_fs_request(cmd->request)) {
                                sun3scsi_dma_setup(d, count,
                                                   rq_data_dir(cmd->request));
                                sun3_dma_setup_done = cmd;
 
 static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd,
                                    int write_flag)
 {
-       if(cmd->request->flags & REQ_CMD)
+       if(blk_fs_request(cmd->request))
                return wanted;
        else
                return 0;
 
 static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd,
                                    int write_flag)
 {
-       if(cmd->request->flags & REQ_CMD)
+       if(blk_fs_request(cmd->request))
                return wanted;
        else
                return 0;
 
        wait_queue_head_t wait[2];
 };
 
+/*
+ * request command types
+ */
+enum rq_cmd_type_bits {
+       REQ_TYPE_FS             = 1,    /* fs request */
+       REQ_TYPE_BLOCK_PC,              /* scsi command */
+       REQ_TYPE_SENSE,                 /* sense request */
+       REQ_TYPE_PM_SUSPEND,            /* suspend request */
+       REQ_TYPE_PM_RESUME,             /* resume request */
+       REQ_TYPE_PM_SHUTDOWN,           /* shutdown request */
+       REQ_TYPE_FLUSH,                 /* flush request */
+       REQ_TYPE_SPECIAL,               /* driver defined type */
+       REQ_TYPE_LINUX_BLOCK,           /* generic block layer message */
+       /*
+        * For ATA/ATAPI devices. This really doesn't belong here; ide should
+        * use REQ_TYPE_SPECIAL with rq->cmd[0] set to one of the driver-private
+        * REQ_LB opcodes to differentiate what type of request this is.
+        */
+       REQ_TYPE_ATA_CMD,
+       REQ_TYPE_ATA_TASK,
+       REQ_TYPE_ATA_TASKFILE,
+};
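+
+/*
+ * setup sketch (example only, not new API): request originators now fill
+ * in two fields where they used to OR everything into ->flags, e.g.
+ *
+ *	rq->cmd_type = REQ_TYPE_FS;
+ *	rq->cmd_flags |= REQ_NOMERGE;
+ */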
+
+/*
+ * For requests of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
+ * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
+ * SCSI cdb).
+ *
+ * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
+ * typically to differentiate REQ_TYPE_SPECIAL requests.
+ */
+enum {
+       /*
+        * just examples for now
+        */
+       REQ_LB_OP_EJECT = 0x40,         /* eject request */
+       REQ_LB_OP_FLUSH = 0x41,         /* flush device */
+};
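+
+/*
+ * usage sketch for the above (REQ_LB_OP_EJECT is just the example value
+ * defined here): a generic block layer message would be set up as
+ *
+ *	rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
+ *	rq->cmd[0] = REQ_LB_OP_EJECT;
+ *
+ * with the driver's request function switching on rq->cmd[0]
+ */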
+
+/*
+ * request type modifier bits. the first three bits match the BIO_RW* bits;
+ * keeping them identical is important (see blk_rq_bio_prep())
+ */
+enum rq_flag_bits {
+       __REQ_RW,               /* not set, read. set, write */
+       __REQ_FAILFAST,         /* no low level driver retries */
+       __REQ_SORTED,           /* elevator knows about this request */
+       __REQ_SOFTBARRIER,      /* may not be passed by ioscheduler */
+       __REQ_HARDBARRIER,      /* may not be passed by drive either */
+       __REQ_FUA,              /* forced unit access */
+       __REQ_NOMERGE,          /* don't touch this for merging */
+       __REQ_STARTED,          /* drive already may have started this one */
+       __REQ_DONTPREP,         /* don't call prep for this one */
+       __REQ_QUEUED,           /* uses queueing */
+       __REQ_ELVPRIV,          /* elevator private data attached */
+       __REQ_FAILED,           /* set if the request failed */
+       __REQ_QUIET,            /* don't worry about errors */
+       __REQ_PREEMPT,          /* set for "ide_preempt" requests */
+       __REQ_ORDERED_COLOR,    /* is before or after barrier */
+       __REQ_RW_SYNC,          /* request is sync (O_DIRECT) */
+       __REQ_NR_BITS,          /* stops here */
+};
+
+#define REQ_RW         (1 << __REQ_RW)
+#define REQ_FAILFAST   (1 << __REQ_FAILFAST)
+#define REQ_SORTED     (1 << __REQ_SORTED)
+#define REQ_SOFTBARRIER        (1 << __REQ_SOFTBARRIER)
+#define REQ_HARDBARRIER        (1 << __REQ_HARDBARRIER)
+#define REQ_FUA                (1 << __REQ_FUA)
+#define REQ_NOMERGE    (1 << __REQ_NOMERGE)
+#define REQ_STARTED    (1 << __REQ_STARTED)
+#define REQ_DONTPREP   (1 << __REQ_DONTPREP)
+#define REQ_QUEUED     (1 << __REQ_QUEUED)
+#define REQ_ELVPRIV    (1 << __REQ_ELVPRIV)
+#define REQ_FAILED     (1 << __REQ_FAILED)
+#define REQ_QUIET      (1 << __REQ_QUIET)
+#define REQ_PREEMPT    (1 << __REQ_PREEMPT)
+#define REQ_ORDERED_COLOR      (1 << __REQ_ORDERED_COLOR)
+#define REQ_RW_SYNC    (1 << __REQ_RW_SYNC)
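+
+/*
+ * cmd_flags stays an ordinary bitmask, so the familiar idioms still apply
+ * (sketch):
+ *
+ *	rq->cmd_flags |= REQ_QUIET;		set a modifier
+ *	rq->cmd_flags &= ~REQ_STARTED;		clear one
+ *	if (rq->cmd_flags & REQ_FAILFAST)	test one
+ */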
+
 #define BLK_MAX_CDB    16
 
 /*
        struct list_head queuelist;
        struct list_head donelist;
 
-       unsigned long flags;            /* see REQ_ bits below */
+       unsigned int cmd_flags;
+       enum rq_cmd_type_bits cmd_type;
 
        /* Maintain bio traversal state for part by part I/O submission.
         * hard_* are block layer internals, no driver should touch them!
 };
 
 /*
- * first three bits match BIO_RW* bits, important
- */
-enum rq_flag_bits {
-       __REQ_RW,               /* not set, read. set, write */
-       __REQ_FAILFAST,         /* no low level driver retries */
-       __REQ_SORTED,           /* elevator knows about this request */
-       __REQ_SOFTBARRIER,      /* may not be passed by ioscheduler */
-       __REQ_HARDBARRIER,      /* may not be passed by drive either */
-       __REQ_FUA,              /* forced unit access */
-       __REQ_CMD,              /* is a regular fs rw request */
-       __REQ_NOMERGE,          /* don't touch this for merging */
-       __REQ_STARTED,          /* drive already may have started this one */
-       __REQ_DONTPREP,         /* don't call prep for this one */
-       __REQ_QUEUED,           /* uses queueing */
-       __REQ_ELVPRIV,          /* elevator private data attached */
-       /*
-        * for ATA/ATAPI devices
-        */
-       __REQ_PC,               /* packet command (special) */
-       __REQ_BLOCK_PC,         /* queued down pc from block layer */
-       __REQ_SENSE,            /* sense retrival */
-
-       __REQ_FAILED,           /* set if the request failed */
-       __REQ_QUIET,            /* don't worry about errors */
-       __REQ_SPECIAL,          /* driver suplied command */
-       __REQ_DRIVE_CMD,
-       __REQ_DRIVE_TASK,
-       __REQ_DRIVE_TASKFILE,
-       __REQ_PREEMPT,          /* set for "ide_preempt" requests */
-       __REQ_PM_SUSPEND,       /* suspend request */
-       __REQ_PM_RESUME,        /* resume request */
-       __REQ_PM_SHUTDOWN,      /* shutdown request */
-       __REQ_ORDERED_COLOR,    /* is before or after barrier */
-       __REQ_RW_SYNC,          /* request is sync (O_DIRECT) */
-       __REQ_NR_BITS,          /* stops here */
-};
-
-#define REQ_RW         (1 << __REQ_RW)
-#define REQ_FAILFAST   (1 << __REQ_FAILFAST)
-#define REQ_SORTED     (1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER        (1 << __REQ_SOFTBARRIER)
-#define REQ_HARDBARRIER        (1 << __REQ_HARDBARRIER)
-#define REQ_FUA                (1 << __REQ_FUA)
-#define REQ_CMD                (1 << __REQ_CMD)
-#define REQ_NOMERGE    (1 << __REQ_NOMERGE)
-#define REQ_STARTED    (1 << __REQ_STARTED)
-#define REQ_DONTPREP   (1 << __REQ_DONTPREP)
-#define REQ_QUEUED     (1 << __REQ_QUEUED)
-#define REQ_ELVPRIV    (1 << __REQ_ELVPRIV)
-#define REQ_PC         (1 << __REQ_PC)
-#define REQ_BLOCK_PC   (1 << __REQ_BLOCK_PC)
-#define REQ_SENSE      (1 << __REQ_SENSE)
-#define REQ_FAILED     (1 << __REQ_FAILED)
-#define REQ_QUIET      (1 << __REQ_QUIET)
-#define REQ_SPECIAL    (1 << __REQ_SPECIAL)
-#define REQ_DRIVE_CMD  (1 << __REQ_DRIVE_CMD)
-#define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK)
-#define REQ_DRIVE_TASKFILE     (1 << __REQ_DRIVE_TASKFILE)
-#define REQ_PREEMPT    (1 << __REQ_PREEMPT)
-#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND)
-#define REQ_PM_RESUME  (1 << __REQ_PM_RESUME)
-#define REQ_PM_SHUTDOWN        (1 << __REQ_PM_SHUTDOWN)
-#define REQ_ORDERED_COLOR      (1 << __REQ_ORDERED_COLOR)
-#define REQ_RW_SYNC    (1 << __REQ_RW_SYNC)
-
-/*
- * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
+ * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
  * requests. Some step values could eventually be made generic.
  */
 struct request_pm_state
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_flushing(q)  ((q)->ordseq)
 
-#define blk_fs_request(rq)     ((rq)->flags & REQ_CMD)
-#define blk_pc_request(rq)     ((rq)->flags & REQ_BLOCK_PC)
-#define blk_noretry_request(rq)        ((rq)->flags & REQ_FAILFAST)
-#define blk_rq_started(rq)     ((rq)->flags & REQ_STARTED)
+#define blk_fs_request(rq)     ((rq)->cmd_type == REQ_TYPE_FS)
+#define blk_pc_request(rq)     ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
+#define blk_special_request(rq)        ((rq)->cmd_type == REQ_TYPE_SPECIAL)
+#define blk_sense_request(rq)  ((rq)->cmd_type == REQ_TYPE_SENSE)
+
+#define blk_noretry_request(rq)        ((rq)->cmd_flags & REQ_FAILFAST)
+#define blk_rq_started(rq)     ((rq)->cmd_flags & REQ_STARTED)
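+
+/*
+ * conversion rule of thumb (examples): "rq->flags & REQ_CMD" becomes
+ * blk_fs_request(rq), "rq->flags & REQ_SPECIAL" becomes
+ * blk_special_request(rq), and likewise for the other type tests above
+ */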
 
 #define blk_account_rq(rq)     (blk_rq_started(rq) && blk_fs_request(rq))
 
-#define blk_pm_suspend_request(rq)     ((rq)->flags & REQ_PM_SUSPEND)
-#define blk_pm_resume_request(rq)      ((rq)->flags & REQ_PM_RESUME)
+#define blk_pm_suspend_request(rq)     ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
+#define blk_pm_resume_request(rq)      ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
 #define blk_pm_request(rq)     \
-       ((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
+       (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
 
-#define blk_sorted_rq(rq)      ((rq)->flags & REQ_SORTED)
-#define blk_barrier_rq(rq)     ((rq)->flags & REQ_HARDBARRIER)
-#define blk_fua_rq(rq)         ((rq)->flags & REQ_FUA)
+#define blk_sorted_rq(rq)      ((rq)->cmd_flags & REQ_SORTED)
+#define blk_barrier_rq(rq)     ((rq)->cmd_flags & REQ_HARDBARRIER)
+#define blk_fua_rq(rq)         ((rq)->cmd_flags & REQ_FUA)
 
 #define list_entry_rq(ptr)     list_entry((ptr), struct request, queuelist)
 
-#define rq_data_dir(rq)                ((rq)->flags & 1)
+#define rq_data_dir(rq)                ((rq)->cmd_flags & 1)
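+
+/*
+ * rq_data_dir() relies on __REQ_RW being bit 0 of cmd_flags, mirroring the
+ * read/write bit of bio->bi_rw (blk_rq_bio_prep() copies the low bits over
+ * verbatim)
+ */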
 
 static inline int blk_queue_full(struct request_queue *q, int rw)
 {
 #define RQ_NOMERGE_FLAGS       \
        (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)       \
-       (!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
+       (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
 
 /*
  * noop, requests are automagically marked as active/inactive by I/O
  */
 #define blk_queue_tag_depth(q)         ((q)->queue_tags->busy)
 #define blk_queue_tag_queue(q)         ((q)->queue_tags->busy < (q)->queue_tags->max_depth)
-#define blk_rq_tagged(rq)              ((rq)->flags & REQ_QUEUED)
+#define blk_rq_tagged(rq)              ((rq)->cmd_flags & REQ_QUEUED)
 extern int blk_queue_start_tag(request_queue_t *, struct request *);
 extern struct request *blk_queue_find_tag(request_queue_t *, int);
 extern void blk_queue_end_tag(request_queue_t *, struct request *);
 
                                    u32 what)
 {
        struct blk_trace *bt = q->blk_trace;
-       int rw = rq->flags & 0x03;
+       int rw = rq->cmd_flags & 0x03;
 
        if (likely(!bt))
                return;
 
        struct scsi_device *sdev = cmd->device;
 
         if (blk_rq_tagged(req)) {
-               if (sdev->ordered_tags && req->flags & REQ_HARDBARRIER)
+               if (sdev->ordered_tags && req->cmd_flags & REQ_HARDBARRIER)
                        *msg++ = MSG_ORDERED_TAG;
                else
                        *msg++ = MSG_SIMPLE_TAG;