q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-       blk_queue_max_sectors(q, MAX_SECTORS);
+       blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
        blk_queue_hardsect_size(q, 512);
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
                printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
        }
 
-       q->max_sectors = q->max_hw_sectors = max_sectors;
+       if (BLK_DEF_MAX_SECTORS > max_sectors)
+               q->max_hw_sectors = q->max_sectors = max_sectors;
+       else {
+               q->max_sectors = BLK_DEF_MAX_SECTORS;
+               q->max_hw_sectors = max_sectors;
+       }
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
        /* zero is "infinity" */
-       t->max_sectors = t->max_hw_sectors =
-               min_not_zero(t->max_sectors,b->max_sectors);
+       t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+       t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
 
        t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
        t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
 static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
                            struct bio *bio)
 {
+       unsigned short max_sectors;
        int len;
 
-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+       if (unlikely(blk_pc_request(req)))
+               max_sectors = q->max_hw_sectors;
+       else
+               max_sectors = q->max_sectors;
+
+       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
                req->flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
 static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
                             struct bio *bio)
 {
+       unsigned short max_sectors;
        int len;
 
-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+       if (unlikely(blk_pc_request(req)))
+               max_sectors = q->max_hw_sectors;
+       else
+               max_sectors = q->max_sectors;
+
+
+       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
                req->flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
        struct bio *bio;
        int reading;
 
-       if (len > (q->max_sectors << 9))
+       if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !ubuf)
                return -EINVAL;
 {
        struct bio *bio;
 
-       if (len > (q->max_sectors << 9))
+       if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;
 
        if (verify_command(file, cmd))
                return -EPERM;
 
-       if (hdr->dxfer_len > (q->max_sectors << 9))
+       if (hdr->dxfer_len > (q->max_hw_sectors << 9))
                return -EIO;
 
        if (hdr->dxfer_len)
 
 static void check_for_valid_limits(struct io_restrictions *rs)
 {
        if (!rs->max_sectors)
-               rs->max_sectors = MAX_SECTORS;
+               rs->max_sectors = SAFE_MAX_SECTORS;
        if (!rs->max_phys_segments)
                rs->max_phys_segments = MAX_PHYS_SEGMENTS;
        if (!rs->max_hw_segments)
 
        req = blk_get_request(sdev->request_queue, write, gfp);
        if (!req)
                goto free_sense;
+       req->flags |= REQ_BLOCK_PC | REQ_QUIET;
 
        if (use_sg)
                err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
        req->sense_len = 0;
        req->timeout = timeout;
        req->retries = retries;
-       req->flags |= REQ_BLOCK_PC | REQ_QUIET;
        req->end_io_data = sioc;
 
        sioc->data = privdata;
 
 }
 
 static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
-                         *page, unsigned int len, unsigned int offset)
+                         *page, unsigned int len, unsigned int offset,
+                         unsigned short max_sectors)
 {
        int retried_segments = 0;
        struct bio_vec *bvec;
        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;
 
-       if (((bio->bi_size + len) >> 9) > q->max_sectors)
+       if (((bio->bi_size + len) >> 9) > max_sectors)
                return 0;
 
        /*
 int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
                    unsigned int len, unsigned int offset)
 {
-       return __bio_add_page(q, bio, page, len, offset);
+       /*
+        * Passthrough (REQ_BLOCK_PC) requests are built against the hardware
+        * limit, not the soft q->max_sectors cap used for fs requests, so
+        * hand __bio_add_page() q->max_hw_sectors as the size bound.
+        */
+       return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
 }
 
 /**
 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
                 unsigned int offset)
 {
-       return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
-                             len, offset);
+       /*
+        * Regular file system bios honour the soft q->max_sectors cap;
+        * only REQ_BLOCK_PC bios (see bio_add_pc_page) may grow all the
+        * way to the hardware limit.
+        */
+       struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+       return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
 }
 
 struct bio_map_data {
                        break;
                }
 
-               if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
+               if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
                        ret = -EINVAL;
                        break;
                }
                        /*
                         * sorry...
                         */
-                       if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+                       if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
+                                           bytes)
                                break;
 
                        len -= bytes;
                if (bytes > len)
                        bytes = len;
 
-               if (__bio_add_page(q, bio, virt_to_page(data), bytes,
-                                  offset) < bytes)
+               if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+                                   offset) < bytes)
                        break;
 
                data += bytes;
 
 
 
 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
-#define MAX_SECTORS 255
+/*
+ * SAFE_MAX_SECTORS: conservative fallback used when a driver declares no
+ * transfer limit of its own.  BLK_DEF_MAX_SECTORS: default soft cap for
+ * q->max_sectors, applied in blk_queue_max_sectors() when the hardware
+ * limit (q->max_hw_sectors) exceeds it.
+ */
+#define SAFE_MAX_SECTORS 255
+#define BLK_DEF_MAX_SECTORS 1024
 
 #define MAX_SEGMENT_SIZE       65536