1 << QUEUE_FLAG_STACKABLE);
        q->queue_lock           = lock;
 
-       blk_queue_segment_boundary(q, 0xffffffff);
+       blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
 
        blk_queue_make_request(q, __make_request);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
 
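/*
 * Aside, not part of the patch: a minimal sketch of what
 * seg_boundary_mask means. Two buffers may be merged into one DMA
 * segment only if the result does not straddle an address boundary;
 * the in-tree check is the BIOVEC_SEG_BOUNDARY() macro, the helper
 * below is hypothetical.
 */
static inline int crosses_seg_boundary(unsigned long addr, unsigned int len,
				       unsigned long mask)
{
	/* Legal only if start and end fall in the same (mask + 1)-sized
	 * window, i.e. both ORed with the mask give the same value. */
	return (addr | mask) != ((addr + len - 1) | mask);
}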
        q->nr_requests = BLKDEV_MAX_RQ;
        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+       blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
+       blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+
        q->make_request_fn = mfn;
        q->backing_dev_info.ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
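/*
 * Aside, not part of the patch: blk_queue_make_request() is the queue
 * init path for bio-based stacking drivers (dm, md), which never go
 * through blk_init_queue_node(), so without the two added calls a
 * stacked queue could be left with a zero seg_boundary_mask and
 * max_segment_size. A hypothetical driver init against this era's API:
 */
static int my_make_request(struct request_queue *q, struct bio *bio); /* hypothetical */

static struct request_queue *my_stacked_queue_init(void) /* hypothetical */
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (!q)
		return NULL;
	/* Installs the make_request_fn and, with this patch, also resets
	 * seg_boundary_mask and max_segment_size to the defaults above. */
	blk_queue_make_request(q, my_make_request);
	return q;
}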
        /* zero is "infinity" */
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+       t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
 
        t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
        t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+       t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
 
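/*
 * Aside, not part of the patch: min_not_zero() picks the stricter of
 * two limits while treating zero as "unset", so an uninitialised side
 * never erases a real restriction. A sketch of its semantics (in this
 * era it is a local macro in block/blk-settings.c, built on the
 * kernel's min()):
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))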
        if (!rs->max_segment_size)
                rs->max_segment_size = MAX_SEGMENT_SIZE;
        if (!rs->seg_boundary_mask)
-               rs->seg_boundary_mask = -1;
+               rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        if (!rs->bounce_pfn)
                rs->bounce_pfn = -1;
 }
 
 
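/*
 * Aside, not part of the patch: why the bare -1 default was replaced.
 * seg_boundary_mask is an unsigned long, so -1 becomes
 * 0xFFFFFFFFFFFFFFFF on 64-bit while the block layer's own default is
 * 0xFFFFFFFF; dm and the queue then disagree on what "no boundary"
 * looks like. Illustrative constants:
 */
static const unsigned long old_dm_default = (unsigned long)-1;	/* 64-bit: ~0UL */
static const unsigned long blk_default = 0xFFFFFFFFUL;		/* BLK_SEG_BOUNDARY_MASK */
/* On 64-bit old_dm_default != blk_default; one shared constant makes
 * every default agree by construction. */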
 #define MAX_SEGMENT_SIZE       65536
 
+#define BLK_SEG_BOUNDARY_MASK  0xFFFFFFFFUL
+
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
 static inline int queue_hardsect_size(struct request_queue *q)