3 * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
4 * to allow user process control of SCSI devices.
5 * Development Sponsored by Killy Corp. NY NY
7 * Original driver (sg.c):
8 * Copyright (C) 1992 Lawrence Foard
9 * Version 2 and 3 extensions to driver:
10 * Copyright (C) 1998 - 2005 Douglas Gilbert
12 * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
21 static int sg_version_num = 30534; /* 2 digits for each component */
22 #define SG_VERSION_STR "3.5.34"
25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
26 * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
27 * the kernel/module needs to be built with CONFIG_SCSI_LOGGING
28 * (otherwise the macros compile to empty statements).
31 #include <linux/module.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/string.h>
38 #include <linux/errno.h>
39 #include <linux/mtio.h>
40 #include <linux/ioctl.h>
41 #include <linux/fcntl.h>
42 #include <linux/init.h>
43 #include <linux/poll.h>
44 #include <linux/smp_lock.h>
45 #include <linux/moduleparam.h>
46 #include <linux/cdev.h>
47 #include <linux/seq_file.h>
48 #include <linux/blkdev.h>
49 #include <linux/delay.h>
50 #include <linux/scatterlist.h>
53 #include <scsi/scsi_dbg.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsi_driver.h>
56 #include <scsi/scsi_ioctl.h>
59 #include "scsi_logging.h"
61 #ifdef CONFIG_SCSI_PROC_FS
62 #include <linux/proc_fs.h>
63 static char *sg_version_date = "20060818";
65 static int sg_proc_init(void);
66 static void sg_proc_cleanup(void);
69 #define SG_ALLOW_DIO_DEF 0
70 #define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
72 #define SG_MAX_DEVS 32768
75 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
76 * Then when using 32 bit integers x * m may overflow during the calculation.
77 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
78 * calculates the same, but prevents the overflow when both m and d
79 * are "small" numbers (like HZ and USER_HZ).
80 * Of course an overflow is unavoidable if the result of muldiv doesn't fit in 32 bits.
83 #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
85 #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
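/*
 * Editor's illustrative worked example (not part of the original driver),
 * assuming the hypothetical values HZ=1000 and USER_HZ=100. Converting a
 * 250-tick user timeout with MULDIV never forms the full x * m product,
 * which is what could overflow 32 bits for large x:
 *
 *   MULDIV(250, 1000, 100)
 *     = ((250 % 100) * 1000) / 100 + (250 / 100) * 1000
 *     = (50 * 1000) / 100 + 2 * 1000
 *     = 500 + 2000
 *     = 2500 jiffies
 *
 * which matches int(250 * 1000 / 100) = 2500 computed directly.
 */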
87 int sg_big_buff = SG_DEF_RESERVED_SIZE;
88 /* N.B. This variable is readable and writeable via
89 /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
90 of this size (or less if there is not enough memory) will be reserved
91 for use by this file descriptor. [Deprecated usage: this variable is also
92 readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
93 the kernel (i.e. it is not a module).] */
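/*
 * Editor's sketch (not in the original source): from user space the per-fd
 * reserve buffer governed by this variable can also be inspected or resized
 * with the SG_GET_RESERVED_SIZE / SG_SET_RESERVED_SIZE ioctls implemented
 * further down in this file; "/dev/sg0" is only an example device node.
 *
 *   #include <fcntl.h>
 *   #include <sys/ioctl.h>
 *   #include <scsi/sg.h>
 *
 *   int fd = open("/dev/sg0", O_RDWR);
 *   int sz = 0;
 *   ioctl(fd, SG_GET_RESERVED_SIZE, &sz);   // current reserve buffer size
 *   sz = 128 * 1024;
 *   ioctl(fd, SG_SET_RESERVED_SIZE, &sz);   // request a larger reserve
 */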
94 static int def_reserved_size = -1; /* picks up init parameter */
95 static int sg_allow_dio = SG_ALLOW_DIO_DEF;
97 #define SG_SECTOR_SZ 512
98 #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
100 #define SG_DEV_ARR_LUMP 32 /* amount to over allocate sg_dev_arr by */
102 static int sg_add(struct class_device *, struct class_interface *);
103 static void sg_remove(struct class_device *, struct class_interface *);
105 static DEFINE_RWLOCK(sg_dev_arr_lock); /* Also used to lock
106 file descriptor list for device */
108 static struct class_interface sg_interface = {
113 typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
114 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
115 unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
116 unsigned bufflen; /* Size of (aggregate) data buffer */
117 unsigned b_malloc_len; /* actual len malloc'ed in buffer */
118 struct scatterlist *buffer;/* scatter list */
119 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
120 unsigned char cmd_opcode; /* first byte of command */
123 struct sg_device; /* forward declarations */
126 typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
127 struct sg_request *nextrp; /* NULL -> tail request (slist) */
128 struct sg_fd *parentfp; /* NULL -> not in use */
129 Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
130 sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
131 unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
132 char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
133 char orphan; /* 1 -> drop on sight, 0 -> normal */
134 char sg_io_owned; /* 1 -> packet belongs to SG_IO */
135 volatile char done; /* 0->before bh, 1->before read, 2->read */
138 typedef struct sg_fd { /* holds the state of a file descriptor */
139 struct sg_fd *nextfp; /* NULL when last opened fd on this device */
140 struct sg_device *parentdp; /* owning device */
141 wait_queue_head_t read_wait; /* queue read until command done */
142 rwlock_t rq_list_lock; /* protect access to list in req_arr */
143 int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
144 int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
145 Sg_scatter_hold reserve; /* buffer held for this file descriptor */
146 unsigned save_scat_len; /* original length of trunc. scat. element */
147 Sg_request *headrp; /* head of request slist, NULL->empty */
148 struct fasync_struct *async_qp; /* used by asynchronous notification */
149 Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
150 char low_dma; /* as in parent but possibly overridden to 1 */
151 char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
152 volatile char closed; /* 1 -> fd closed but request(s) outstanding */
153 char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
154 char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
155 char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
156 char mmap_called; /* 0 -> mmap() never called on this fd */
159 typedef struct sg_device { /* holds the state of each scsi generic device */
160 struct scsi_device *device;
161 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
162 int sg_tablesize; /* adapter's max scatter-gather table size */
163 Sg_fd *headfp; /* first open fd belonging to this device */
164 volatile char detached; /* 0->attached, 1->detached pending removal */
165 volatile char exclude; /* opened for exclusive access */
166 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
167 struct gendisk *disk;
168 struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
171 static int sg_fasync(int fd, struct file *filp, int mode);
172 /* tasklet or soft irq callback */
173 static void sg_cmd_done(void *data, char *sense, int result, int resid);
174 static int sg_start_req(Sg_request * srp);
175 static void sg_finish_rem_req(Sg_request * srp);
176 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
177 static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
179 static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
181 static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
182 int blocking, int read_only, Sg_request ** o_srp);
183 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
184 unsigned char *cmnd, int timeout, int blocking);
185 static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
186 int wr_xf, int *countp, unsigned char __user **up);
187 static int sg_write_xfer(Sg_request * srp);
188 static int sg_read_xfer(Sg_request * srp);
189 static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
190 static void sg_remove_scat(Sg_scatter_hold * schp);
191 static void sg_build_reserve(Sg_fd * sfp, int req_size);
192 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
193 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
194 static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
195 static void sg_page_free(struct page *page, int size);
196 static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
197 static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
198 static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
199 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
200 static Sg_request *sg_add_request(Sg_fd * sfp);
201 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
202 static int sg_res_in_use(Sg_fd * sfp);
203 static int sg_allow_access(unsigned char opcode, char dev_type);
204 static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
205 static Sg_device *sg_get_dev(int dev);
206 #ifdef CONFIG_SCSI_PROC_FS
207 static int sg_last_dev(void);
210 static Sg_device **sg_dev_arr = NULL;
211 static int sg_dev_max;
212 static int sg_nr_dev;
214 #define SZ_SG_HEADER sizeof(struct sg_header)
215 #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
216 #define SZ_SG_IOVEC sizeof(sg_iovec_t)
217 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
220 sg_open(struct inode *inode, struct file *filp)
222 int dev = iminor(inode);
223 int flags = filp->f_flags;
224 struct request_queue *q;
230 nonseekable_open(inode, filp);
231 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
232 sdp = sg_get_dev(dev);
233 if ((!sdp) || (!sdp->device))
238 /* This driver's module count bumped by fops_get in <linux/fs.h> */
239 /* Prevent the device driver from vanishing while we sleep */
240 retval = scsi_device_get(sdp->device);
244 if (!((flags & O_NONBLOCK) ||
245 scsi_block_when_processing_errors(sdp->device))) {
247 /* we are in error recovery for this device */
251 if (flags & O_EXCL) {
252 if (O_RDONLY == (flags & O_ACCMODE)) {
253 retval = -EPERM; /* Can't lock it with read only access */
256 if (sdp->headfp && (flags & O_NONBLOCK)) {
261 __wait_event_interruptible(sdp->o_excl_wait,
262 ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
264 retval = res; /* -ERESTARTSYS because signal hit process */
267 } else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
268 if (flags & O_NONBLOCK) {
273 __wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
276 retval = res; /* -ERESTARTSYS because signal hit process */
284 if (!sdp->headfp) { /* no existing opens on this device */
286 q = sdp->device->request_queue;
287 sdp->sg_tablesize = min(q->max_hw_segments,
288 q->max_phys_segments);
290 if ((sfp = sg_add_sfp(sdp, dev)))
291 filp->private_data = sfp;
294 sdp->exclude = 0; /* undo if error */
301 scsi_device_put(sdp->device);
305 /* Following function was formerly called 'sg_close' */
307 sg_release(struct inode *inode, struct file *filp)
312 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
314 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
315 sg_fasync(-1, filp, 0); /* remove filp from async notification list */
316 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
317 if (!sdp->detached) {
318 scsi_device_put(sdp->device);
321 wake_up_interruptible(&sdp->o_excl_wait);
327 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
332 int req_pack_id = -1;
334 struct sg_header *old_hdr = NULL;
337 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
339 SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
340 sdp->disk->disk_name, (int) count));
342 if (!access_ok(VERIFY_WRITE, buf, count))
344 if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
345 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
348 if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
352 if (old_hdr->reply_len < 0) {
353 if (count >= SZ_SG_IO_HDR) {
354 sg_io_hdr_t *new_hdr;
355 new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
360 retval =__copy_from_user
361 (new_hdr, buf, SZ_SG_IO_HDR);
362 req_pack_id = new_hdr->pack_id;
370 req_pack_id = old_hdr->pack_id;
372 srp = sg_get_rq_mark(sfp, req_pack_id);
373 if (!srp) { /* now wait on packet to arrive */
378 if (filp->f_flags & O_NONBLOCK) {
383 retval = 0; /* following macro beats race condition */
384 __wait_event_interruptible(sfp->read_wait,
386 (srp = sg_get_rq_mark(sfp, req_pack_id))),
395 /* -ERESTARTSYS as signal hit process */
399 if (srp->header.interface_id != '\0') {
400 retval = sg_new_read(sfp, buf, count, srp);
405 if (old_hdr == NULL) {
406 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
412 memset(old_hdr, 0, SZ_SG_HEADER);
413 old_hdr->reply_len = (int) hp->timeout;
414 old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
415 old_hdr->pack_id = hp->pack_id;
416 old_hdr->twelve_byte =
417 ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
418 old_hdr->target_status = hp->masked_status;
419 old_hdr->host_status = hp->host_status;
420 old_hdr->driver_status = hp->driver_status;
421 if ((CHECK_CONDITION & hp->masked_status) ||
422 (DRIVER_SENSE & hp->driver_status))
423 memcpy(old_hdr->sense_buffer, srp->sense_b,
424 sizeof (old_hdr->sense_buffer));
425 switch (hp->host_status) {
426 /* This setup of 'result' is for backward compatibility and is best
427 ignored by the user who should use target, host + driver status */
429 case DID_PASSTHROUGH:
436 old_hdr->result = EBUSY;
443 old_hdr->result = EIO;
446 old_hdr->result = (srp->sense_b[0] == 0 &&
447 hp->masked_status == GOOD) ? 0 : EIO;
450 old_hdr->result = EIO;
454 /* Now copy the result back to the user buffer. */
455 if (count >= SZ_SG_HEADER) {
456 if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
461 if (count > old_hdr->reply_len)
462 count = old_hdr->reply_len;
463 if (count > SZ_SG_HEADER) {
464 if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
470 count = (old_hdr->result == 0) ? 0 : -EIO;
471 sg_finish_rem_req(srp);
479 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
481 sg_io_hdr_t *hp = &srp->header;
485 if (count < SZ_SG_IO_HDR) {
490 if ((hp->mx_sb_len > 0) && hp->sbp) {
491 if ((CHECK_CONDITION & hp->masked_status) ||
492 (DRIVER_SENSE & hp->driver_status)) {
493 int sb_len = SCSI_SENSE_BUFFERSIZE;
494 sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
495 len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
496 len = (len > sb_len) ? sb_len : len;
497 if (copy_to_user(hp->sbp, srp->sense_b, len)) {
504 if (hp->masked_status || hp->host_status || hp->driver_status)
505 hp->info |= SG_INFO_CHECK;
506 if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
510 err = sg_read_xfer(srp);
512 sg_finish_rem_req(srp);
513 return (0 == err) ? count : err;
517 sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
519 int mxsize, cmd_size, k;
520 int input_size, blocking;
521 unsigned char opcode;
525 struct sg_header old_hdr;
527 unsigned char cmnd[MAX_COMMAND_SIZE];
529 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
531 SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
532 sdp->disk->disk_name, (int) count));
535 if (!((filp->f_flags & O_NONBLOCK) ||
536 scsi_block_when_processing_errors(sdp->device)))
539 if (!access_ok(VERIFY_READ, buf, count))
540 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
541 if (count < SZ_SG_HEADER)
543 if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
545 blocking = !(filp->f_flags & O_NONBLOCK);
546 if (old_hdr.reply_len < 0)
547 return sg_new_write(sfp, buf, count, blocking, 0, NULL);
548 if (count < (SZ_SG_HEADER + 6))
549 return -EIO; /* The minimum scsi command length is 6 bytes. */
551 if (!(srp = sg_add_request(sfp))) {
552 SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
556 __get_user(opcode, buf);
557 if (sfp->next_cmd_len > 0) {
558 if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
559 SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
560 sfp->next_cmd_len = 0;
561 sg_remove_request(sfp, srp);
564 cmd_size = sfp->next_cmd_len;
565 sfp->next_cmd_len = 0; /* reset so only this write() is affected */
567 cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
568 if ((opcode >= 0xc0) && old_hdr.twelve_byte)
571 SCSI_LOG_TIMEOUT(4, printk(
572 "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
573 /* Determine buffer size. */
574 input_size = count - cmd_size;
575 mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
576 mxsize -= SZ_SG_HEADER;
577 input_size -= SZ_SG_HEADER;
578 if (input_size < 0) {
579 sg_remove_request(sfp, srp);
580 return -EIO; /* User did not pass enough bytes for this command. */
583 hp->interface_id = '\0'; /* indicator of old interface tunnelled */
584 hp->cmd_len = (unsigned char) cmd_size;
588 hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
589 SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
591 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
592 hp->dxfer_len = mxsize;
593 hp->dxferp = (char __user *)buf + cmd_size;
595 hp->timeout = old_hdr.reply_len; /* structure abuse ... */
596 hp->flags = input_size; /* structure abuse ... */
597 hp->pack_id = old_hdr.pack_id;
599 if (__copy_from_user(cmnd, buf, cmd_size))
602 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
603 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
604 * is a non-zero input_size, so emit a warning.
606 if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)
607 if (printk_ratelimit())
609 "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
610 "guessing data in;\n" KERN_WARNING " "
611 "program %s not setting count and/or reply_len properly\n",
612 old_hdr.reply_len - (int)SZ_SG_HEADER,
613 input_size, (unsigned int) cmnd[0],
615 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
616 return (k < 0) ? k : count;
620 sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
621 int blocking, int read_only, Sg_request ** o_srp)
626 unsigned char cmnd[MAX_COMMAND_SIZE];
628 unsigned long ul_timeout;
630 if (count < SZ_SG_IO_HDR)
632 if (!access_ok(VERIFY_READ, buf, count))
633 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
635 sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
636 if (!(srp = sg_add_request(sfp))) {
637 SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
641 if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
642 sg_remove_request(sfp, srp);
645 if (hp->interface_id != 'S') {
646 sg_remove_request(sfp, srp);
649 if (hp->flags & SG_FLAG_MMAP_IO) {
650 if (hp->dxfer_len > sfp->reserve.bufflen) {
651 sg_remove_request(sfp, srp);
652 return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
654 if (hp->flags & SG_FLAG_DIRECT_IO) {
655 sg_remove_request(sfp, srp);
656 return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
658 if (sg_res_in_use(sfp)) {
659 sg_remove_request(sfp, srp);
660 return -EBUSY; /* reserve buffer already being used */
663 ul_timeout = msecs_to_jiffies(srp->header.timeout);
664 timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
665 if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
666 sg_remove_request(sfp, srp);
669 if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
670 sg_remove_request(sfp, srp);
671 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
673 if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
674 sg_remove_request(sfp, srp);
678 (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
679 sg_remove_request(sfp, srp);
682 k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
691 sg_common_write(Sg_fd * sfp, Sg_request * srp,
692 unsigned char *cmnd, int timeout, int blocking)
695 Sg_device *sdp = sfp->parentdp;
696 sg_io_hdr_t *hp = &srp->header;
698 srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
700 hp->masked_status = 0;
704 hp->driver_status = 0;
706 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
707 (int) cmnd[0], (int) hp->cmd_len));
709 if ((k = sg_start_req(srp))) {
710 SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k));
711 sg_finish_rem_req(srp);
712 return k; /* probably out of space --> ENOMEM */
714 if ((k = sg_write_xfer(srp))) {
715 SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n"));
716 sg_finish_rem_req(srp);
720 sg_finish_rem_req(srp);
724 switch (hp->dxfer_direction) {
725 case SG_DXFER_TO_FROM_DEV:
726 case SG_DXFER_FROM_DEV:
727 data_dir = DMA_FROM_DEVICE;
729 case SG_DXFER_TO_DEV:
730 data_dir = DMA_TO_DEVICE;
732 case SG_DXFER_UNKNOWN:
733 data_dir = DMA_BIDIRECTIONAL;
739 hp->duration = jiffies_to_msecs(jiffies);
740 /* Now send everything off to the mid-level. The next time we hear about this
741 packet is when sg_cmd_done() is called (i.e. a callback). */
742 if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
743 hp->dxfer_len, srp->data.k_use_sg, timeout,
744 SG_DEFAULT_RETRIES, srp, sg_cmd_done,
746 SCSI_LOG_TIMEOUT(1, printk("sg_write: scsi_execute_async failed\n"));
748 * most likely out of mem, but could also be a bad map
750 sg_finish_rem_req(srp);
757 sg_srp_done(Sg_request *srp, Sg_fd *sfp)
759 unsigned long iflags;
762 read_lock_irqsave(&sfp->rq_list_lock, iflags);
764 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
769 sg_ioctl(struct inode *inode, struct file *filp,
770 unsigned int cmd_in, unsigned long arg)
772 void __user *p = (void __user *)arg;
774 int result, val, read_only;
778 unsigned long iflags;
780 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
782 SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
783 sdp->disk->disk_name, (int) cmd_in));
784 read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
789 int blocking = 1; /* ignore O_NONBLOCK flag */
793 if (!scsi_block_when_processing_errors(sdp->device))
795 if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
798 sg_new_write(sfp, p, SZ_SG_IO_HDR,
799 blocking, read_only, &srp);
802 srp->sg_io_owned = 1;
804 result = 0; /* following macro to beat race condition */
805 __wait_event_interruptible(sfp->read_wait,
806 (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
811 return 0; /* request packet dropped already */
815 return result; /* -ERESTARTSYS because signal hit process */
817 write_lock_irqsave(&sfp->rq_list_lock, iflags);
819 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
820 result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
821 return (result < 0) ? result : 0;
824 result = get_user(val, ip);
829 if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
830 val = MULDIV (INT_MAX, USER_HZ, HZ);
831 sfp->timeout_user = val;
832 sfp->timeout = MULDIV (val, HZ, USER_HZ);
835 case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
836 /* strange ..., for backward compatibility */
837 return sfp->timeout_user;
838 case SG_SET_FORCE_LOW_DMA:
839 result = get_user(val, ip);
844 if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
845 val = (int) sfp->reserve.bufflen;
846 sg_remove_scat(&sfp->reserve);
847 sg_build_reserve(sfp, val);
852 sfp->low_dma = sdp->device->host->unchecked_isa_dma;
856 return put_user((int) sfp->low_dma, ip);
858 if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
861 sg_scsi_id_t __user *sg_idp = p;
865 __put_user((int) sdp->device->host->host_no,
867 __put_user((int) sdp->device->channel,
869 __put_user((int) sdp->device->id, &sg_idp->scsi_id);
870 __put_user((int) sdp->device->lun, &sg_idp->lun);
871 __put_user((int) sdp->device->type, &sg_idp->scsi_type);
872 __put_user((short) sdp->device->host->cmd_per_lun,
873 &sg_idp->h_cmd_per_lun);
874 __put_user((short) sdp->device->queue_depth,
875 &sg_idp->d_queue_depth);
876 __put_user(0, &sg_idp->unused[0]);
877 __put_user(0, &sg_idp->unused[1]);
880 case SG_SET_FORCE_PACK_ID:
881 result = get_user(val, ip);
884 sfp->force_packid = val ? 1 : 0;
887 if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
889 read_lock_irqsave(&sfp->rq_list_lock, iflags);
890 for (srp = sfp->headrp; srp; srp = srp->nextrp) {
891 if ((1 == srp->done) && (!srp->sg_io_owned)) {
892 read_unlock_irqrestore(&sfp->rq_list_lock,
894 __put_user(srp->header.pack_id, ip);
898 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
901 case SG_GET_NUM_WAITING:
902 read_lock_irqsave(&sfp->rq_list_lock, iflags);
903 for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
904 if ((1 == srp->done) && (!srp->sg_io_owned))
907 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
908 return put_user(val, ip);
909 case SG_GET_SG_TABLESIZE:
910 return put_user(sdp->sg_tablesize, ip);
911 case SG_SET_RESERVED_SIZE:
912 result = get_user(val, ip);
917 if (val != sfp->reserve.bufflen) {
918 if (sg_res_in_use(sfp) || sfp->mmap_called)
920 sg_remove_scat(&sfp->reserve);
921 sg_build_reserve(sfp, val);
924 case SG_GET_RESERVED_SIZE:
925 val = (int) sfp->reserve.bufflen;
926 return put_user(val, ip);
927 case SG_SET_COMMAND_Q:
928 result = get_user(val, ip);
931 sfp->cmd_q = val ? 1 : 0;
933 case SG_GET_COMMAND_Q:
934 return put_user((int) sfp->cmd_q, ip);
935 case SG_SET_KEEP_ORPHAN:
936 result = get_user(val, ip);
939 sfp->keep_orphan = val;
941 case SG_GET_KEEP_ORPHAN:
942 return put_user((int) sfp->keep_orphan, ip);
943 case SG_NEXT_CMD_LEN:
944 result = get_user(val, ip);
947 sfp->next_cmd_len = (val > 0) ? val : 0;
949 case SG_GET_VERSION_NUM:
950 return put_user(sg_version_num, ip);
951 case SG_GET_ACCESS_COUNT:
952 /* faked - we don't have a real access count anymore */
953 val = (sdp->device ? 1 : 0);
954 return put_user(val, ip);
955 case SG_GET_REQUEST_TABLE:
956 if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
959 sg_req_info_t *rinfo;
962 rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
966 read_lock_irqsave(&sfp->rq_list_lock, iflags);
967 for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
968 ++val, srp = srp ? srp->nextrp : srp) {
969 memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
971 rinfo[val].req_state = srp->done + 1;
973 srp->header.masked_status &
974 srp->header.host_status &
975 srp->header.driver_status;
977 rinfo[val].duration =
978 srp->header.duration;
980 ms = jiffies_to_msecs(jiffies);
981 rinfo[val].duration =
982 (ms > srp->header.duration) ?
983 (ms - srp->header.duration) : 0;
985 rinfo[val].orphan = srp->orphan;
986 rinfo[val].sg_io_owned =
994 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
995 result = __copy_to_user(p, rinfo,
996 SZ_SG_REQ_INFO * SG_MAX_QUEUE);
997 result = result ? -EFAULT : 0;
1001 case SG_EMULATED_HOST:
1004 return put_user(sdp->device->host->hostt->emulated, ip);
1008 if (filp->f_flags & O_NONBLOCK) {
1009 if (scsi_host_in_recovery(sdp->device->host))
1011 } else if (!scsi_block_when_processing_errors(sdp->device))
1013 result = get_user(val, ip);
1016 if (SG_SCSI_RESET_NOTHING == val)
1019 case SG_SCSI_RESET_DEVICE:
1020 val = SCSI_TRY_RESET_DEVICE;
1022 case SG_SCSI_RESET_BUS:
1023 val = SCSI_TRY_RESET_BUS;
1025 case SG_SCSI_RESET_HOST:
1026 val = SCSI_TRY_RESET_HOST;
1031 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1033 return (scsi_reset_provider(sdp->device, val) ==
1034 SUCCESS) ? 0 : -EIO;
1035 case SCSI_IOCTL_SEND_COMMAND:
1039 unsigned char opcode = WRITE_6;
1040 Scsi_Ioctl_Command __user *siocp = p;
1042 if (copy_from_user(&opcode, siocp->data, 1))
1044 if (!sg_allow_access(opcode, sdp->device->type))
1047 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
1049 result = get_user(val, ip);
1052 sdp->sgdebug = (char) val;
1054 case SCSI_IOCTL_GET_IDLUN:
1055 case SCSI_IOCTL_GET_BUS_NUMBER:
1056 case SCSI_IOCTL_PROBE_HOST:
1057 case SG_GET_TRANSFORM:
1060 return scsi_ioctl(sdp->device, cmd_in, p);
1063 return -EPERM; /* don't know so take safe approach */
1064 return scsi_ioctl(sdp->device, cmd_in, p);
1068 #ifdef CONFIG_COMPAT
1069 static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1073 struct scsi_device *sdev;
1075 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1079 if (sdev->host->hostt->compat_ioctl) {
1082 ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
1087 return -ENOIOCTLCMD;
1092 sg_poll(struct file *filp, poll_table * wait)
1094 unsigned int res = 0;
1099 unsigned long iflags;
1101 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
1104 poll_wait(filp, &sfp->read_wait, wait);
1105 read_lock_irqsave(&sfp->rq_list_lock, iflags);
1106 for (srp = sfp->headrp; srp; srp = srp->nextrp) {
1107 /* if any read waiting, flag it */
1108 if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
1109 res = POLLIN | POLLRDNORM;
1112 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1116 else if (!sfp->cmd_q) {
1118 res |= POLLOUT | POLLWRNORM;
1119 } else if (count < SG_MAX_QUEUE)
1120 res |= POLLOUT | POLLWRNORM;
1121 SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
1122 sdp->disk->disk_name, (int) res));
1127 sg_fasync(int fd, struct file *filp, int mode)
1133 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1135 SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
1136 sdp->disk->disk_name, mode));
1138 retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
1139 return (retval < 0) ? retval : 0;
1142 static struct page *
1143 sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
1146 struct page *page = NOPAGE_SIGBUS;
1147 unsigned long offset, len, sa;
1148 Sg_scatter_hold *rsv_schp;
1149 struct scatterlist *sg;
1152 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1154 rsv_schp = &sfp->reserve;
1155 offset = addr - vma->vm_start;
1156 if (offset >= rsv_schp->bufflen)
1158 SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
1159 offset, rsv_schp->k_use_sg));
1160 sg = rsv_schp->buffer;
1162 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1164 len = vma->vm_end - sa;
1165 len = (len < sg->length) ? len : sg->length;
1167 page = virt_to_page(page_address(sg->page) + offset);
1168 get_page(page); /* increment page count */
1176 *type = VM_FAULT_MINOR;
1180 static struct vm_operations_struct sg_mmap_vm_ops = {
1181 .nopage = sg_vma_nopage,
1185 sg_mmap(struct file *filp, struct vm_area_struct *vma)
1188 unsigned long req_sz, len, sa;
1189 Sg_scatter_hold *rsv_schp;
1191 struct scatterlist *sg;
1193 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1195 req_sz = vma->vm_end - vma->vm_start;
1196 SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
1197 (void *) vma->vm_start, (int) req_sz));
1199 return -EINVAL; /* want no offset */
1200 rsv_schp = &sfp->reserve;
1201 if (req_sz > rsv_schp->bufflen)
1202 return -ENOMEM; /* cannot map more than reserved buffer */
1205 sg = rsv_schp->buffer;
1206 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1208 len = vma->vm_end - sa;
1209 len = (len < sg->length) ? len : sg->length;
1213 sfp->mmap_called = 1;
1214 vma->vm_flags |= VM_RESERVED;
1215 vma->vm_private_data = sfp;
1216 vma->vm_ops = &sg_mmap_vm_ops;
1220 /* This function is a "bottom half" handler that is called by the
1221 * mid level when a command is completed (or has failed). */
1223 sg_cmd_done(void *data, char *sense, int result, int resid)
1225 Sg_request *srp = data;
1226 Sg_device *sdp = NULL;
1228 unsigned long iflags;
1232 printk(KERN_ERR "sg_cmd_done: NULL request\n");
1235 sfp = srp->parentfp;
1237 sdp = sfp->parentdp;
1238 if ((NULL == sdp) || sdp->detached) {
1239 printk(KERN_INFO "sg_cmd_done: device detached\n");
1244 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1245 sdp->disk->disk_name, srp->header.pack_id, result));
1246 srp->header.resid = resid;
1247 ms = jiffies_to_msecs(jiffies);
1248 srp->header.duration = (ms > srp->header.duration) ?
1249 (ms - srp->header.duration) : 0;
1251 struct scsi_sense_hdr sshdr;
1253 memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
1254 srp->header.status = 0xff & result;
1255 srp->header.masked_status = status_byte(result);
1256 srp->header.msg_status = msg_byte(result);
1257 srp->header.host_status = host_byte(result);
1258 srp->header.driver_status = driver_byte(result);
1259 if ((sdp->sgdebug > 0) &&
1260 ((CHECK_CONDITION == srp->header.masked_status) ||
1261 (COMMAND_TERMINATED == srp->header.masked_status)))
1262 __scsi_print_sense("sg_cmd_done", sense,
1263 SCSI_SENSE_BUFFERSIZE);
1265 /* Following if statement is a patch supplied by Eric Youngdale */
1266 if (driver_byte(result) != 0
1267 && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
1268 && !scsi_sense_is_deferred(&sshdr)
1269 && sshdr.sense_key == UNIT_ATTENTION
1270 && sdp->device->removable) {
1271 /* Detected possible disc change. Set the bit - this */
1272 /* may be used if there are filesystems using this device */
1273 sdp->device->changed = 1;
1276 /* Rely on write phase to clean out srp status values, so no "else" */
1278 if (sfp->closed) { /* whoops this fd already released, cleanup */
1279 SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
1280 sg_finish_rem_req(srp);
1282 if (NULL == sfp->headrp) {
1283 SCSI_LOG_TIMEOUT(1, printk("sg...bh: already closed, final cleanup\n"));
1284 if (0 == sg_remove_sfp(sdp, sfp)) { /* device still present */
1285 scsi_device_put(sdp->device);
1289 } else if (srp && srp->orphan) {
1290 if (sfp->keep_orphan)
1291 srp->sg_io_owned = 0;
1293 sg_finish_rem_req(srp);
1298 /* Now wake up any sg_read() that is waiting for this packet. */
1299 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
1300 write_lock_irqsave(&sfp->rq_list_lock, iflags);
1302 wake_up_interruptible(&sfp->read_wait);
1303 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1307 static struct file_operations sg_fops = {
1308 .owner = THIS_MODULE,
1313 #ifdef CONFIG_COMPAT
1314 .compat_ioctl = sg_compat_ioctl,
1318 .release = sg_release,
1319 .fasync = sg_fasync,
1322 static struct class *sg_sysfs_class;
1324 static int sg_sysfs_valid = 0;
1326 static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1328 struct request_queue *q = scsidp->request_queue;
1330 unsigned long iflags;
1331 void *old_sg_dev_arr = NULL;
1334 sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
1336 printk(KERN_WARNING "kmalloc Sg_device failure\n");
1340 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1341 if (unlikely(sg_nr_dev >= sg_dev_max)) { /* try to resize */
1343 int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;
1344 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1346 tmp_da = kzalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
1347 if (unlikely(!tmp_da))
1350 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1351 memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
1352 old_sg_dev_arr = sg_dev_arr;
1353 sg_dev_arr = tmp_da;
1354 sg_dev_max = tmp_dev_max;
1357 for (k = 0; k < sg_dev_max; k++)
1360 if (unlikely(k >= SG_MAX_DEVS))
1363 SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
1364 sprintf(disk->disk_name, "sg%d", k);
1365 disk->first_minor = k;
1367 sdp->device = scsidp;
1368 init_waitqueue_head(&sdp->o_excl_wait);
1369 sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
1372 sg_dev_arr[k] = sdp;
1373 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1379 kfree(old_sg_dev_arr);
1383 printk(KERN_WARNING "sg_alloc: device array cannot be resized\n");
1388 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1389 sdev_printk(KERN_WARNING, scsidp,
1390 "Unable to attach sg device type=%d, minor "
1391 "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
1397 sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1399 struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
1400 struct gendisk *disk;
1401 Sg_device *sdp = NULL;
1402 struct cdev * cdev = NULL;
1404 unsigned long iflags;
1406 disk = alloc_disk(1);
1408 printk(KERN_WARNING "alloc_disk failed\n");
1411 disk->major = SCSI_GENERIC_MAJOR;
1414 cdev = cdev_alloc();
1416 printk(KERN_WARNING "cdev_alloc failed\n");
1419 cdev->owner = THIS_MODULE;
1420 cdev->ops = &sg_fops;
1422 error = sg_alloc(disk, scsidp);
1424 printk(KERN_WARNING "sg_alloc failed\n");
1428 sdp = sg_dev_arr[k];
1430 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
1435 if (sg_sysfs_valid) {
1436 struct class_device * sg_class_member;
1438 sg_class_member = class_device_create(sg_sysfs_class, NULL,
1439 MKDEV(SCSI_GENERIC_MAJOR, k),
1442 if (IS_ERR(sg_class_member))
1443 printk(KERN_WARNING "sg_add: "
1444 "class_device_create failed\n");
1445 class_set_devdata(sg_class_member, sdp);
1446 error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
1447 &sg_class_member->kobj, "generic");
1449 printk(KERN_ERR "sg_add: unable to make symlink "
1450 "'generic' back to sg%d\n", k);
1452 printk(KERN_WARNING "sg_add: sg_sys INvalid\n");
1454 sdev_printk(KERN_NOTICE, scsidp,
1455 "Attached scsi generic sg%d type %d\n", k,scsidp->type);
1460 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1461 kfree(sg_dev_arr[k]);
1462 sg_dev_arr[k] = NULL;
1464 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1474 sg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
1476 struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
1477 Sg_device *sdp = NULL;
1478 unsigned long iflags;
1485 if (NULL == sg_dev_arr)
1488 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1489 for (k = 0; k < sg_dev_max; k++) {
1490 sdp = sg_dev_arr[k];
1491 if ((NULL == sdp) || (sdp->device != scsidp))
1492 continue; /* dirty but lowers nesting */
1495 for (sfp = sdp->headfp; sfp; sfp = tsfp) {
1497 for (srp = sfp->headrp; srp; srp = tsrp) {
1499 if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
1500 sg_finish_rem_req(srp);
1503 scsi_device_put(sdp->device);
1504 __sg_remove_sfp(sdp, sfp);
1507 wake_up_interruptible(&sfp->read_wait);
1508 kill_fasync(&sfp->async_qp, SIGPOLL,
1512 SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d, dirty\n", k));
1513 if (NULL == sdp->headfp) {
1514 sg_dev_arr[k] = NULL;
1516 } else { /* nothing active, simple case */
1517 SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k));
1518 sg_dev_arr[k] = NULL;
1523 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1526 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
1527 class_device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, k));
1528 cdev_del(sdp->cdev);
1530 put_disk(sdp->disk);
1532 if (NULL == sdp->headfp)
1533 kfree((char *) sdp);
1537 msleep(10); /* dirty detach so delay device destruction */
1540 /* Set 'perm' (4th argument) to 0 to disable module_param's definition
1541 * of sysfs parameters (which module_param doesn't yet support).
1542 * Sysfs parameters defined explicitly below.
1544 module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO);
1545 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
1547 MODULE_AUTHOR("Douglas Gilbert");
1548 MODULE_DESCRIPTION("SCSI generic (sg) driver");
1549 MODULE_LICENSE("GPL");
1550 MODULE_VERSION(SG_VERSION_STR);
1551 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
1553 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
1554 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
1561 if (def_reserved_size >= 0)
1562 sg_big_buff = def_reserved_size;
1564 rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1568 sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
1569 if ( IS_ERR(sg_sysfs_class) ) {
1570 rc = PTR_ERR(sg_sysfs_class);
1574 rc = scsi_register_interface(&sg_interface);
1576 #ifdef CONFIG_SCSI_PROC_FS
1578 #endif /* CONFIG_SCSI_PROC_FS */
1581 class_destroy(sg_sysfs_class);
1583 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
1590 #ifdef CONFIG_SCSI_PROC_FS
1592 #endif /* CONFIG_SCSI_PROC_FS */
1593 scsi_unregister_interface(&sg_interface);
1594 class_destroy(sg_sysfs_class);
1596 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1598 kfree((char *)sg_dev_arr);
1604 sg_start_req(Sg_request * srp)
1607 Sg_fd *sfp = srp->parentfp;
1608 sg_io_hdr_t *hp = &srp->header;
1609 int dxfer_len = (int) hp->dxfer_len;
1610 int dxfer_dir = hp->dxfer_direction;
1611 Sg_scatter_hold *req_schp = &srp->data;
1612 Sg_scatter_hold *rsv_schp = &sfp->reserve;
1614 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1615 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1617 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
1618 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
1619 (!sfp->parentdp->device->host->unchecked_isa_dma)) {
1620 res = sg_build_direct(srp, sfp, dxfer_len);
1621 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */
1624 if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
1625 sg_link_reserve(sfp, srp, dxfer_len);
1627 res = sg_build_indirect(req_schp, sfp, dxfer_len);
1629 sg_remove_scat(req_schp);
1637 sg_finish_rem_req(Sg_request * srp)
1639 Sg_fd *sfp = srp->parentfp;
1640 Sg_scatter_hold *req_schp = &srp->data;
1642 SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
1644 sg_unlink_reserve(sfp, srp);
1646 sg_remove_scat(req_schp);
1647 sg_remove_request(sfp, srp);
1651 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1653 int sg_bufflen = tablesize * sizeof(struct scatterlist);
1654 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1657 * TODO: test without low_dma, we should not need it since
1658 * the block layer will bounce the buffer for us
1660 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
1663 gfp_flags |= GFP_DMA;
1664 schp->buffer = kzalloc(sg_bufflen, gfp_flags);
1667 schp->sglist_len = sg_bufflen;
1668 return tablesize; /* number of scat_gath elements allocated */
1671 #ifdef SG_ALLOW_DIO_CODE
1672 /* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
1673 /* TODO: hopefully we can use the generic block layer code */
1675 /* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
1676 - mapping of all pages not successful
1677 (i.e., either completely successful or fails)
1680 st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1681 unsigned long uaddr, size_t count, int rw)
1683 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1684 unsigned long start = uaddr >> PAGE_SHIFT;
1685 const int nr_pages = end - start;
1687 struct page **pages;
1689 /* User attempted Overflow! */
1690 if ((uaddr + count) < uaddr)
1694 if (nr_pages > max_pages)
1701 if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
1704 /* Try to fault in all of the necessary pages */
1705 down_read(¤t->mm->mmap_sem);
1706 /* rw==READ means read from drive, write into memory area */
1707 res = get_user_pages(
1713 0, /* don't force */
1716 up_read(¤t->mm->mmap_sem);
1718 /* Errors and no page mapped should return here */
1722 for (i=0; i < nr_pages; i++) {
1723 /* FIXME: flush superfluous for rw==READ,
1724 * probably wrong function for rw==WRITE
1726 flush_dcache_page(pages[i]);
1727 /* ?? Is locking needed? I don't think so */
1728 /* if (TestSetPageLocked(pages[i]))
1732 sgl[0].page = pages[0];
1733 sgl[0].offset = uaddr & ~PAGE_MASK;
1735 sgl[0].length = PAGE_SIZE - sgl[0].offset;
1736 count -= sgl[0].length;
1737 for (i=1; i < nr_pages ; i++) {
1738 sgl[i].page = pages[i];
1739 sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
1744 sgl[0].length = count;
1752 for (j=0; j < res; j++)
1753 page_cache_release(pages[j]);
1761 /* And unmap them... */
1763 st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1768 for (i=0; i < nr_pages; i++) {
1769 struct page *page = sgl[i].page;
1773 /* unlock_page(page); */
1774 /* FIXME: cache flush missing for rw==READ
1775 * FIXME: call the correct reference counting function
1777 page_cache_release(page);
1783 /* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
1787 /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1789 sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1791 #ifdef SG_ALLOW_DIO_CODE
1792 sg_io_hdr_t *hp = &srp->header;
1793 Sg_scatter_hold *schp = &srp->data;
1794 int sg_tablesize = sfp->parentdp->sg_tablesize;
1795 int mx_sc_elems, res;
1796 struct scsi_device *sdev = sfp->parentdp->device;
1798 if (((unsigned long)hp->dxferp &
1799 queue_dma_alignment(sdev->request_queue)) != 0)
1802 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1803 if (mx_sc_elems <= 0) {
1806 res = st_map_user_pages(schp->buffer, mx_sc_elems,
1807 (unsigned long)hp->dxferp, dxfer_len,
1808 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
1810 sg_remove_scat(schp);
1813 schp->k_use_sg = res;
1814 schp->dio_in_use = 1;
1815 hp->info |= SG_INFO_DIRECT_IO;
1823 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1825 struct scatterlist *sg;
1826 int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
1827 int sg_tablesize = sfp->parentdp->sg_tablesize;
1828 int blk_size = buff_size;
1829 struct page *p = NULL;
1831 if ((blk_size < 0) || (!sfp))
1834 ++blk_size; /* don't know why */
1835 /* round request up to next highest SG_SECTOR_SZ byte boundary */
1836 blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
1837 SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
1838 buff_size, blk_size));
1840 /* N.B. ret_sz carried into this block ... */
1841 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1842 if (mx_sc_elems < 0)
1843 return mx_sc_elems; /* most likely -ENOMEM */
1845 for (k = 0, sg = schp->buffer, rem_sz = blk_size;
1846 (rem_sz > 0) && (k < mx_sc_elems);
1847 ++k, rem_sz -= ret_sz, ++sg) {
1849 num = (rem_sz > SG_SCATTER_SZ) ? SG_SCATTER_SZ : rem_sz;
1850 p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
1855 sg->length = ret_sz;
1857 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, a=0x%p, len=%d\n",
1859 } /* end of for loop */
1862 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz));
1864 schp->bufflen = blk_size;
1865 if (rem_sz > 0) /* must have failed */
1872 sg_write_xfer(Sg_request * srp)
1874 sg_io_hdr_t *hp = &srp->header;
1875 Sg_scatter_hold *schp = &srp->data;
1876 struct scatterlist *sg = schp->buffer;
1878 int j, k, onum, usglen, ksglen, res;
1879 int iovec_count = (int) hp->iovec_count;
1880 int dxfer_dir = hp->dxfer_direction;
1882 unsigned char __user *up;
1883 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
1885 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
1886 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
1887 num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
1888 if (schp->bufflen < num_xfer)
1889 num_xfer = schp->bufflen;
1891 if ((num_xfer <= 0) || (schp->dio_in_use) ||
1893 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
1896 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1897 num_xfer, iovec_count, schp->k_use_sg));
1900 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
1905 ksglen = sg->length;
1906 p = page_address(sg->page);
1907 for (j = 0, k = 0; j < onum; ++j) {
1908 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1912 for (; p; ++sg, ksglen = sg->length,
1913 p = page_address(sg->page)) {
1916 if (ksglen > usglen) {
1917 if (usglen >= num_xfer) {
1918 if (__copy_from_user(p, up, num_xfer))
1922 if (__copy_from_user(p, up, usglen))
1928 if (ksglen >= num_xfer) {
1929 if (__copy_from_user(p, up, num_xfer))
1933 if (__copy_from_user(p, up, ksglen))
1939 if (k >= schp->k_use_sg)
1948 sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
1949 int wr_xf, int *countp, unsigned char __user **up)
1951 int num_xfer = (int) hp->dxfer_len;
1952 unsigned char __user *p = hp->dxferp;
1956 if (wr_xf && ('\0' == hp->interface_id))
1957 count = (int) hp->flags; /* holds "old" input_size */
1962 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
1965 count = (int) iovec.iov_len;
1967 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
1977 sg_remove_scat(Sg_scatter_hold * schp)
1979 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
1980 if (schp->buffer && (schp->sglist_len > 0)) {
1981 struct scatterlist *sg = schp->buffer;
1983 if (schp->dio_in_use) {
1984 #ifdef SG_ALLOW_DIO_CODE
1985 st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
1990 for (k = 0; (k < schp->k_use_sg) && sg->page;
1992 SCSI_LOG_TIMEOUT(5, printk(
1993 "sg_remove_scat: k=%d, a=0x%p, len=%d\n",
1994 k, sg->page, sg->length));
1995 sg_page_free(sg->page, sg->length);
1998 kfree(schp->buffer);
2000 memset(schp, 0, sizeof (*schp));
2004 sg_read_xfer(Sg_request * srp)
2006 sg_io_hdr_t *hp = &srp->header;
2007 Sg_scatter_hold *schp = &srp->data;
2008 struct scatterlist *sg = schp->buffer;
2010 int j, k, onum, usglen, ksglen, res;
2011 int iovec_count = (int) hp->iovec_count;
2012 int dxfer_dir = hp->dxfer_direction;
2014 unsigned char __user *up;
2015 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2017 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2018 || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2019 num_xfer = hp->dxfer_len;
2020 if (schp->bufflen < num_xfer)
2021 num_xfer = schp->bufflen;
2023 if ((num_xfer <= 0) || (schp->dio_in_use) ||
2025 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2028 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2029 num_xfer, iovec_count, schp->k_use_sg));
2032 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2037 p = page_address(sg->page);
2038 ksglen = sg->length;
2039 for (j = 0, k = 0; j < onum; ++j) {
2040 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2044 for (; p; ++sg, ksglen = sg->length,
2045 p = page_address(sg->page)) {
2048 if (ksglen > usglen) {
2049 if (usglen >= num_xfer) {
2050 if (__copy_to_user(up, p, num_xfer))
2054 if (__copy_to_user(up, p, usglen))
2060 if (ksglen >= num_xfer) {
2061 if (__copy_to_user(up, p, num_xfer))
2065 if (__copy_to_user(up, p, ksglen))
2071 if (k >= schp->k_use_sg)
2080 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2082 Sg_scatter_hold *schp = &srp->data;
2083 struct scatterlist *sg = schp->buffer;
2086 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
2088 if ((!outp) || (num_read_xfer <= 0))
2091 for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) {
2093 if (num > num_read_xfer) {
2094 if (__copy_to_user(outp, page_address(sg->page),
2099 if (__copy_to_user(outp, page_address(sg->page),
2102 num_read_xfer -= num;
2103 if (num_read_xfer <= 0)
2113 sg_build_reserve(Sg_fd * sfp, int req_size)
2115 Sg_scatter_hold *schp = &sfp->reserve;
2117 SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
2119 if (req_size < PAGE_SIZE)
2120 req_size = PAGE_SIZE;
2121 if (0 == sg_build_indirect(schp, sfp, req_size))
2124 sg_remove_scat(schp);
2125 req_size >>= 1; /* divide by 2 */
2126 } while (req_size > (PAGE_SIZE / 2));
2130 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2132 Sg_scatter_hold *req_schp = &srp->data;
2133 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2134 struct scatterlist *sg = rsv_schp->buffer;
2138 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2141 for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
2144 sfp->save_scat_len = num;
2146 req_schp->k_use_sg = k + 1;
2147 req_schp->sglist_len = rsv_schp->sglist_len;
2148 req_schp->buffer = rsv_schp->buffer;
2150 req_schp->bufflen = size;
2151 req_schp->b_malloc_len = rsv_schp->b_malloc_len;
2157 if (k >= rsv_schp->k_use_sg)
2158 SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
2162 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2164 Sg_scatter_hold *req_schp = &srp->data;
2165 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2167 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2168 (int) req_schp->k_use_sg));
2169 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2170 struct scatterlist *sg = rsv_schp->buffer;
2172 if (sfp->save_scat_len > 0)
2173 (sg + (req_schp->k_use_sg - 1))->length =
2174 (unsigned) sfp->save_scat_len;
2176 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2178 req_schp->k_use_sg = 0;
2179 req_schp->bufflen = 0;
2180 req_schp->buffer = NULL;
2181 req_schp->sglist_len = 0;
2182 sfp->save_scat_len = 0;
2187 sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2190 unsigned long iflags;
2192 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2193 for (resp = sfp->headrp; resp; resp = resp->nextrp) {
2194 /* look for requests that are ready + not SG_IO owned */
2195 if ((1 == resp->done) && (!resp->sg_io_owned) &&
2196 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2197 resp->done = 2; /* guard against other readers */
2201 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2205 #ifdef CONFIG_SCSI_PROC_FS
2207 sg_get_nth_request(Sg_fd * sfp, int nth)
2210 unsigned long iflags;
2213 read_lock_irqsave(&sfp->rq_list_lock, iflags);
2214 for (k = 0, resp = sfp->headrp; resp && (k < nth);
2215 ++k, resp = resp->nextrp) ;
2216 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2221 /* always adds to end of list */
2223 sg_add_request(Sg_fd * sfp)
2226 unsigned long iflags;
2228 Sg_request *rp = sfp->req_arr;
2230 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2233 memset(rp, 0, sizeof (Sg_request));
2238 if (0 == sfp->cmd_q)
2239 resp = NULL; /* command queuing disallowed */
2241 for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2245 if (k < SG_MAX_QUEUE) {
2246 memset(rp, 0, sizeof (Sg_request));
2248 while (resp->nextrp)
2249 resp = resp->nextrp;
2257 resp->nextrp = NULL;
2258 resp->header.duration = jiffies_to_msecs(jiffies);
2260 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2264 /* Return of 1 for found; 0 for not found */
2266 sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2268 Sg_request *prev_rp;
2270 unsigned long iflags;
2273 if ((!sfp) || (!srp) || (!sfp->headrp))
2275 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2276 prev_rp = sfp->headrp;
2277 if (srp == prev_rp) {
2278 sfp->headrp = prev_rp->nextrp;
2279 prev_rp->parentfp = NULL;
2282 while ((rp = prev_rp->nextrp)) {
2284 prev_rp->nextrp = rp->nextrp;
2285 rp->parentfp = NULL;
2292 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2296 #ifdef CONFIG_SCSI_PROC_FS
2298 sg_get_nth_sfp(Sg_device * sdp, int nth)
2301 unsigned long iflags;
2304 read_lock_irqsave(&sg_dev_arr_lock, iflags);
2305 for (k = 0, resp = sdp->headfp; resp && (k < nth);
2306 ++k, resp = resp->nextfp) ;
2307 read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2313 sg_add_sfp(Sg_device * sdp, int dev)
2316 unsigned long iflags;
2318 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2322 init_waitqueue_head(&sfp->read_wait);
2323 rwlock_init(&sfp->rq_list_lock);
2325 sfp->timeout = SG_DEFAULT_TIMEOUT;
2326 sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2327 sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2328 sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
2329 sdp->device->host->unchecked_isa_dma : 1;
2330 sfp->cmd_q = SG_DEF_COMMAND_Q;
2331 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2332 sfp->parentdp = sdp;
2333 write_lock_irqsave(&sg_dev_arr_lock, iflags);
2336 else { /* add to tail of existing list */
2337 Sg_fd *pfp = sdp->headfp;
2342 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2343 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2344 sg_build_reserve(sfp, sg_big_buff);
2345 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2346 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2351 __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2356 prev_fp = sdp->headfp;
2358 sdp->headfp = prev_fp->nextfp;
2360 while ((fp = prev_fp->nextfp)) {
2362 prev_fp->nextfp = fp->nextfp;
2368 if (sfp->reserve.bufflen > 0) {
2370 printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
2371 (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
2372 sg_remove_scat(&sfp->reserve);
2374 sfp->parentdp = NULL;
2375 SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp));
2379 /* Returns 0 in normal case, 1 when detached and sdp object removed */
2381 sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2388 for (srp = sfp->headrp; srp; srp = tsrp) {
2390 if (sg_srp_done(srp, sfp))
2391 sg_finish_rem_req(srp);
2396 unsigned long iflags;
2398 write_lock_irqsave(&sg_dev_arr_lock, iflags);
2399 __sg_remove_sfp(sdp, sfp);
2400 if (sdp->detached && (NULL == sdp->headfp)) {
2404 for (k = 0; k < maxd; ++k) {
2405 if (sdp == sg_dev_arr[k])
2409 sg_dev_arr[k] = NULL;
2410 kfree((char *) sdp);
2413 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2415 /* MOD_INC's to inhibit unloading sg and associated adapter driver */
2416 /* only bump the access_count if we actually succeeded in
2417 * throwing another counter on the host module */
2418 scsi_device_get(sdp->device); /* XXX: retval ignored? */
2419 sfp->closed = 1; /* flag dirty state on this fd */
2420 SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
2427 sg_res_in_use(Sg_fd * sfp)
2429 const Sg_request *srp;
2430 unsigned long iflags;
2432 read_lock_irqsave(&sfp->rq_list_lock, iflags);
2433 for (srp = sfp->headrp; srp; srp = srp->nextrp)
2436 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2440 /* If retSzp is NULL the caller needs the exact requested size or failure; otherwise a smaller allocation may be accepted and the size actually obtained is reported back through *retSzp */
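/* The request size is rounded up to a power-of-two number of pages before
 * calling alloc_pages(); e.g. with 4 KiB pages a 12 KiB request becomes an
 * order-2 (16 KiB) allocation, and on failure progressively smaller orders
 * are tried when the caller can accept a short buffer. */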
2441 static struct page *
2442 sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2444 struct page *resp = NULL;
2452 if (lowDma)
2453 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
2454 else
2455 page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
2457 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2458 order++, a_size <<= 1) ;
2459 resp = alloc_pages(page_mask, order);
2460 while ((!resp) && order && retSzp) {
2461 --order; /* retry with the next smaller power-of-two size */
2462 a_size >>= 1; /* divide by 2, until PAGE_SIZE */
2463 resp = alloc_pages(page_mask, order); /* try half */
2467 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2468 memset(page_address(resp), 0, resSz);
2476 sg_page_free(struct page *page, int size)
2482 for (order = 0, a_size = PAGE_SIZE; a_size < size;
2483 order++, a_size <<= 1) ;
2484 __free_pages(page, order);
2487 #ifndef MAINTENANCE_IN_CMD
2488 #define MAINTENANCE_IN_CMD 0xa3
2489 #endif
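/* Command opcodes that sg_allow_access() below will pass for a read-only
 * open; commands not listed here require the device node to have been
 * opened with write access. */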
2491 static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
2492 INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
2493 READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
2494 SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
2498 sg_allow_access(unsigned char opcode, char dev_type)
2502 if (TYPE_SCANNER == dev_type) /* scanners need arbitrary commands; note a TYPE_ROM may really be a burner */
2504 for (k = 0; k < sizeof (allow_ops); ++k) {
2505 if (opcode == allow_ops[k])
2511 #ifdef CONFIG_SCSI_PROC_FS
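/* sg_last_dev(): scan sg_dev_arr from the top and return one past the highest
 * slot that still has a scsi_device attached, i.e. a 1-origin count used as
 * the upper bound by the procfs iterators below. */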
2516 unsigned long iflags;
2518 read_lock_irqsave(&sg_dev_arr_lock, iflags);
2519 for (k = sg_dev_max - 1; k >= 0; --k)
2520 if (sg_dev_arr[k] && sg_dev_arr[k]->device)
2522 read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2523 return k + 1; /* origin 1 */
2530 Sg_device *sdp = NULL;
2531 unsigned long iflags;
2533 if (sg_dev_arr && (dev >= 0)) {
2534 read_lock_irqsave(&sg_dev_arr_lock, iflags);
2535 if (dev < sg_dev_max)
2536 sdp = sg_dev_arr[dev];
2537 read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2542 #ifdef CONFIG_SCSI_PROC_FS
2544 static struct proc_dir_entry *sg_proc_sgp = NULL;
2546 static char sg_proc_sg_dirname[] = "scsi/sg";
2548 static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2550 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2551 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2552 size_t count, loff_t *off);
2553 static struct file_operations adio_fops = {
2554 /* .owner, .read and .llseek added in sg_proc_init() */
2555 .open = sg_proc_single_open_adio,
2556 .write = sg_proc_write_adio,
2557 .release = single_release,
2560 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2561 static ssize_t sg_proc_write_dressz(struct file *filp,
2562 const char __user *buffer, size_t count, loff_t *off);
2563 static struct file_operations dressz_fops = {
2564 .open = sg_proc_single_open_dressz,
2565 .write = sg_proc_write_dressz,
2566 .release = single_release,
2569 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2570 static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2571 static struct file_operations version_fops = {
2572 .open = sg_proc_single_open_version,
2573 .release = single_release,
2576 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2577 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2578 static struct file_operations devhdr_fops = {
2579 .open = sg_proc_single_open_devhdr,
2580 .release = single_release,
2583 static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
2584 static int sg_proc_open_dev(struct inode *inode, struct file *file);
2585 static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2586 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2587 static void dev_seq_stop(struct seq_file *s, void *v);
2588 static struct file_operations dev_fops = {
2589 .open = sg_proc_open_dev,
2590 .release = seq_release,
2592 static struct seq_operations dev_seq_ops = {
2593 .start = dev_seq_start,
2594 .next = dev_seq_next,
2595 .stop = dev_seq_stop,
2596 .show = sg_proc_seq_show_dev,
2599 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2600 static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2601 static struct file_operations devstrs_fops = {
2602 .open = sg_proc_open_devstrs,
2603 .release = seq_release,
2605 static struct seq_operations devstrs_seq_ops = {
2606 .start = dev_seq_start,
2607 .next = dev_seq_next,
2608 .stop = dev_seq_stop,
2609 .show = sg_proc_seq_show_devstrs,
2612 static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2613 static int sg_proc_open_debug(struct inode *inode, struct file *file);
2614 static struct file_operations debug_fops = {
2615 .open = sg_proc_open_debug,
2616 .release = seq_release,
2618 static struct seq_operations debug_seq_ops = {
2619 .start = dev_seq_start,
2620 .next = dev_seq_next,
2621 .stop = dev_seq_stop,
2622 .show = sg_proc_seq_show_debug,
2626 struct sg_proc_leaf {
2627 const char * name;
2628 struct file_operations * fops;
2631 static struct sg_proc_leaf sg_proc_leaf_arr[] = {
2632 {"allow_dio", &adio_fops},
2633 {"debug", &debug_fops},
2634 {"def_reserved_size", &dressz_fops},
2635 {"device_hdr", &devhdr_fops},
2636 {"devices", &dev_fops},
2637 {"device_strs", &devstrs_fops},
2638 {"version", &version_fops}
2645 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2646 struct proc_dir_entry *pdep;
2647 struct sg_proc_leaf * leaf;
2649 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
2652 for (k = 0; k < num_leaves; ++k) {
2653 leaf = &sg_proc_leaf_arr[k];
2654 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2655 pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp);
2657 leaf->fops->owner = THIS_MODULE;
2658 leaf->fops->read = seq_read;
2659 leaf->fops->llseek = seq_lseek;
2660 pdep->proc_fops = leaf->fops;
2667 sg_proc_cleanup(void)
2670 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2674 for (k = 0; k < num_leaves; ++k)
2675 remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
2676 remove_proc_entry(sg_proc_sg_dirname, NULL);
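/* The init/cleanup pair above creates and removes /proc/scsi/sg/ containing
 * allow_dio, debug, def_reserved_size, device_hdr, devices, device_strs and
 * version; entries whose fops provide a write method are made user-writable
 * (S_IRUGO | S_IWUSR), the rest are read-only. */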
2680 static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2682 seq_printf(s, "%d\n", *((int *)s->private));
2686 static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
2688 return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
2692 sg_proc_write_adio(struct file *filp, const char __user *buffer,
2693 size_t count, loff_t *off)
2698 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2700 num = (count < 10) ? count : 10;
2701 if (copy_from_user(buff, buffer, num))
2704 sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
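/* Writing a non-zero integer to this file (e.g.
 * "echo 1 > /proc/scsi/sg/allow_dio") enables direct I/O; the writer must
 * hold both CAP_SYS_ADMIN and CAP_SYS_RAWIO. */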
2708 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
2710 return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
2714 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2715 size_t count, loff_t *off)
2718 unsigned long k = ULONG_MAX;
2721 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2723 num = (count < 10) ? count : 10;
2724 if (copy_from_user(buff, buffer, num))
2727 k = simple_strtoul(buff, NULL, 10);
2728 if (k <= 1048576) { /* limit "big buff" to 1 MB */
2735 static int sg_proc_seq_show_version(struct seq_file *s, void *v)
2737 seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
2742 static int sg_proc_single_open_version(struct inode *inode, struct file *file)
2744 return single_open(file, sg_proc_seq_show_version, NULL);
2747 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2749 seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
2754 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
2756 return single_open(file, sg_proc_seq_show_devhdr, NULL);
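/* Iterator state shared by the devices, device_strs and debug seq_files:
 * 'index' is the current position in sg_dev_arr and 'max' caches
 * sg_last_dev() at the time the file was opened. */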
2759 struct sg_proc_deviter {
2764 static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2766 struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2772 if (NULL == sg_dev_arr)
2775 it->max = sg_last_dev();
2776 if (it->index >= it->max)
2781 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2783 struct sg_proc_deviter * it = s->private;
2786 return (it->index < it->max) ? it : NULL;
2789 static void dev_seq_stop(struct seq_file *s, void *v)
2794 static int sg_proc_open_dev(struct inode *inode, struct file *file)
2796 return seq_open(file, &dev_seq_ops);
2799 static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2801 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2803 struct scsi_device *scsidp;
2805 sdp = it ? sg_get_dev(it->index) : NULL;
2806 if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2807 seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
2808 scsidp->host->host_no, scsidp->channel,
2809 scsidp->id, scsidp->lun, (int) scsidp->type,
2811 (int) scsidp->queue_depth,
2812 (int) scsidp->device_busy,
2813 (int) scsi_device_online(scsidp));
2815 seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2819 static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
2821 return seq_open(file, &devstrs_seq_ops);
2824 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2826 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2828 struct scsi_device *scsidp;
2830 sdp = it ? sg_get_dev(it->index) : NULL;
2831 if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2832 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2833 scsidp->vendor, scsidp->model, scsidp->rev);
2835 seq_printf(s, "<no active device>\n");
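/* sg_proc_debug_helper(): for one device, print an "FD(n)" summary line per
 * open file descriptor, then one line per request queued on that fd
 * (pack_id, buffer length, duration or timeout/elapsed, scatter-gather count
 * and command opcode). */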
2839 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2841 int k, m, new_interface, blen, usg;
2844 const sg_io_hdr_t *hp;
2848 for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
2849 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
2850 "(res)sgat=%d low_dma=%d\n", k + 1,
2851 jiffies_to_msecs(fp->timeout),
2852 fp->reserve.bufflen,
2853 (int) fp->reserve.k_use_sg,
2855 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
2856 (int) fp->cmd_q, (int) fp->force_packid,
2857 (int) fp->keep_orphan, (int) fp->closed);
2858 for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
2860 new_interface = (hp->interface_id == '\0') ? 0 : 1;
2861 if (srp->res_used) {
2862 if (new_interface &&
2863 (SG_FLAG_MMAP_IO & hp->flags))
2868 if (SG_INFO_DIRECT_IO_MASK & hp->info)
2874 blen = srp->data.bufflen;
2875 usg = srp->data.k_use_sg;
2876 seq_printf(s, srp->done ?
2877 ((1 == srp->done) ? "rcv:" : "fin:")
2879 seq_printf(s, " id=%d blen=%d",
2880 srp->header.pack_id, blen);
2882 seq_printf(s, " dur=%d", hp->duration);
2884 ms = jiffies_to_msecs(jiffies);
2885 seq_printf(s, " t_o/elap=%d/%d",
2886 (new_interface ? hp->timeout :
2887 jiffies_to_msecs(fp->timeout)),
2888 (ms > hp->duration ? ms - hp->duration : 0));
2890 seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
2891 (int) srp->data.cmd_opcode);
2894 seq_printf(s, " No requests active\n");
2898 static int sg_proc_open_debug(struct inode *inode, struct file *file)
2900 return seq_open(file, &debug_seq_ops);
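/* The "debug" seq_file prints the global limits once (at iterator index 0)
 * and then one section per attached sg device via sg_proc_debug_helper()
 * above. */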
2903 static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2905 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2908 if (it && (0 == it->index)) {
2909 seq_printf(s, "dev_max(currently)=%d max_active_device=%d "
2910 "(origin 1)\n", sg_dev_max, (int)it->max);
2911 seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
2913 sdp = it ? sg_get_dev(it->index) : NULL;
2915 struct scsi_device *scsidp = sdp->device;
2917 if (NULL == scsidp) {
2918 seq_printf(s, "device %d detached ??\n",
2923 if (sg_get_nth_sfp(sdp, 0)) {
2924 seq_printf(s, " >>> device=%s ",
2925 sdp->disk->disk_name);
2927 seq_printf(s, "detached pending close ");
2930 (s, "scsi%d chan=%d id=%d lun=%d em=%d",
2931 scsidp->host->host_no,
2932 scsidp->channel, scsidp->id,
2934 scsidp->host->hostt->emulated);
2935 seq_printf(s, " sg_tablesize=%d excl=%d\n",
2936 sdp->sg_tablesize, sdp->exclude);
2938 sg_proc_debug_helper(s, sdp);
2943 #endif /* CONFIG_SCSI_PROC_FS */
2945 module_init(init_sg);
2946 module_exit(exit_sg);