author     Stephen Rothwell <sfr@canb.auug.org.au>  2008-09-03 18:16:50 +1000
committer  Stephen Rothwell <sfr@canb.auug.org.au>  2008-09-03 18:16:50 +1000
commit     25a5c8f2f7f248163dd11984ebde095f2c851d6f (patch)
tree       b8700702f451b70e06c62cf48d0cb09147926b8a /drivers/scsi
parent     8986f5658ecf19461da37cdded77b366463cf612 (diff)
Revert "Merge commit 'block/for-next'"
This reverts commit 305e6d2b61e0279d56488e20dce3d2af85b94ce3.
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/aacraid/aachba.c          |   2
-rw-r--r--  drivers/scsi/gdth.c                    |  60
-rw-r--r--  drivers/scsi/gdth.h                    |   2
-rw-r--r--  drivers/scsi/gdth_proc.c               |  66
-rw-r--r--  drivers/scsi/gdth_proc.h               |   3
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c       |   2
-rw-r--r--  drivers/scsi/ide-scsi.c                |   2
-rw-r--r--  drivers/scsi/ipr.c                     |   3
-rw-r--r--  drivers/scsi/ips.c                     |   2
-rw-r--r--  drivers/scsi/libiscsi.c                |  17
-rw-r--r--  drivers/scsi/libsas/sas_ata.c          |   2
-rw-r--r--  drivers/scsi/libsas/sas_internal.h     |   2
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c    |  30
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c   |   6
-rw-r--r--  drivers/scsi/ncr53c8xx.c               |   4
-rw-r--r--  drivers/scsi/qla1280.c                 |   4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c          |   4
-rw-r--r--  drivers/scsi/scsi.c                    |  94
-rw-r--r--  drivers/scsi/scsi_error.c              |  90
-rw-r--r--  drivers/scsi/scsi_lib.c                |  40
-rw-r--r--  drivers/scsi/scsi_priv.h               |   8
-rw-r--r--  drivers/scsi/scsi_sysfs.c              |   7
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c            |   2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c       |   6
-rw-r--r--  drivers/scsi/sd.c                      |  91
-rw-r--r--  drivers/scsi/sg.c                      | 589
-rw-r--r--  drivers/scsi/sr.c                      |   7
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c    |   4
28 files changed, 757 insertions, 392 deletions
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 8abfd06b5a72..aa4e77c25273 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
srbcmd->id = cpu_to_le32(scmd_id(cmd));
srbcmd->lun = cpu_to_le32(cmd->device->lun);
srbcmd->flags = cpu_to_le32(flag);
- timeout = cmd->request->timeout/HZ;
+ timeout = cmd->timeout_per_command/HZ;
if (timeout == 0)
timeout = 1;
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index c387c15a2128..822d5214692b 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -464,6 +464,7 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
/* use request field to save the ptr. to completion struct. */
scp->request = (struct request *)&wait;
+ scp->timeout_per_command = timeout*HZ;
scp->cmd_len = 12;
scp->cmnd = cmnd;
cmndinfo.priority = IOCTL_PRI;
@@ -1994,12 +1995,23 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
register Scsi_Cmnd *pscp;
register Scsi_Cmnd *nscp;
ulong flags;
+ unchar b, t;
TRACE(("gdth_putq() priority %d\n",priority));
spin_lock_irqsave(&ha->smp_lock, flags);
- if (!cmndinfo->internal_command)
+ if (!cmndinfo->internal_command) {
cmndinfo->priority = priority;
+ b = scp->device->channel;
+ t = scp->device->id;
+ if (priority >= DEFAULT_PRI) {
+ if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
+ (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
+ TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
+ cmndinfo->timeout = gdth_update_timeout(scp, 0);
+ }
+ }
+ }
if (ha->req_first==NULL) {
ha->req_first = scp; /* queue was empty */
@@ -3887,39 +3899,6 @@ static const char *gdth_info(struct Scsi_Host *shp)
return ((const char *)ha->binfo.type_string);
}
-static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
-{
- gdth_ha_str *ha = shost_priv(scp->device->host);
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- unchar b, t;
- ulong flags;
- enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
-
- TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
- b = scp->device->channel;
- t = scp->device->id;
-
- /*
- * We don't really honor the command timeout, but we try to
- * honor 6 times of the actual command timeout! So reset the
- * timer if this is less than 6th timeout on this command!
- */
- if (++cmndinfo->timeout_count < 6)
- retval = BLK_EH_RESET_TIMER;
-
- /* Reset the timeout if it is locked IO */
- spin_lock_irqsave(&ha->smp_lock, flags);
- if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
- (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
- TRACE2(("%s(): locked IO, reset timeout\n", __func__));
- retval = BLK_EH_RESET_TIMER;
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- return retval;
-}
-
-
static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
@@ -4013,7 +3992,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
BUG_ON(!cmndinfo);
scp->scsi_done = done;
- cmndinfo->timeout_count = 0;
+ gdth_update_timeout(scp, scp->timeout_per_command * 6);
cmndinfo->priority = DEFAULT_PRI;
return __gdth_queuecommand(ha, scp, cmndinfo);
@@ -4117,10 +4096,12 @@ static int ioc_lockdrv(void __user *arg)
ha->hdr[j].lock = 1;
spin_unlock_irqrestore(&ha->smp_lock, flags);
gdth_wait_completion(ha, ha->bus_cnt, j);
+ gdth_stop_timeout(ha, ha->bus_cnt, j);
} else {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->hdr[j].lock = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
+ gdth_start_timeout(ha, ha->bus_cnt, j);
gdth_next(ha);
}
}
@@ -4558,14 +4539,18 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
spin_lock_irqsave(&ha->smp_lock, flags);
ha->raw[i].lock = 1;
spin_unlock_irqrestore(&ha->smp_lock, flags);
- for (j = 0; j < ha->tid_cnt; ++j)
+ for (j = 0; j < ha->tid_cnt; ++j) {
gdth_wait_completion(ha, i, j);
+ gdth_stop_timeout(ha, i, j);
+ }
} else {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->raw[i].lock = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
- for (j = 0; j < ha->tid_cnt; ++j)
+ for (j = 0; j < ha->tid_cnt; ++j) {
+ gdth_start_timeout(ha, i, j);
gdth_next(ha);
+ }
}
}
break;
@@ -4659,7 +4644,6 @@ static struct scsi_host_template gdth_template = {
.slave_configure = gdth_slave_configure,
.bios_param = gdth_bios_param,
.proc_info = gdth_proc_info,
- .eh_timed_out = gdth_timed_out,
.proc_name = "gdth",
.can_queue = GDTH_MAXCMDS,
.this_id = -1,
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index 1646444e9bd5..ca92476727cf 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -916,7 +916,7 @@ typedef struct {
gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
dma_addr_t sense_paddr; /* sense dma-addr */
unchar priority;
- int timeout_count; /* # of timeout calls */
+ int timeout;
volatile int wait_for_completion;
ushort status;
ulong32 info;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 59349a316e13..ce0228e26aec 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -748,3 +748,69 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
+
+static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
+{
+ ulong flags;
+ Scsi_Cmnd *scp;
+ unchar b, t;
+
+ spin_lock_irqsave(&ha->smp_lock, flags);
+
+ for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
+ struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+ if (!cmndinfo->internal_command) {
+ b = scp->device->channel;
+ t = scp->device->id;
+ if (t == (unchar)id && b == (unchar)busnum) {
+ TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
+ cmndinfo->timeout = gdth_update_timeout(scp, 0);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+}
+
+static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
+{
+ ulong flags;
+ Scsi_Cmnd *scp;
+ unchar b, t;
+
+ spin_lock_irqsave(&ha->smp_lock, flags);
+
+ for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
+ struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+ if (!cmndinfo->internal_command) {
+ b = scp->device->channel;
+ t = scp->device->id;
+ if (t == (unchar)id && b == (unchar)busnum) {
+ TRACE2(("gdth_start_timeout(): update_timeout()\n"));
+ gdth_update_timeout(scp, cmndinfo->timeout);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+}
+
+static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
+{
+ int oldto;
+
+ oldto = scp->timeout_per_command;
+ scp->timeout_per_command = timeout;
+
+ if (timeout == 0) {
+ del_timer(&scp->eh_timeout);
+ scp->eh_timeout.data = (unsigned long) NULL;
+ scp->eh_timeout.expires = 0;
+ } else {
+ if (scp->eh_timeout.data != (unsigned long) NULL)
+ del_timer(&scp->eh_timeout);
+ scp->eh_timeout.data = (unsigned long) scp;
+ scp->eh_timeout.expires = jiffies + timeout;
+ add_timer(&scp->eh_timeout);
+ }
+
+ return oldto;
+}
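
Note: the gdth hunks above restore a suspend/resume scheme for command timeouts on locked buses and host drives: gdth_update_timeout() installs a new timeout and hands back the old one, gdth_stop_timeout() parks every queued command with a zero timeout (saving the old value in cmndinfo->timeout), and gdth_start_timeout() later re-arms from that saved value. The standalone C sketch below models only that save/zero/restore bookkeeping; model_cmd, update_timeout() and the other names are made up for illustration, not gdth code, and the real driver additionally holds ha->smp_lock and drives scp->eh_timeout via del_timer()/add_timer().

#include <stdio.h>

/* Standalone model of gdth's timeout save/zero/restore bookkeeping.
 * "model_cmd" is a hypothetical stand-in for the driver's per-command
 * state; it is not the kernel's struct scsi_cmnd. */
struct model_cmd {
	int timeout_per_command;  /* active timeout (0 == timer stopped) */
	int saved_timeout;        /* what gdth keeps in cmndinfo->timeout */
};

/* Mirrors gdth_update_timeout(): install a new timeout, return the old one. */
static int update_timeout(struct model_cmd *cmd, int timeout)
{
	int old = cmd->timeout_per_command;
	cmd->timeout_per_command = timeout;  /* 0 would mean del_timer() in the driver */
	return old;
}

static void stop_timeout(struct model_cmd *cmd)
{
	/* Park the command while its bus/drive is locked. */
	cmd->saved_timeout = update_timeout(cmd, 0);
}

static void start_timeout(struct model_cmd *cmd)
{
	/* Re-arm with whatever was in force before the lock. */
	update_timeout(cmd, cmd->saved_timeout);
}

int main(void)
{
	struct model_cmd cmd = { .timeout_per_command = 30 * 100, .saved_timeout = 0 };

	stop_timeout(&cmd);
	printf("locked:   timeout=%d saved=%d\n", cmd.timeout_per_command, cmd.saved_timeout);
	start_timeout(&cmd);
	printf("unlocked: timeout=%d saved=%d\n", cmd.timeout_per_command, cmd.saved_timeout);
	return 0;
}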
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 9b900cc9ebe8..45e6fdacf36e 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -20,6 +20,9 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
ulong64 *paddr);
static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
+static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
+static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
+static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
#endif
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 87e09f35d3d4..7b1502c0ab6e 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
init_event_struct(evt_struct,
handle_cmd_rsp,
VIOSRP_SRP_FORMAT,
- cmnd->request->timeout/HZ);
+ cmnd->timeout_per_command/HZ);
evt_struct->cmnd = cmnd;
evt_struct->cmnd_done = done;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 740bad435995..8723182dff35 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -544,7 +544,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
pc->scsi_cmd = cmd;
pc->done = done;
- pc->timeout = jiffies + cmd->request->timeout;
+ pc->timeout = jiffies + cmd->timeout_per_command;
if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 098739deb02e..973aca9b2862 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,8 +3670,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
sdev->no_uld_attach = 1;
}
if (ipr_is_vset_device(res)) {
- blk_queue_rq_timeout(sdev->request_queue,
- IPR_VSET_RW_TIMEOUT);
+ sdev->timeout = IPR_VSET_RW_TIMEOUT;
blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
}
if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index ef683f0d2b5a..bc9e6ddf41df 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
scb->cmd.dcdb.segment_4G = 0;
scb->cmd.dcdb.enhanced_sg = 0;
- TimeOut = scb->scsi_cmd->request->timeout;
+ TimeOut = scb->scsi_cmd->timeout_per_command;
if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
if (!scb->sg_len) {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 1eca82420aab..299e075a7b34 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
scsi_queue_work(conn->session->host, &conn->xmitwork);
}
-static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct iscsi_conn *conn;
- enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
+ enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
cls_session = starget_to_session(scsi_target(scmd->device));
session = cls_session->dd_data;
@@ -1494,14 +1494,14 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
- rc = BLK_EH_RESET_TIMER;
+ rc = EH_RESET_TIMER;
goto done;
}
conn = session->leadconn;
if (!conn) {
/* In the middle of shuting down */
- rc = BLK_EH_RESET_TIMER;
+ rc = EH_RESET_TIMER;
goto done;
}
@@ -1513,21 +1513,20 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
*/
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
(conn->ping_timeout * HZ), jiffies))
- rc = BLK_EH_RESET_TIMER;
+ rc = EH_RESET_TIMER;
/*
* if we are about to check the transport then give the command
* more time
*/
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
jiffies))
- rc = BLK_EH_RESET_TIMER;
+ rc = EH_RESET_TIMER;
/* if in the middle of checking the transport then give us more time */
if (conn->ping_task)
- rc = BLK_EH_RESET_TIMER;
+ rc = EH_RESET_TIMER;
done:
spin_unlock(&session->lock);
- debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
return rc;
}
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 837b095ba90d..48ee8c7f5bdd 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task)
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
- blk_abort_request(qc->scsicmd->request);
+ scsi_req_abort_cmd(qc->scsicmd);
scsi_schedule_eh(qc->scsicmd->device->host);
return;
}
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 0001374bd6b2..b4f9368f116a 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
-enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
+enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 744838780ada..a8e3ef309070 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -673,43 +673,43 @@ out:
return;
}
-enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
+enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
unsigned long flags;
if (!task) {
- cmd->request->timeout /= 2;
+ cmd->timeout_per_command /= 2;
SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
- cmd, task, (cmd->request->timeout ?
- "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
- if (!cmd->request->timeout)
- return BLK_EH_NOT_HANDLED;
- return BLK_EH_RESET_TIMER;
+ cmd, task, (cmd->timeout_per_command ?
+ "EH_RESET_TIMER" : "EH_NOT_HANDLED"));
+ if (!cmd->timeout_per_command)
+ return EH_NOT_HANDLED;
+ return EH_RESET_TIMER;
}
spin_lock_irqsave(&task->task_state_lock, flags);
BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
- "BLK_EH_HANDLED\n", cmd, task);
- return BLK_EH_HANDLED;
+ SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
+ cmd, task);
+ return EH_HANDLED;
}
if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
- "BLK_EH_RESET_TIMER\n",
+ "EH_RESET_TIMER\n",
cmd, task);
- return BLK_EH_RESET_TIMER;
+ return EH_RESET_TIMER;
}
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
+ SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
cmd, task);
- return BLK_EH_NOT_HANDLED;
+ return EH_NOT_HANDLED;
}
int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *task)
return;
}
- blk_abort_request(sc->request);
+ scsi_req_abort_cmd(sc);
scsi_schedule_eh(sc->device->host);
}
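
Note: in the reverted sas_scsi_timed_out() above, a timeout against a command whose sas_task has already vanished halves cmd->timeout_per_command and returns EH_RESET_TIMER, so the command gets a geometrically shrinking grace period before EH_NOT_HANDLED finally pushes it into error handling. The self-contained sketch below is not libsas code; timed_out_task_gone() is a made-up stand-in that only shows how quickly that back-off converges.

#include <stdio.h>

/* Model of the "task gone" branch of sas_scsi_timed_out(): keep halving the
 * remaining timeout and re-arming until it reaches zero. */
enum eh_ret { EH_NOT_HANDLED, EH_RESET_TIMER };

static enum eh_ret timed_out_task_gone(int *timeout)
{
	*timeout /= 2;
	return *timeout ? EH_RESET_TIMER : EH_NOT_HANDLED;
}

int main(void)
{
	int timeout = 30 * 100;  /* e.g. 30 s at HZ=100, in jiffies */
	int cycles = 0;

	while (timed_out_task_gone(&timeout) == EH_RESET_TIMER)
		cycles++;

	/* With 3000 jiffies this re-arms 11 times before giving up. */
	printf("re-armed %d times before EH_NOT_HANDLED\n", cycles);
	return 0;
}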
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index afe1de998763..97b763378e7d 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
* cmd has not been completed within the timeout period.
*/
static enum
-blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
+scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
{
struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
struct megasas_instance *instance;
@@ -1175,7 +1175,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
if (time_after(jiffies, scmd->jiffies_at_alloc +
(MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
- return BLK_EH_NOT_HANDLED;
+ return EH_NOT_HANDLED;
}
instance = cmd->instance;
@@ -1189,7 +1189,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
spin_unlock_irqrestore(instance->host->host_lock, flags);
}
- return BLK_EH_RESET_TIMER;
+ return EH_RESET_TIMER;
}
/**
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 3b7240e40819..c57c94c0ffd2 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
**
**----------------------------------------------------
*/
- if (np->settle_time && cmd->request->timeout >= HZ) {
- u_long tlimit = jiffies + cmd->request->timeout - HZ;
+ if (np->settle_time && cmd->timeout_per_command >= HZ) {
+ u_long tlimit = jiffies + cmd->timeout_per_command - HZ;
if (time_after(np->settle_time, tlimit))
np->settle_time = tlimit;
}
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8e425bf6743d..d0bd273f0f42 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2876,7 +2876,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
@@ -3145,7 +3145,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index de8279ad7d89..88bebb13bc52 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
"dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
- cmd, jiffies, cmd->request->timeout / HZ,
+ cmd, jiffies, cmd->timeout_per_command / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
/* FIXME: wait for hba to go online */
@@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
"to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
- ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
+ ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
stat = qla4xxx_reset_target(ha, ddb_entry);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2ee7526902a2..ee6be596503d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -291,6 +291,7 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
unsigned long flags;
cmd->device = dev;
+ init_timer(&cmd->eh_timeout);
INIT_LIST_HEAD(&cmd->list);
spin_lock_irqsave(&dev->list_lock, flags);
list_add_tail(&cmd->list, &dev->cmd_list);
@@ -651,19 +652,14 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
unsigned long timeout;
int rtn = 0;
- /*
- * We will use a queued command if possible, otherwise we will
- * emulate the queuing and calling of completion function ourselves.
- */
- atomic_inc(&cmd->device->iorequest_cnt);
-
/* check if the device is still usable */
if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
/* in SDEV_DEL we error all commands. DID_NO_CONNECT
* returns an immediate error upwards, and signals
* that the device is no longer present */
cmd->result = DID_NO_CONNECT << 16;
- scsi_done(cmd);
+ atomic_inc(&cmd->device->iorequest_cnt);
+ __scsi_done(cmd);
/* return 0 (because the command has been processed) */
goto out;
}
@@ -676,8 +672,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
* future requests should not occur until the device
* transitions out of the suspend state.
*/
-
- scsi_queue_retry(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
@@ -719,9 +714,21 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
host->resetting = 0;
}
+ /*
+ * AK: unlikely race here: for some reason the timer could
+ * expire before the serial number is set up below.
+ */
+ scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
+
scsi_log_send(cmd);
/*
+ * We will use a queued command if possible, otherwise we will
+ * emulate the queuing and calling of completion function ourselves.
+ */
+ atomic_inc(&cmd->device->iorequest_cnt);
+
+ /*
* Before we queue this command, check if the command
* length exceeds what the host adapter can handle.
*/
@@ -737,12 +744,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(host->host_lock, flags);
- /*
- * AK: unlikely race here: for some reason the timer could
- * expire before the serial number is set up below.
- *
- * TODO: kill serial or move to blk layer
- */
scsi_cmd_get_serial(host, cmd);
if (unlikely(host->shost_state == SHOST_DEL)) {
@@ -753,8 +754,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
spin_unlock_irqrestore(host->host_lock, flags);
if (rtn) {
- scsi_queue_retry(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
- rtn : SCSI_MLQUEUE_HOST_BUSY);
+ if (scsi_delete_timer(cmd)) {
+ atomic_inc(&cmd->device->iodone_cnt);
+ scsi_queue_insert(cmd,
+ (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
+ rtn : SCSI_MLQUEUE_HOST_BUSY);
+ }
SCSI_LOG_MLQUEUE(3,
printk("queuecommand : request rejected\n"));
}
@@ -765,6 +770,24 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
/**
+ * scsi_req_abort_cmd -- Request command recovery for the specified command
+ * @cmd: pointer to the SCSI command of interest
+ *
+ * This function requests that SCSI Core start recovery for the
+ * command by deleting the timer and adding the command to the eh
+ * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
+ * implement their own error recovery MAY ignore the timeout event if
+ * they generated scsi_req_abort_cmd.
+ */
+void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
+{
+ if (!scsi_delete_timer(cmd))
+ return;
+ scsi_times_out(cmd);
+}
+EXPORT_SYMBOL(scsi_req_abort_cmd);
+
+/**
* scsi_done - Enqueue the finished SCSI command into the done queue.
* @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
* ownership back to SCSI Core -- i.e. the LLDD has finished with it.
@@ -779,7 +802,42 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
*/
static void scsi_done(struct scsi_cmnd *cmd)
{
- blk_complete_request(cmd->request);
+ /*
+ * We don't have to worry about this one timing out anymore.
+ * If we are unable to remove the timer, then the command
+ * has already timed out. In which case, we have no choice but to
+ * let the timeout function run, as we have no idea where in fact
+ * that function could really be. It might be on another processor,
+ * etc, etc.
+ */
+ if (!scsi_delete_timer(cmd))
+ return;
+ __scsi_done(cmd);
+}
+
+/* Private entry to scsi_done() to complete a command when the timer
+ * isn't running --- used by scsi_times_out */
+void __scsi_done(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+
+ /*
+ * Set the serial numbers back to zero
+ */
+ cmd->serial_number = 0;
+
+ atomic_inc(&cmd->device->iodone_cnt);
+ if (cmd->result)
+ atomic_inc(&cmd->device->ioerr_cnt);
+
+ BUG_ON(!rq);
+
+ /*
+ * The uptodate/nbytes values don't matter, as we allow partial
+ * completes and thus will check this in the softirq callback
+ */
+ rq->completion_data = cmd;
+ blk_complete_request(rq);
}
/* Move this to a header if it becomes more generally useful */
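
Note: with the per-command timer moved back into scsi_cmnd, normal completion and the timeout path race against each other: scsi_done() and scsi_req_abort_cmd() both call scsi_delete_timer() and only continue if they actually removed the pending timer, which guarantees that exactly one of the two paths completes the command. The standalone model below captures just that "whoever cancels the timer owns completion" rule; model_cmd, claim_timer() and done() are illustrative stand-ins, not the kernel functions.

#include <stdbool.h>
#include <stdio.h>

/* Model of the single-completion rule: the timer token can be claimed once,
 * and only the claimer may finish the command. */
struct model_cmd {
	bool timer_pending;  /* stands in for scmd->eh_timeout being armed */
	bool completed;
};

/* Stands in for scsi_delete_timer(): returns true only if the timer was
 * still pending, i.e. the caller now owns completion. */
static bool claim_timer(struct model_cmd *cmd)
{
	if (!cmd->timer_pending)
		return false;
	cmd->timer_pending = false;
	return true;
}

static void done(struct model_cmd *cmd, const char *who)
{
	if (!claim_timer(cmd))
		return;              /* the other path already owns completion */
	cmd->completed = true;
	printf("%s completed the command\n", who);
}

int main(void)
{
	struct model_cmd cmd = { .timer_pending = true, .completed = false };

	done(&cmd, "scsi_done");       /* normal completion wins the race */
	done(&cmd, "scsi_times_out");  /* late timeout path does nothing */
	return 0;
}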
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index fecefa05cb62..39ce3aba1dac 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -112,8 +112,69 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
}
/**
+ * scsi_add_timer - Start timeout timer for a single scsi command.
+ * @scmd: scsi command that is about to start running.
+ * @timeout: amount of time to allow this command to run.
+ * @complete: timeout function to call if timer isn't canceled.
+ *
+ * Notes:
+ * This should be turned into an inline function. Each scsi command
+ * has its own timer, and as it is added to the queue, we set up the
+ * timer. When the command completes, we cancel the timer.
+ */
+void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
+ void (*complete)(struct scsi_cmnd *))
+{
+
+ /*
+ * If the clock was already running for this command, then
+ * first delete the timer. The timer handling code gets rather
+ * confused if we don't do this.
+ */
+ if (scmd->eh_timeout.function)
+ del_timer(&scmd->eh_timeout);
+
+ scmd->eh_timeout.data = (unsigned long)scmd;
+ scmd->eh_timeout.expires = jiffies + timeout;
+ scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
+
+ SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
+ " %d, (%p)\n", __func__,
+ scmd, timeout, complete));
+
+ add_timer(&scmd->eh_timeout);
+}
+
+/**
+ * scsi_delete_timer - Delete/cancel timer for a given function.
+ * @scmd: Cmd that we are canceling timer for
+ *
+ * Notes:
+ * This should be turned into an inline function.
+ *
+ * Return value:
+ * 1 if we were able to detach the timer. 0 if we blew it, and the
+ * timer function has already started to run.
+ */
+int scsi_delete_timer(struct scsi_cmnd *scmd)
+{
+ int rtn;
+
+ rtn = del_timer(&scmd->eh_timeout);
+
+ SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
+ " rtn: %d\n", __func__,
+ scmd, rtn));
+
+ scmd->eh_timeout.data = (unsigned long)NULL;
+ scmd->eh_timeout.function = NULL;
+
+ return rtn;
+}
+
+/**
* scsi_times_out - Timeout function for normal scsi commands.
- * @req: request that is timing out.
+ * @scmd: Cmd that is timing out.
*
* Notes:
* We do not need to lock this. There is the potential for a race
@@ -121,11 +182,9 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
* normal completion function determines that the timer has already
* fired, then it mustn't do anything.
*/
-enum blk_eh_timer_return scsi_times_out(struct request *req)
+void scsi_times_out(struct scsi_cmnd *scmd)
{
- struct scsi_cmnd *scmd = req->special;
- enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
- enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
+ enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
scsi_log_completion(scmd, TIMEOUT_ERROR);
@@ -137,20 +196,22 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
eh_timed_out = NULL;
if (eh_timed_out)
- rtn = eh_timed_out(scmd);
- switch (rtn) {
- case BLK_EH_NOT_HANDLED:
+ switch (eh_timed_out(scmd)) {
+ case EH_HANDLED:
+ __scsi_done(scmd);
+ return;
+ case EH_RESET_TIMER:
+ scsi_add_timer(scmd, scmd->timeout_per_command,
+ scsi_times_out);
+ return;
+ case EH_NOT_HANDLED:
break;
- default:
- return rtn;
}
if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
scmd->result |= DID_TIME_OUT << 16;
- return BLK_EH_HANDLED;
+ __scsi_done(scmd);
}
-
- return BLK_EH_NOT_HANDLED;
}
/**
@@ -1732,6 +1793,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
blk_rq_init(NULL, &req);
scmd->request = &req;
+ memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
scmd->cmnd = req.cmd;
@@ -1742,6 +1804,8 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
scmd->sc_data_direction = DMA_BIDIRECTIONAL;
+ init_timer(&scmd->eh_timeout);
+
spin_lock_irqsave(shost->host_lock, flags);
shost->tmf_in_progress = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
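
Note: the reverted scsi_times_out() above maps the transport's eh_timed_out() verdict onto three actions: EH_HANDLED completes the command, EH_RESET_TIMER re-arms scmd->eh_timeout for another timeout_per_command interval, and EH_NOT_HANDLED falls through into SCSI error recovery. The compact standalone sketch below mirrors that dispatch; complete_cmd(), rearm_timer() and escalate_to_eh() are hypothetical stand-ins for __scsi_done(), scsi_add_timer() and scsi_eh_scmd_add(), and the real EH_NOT_HANDLED path also covers the case where the command cannot be queued for recovery.

#include <stdio.h>

enum eh_ret { EH_NOT_HANDLED, EH_HANDLED, EH_RESET_TIMER };

/* Stand-ins for __scsi_done(), scsi_add_timer() and scsi_eh_scmd_add(). */
static void complete_cmd(void)       { printf("complete command\n"); }
static void rearm_timer(int jiffies) { printf("re-arm timer for %d jiffies\n", jiffies); }
static void escalate_to_eh(void)     { printf("queue for error handler\n"); }

static void times_out(enum eh_ret (*eh_timed_out)(void), int timeout_per_command)
{
	if (eh_timed_out) {
		switch (eh_timed_out()) {
		case EH_HANDLED:
			complete_cmd();
			return;
		case EH_RESET_TIMER:
			rearm_timer(timeout_per_command);
			return;
		case EH_NOT_HANDLED:
			break;
		}
	}
	escalate_to_eh();
}

static enum eh_ret always_reset(void) { return EH_RESET_TIMER; }

int main(void)
{
	times_out(always_reset, 30 * 100);  /* transport asks for more time */
	times_out(NULL, 30 * 100);          /* no hook: go straight to EH */
	return 0;
}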
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a873ffdf2562..ff5d56b3ee4d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -165,29 +165,6 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
}
/**
- * scsi_queue_retry - Try inserting a command in the midlevel queue.
- *
- * @cmd: command that we are adding to queue.
- * @reason: why we are inserting command to queue.
- *
- * Notes: This is very similar to scsi_queue_insert except that we
- * call this function when we don't know if the blk layer timer
- * is active or not. We could implement this either by calling
- * blk_delete_timer and inserting in the midlevel queue if we
- * successfully delete the timer OR setting appropriate result
- * field in the cmd and letting it go through the normal done
- * routines which will retry the command. For now, We call
- * blk_delete_timer!
- */
-void scsi_queue_retry(struct scsi_cmnd *cmd, int reason)
-{
- if (blk_delete_timer(cmd->request)) {
- atomic_inc(&cmd->device->iodone_cnt);
- scsi_queue_insert(cmd, reason);
- }
-}
-
-/**
* scsi_execute - insert request and wait for the result
* @sdev: scsi device
* @cmd: scsi command
@@ -1203,6 +1180,7 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
cmd->transfersize = req->data_len;
cmd->allowed = req->retries;
+ cmd->timeout_per_command = req->timeout;
return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
@@ -1437,26 +1415,17 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock);
- blk_complete_request(req);
+ __scsi_done(cmd);
}
static void scsi_softirq_done(struct request *rq)
{
- struct scsi_cmnd *cmd = rq->special;
- unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
+ struct scsi_cmnd *cmd = rq->completion_data;
+ unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
int disposition;
INIT_LIST_HEAD(&cmd->eh_entry);
- /*
- * Set the serial numbers back to zero
- */
- cmd->serial_number = 0;
-
- atomic_inc(&cmd->device->iodone_cnt);
- if (cmd->result)
- atomic_inc(&cmd->device->ioerr_cnt);
-
disposition = scsi_decide_disposition(cmd);
if (disposition != SUCCESS &&
time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
@@ -1705,7 +1674,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
- blk_queue_rq_timed_out(q, scsi_times_out);
return q;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 98faf0040517..79f0f7511204 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -4,7 +4,6 @@
#include <linux/device.h>
struct request_queue;
-struct request;
struct scsi_cmnd;
struct scsi_device;
struct scsi_host_template;
@@ -28,6 +27,7 @@ extern void scsi_exit_hosts(void);
extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
+extern void __scsi_done(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd);
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -49,7 +49,10 @@ extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
/* scsi_error.c */
-extern enum blk_eh_timer_return scsi_times_out(struct request *req);
+extern void scsi_add_timer(struct scsi_cmnd *, int,
+ void (*)(struct scsi_cmnd *));
+extern int scsi_delete_timer(struct scsi_cmnd *);
+extern void scsi_times_out(struct scsi_cmnd *cmd);
extern int scsi_error_handler(void *host);
extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
extern void scsi_eh_wakeup(struct Scsi_Host *shost);
@@ -64,7 +67,6 @@ int scsi_eh_get_sense(struct list_head *work_q,
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
extern void scsi_device_unbusy(struct scsi_device *sdev);
extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
-extern void scsi_queue_retry(struct scsi_cmnd *cmd, int reason);
extern void scsi_next_command(struct scsi_cmnd *cmd);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 7f618ee5ecea..ab3c71869be5 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -560,15 +560,12 @@ sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");
-/*
- * TODO: can we make these symlinks to the block layer ones?
- */
static ssize_t
sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev;
sdev = to_scsi_device(dev);
- return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
+ return snprintf (buf, 20, "%d\n", sdev->timeout / HZ);
}
static ssize_t
@@ -579,7 +576,7 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr,
int timeout;
sdev = to_scsi_device(dev);
sscanf (buf, "%d\n", &timeout);
- blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
+ sdev->timeout = timeout * HZ;
return count;
}
static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 48ba413f7f6a..f26299dfc5d5 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
int err;
dprintk("%lx %u\n", uaddr, len);
- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
+ err = blk_rq_map_user(q, rq, (void *)uaddr, len);
if (err) {
/*
* TODO: need to fixup sg_tablesize, max_segment_size,
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 9168883d0dfe..56823fd1fb84 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1950,15 +1950,15 @@ static int fc_vport_match(struct attribute_container *cont,
* Notes:
* This routine assumes no locks are held on entry.
*/
-static enum blk_eh_timer_return
+static enum scsi_eh_timer_return
fc_timed_out(struct scsi_cmnd *scmd)
{
struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
if (rport->port_state == FC_PORTSTATE_BLOCKED)
- return BLK_EH_RESET_TIMER;
+ return EH_RESET_TIMER;
- return BLK_EH_NOT_HANDLED;
+ return EH_NOT_HANDLED;
}
/*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index aaa3b0528a35..2a2bc89aba83 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -86,12 +86,6 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
-#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
-#define SD_MINORS 16
-#else
-#define SD_MINORS 0
-#endif
-
static int sd_revalidate_disk(struct gendisk *);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
@@ -383,6 +377,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
sector_t block = rq->sector;
sector_t threshold;
unsigned int this_count = rq->nr_sectors;
+ unsigned int timeout = sdp->timeout;
int ret;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -583,6 +578,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->transfersize = sdp->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = SD_MAX_RETRIES;
+ SCpnt->timeout_per_command = timeout;
/*
* This indicates that the command is ready from our end to be
@@ -1768,52 +1764,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
}
/**
- * sd_format_disk_name - format disk name
- * @prefix: name prefix - ie. "sd" for SCSI disks
- * @index: index of the disk to format name for
- * @buf: output buffer
- * @buflen: length of the output buffer
- *
- * SCSI disk names starts at sda. The 26th device is sdz and the
- * 27th is sdaa. The last one for two lettered suffix is sdzz
- * which is followed by sdaaa.
- *
- * This is basically 26 base counting with one extra 'nil' entry
- * at the beggining from the second digit on and can be
- * determined using similar method as 26 base conversion with the
- * index shifted -1 after each digit is computed.
- *
- * CONTEXT:
- * Don't care.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
-{
- const int base = 'z' - 'a' + 1;
- char *begin = buf + strlen(prefix);
- char *end = buf + buflen;
- char *p;
- int unit;
-
- p = end - 1;
- *p = '\0';
- unit = base;
- do {
- if (p == begin)
- return -EINVAL;
- *--p = 'a' + (index % unit);
- index = (index / unit) - 1;
- } while (index >= 0);
-
- memmove(begin, p, end - p);
- memcpy(buf, prefix, strlen(prefix));
-
- return 0;
-}
-
-/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
* for each scsi device (not just disks) present.
@@ -1851,7 +1801,7 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
- gd = alloc_disk(SD_MINORS);
+ gd = alloc_disk(16);
if (!gd)
goto out_free;
@@ -1865,8 +1815,8 @@ static int sd_probe(struct device *dev)
if (error)
goto out_put;
- error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
- if (error)
+ error = -EBUSY;
+ if (index >= SD_MAX_DISKS)
goto out_free_index;
sdkp->device = sdp;
@@ -1876,12 +1826,11 @@ static int sd_probe(struct device *dev)
sdkp->openers = 0;
sdkp->previous_state = 1;
- if (!sdp->request_queue->rq_timeout) {
+ if (!sdp->timeout) {
if (sdp->type != TYPE_MOD)
- blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
+ sdp->timeout = SD_TIMEOUT;
else
- blk_queue_rq_timeout(sdp->request_queue,
- SD_MOD_TIMEOUT);
+ sdp->timeout = SD_MOD_TIMEOUT;
}
device_initialize(&sdkp->dev);
@@ -1894,12 +1843,24 @@ static int sd_probe(struct device *dev)
get_device(&sdp->sdev_gendev);
- if (index < SD_MAX_DISKS) {
- gd->major = sd_major((index & 0xf0) >> 4);
- gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
- gd->minors = SD_MINORS;
- }
+ gd->major = sd_major((index & 0xf0) >> 4);
+ gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+ gd->minors = 16;
gd->fops = &sd_fops;
+
+ if (index < 26) {
+ sprintf(gd->disk_name, "sd%c", 'a' + index % 26);
+ } else if (index < (26 + 1) * 26) {
+ sprintf(gd->disk_name, "sd%c%c",
+ 'a' + index / 26 - 1,'a' + index % 26);
+ } else {
+ const unsigned int m1 = (index / 26 - 1) / 26 - 1;
+ const unsigned int m2 = (index / 26 - 1) % 26;
+ const unsigned int m3 = index % 26;
+ sprintf(gd->disk_name, "sd%c%c%c",
+ 'a' + m1, 'a' + m2, 'a' + m3);
+ }
+
gd->private_data = &sdkp->driver;
gd->queue = sdkp->device->request_queue;
@@ -1908,7 +1869,7 @@ static int sd_probe(struct device *dev)
blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
gd->driverfs_dev = &sdp->sdev_gendev;
- gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
+ gd->flags = GENHD_FL_DRIVERFS;
if (sdp->removable)
gd->flags |= GENHD_FL_REMOVABLE;
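
Note: the sd.c hunk drops sd_format_disk_name() in favour of the older open-coded sprintf() naming, but both compute the same bijective base-26 sequence (sda ... sdz, sdaa ... sdzz, sdaaa ...). The small standalone program below is not kernel code; sd_name() is a made-up wrapper around the reinstated three-branch arithmetic, useful only to spot-check the index-to-name mapping.

#include <stdio.h>

/* Reproduces the reinstated sd_probe() naming arithmetic for a given index. */
static void sd_name(int index, char *buf)
{
	if (index < 26) {
		sprintf(buf, "sd%c", 'a' + index % 26);
	} else if (index < (26 + 1) * 26) {
		sprintf(buf, "sd%c%c",
			'a' + index / 26 - 1, 'a' + index % 26);
	} else {
		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
		const unsigned int m2 = (index / 26 - 1) % 26;
		const unsigned int m3 = index % 26;
		sprintf(buf, "sd%c%c%c", 'a' + m1, 'a' + m2, 'a' + m3);
	}
}

int main(void)
{
	const int samples[] = { 0, 25, 26, 701, 702, 18277 };
	char name[8];

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		sd_name(samples[i], name);
		/* Expected: sda sdz sdaa sdzz sdaaa sdzzz */
		printf("%5d -> %s\n", samples[i], name);
	}
	return 0;
}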
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 05fc98833a37..9360e57b0468 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -47,6 +47,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
+#include <linux/scatterlist.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>
@@ -118,8 +119,7 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
unsigned bufflen; /* Size of (aggregate) data buffer */
unsigned b_malloc_len; /* actual len malloc'ed in buffer */
- struct page **pages;
- int page_order;
+ struct scatterlist *buffer;/* scatter list */
char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;
@@ -137,8 +137,6 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
char orphan; /* 1 -> drop on sight, 0 -> normal */
char sg_io_owned; /* 1 -> packet belongs to SG_IO */
volatile char done; /* 0->before bh, 1->before read, 2->read */
- struct request *rq;
- struct bio *bio;
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
@@ -178,7 +176,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
static int sg_fasync(int fd, struct file *filp, int mode);
/* tasklet or soft irq callback */
static void sg_cmd_done(void *data, char *sense, int result, int resid);
-static int sg_start_req(Sg_request *srp, unsigned char *cmd);
+static int sg_start_req(Sg_request * srp);
static void sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
@@ -190,6 +188,8 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
int read_only, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking);
+static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
+ int wr_xf, int *countp, unsigned char __user **up);
static int sg_write_xfer(Sg_request * srp);
static int sg_read_xfer(Sg_request * srp);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
@@ -197,6 +197,8 @@ static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
+static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
+static void sg_page_free(struct page *page, int size);
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
@@ -227,11 +229,6 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
cmd, filp->f_mode & FMODE_WRITE);
}
-static void sg_rq_end_io(struct request *rq, int uptodate)
-{
- sg_cmd_done(rq->end_io_data, rq->sense, rq->errors, rq->data_len);
-}
-
static int
sg_open(struct inode *inode, struct file *filp)
{
@@ -735,8 +732,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- k = sg_start_req(srp, cmnd);
- if (k) {
+ if ((k = sg_start_req(srp))) {
SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
return k; /* probably out of space --> ENOMEM */
@@ -767,11 +763,20 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
break;
}
hp->duration = jiffies_to_msecs(jiffies);
-
- srp->rq->timeout = timeout;
- blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
- srp->rq, 1, sg_rq_end_io);
- return 0;
+/* Now send everything of to mid-level. The next time we hear about this
+ packet is when sg_cmd_done() is called (i.e. a callback). */
+ if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
+ hp->dxfer_len, srp->data.k_use_sg, timeout,
+ SG_DEFAULT_RETRIES, srp, sg_cmd_done,
+ GFP_ATOMIC)) {
+ SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
+ /*
+ * most likely out of mem, but could also be a bad map
+ */
+ sg_finish_rem_req(srp);
+ return -ENOMEM;
+ } else
+ return 0;
}
static int
@@ -1187,7 +1192,8 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Sg_fd *sfp;
unsigned long offset, len, sa;
Sg_scatter_hold *rsv_schp;
- int k, length;
+ struct scatterlist *sg;
+ int k;
if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
return VM_FAULT_SIGBUS;
@@ -1197,14 +1203,15 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
offset, rsv_schp->k_use_sg));
+ sg = rsv_schp->buffer;
sa = vma->vm_start;
- length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
- for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
+ for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
+ ++k, sg = sg_next(sg)) {
len = vma->vm_end - sa;
- len = (len < length) ? len : length;
+ len = (len < sg->length) ? len : sg->length;
if (offset < len) {
- struct page *page = nth_page(rsv_schp->pages[k],
- offset >> PAGE_SHIFT);
+ struct page *page;
+ page = virt_to_page(page_address(sg_page(sg)) + offset);
get_page(page); /* increment page count */
vmf->page = page;
return 0; /* success */
@@ -1226,7 +1233,8 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
Sg_fd *sfp;
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
- int k, length;
+ int k;
+ struct scatterlist *sg;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
@@ -1240,10 +1248,11 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
return -ENOMEM; /* cannot map more than reserved buffer */
sa = vma->vm_start;
- length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
- for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
+ sg = rsv_schp->buffer;
+ for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
+ ++k, sg = sg_next(sg)) {
len = vma->vm_end - sa;
- len = (len < length) ? len : length;
+ len = (len < sg->length) ? len : sg->length;
sa += len;
}
@@ -1287,6 +1296,7 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
if (0 != result) {
struct scsi_sense_hdr sshdr;
+ memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
srp->header.status = 0xff & result;
srp->header.masked_status = status_byte(result);
srp->header.msg_status = msg_byte(result);
@@ -1622,32 +1632,8 @@ exit_sg(void)
idr_destroy(&sg_index_idr);
}
-static int __sg_start_req(struct sg_request *srp, struct sg_io_hdr *hp,
- unsigned char *cmd)
-{
- struct sg_fd *sfp = srp->parentfp;
- struct request_queue *q = sfp->parentdp->device->request_queue;
- struct request *rq;
- int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
-
- rq = blk_get_request(q, rw, GFP_ATOMIC);
- if (!rq)
- return -ENOMEM;
-
- memcpy(rq->cmd, cmd, hp->cmd_len);
-
- rq->cmd_len = hp->cmd_len;
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
-
- srp->rq = rq;
- rq->end_io_data = srp;
- rq->sense = srp->sense_b;
- rq->retries = SG_DEFAULT_RETRIES;
-
- return 0;
-}
-
-static int sg_start_req(Sg_request *srp, unsigned char *cmd)
+static int
+sg_start_req(Sg_request * srp)
{
int res;
Sg_fd *sfp = srp->parentfp;
@@ -1656,52 +1642,27 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
int dxfer_dir = hp->dxfer_direction;
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
- struct request_queue *q = sfp->parentdp->device->request_queue;
- struct rq_map_data map_data;
SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
-
- res = __sg_start_req(srp, hp, cmd);
- if (res)
- return res;
-
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
-
-#ifdef SG_ALLOW_DIO_CODE
if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
(dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
- (!sfp->parentdp->device->host->unchecked_isa_dma) &&
- blk_rq_aligned(q, hp->dxferp, dxfer_len))
- return sg_build_direct(srp, sfp, dxfer_len);
-#endif
+ (!sfp->parentdp->device->host->unchecked_isa_dma)) {
+ res = sg_build_direct(srp, sfp, dxfer_len);
+ if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */
+ return res;
+ }
if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
sg_link_reserve(sfp, srp, dxfer_len);
- else
+ else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
-
- if (!res) {
- struct request *rq = srp->rq;
- Sg_scatter_hold *schp = &srp->data;
- int iovec_count = (int) hp->iovec_count;
-
- map_data.pages = schp->pages;
- map_data.page_order = schp->page_order;
- map_data.nr_entries = schp->k_use_sg;
-
- if (iovec_count)
- res = blk_rq_map_user_iov(q, rq, &map_data, hp->dxferp,
- iovec_count,
- hp->dxfer_len, GFP_ATOMIC);
- else
- res = blk_rq_map_user(q, rq, &map_data, hp->dxferp,
- hp->dxfer_len, GFP_ATOMIC);
-
- if (!res)
- srp->bio = rq->bio;
+ if (res) {
+ sg_remove_scat(req_schp);
+ return res;
+ }
}
-
- return res;
+ return 0;
}
static void
@@ -1715,56 +1676,186 @@ sg_finish_rem_req(Sg_request * srp)
sg_unlink_reserve(sfp, srp);
else
sg_remove_scat(req_schp);
-
- if (srp->rq) {
- if (srp->bio)
- blk_rq_unmap_user(srp->bio);
-
- blk_put_request(srp->rq);
- }
-
sg_remove_request(sfp, srp);
}
static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
- int sg_bufflen = tablesize * sizeof(struct page *);
+ int sg_bufflen = tablesize * sizeof(struct scatterlist);
gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
- schp->pages = kzalloc(sg_bufflen, gfp_flags);
- if (!schp->pages)
+ /*
+ * TODO: test without low_dma, we should not need it since
+ * the block layer will bounce the buffer for us
+ *
+ * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
+ */
+ if (sfp->low_dma)
+ gfp_flags |= GFP_DMA;
+ schp->buffer = kzalloc(sg_bufflen, gfp_flags);
+ if (!schp->buffer)
return -ENOMEM;
+ sg_init_table(schp->buffer, tablesize);
schp->sglist_len = sg_bufflen;
return tablesize; /* number of scat_gath elements allocated */
}
+#ifdef SG_ALLOW_DIO_CODE
+/* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
+ /* TODO: hopefully we can use the generic block layer code */
+
+/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
+ - mapping of all pages not successful
+ (i.e., either completely successful or fails)
+*/
+static int
+st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
+ unsigned long uaddr, size_t count, int rw)
+{
+ unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = uaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
+ int res, i, j;
+ struct page **pages;
+
+ /* User attempted Overflow! */
+ if ((uaddr + count) < uaddr)
+ return -EINVAL;
+
+ /* Too big */
+ if (nr_pages > max_pages)
+ return -ENOMEM;
+
+ /* Hmm? */
+ if (count == 0)
+ return 0;
+
+ if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
+ return -ENOMEM;
+
+ /* Try to fault in all of the necessary pages */
+ down_read(&current->mm->mmap_sem);
+ /* rw==READ means read from drive, write into memory area */
+ res = get_user_pages(
+ current,
+ current->mm,
+ uaddr,
+ nr_pages,
+ rw == READ,
+ 0, /* don't force */
+ pages,
+ NULL);
+ up_read(&current->mm->mmap_sem);
+
+ /* Errors and no page mapped should return here */
+ if (res < nr_pages)
+ goto out_unmap;
+
+ for (i=0; i < nr_pages; i++) {
+ /* FIXME: flush superflous for rw==READ,
+ * probably wrong function for rw==WRITE
+ */
+ flush_dcache_page(pages[i]);
+ /* ?? Is locking needed? I don't think so */
+ /* if (!trylock_page(pages[i]))
+ goto out_unlock; */
+ }
+
+ sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
+ if (nr_pages > 1) {
+ sgl[0].length = PAGE_SIZE - sgl[0].offset;
+ count -= sgl[0].length;
+ for (i=1; i < nr_pages ; i++)
+ sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
+ }
+ else {
+ sgl[0].length = count;
+ }
+
+ kfree(pages);
+ return nr_pages;
+
+ out_unmap:
+ if (res > 0) {
+ for (j=0; j < res; j++)
+ page_cache_release(pages[j]);
+ res = 0;
+ }
+ kfree(pages);
+ return res;
+}
+
+
+/* And unmap them... */
+static int
+st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
+ int dirtied)
+{
+ int i;
+
+ for (i=0; i < nr_pages; i++) {
+ struct page *page = sg_page(&sgl[i]);
+
+ if (dirtied)
+ SetPageDirty(page);
+ /* unlock_page(page); */
+ /* FIXME: cache flush missing for rw==READ
+ * FIXME: call the correct reference counting function
+ */
+ page_cache_release(page);
+ }
+
+ return 0;
+}
+
+/* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
+#endif
+
+
/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
static int
sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
{
+#ifdef SG_ALLOW_DIO_CODE
sg_io_hdr_t *hp = &srp->header;
Sg_scatter_hold *schp = &srp->data;
- int res;
- struct request *rq = srp->rq;
- struct request_queue *q = sfp->parentdp->device->request_queue;
+ int sg_tablesize = sfp->parentdp->sg_tablesize;
+ int mx_sc_elems, res;
+ struct scsi_device *sdev = sfp->parentdp->device;
- res = blk_rq_map_user(q, rq, NULL, hp->dxferp, dxfer_len, GFP_ATOMIC);
- if (res)
- return res;
- srp->bio = rq->bio;
+ if (((unsigned long)hp->dxferp &
+ queue_dma_alignment(sdev->request_queue)) != 0)
+ return 1;
+
+ mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
+ if (mx_sc_elems <= 0) {
+ return 1;
+ }
+ res = st_map_user_pages(schp->buffer, mx_sc_elems,
+ (unsigned long)hp->dxferp, dxfer_len,
+ (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
+ if (res <= 0) {
+ sg_remove_scat(schp);
+ return 1;
+ }
+ schp->k_use_sg = res;
schp->dio_in_use = 1;
hp->info |= SG_INFO_DIRECT_IO;
return 0;
+#else
+ return 1;
+#endif
}
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
- int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
+ struct scatterlist *sg;
+ int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
- int blk_size = buff_size, order;
- gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+ int blk_size = buff_size;
+ struct page *p = NULL;
if (blk_size < 0)
return -EFAULT;
@@ -1788,26 +1879,15 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
} else
scatter_elem_sz_prev = num;
}
-
- if (sfp->low_dma)
- gfp_mask |= GFP_DMA;
-
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- gfp_mask |= __GFP_ZERO;
-
- order = get_order(num);
-retry:
- ret_sz = 1 << (PAGE_SHIFT + order);
-
- for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
- k++, rem_sz -= ret_sz) {
-
+ for (k = 0, sg = schp->buffer, rem_sz = blk_size;
+ (rem_sz > 0) && (k < mx_sc_elems);
+ ++k, rem_sz -= ret_sz, sg = sg_next(sg)) {
+
num = (rem_sz > scatter_elem_sz_prev) ?
- scatter_elem_sz_prev : rem_sz;
-
- schp->pages[k] = alloc_pages(gfp_mask, order);
- if (!schp->pages[k])
- goto out;
+ scatter_elem_sz_prev : rem_sz;
+ p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
+ if (!p)
+ return -ENOMEM;
if (num == scatter_elem_sz_prev) {
if (unlikely(ret_sz > scatter_elem_sz_prev)) {
@@ -1815,12 +1895,12 @@ retry:
scatter_elem_sz_prev = ret_sz;
}
}
+ sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
"ret_sz=%d\n", k, num, ret_sz));
} /* end of for loop */
- schp->page_order = order;
schp->k_use_sg = k;
SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
"rem_sz=%d\n", k, rem_sz));
@@ -1828,15 +1908,8 @@ retry:
schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
return -ENOMEM;
- return 0;
-out:
- for (i = 0; i < k; i++)
- __free_pages(schp->pages[k], order);
-
- if (--order >= 0)
- goto retry;
- return -ENOMEM;
+ return 0;
}
static int
@@ -1844,8 +1917,13 @@ sg_write_xfer(Sg_request * srp)
{
sg_io_hdr_t *hp = &srp->header;
Sg_scatter_hold *schp = &srp->data;
+ struct scatterlist *sg = schp->buffer;
int num_xfer = 0;
+ int j, k, onum, usglen, ksglen, res;
+ int iovec_count = (int) hp->iovec_count;
int dxfer_dir = hp->dxfer_direction;
+ unsigned char *p;
+ unsigned char __user *up;
int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
@@ -1859,9 +1937,83 @@ sg_write_xfer(Sg_request * srp)
&& ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
return 0;
- SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, k_use_sg=%d\n",
- num_xfer, schp->k_use_sg));
+ SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
+ num_xfer, iovec_count, schp->k_use_sg));
+ if (iovec_count) {
+ onum = iovec_count;
+ if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
+ return -EFAULT;
+ } else
+ onum = 1;
+
+ ksglen = sg->length;
+ p = page_address(sg_page(sg));
+ for (j = 0, k = 0; j < onum; ++j) {
+ res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
+ if (res)
+ return res;
+
+ for (; p; sg = sg_next(sg), ksglen = sg->length,
+ p = page_address(sg_page(sg))) {
+ if (usglen <= 0)
+ break;
+ if (ksglen > usglen) {
+ if (usglen >= num_xfer) {
+ if (__copy_from_user(p, up, num_xfer))
+ return -EFAULT;
+ return 0;
+ }
+ if (__copy_from_user(p, up, usglen))
+ return -EFAULT;
+ p += usglen;
+ ksglen -= usglen;
+ break;
+ } else {
+ if (ksglen >= num_xfer) {
+ if (__copy_from_user(p, up, num_xfer))
+ return -EFAULT;
+ return 0;
+ }
+ if (__copy_from_user(p, up, ksglen))
+ return -EFAULT;
+ up += ksglen;
+ usglen -= ksglen;
+ }
+ ++k;
+ if (k >= schp->k_use_sg)
+ return 0;
+ }
+ }
+
+ return 0;
+}
+static int
+sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
+ int wr_xf, int *countp, unsigned char __user **up)
+{
+ int num_xfer = (int) hp->dxfer_len;
+ unsigned char __user *p = hp->dxferp;
+ int count;
+
+ if (0 == sg_num) {
+ if (wr_xf && ('\0' == hp->interface_id))
+ count = (int) hp->flags; /* holds "old" input_size */
+ else
+ count = num_xfer;
+ } else {
+ sg_iovec_t iovec;
+ if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
+ return -EFAULT;
+ p = iovec.iov_base;
+ count = (int) iovec.iov_len;
+ }
+ if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
+ return -EFAULT;
+ if (up)
+ *up = p;
+ if (countp)
+ *countp = count;
return 0;
}
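
For reference, the iovec path parsed by sg_u_iovec() above corresponds to a caller setting iovec_count > 0 in sg_io_hdr_t and pointing dxferp at an array of sg_iovec_t. A minimal user-space sketch follows; the device node, buffer split, and timeout are illustrative assumptions, not values taken from this patch.

/* Minimal sketch: issue a 36-byte INQUIRY through /dev/sg0 using two
 * user-space iovecs, exercising the iovec_count handling above.
 * The device node and buffer sizes are illustrative assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 };	/* INQUIRY */
	unsigned char buf1[16], buf2[20], sense[32];
	sg_iovec_t iov[2] = {
		{ .iov_base = buf1, .iov_len = sizeof(buf1) },
		{ .iov_base = buf2, .iov_len = sizeof(buf2) },
	};
	sg_io_hdr_t hdr;
	int fd = open("/dev/sg0", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.iovec_count = 2;		/* dxferp now points at the iovec array */
	hdr.dxferp = iov;
	hdr.dxfer_len = sizeof(buf1) + sizeof(buf2);
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 5000;		/* milliseconds */
	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	return 0;
}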
@@ -1869,19 +2021,25 @@ static void
sg_remove_scat(Sg_scatter_hold * schp)
{
SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
- if (schp->pages && schp->sglist_len > 0) {
- if (!schp->dio_in_use) {
+ if (schp->buffer && (schp->sglist_len > 0)) {
+ struct scatterlist *sg = schp->buffer;
+
+ if (schp->dio_in_use) {
+#ifdef SG_ALLOW_DIO_CODE
+ st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
+#endif
+ } else {
int k;
- for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
+ for (k = 0; (k < schp->k_use_sg) && sg_page(sg);
+ ++k, sg = sg_next(sg)) {
SCSI_LOG_TIMEOUT(5, printk(
- "sg_remove_scat: k=%d, pg=0x%p\n",
- k, schp->pages[k]));
- __free_pages(schp->pages[k], schp->page_order);
+ "sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
+ k, sg_page(sg), sg->length));
+ sg_page_free(sg_page(sg), sg->length);
}
-
- kfree(schp->pages);
}
+ kfree(schp->buffer);
}
memset(schp, 0, sizeof (*schp));
}
@@ -1891,8 +2049,13 @@ sg_read_xfer(Sg_request * srp)
{
sg_io_hdr_t *hp = &srp->header;
Sg_scatter_hold *schp = &srp->data;
+ struct scatterlist *sg = schp->buffer;
int num_xfer = 0;
+ int j, k, onum, usglen, ksglen, res;
+ int iovec_count = (int) hp->iovec_count;
int dxfer_dir = hp->dxfer_direction;
+ unsigned char *p;
+ unsigned char __user *up;
int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
@@ -1907,7 +2070,53 @@ sg_read_xfer(Sg_request * srp)
return 0;
SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
- num_xfer, (int)hp->iovec_count, schp->k_use_sg));
+ num_xfer, iovec_count, schp->k_use_sg));
+ if (iovec_count) {
+ onum = iovec_count;
+ if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
+ return -EFAULT;
+ } else
+ onum = 1;
+
+ p = page_address(sg_page(sg));
+ ksglen = sg->length;
+ for (j = 0, k = 0; j < onum; ++j) {
+ res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
+ if (res)
+ return res;
+
+ for (; p; sg = sg_next(sg), ksglen = sg->length,
+ p = page_address(sg_page(sg))) {
+ if (usglen <= 0)
+ break;
+ if (ksglen > usglen) {
+ if (usglen >= num_xfer) {
+ if (__copy_to_user(up, p, num_xfer))
+ return -EFAULT;
+ return 0;
+ }
+ if (__copy_to_user(up, p, usglen))
+ return -EFAULT;
+ p += usglen;
+ ksglen -= usglen;
+ break;
+ } else {
+ if (ksglen >= num_xfer) {
+ if (__copy_to_user(up, p, num_xfer))
+ return -EFAULT;
+ return 0;
+ }
+ if (__copy_to_user(up, p, ksglen))
+ return -EFAULT;
+ up += ksglen;
+ usglen -= ksglen;
+ }
+ ++k;
+ if (k >= schp->k_use_sg)
+ return 0;
+ }
+ }
+
return 0;
}
@@ -1915,6 +2124,7 @@ static int
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
Sg_scatter_hold *schp = &srp->data;
+ struct scatterlist *sg = schp->buffer;
int k, num;
SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
@@ -1922,18 +2132,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
if ((!outp) || (num_read_xfer <= 0))
return 0;
- blk_rq_unmap_user(srp->bio);
- srp->bio = NULL;
-
- num = 1 << (PAGE_SHIFT + schp->page_order);
- for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
+ for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) {
+ num = sg->length;
if (num > num_read_xfer) {
- if (__copy_to_user(outp, page_address(schp->pages[k]),
+ if (__copy_to_user(outp, page_address(sg_page(sg)),
num_read_xfer))
return -EFAULT;
break;
} else {
- if (__copy_to_user(outp, page_address(schp->pages[k]),
+ if (__copy_to_user(outp, page_address(sg_page(sg)),
num))
return -EFAULT;
num_read_xfer -= num;
@@ -1968,22 +2175,24 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
+ struct scatterlist *sg = rsv_schp->buffer;
int k, num, rem;
srp->res_used = 1;
SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
rem = size;
- num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
- for (k = 0; k < rsv_schp->k_use_sg; k++) {
+ for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) {
+ num = sg->length;
if (rem <= num) {
+ sfp->save_scat_len = num;
+ sg->length = rem;
req_schp->k_use_sg = k + 1;
req_schp->sglist_len = rsv_schp->sglist_len;
- req_schp->pages = rsv_schp->pages;
+ req_schp->buffer = rsv_schp->buffer;
req_schp->bufflen = size;
req_schp->b_malloc_len = rsv_schp->b_malloc_len;
- req_schp->page_order = rsv_schp->page_order;
break;
} else
rem -= num;
@@ -1997,13 +2206,22 @@ static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
Sg_scatter_hold *req_schp = &srp->data;
+ Sg_scatter_hold *rsv_schp = &sfp->reserve;
SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
(int) req_schp->k_use_sg));
+ if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
+ struct scatterlist *sg = rsv_schp->buffer;
+
+ if (sfp->save_scat_len > 0)
+ (sg + (req_schp->k_use_sg - 1))->length =
+ (unsigned) sfp->save_scat_len;
+ else
+ SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
+ }
req_schp->k_use_sg = 0;
req_schp->bufflen = 0;
- req_schp->pages = NULL;
- req_schp->page_order = 0;
+ req_schp->buffer = NULL;
req_schp->sglist_len = 0;
sfp->save_scat_len = 0;
srp->res_used = 0;
@@ -2261,6 +2479,53 @@ sg_res_in_use(Sg_fd * sfp)
return srp ? 1 : 0;
}
+/* The size actually allocated is returned via retSzp whenever the return value is non-NULL */
+static struct page *
+sg_page_malloc(int rqSz, int lowDma, int *retSzp)
+{
+ struct page *resp = NULL;
+ gfp_t page_mask;
+ int order, a_size;
+ int resSz;
+
+ if ((rqSz <= 0) || (NULL == retSzp))
+ return resp;
+
+ if (lowDma)
+ page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
+ else
+ page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+
+ for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
+ order++, a_size <<= 1) ;
+ resSz = a_size; /* rounded up if necessary */
+ resp = alloc_pages(page_mask, order);
+ while ((!resp) && order) {
+ --order;
+ a_size >>= 1; /* divide by 2, until PAGE_SIZE */
+ resp = alloc_pages(page_mask, order); /* try half */
+ resSz = a_size;
+ }
+ if (resp) {
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ memset(page_address(resp), 0, resSz);
+ *retSzp = resSz;
+ }
+ return resp;
+}
+
+static void
+sg_page_free(struct page *page, int size)
+{
+ int order, a_size;
+
+ if (!page)
+ return;
+ for (order = 0, a_size = PAGE_SIZE; a_size < size;
+ order++, a_size <<= 1) ;
+ __free_pages(page, order);
+}
+
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
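
The reverted sg_page_malloc()/sg_page_free() pair above sizes every buffer as a power-of-two number of pages and backs off one order at a time when a higher order is unavailable. The rounding step is shown below as a standalone sketch; the function name and sample sizes are mine, not from the driver, and PAGE_SIZE is assumed to be 4096 for the demo.

/* Standalone illustration of the order rounding in sg_page_malloc():
 * round the request up to the next power-of-two multiple of the page
 * size.  The kernel code then tries alloc_pages() at that order and
 * halves the order on failure; that retry loop is not repeated here. */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096	/* assumed page size for the demo */

static int size_to_order(int rq_sz)
{
	int order = 0, a_size = DEMO_PAGE_SIZE;

	while (a_size < rq_sz) {
		order++;
		a_size <<= 1;
	}
	return order;
}

int main(void)
{
	int sizes[] = { 512, 4096, 6000, 32768, 40000 };
	int i;

	for (i = 0; i < 5; i++)
		printf("request %6d bytes -> order %d (%d bytes allocated)\n",
		       sizes[i], size_to_order(sizes[i]),
		       DEMO_PAGE_SIZE << size_to_order(sizes[i]));
	return 0;
}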
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index a8a33749da30..3292965bfd84 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
static int sr_prep_fn(struct request_queue *q, struct request *rq)
{
- int block = 0, this_count, s_size;
+ int block=0, this_count, s_size, timeout = SR_TIMEOUT;
struct scsi_cd *cd;
struct scsi_cmnd *SCpnt;
struct scsi_device *sdp = q->queuedata;
@@ -461,6 +461,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->transfersize = cd->device->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = MAX_RETRIES;
+ SCpnt->timeout_per_command = timeout;
/*
* This indicates that the command is ready from our end to be
@@ -619,8 +620,6 @@ static int sr_probe(struct device *dev)
disk->fops = &sr_bdops;
disk->flags = GENHD_FL_CD;
- blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
-
cd->device = sdev;
cd->disk = disk;
cd->driver = &sr_template;
@@ -880,7 +879,7 @@ static void sr_kref_release(struct kref *kref)
struct gendisk *disk = cd->disk;
spin_lock(&sr_index_lock);
- clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
+ clear_bit(disk->first_minor, sr_index_bits);
spin_unlock(&sr_index_lock);
unregister_cdrom(&cd->cdi);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index f4e6cde1fd0d..d39107b7669b 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
* Shorten our settle_time if needed for
* this command not to time out.
*/
- if (np->s.settle_time_valid && cmd->request->timeout) {
- unsigned long tlimit = jiffies + cmd->request->timeout;
+ if (np->s.settle_time_valid && cmd->timeout_per_command) {
+ unsigned long tlimit = jiffies + cmd->timeout_per_command;
tlimit -= SYM_CONF_TIMER_INTERVAL*2;
if (time_after(np->s.settle_time, tlimit)) {
np->s.settle_time = tlimit;
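
The hunk above trims the controller's settle deadline so that it ends at least two timer intervals before the command would time out. A standalone sketch of that clamp follows; time_after() is modelled with the usual wraparound-safe signed difference, and all jiffies values are made up for the demo.

/* Standalone illustration of the settle_time clamp above.  The values
 * are invented; demo_time_after() mimics the kernel's time_after(). */
#include <stdio.h>

static int demo_time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;	/* true if a is later than b */
}

int main(void)
{
	unsigned long jiffies = 1000;		/* pretend current time */
	unsigned long timeout = 30 * 100;	/* per-command timeout, jiffies */
	unsigned long interval = 100;		/* SYM_CONF_TIMER_INTERVAL stand-in */
	unsigned long settle_time = 5000;	/* current settle deadline */
	unsigned long tlimit = jiffies + timeout - 2 * interval;

	if (demo_time_after(settle_time, tlimit))
		settle_time = tlimit;		/* clamp, as in the hunk above */
	printf("settle_time clamped to %lu (tlimit %lu)\n", settle_time, tlimit);
	return 0;
}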