author     Borislav Petkov <petkovbb@googlemail.com>   2008-04-02 11:46:46 +1100
committer  Stephen Rothwell <sfr@canb.auug.org.au>     2008-04-02 11:46:46 +1100
commit     4c4830a986741e84567ee5f439ee1d64009b3e94 (patch)
tree       a4e349a9666be83dbe4b5fae6b5e7d63b5b1fa7e
parent     ca06fdfa3cbc3c9b0bab5d4b99ebb5a0ff1ccba1 (diff)
ide-tape: remove pipeline-specific code from idetape_add_chrdev_write_request
Refrain from adding more write requests to the pipeline and queue them
directly on the device's request queue instead.

[bart: re-do for minimal behavior changes]

Signed-off-by: Borislav Petkov <petkovbb@gmail.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
-rw-r--r--  drivers/ide/ide-tape.c  55
1 files changed, 2 insertions, 53 deletions
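
As an aside for readers of this log: pieced together from the two hunks below, the post-patch write path reads roughly as follows. The few lines that fall between the hunks are not visible in this diff; their content (dropping the spinlock and plugging the pipeline via idetape_plug_pipeline() before re-checking) is an assumption based on the surrounding context, so treat this as a sketch rather than the exact resulting function.

/*
 * Sketch of idetape_add_chrdev_write_request() after this patch,
 * reconstructed from the hunks below. Lines marked "assumed" are not
 * visible in the diff and are a best guess from the surrounding code.
 */
static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
{
	idetape_tape_t *tape = drive->driver_data;
	unsigned long flags;

	debug_log(DBG_CHRDEV, "Enter %s\n", __func__);

	while (1) {
		spin_lock_irqsave(&tape->lock, flags);
		if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
			/* Wait for the in-flight request, then retry. */
			idetape_wait_for_request(drive, tape->active_data_rq);
			spin_unlock_irqrestore(&tape->lock, flags);	/* assumed */
		} else {						/* assumed */
			spin_unlock_irqrestore(&tape->lock, flags);	/* assumed */
			idetape_plug_pipeline(drive);			/* assumed */
			if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
				     &tape->flags))
				continue;
			/*
			 * No pipelining: queue the write directly on the
			 * device's request queue and wait for completion.
			 */
			return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
					blocks, tape->merge_stage->bh);
		}
	}
}

Compared with the old code, this drops the pipeline stage allocation, the 90%-full plug heuristic and the deferred -EIO return; any error now surfaces directly through the return value of idetape_queue_rw_tail().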
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 8883eea4658f..71ebee001882 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2202,28 +2202,16 @@ static void idetape_wait_first_stage(ide_drive_t *drive)
spin_unlock_irqrestore(&tape->lock, flags);
}
-/*
- * Try to add a character device originated write request to our pipeline. In
- * case we don't succeed, we revert to non-pipelined operation mode for this
- * request. In order to accomplish that, we
- *
- * 1. Try to allocate a new pipeline stage.
- * 2. If we can't, wait for more and more requests to be serviced and try again
- * each time.
- * 3. If we still can't allocate a stage, fallback to non-pipelined operation
- * mode for this request.
- */
+/* Queue up a character device originated write request. */
static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
{
idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *new_stage;
unsigned long flags;
- struct request *rq;
debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
/* Attempt to allocate a new stage. Beware possible race conditions. */
- while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
+ while (1) {
spin_lock_irqsave(&tape->lock, flags);
if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
idetape_wait_for_request(drive, tape->active_data_rq);
@@ -2234,49 +2222,10 @@ static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
&tape->flags))
continue;
- /*
- * The machine is short on memory. Fallback to non-
- * pipelined operation mode for this request.
- */
return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
blocks, tape->merge_stage->bh);
}
}
- rq = &new_stage->rq;
- idetape_init_rq(rq, REQ_IDETAPE_WRITE);
- /* Doesn't actually matter - We always assume sequential access */
- rq->sector = tape->first_frame;
- rq->current_nr_sectors = blocks;
- rq->nr_sectors = blocks;
-
- idetape_switch_buffers(tape, new_stage);
- idetape_add_stage_tail(drive, new_stage);
- tape->pipeline_head++;
- idetape_calculate_speeds(drive);
-
- /*
- * Estimate whether the tape has stopped writing by checking if our
- * write pipeline is currently empty. If we are not writing anymore,
- * wait for the pipeline to be almost completely full (90%) before
- * starting to service requests, so that we will be able to keep up with
- * the higher speeds of the tape.
- */
- if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
- if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
- tape->nr_stages >= tape->max_stages -
- tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
- tape->blk_size) {
- tape->measure_insert_time = 1;
- tape->insert_time = jiffies;
- tape->insert_size = 0;
- tape->insert_speed = 0;
- idetape_plug_pipeline(drive);
- }
- }
- if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
- /* Return a deferred error */
- return -EIO;
- return blocks;
}
/*