Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c   |  2
-rw-r--r--  block/blk-cgroup.h   |  3
-rw-r--r--  block/blk-core.c     | 44
-rw-r--r--  block/blk-mq.c       |  8
-rw-r--r--  block/bsg.c          |  9
-rw-r--r--  block/cfq-iosched.c  | 10
-rw-r--r--  block/compat_ioctl.c |  4
-rw-r--r--  block/ioctl.c        |  4
-rw-r--r--  block/scsi_ioctl.c   | 11
9 files changed, 42 insertions(+), 53 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e17da947f6bd..0ac817b750db 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -822,7 +822,6 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
- static atomic64_t id_seq = ATOMIC64_INIT(0);
struct blkcg *blkcg;
if (!parent_css) {
@@ -836,7 +835,6 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
spin_lock_init(&blkcg->lock);
INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index d3fd7aa3d2a3..c567865b5f1d 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -50,9 +50,6 @@ struct blkcg {
struct blkcg_gq *blkg_hint;
struct hlist_head blkg_list;
- /* for policies to test whether associated blkcg has changed */
- uint64_t id;
-
/* TODO: per-policy storage in blkcg */
unsigned int cfq_weight; /* belongs to cfq */
unsigned int cfq_leaf_weight;
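[Note: the replacement for the removed blkcg->id appears in the cfq-iosched.c hunks further down. The serial number the cgroup core assigns to every css takes over the role described by the deleted comment, so block policies no longer need a private counter.]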
diff --git a/block/blk-core.c b/block/blk-core.c
index bf930f481d43..6946a4275e6f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -83,18 +83,14 @@ void blk_queue_congestion_threshold(struct request_queue *q)
* @bdev: device
*
* Locates the passed device's request queue and returns the address of its
- * backing_dev_info
- *
- * Will return NULL if the request queue cannot be located.
+ * backing_dev_info. This function can only be called if @bdev is opened
+ * and the return value is never NULL.
*/
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
- struct backing_dev_info *ret = NULL;
struct request_queue *q = bdev_get_queue(bdev);
- if (q)
- ret = &q->backing_dev_info;
- return ret;
+ return &q->backing_dev_info;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
@@ -933,9 +929,9 @@ static struct io_context *rq_ioc(struct bio *bio)
* Get a free request from @q. This function may fail under memory
* pressure or if @q is dead.
*
- * Must be callled with @q->queue_lock held and,
- * Returns %NULL on failure, with @q->queue_lock held.
- * Returns !%NULL on success, with @q->queue_lock *not held*.
+ * Must be called with @q->queue_lock held and,
+ * Returns ERR_PTR on failure, with @q->queue_lock held.
+ * Returns request pointer on success, with @q->queue_lock *not held*.
*/
static struct request *__get_request(struct request_list *rl, int rw_flags,
struct bio *bio, gfp_t gfp_mask)
@@ -949,7 +945,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
int may_queue;
if (unlikely(blk_queue_dying(q)))
- return NULL;
+ return ERR_PTR(-ENODEV);
may_queue = elv_may_queue(q, rw_flags);
if (may_queue == ELV_MQUEUE_NO)
@@ -974,7 +970,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
* process is not a "batcher", and not
* exempted by the IO scheduler
*/
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
}
}
@@ -992,7 +988,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
* allocated with any setting of ->nr_requests
*/
if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
- return NULL;
+ return ERR_PTR(-ENOMEM);
q->nr_rqs[is_sync]++;
rl->count[is_sync]++;
@@ -1097,7 +1093,7 @@ fail_alloc:
rq_starved:
if (unlikely(rl->count[is_sync] == 0))
rl->starved[is_sync] = 1;
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
/**
@@ -1110,9 +1106,9 @@ rq_starved:
* Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
* function keeps retrying under memory pressure and fails iff @q is dead.
*
- * Must be callled with @q->queue_lock held and,
- * Returns %NULL on failure, with @q->queue_lock held.
- * Returns !%NULL on success, with @q->queue_lock *not held*.
+ * Must be called with @q->queue_lock held and,
+ * Returns ERR_PTR on failure, with @q->queue_lock held.
+ * Returns request pointer on success, with @q->queue_lock *not held*.
*/
static struct request *get_request(struct request_queue *q, int rw_flags,
struct bio *bio, gfp_t gfp_mask)
@@ -1125,12 +1121,12 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
rl = blk_get_rl(q, bio); /* transferred to @rq on success */
retry:
rq = __get_request(rl, rw_flags, bio, gfp_mask);
- if (rq)
+ if (!IS_ERR(rq))
return rq;
if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
blk_put_rl(rl);
- return NULL;
+ return rq;
}
/* wait on @rl and retry */
@@ -1167,7 +1163,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
spin_lock_irq(q->queue_lock);
rq = get_request(q, rw, NULL, gfp_mask);
- if (!rq)
+ if (IS_ERR(rq))
spin_unlock_irq(q->queue_lock);
/* q->queue_lock is unlocked at this point */
@@ -1219,8 +1215,8 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
{
struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
- if (unlikely(!rq))
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(rq))
+ return rq;
blk_rq_set_block_pc(rq);
@@ -1614,8 +1610,8 @@ get_rq:
* Returns with the queue unlocked.
*/
req = get_request(q, rw_flags, bio, GFP_NOIO);
- if (unlikely(!req)) {
- bio_endio(bio, -ENODEV); /* @q is dead */
+ if (IS_ERR(req)) {
+ bio_endio(bio, PTR_ERR(req)); /* @q is dead */
goto out_unlock;
}
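[Note: the blk-core.c hunks above replace NULL returns from the request allocators with ERR_PTR() values, so callers can tell a dying queue (-ENODEV) apart from allocation pressure (-ENOMEM). A minimal sketch of the calling convention this establishes; the function name is hypothetical, not part of the patch:

	/* Hypothetical caller of the new blk_get_request() contract. */
	static int example_issue_pc_request(struct request_queue *q)
	{
		struct request *rq;

		rq = blk_get_request(q, WRITE, GFP_KERNEL);
		if (IS_ERR(rq))			/* never NULL any more */
			return PTR_ERR(rq);	/* -ENODEV when @q is dying;
						 * -ENOMEM only for callers
						 * without __GFP_WAIT */

		blk_rq_set_block_pc(rq);
		/* ... set up buffers and issue the request ... */
		blk_put_request(rq);
		return 0;
	}
]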
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 383ea0cb1f0a..067e600002d3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -224,9 +224,11 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
struct blk_mq_hw_ctx *hctx;
struct request *rq;
struct blk_mq_alloc_data alloc_data;
+ int ret;
- if (blk_mq_queue_enter(q))
- return NULL;
+ ret = blk_mq_queue_enter(q);
+ if (ret)
+ return ERR_PTR(ret);
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
@@ -246,6 +248,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
ctx = alloc_data.ctx;
}
blk_mq_put_ctx(ctx);
+ if (!rq)
+ return ERR_PTR(-EWOULDBLOCK);
return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
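[Note: blk_mq_alloc_request() adopts the same convention: a failure to enter the queue is propagated as an error pointer, and coming back without a request becomes ERR_PTR(-EWOULDBLOCK) instead of NULL. A hedged caller sketch, assuming the four-argument signature suggested by the truncated hunk header (the final bool selecting the reserved tag pool):

	/* Hypothetical non-blocking allocation under the new contract. */
	static int example_mq_try_alloc(struct request_queue *q,
					struct request **rqp)
	{
		struct request *rq;

		rq = blk_mq_alloc_request(q, READ, GFP_ATOMIC, false);
		if (IS_ERR(rq))
			return PTR_ERR(rq);	/* -EWOULDBLOCK: no free tag
						 * and we chose not to wait */
		*rqp = rq;
		return 0;
	}
]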
diff --git a/block/bsg.c b/block/bsg.c
index ff46addde5d8..276e869e686c 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -270,8 +270,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
* map scatter-gather elements separately and string them to request
*/
rq = blk_get_request(q, rw, GFP_KERNEL);
- if (!rq)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(rq))
+ return rq;
blk_rq_set_block_pc(rq);
ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
@@ -285,8 +285,9 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
}
next_rq = blk_get_request(q, READ, GFP_KERNEL);
- if (!next_rq) {
- ret = -ENOMEM;
+ if (IS_ERR(next_rq)) {
+ ret = PTR_ERR(next_rq);
+ next_rq = NULL;
goto out;
}
rq->next_rq = next_rq;
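[Note: the added next_rq = NULL matters. The shared cleanup at bsg_map_hdr()'s out: label (outside the hunk shown here, paraphrased below rather than quoted) unmaps and puts any non-NULL next_rq; without the reset it would operate on an error pointer:

	out:
		...
		if (next_rq) {
			blk_rq_unmap_user(next_rq->bio);	/* would deref ERR_PTR */
			blk_put_request(next_rq);
		}
]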
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3f31cf9508e6..6f2751d305de 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -299,7 +299,7 @@ struct cfq_io_cq {
struct cfq_ttime ttime;
int ioprio; /* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
- uint64_t blkcg_id; /* the current blkcg ID */
+ uint64_t blkcg_serial_nr; /* the current blkcg serial */
#endif
};
@@ -3547,17 +3547,17 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct cfq_queue *sync_cfqq;
- uint64_t id;
+ uint64_t serial_nr;
rcu_read_lock();
- id = bio_blkcg(bio)->id;
+ serial_nr = bio_blkcg(bio)->css.serial_nr;
rcu_read_unlock();
/*
* Check whether blkcg has changed. The condition may trigger
* spuriously on a newly created cic but there's no harm.
*/
- if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
+ if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
return;
sync_cfqq = cic_to_cfqq(cic, 1);
@@ -3571,7 +3571,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
cfq_put_queue(sync_cfqq);
}
- cic->blkcg_id = id;
+ cic->blkcg_serial_nr = serial_nr;
}
#else
static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
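[Note: these cfq hunks are the other half of the blkcg->id removal. The cgroup core already stamps every cgroup_subsys_state with a monotonically increasing 64-bit serial_nr at creation, so comparing a cached copy against bio_blkcg(bio)->css.serial_nr detects a changed association exactly as the private id did. As a stand-alone sketch, with a hypothetical helper name:

	/* Hypothetical helper: has the blkcg behind @bio changed since we
	 * cached @serial_nr?  Call under rcu_read_lock(), as cfq does. */
	static bool example_blkcg_changed(struct bio *bio, uint64_t serial_nr)
	{
		return bio_blkcg(bio)->css.serial_nr != serial_nr;
	}
]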
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 18b282ce361e..f678c733df40 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -709,8 +709,6 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
if (!arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
- if (bdi == NULL)
- return -ENOTTY;
return compat_put_long(arg,
(bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
case BLKROGET: /* compatible */
@@ -731,8 +729,6 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
- if (bdi == NULL)
- return -ENOTTY;
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
return 0;
case BLKGETSIZE:
diff --git a/block/ioctl.c b/block/ioctl.c
index d6cda8147c91..6c7bf903742f 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -356,8 +356,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (!arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
- if (bdi == NULL)
- return -ENOTTY;
return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
case BLKROGET:
return put_int(arg, bdev_read_only(bdev) != 0);
@@ -386,8 +384,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if(!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
- if (bdi == NULL)
- return -ENOTTY;
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
return 0;
case BLKBSZSET:
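[Note: both ioctl paths can drop their NULL checks because of the blk_get_backing_dev_info() change in blk-core.c above: while the block device is open, its request queue, and with it the embedded backing_dev_info, cannot go away. As a worked example of the unit conversion these ioctls perform (ra_pages counts pages, the ioctl speaks 512-byte sectors; 4096-byte pages assumed):

	/* BLKRAGET with ra_pages = 32 and 4096-byte pages: */
	(32 * 4096) / 512 = 256		/* sectors, i.e. 128 KiB of read-ahead */
]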
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 9b8eaeca6a79..abb2e65b24cc 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -316,8 +316,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
ret = -ENOMEM;
rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
- if (!rq)
- goto out;
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
blk_rq_set_block_pc(rq);
if (hdr->cmd_len > BLK_MAX_CDB) {
@@ -387,7 +387,6 @@ out_free_cdb:
kfree(rq->cmd);
out_put_request:
blk_put_request(rq);
-out:
return ret;
}
@@ -457,8 +456,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
}
rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
- if (!rq) {
- err = -ENOMEM;
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto error;
}
blk_rq_set_block_pc(rq);
@@ -548,6 +547,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
int err;
rq = blk_get_request(q, WRITE, __GFP_WAIT);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
blk_rq_set_block_pc(rq);
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq->cmd[0] = cmd;
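[Note: this last hunk covers a call site that previously assumed a __GFP_WAIT allocation could not fail. Under the new contract blk_get_request() can still return ERR_PTR(-ENODEV) for a queue being torn down, so the result must be checked before blk_rq_set_block_pc() dereferences it:

	rq = blk_get_request(q, WRITE, __GFP_WAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* queue dying; waiting cannot help */
	blk_rq_set_block_pc(rq);	/* safe: rq is a real request here */
]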