Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c  33
1 file changed, 15 insertions(+), 18 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2b75f1ef7386..54c2a23f4e55 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -716,7 +716,7 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
}
static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
- int *srcu_idx, unsigned bio_opf)
+ int *srcu_idx, blk_opf_t bio_opf)
{
if (bio_opf & REQ_NOWAIT)
return dm_get_live_table_fast(md);
@@ -725,7 +725,7 @@ static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
}
static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
- unsigned bio_opf)
+ blk_opf_t bio_opf)
{
if (bio_opf & REQ_NOWAIT)
dm_put_live_table_fast(md);
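For context on the type change above: blk_opf_t is the kernel's dedicated (sparse __bitwise) type for the operation-plus-flags word carried in bio->bi_opf, so passing it instead of a bare unsigned lets static checking catch accidental mixing of flag namespaces. A minimal userspace sketch of the idea; the typedef and the REQ_NOWAIT bit position below are illustrative stand-ins, not the kernel's real definitions:

/*
 * Illustrative model only: a dedicated typedef for the op+flags word.
 * The kernel's blk_opf_t additionally carries the __bitwise annotation
 * so sparse can catch type confusion; the bit value here is a placeholder.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int blk_opf_t;

#define REQ_NOWAIT	((blk_opf_t)(1u << 21))	/* placeholder bit */

static bool want_fast_table(blk_opf_t bio_opf)
{
	/* mirrors the REQ_NOWAIT test in dm_get_live_table_bio() */
	return bio_opf & REQ_NOWAIT;
}

int main(void)
{
	blk_opf_t opf = REQ_NOWAIT;

	printf("fast path: %d\n", want_fast_table(opf));
	return 0;
}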
@@ -1033,7 +1033,7 @@ static void clone_endio(struct bio *bio)
}
if (static_branch_unlikely(&zoned_enabled) &&
- unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
+ unlikely(bdev_is_zoned(bio->bi_bdev)))
dm_zone_endio(io, bio);
if (endio) {
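On the zoned check above: bdev_is_zoned() takes the block_device directly, so clone_endio() no longer has to look up the request_queue itself just to ask whether the device is zoned. A rough standalone sketch of that wrapper pattern; the struct layouts are invented for illustration and are not the kernel's:

/*
 * Invented, simplified types: the point is only that the zoned query moves
 * behind a bdev-level helper instead of open-coding the queue lookup.
 */
#include <stdbool.h>
#include <stdio.h>

struct request_queue { bool zoned; };
struct block_device { struct request_queue *queue; };

static struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->queue;
}

static bool bdev_is_zoned(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->zoned;
}

int main(void)
{
	struct request_queue q = { .zoned = true };
	struct block_device bdev = { .queue = &q };

	printf("zoned: %d\n", bdev_is_zoned(&bdev));
	return 0;
}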
@@ -1086,23 +1086,18 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
sector_t target_offset = dm_target_offset(ti, sector);
sector_t len = max_io_len_target_boundary(ti, target_offset);
- sector_t max_len;
/*
* Does the target need to split IO even further?
* - varied (per target) IO splitting is a tenet of DM; this
* explains why stacked chunk_sectors based splitting via
- * blk_max_size_offset() isn't possible here. So pass in
- * ti->max_io_len to override stacked chunk_sectors.
+ * blk_queue_split() isn't possible here.
*/
- if (ti->max_io_len) {
- max_len = blk_max_size_offset(ti->table->md->queue,
- target_offset, ti->max_io_len);
- if (len > max_len)
- len = max_len;
- }
-
- return len;
+ if (!ti->max_io_len)
+ return len;
+ return min_t(sector_t, len,
+ min(queue_max_sectors(ti->table->md->queue),
+ blk_chunk_sectors_left(target_offset, ti->max_io_len)));
}
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
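The rewritten max_io_len() above first caps the IO at the target boundary, then, only when the target sets max_io_len, further caps it at the smaller of the queue's max_sectors and the room left before the next max_io_len-aligned boundary. A userspace sketch of that arithmetic, assuming blk_chunk_sectors_left() returns the sectors remaining up to the next chunk boundary (power-of-two chunk case):

/*
 * Userspace sketch of the splitting arithmetic; values are made up.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static unsigned int chunk_sectors_left(sector_t offset, unsigned int chunk)
{
	return chunk - (offset & (chunk - 1));	/* power-of-two chunk assumed */
}

int main(void)
{
	sector_t target_offset = 1000;	/* sector offset within the target */
	sector_t len = 512;		/* room left before the target boundary */
	unsigned int max_io_len = 128;	/* per-target split size */
	unsigned int queue_max = 256;	/* queue_max_sectors() stand-in */

	unsigned int left = chunk_sectors_left(target_offset, max_io_len);
	sector_t capped = len;

	if (queue_max < left)
		left = queue_max;
	if (left < capped)
		capped = left;

	/* 1000 % 128 == 104, so only 24 sectors remain in this chunk */
	printf("split to %llu sectors\n", (unsigned long long)capped);
	return 0;
}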
@@ -1516,7 +1511,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
static bool is_abnormal_io(struct bio *bio)
{
- unsigned int op = bio_op(bio);
+ enum req_op op = bio_op(bio);
if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
switch (op) {
@@ -1547,6 +1542,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
case REQ_OP_WRITE_ZEROES:
num_bios = ti->num_write_zeroes_bios;
break;
+ default:
+ break;
}
/*
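Why the new default arm above: once op is declared as enum req_op (see the is_abnormal_io() hunk), switching on it without covering every enumerator triggers the compiler's -Wswitch warning, and default: break; silences that for the ops this function does not handle. A small standalone illustration with placeholder enumerators, not the kernel's req_op values:

/*
 * Sketch of the -Wswitch effect that motivates the added "default: break;".
 */
#include <stdio.h>

enum req_op_sketch {
	OP_READ,
	OP_WRITE,
	OP_DISCARD,
	OP_WRITE_ZEROES,
};

static int bios_for_op(enum req_op_sketch op)
{
	int num_bios = 0;

	switch (op) {
	case OP_DISCARD:
		num_bios = 1;
		break;
	case OP_WRITE_ZEROES:
		num_bios = 1;
		break;
	default:	/* silences -Wswitch for the unhandled enumerators */
		break;
	}
	return num_bios;
}

int main(void)
{
	printf("%d\n", bios_for_op(OP_DISCARD));
	return 0;
}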
@@ -1628,7 +1625,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
* Only support bio polling for normal IO, and the target io is
* exactly inside the dm_io instance (verified in dm_poll_dm_io)
*/
- ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;
+ ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
setup_split_accounting(ci, len);
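On the !! added above: the masked result of bi_opf & REQ_POLLED is a flag word with a single high bit set, not 0 or 1, so it is collapsed to a proper boolean before being stored in submit_as_polled; this keeps the conversion explicit now that bi_opf is a __bitwise blk_opf_t, and it avoids the truncation trap shown below when the destination is a one-bit integer field. The layout here is illustrative only and is not struct clone_info:

/*
 * Why the !! matters: assigning a masked high bit straight into a one-bit
 * integer field silently truncates to 0. Bit position is a placeholder.
 */
#include <stdio.h>

#define REQ_POLLED (1u << 26)

struct clone_info_sketch {
	unsigned int submit_as_polled:1;
};

int main(void)
{
	unsigned int bi_opf = REQ_POLLED;
	struct clone_info_sketch a = { 0 }, b = { 0 };

	a.submit_as_polled = bi_opf & REQ_POLLED;	/* truncates to 0 */
	b.submit_as_polled = !!(bi_opf & REQ_POLLED);	/* normalizes to 1 */

	printf("without !!: %u, with !!: %u\n",
	       a.submit_as_polled, b.submit_as_polled);
	return 0;
}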
@@ -1725,7 +1722,7 @@ static void dm_submit_bio(struct bio *bio)
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
int srcu_idx;
struct dm_table *map;
- unsigned bio_opf = bio->bi_opf;
+ blk_opf_t bio_opf = bio->bi_opf;
map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
@@ -1899,7 +1896,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
del_gendisk(md->disk);
}
dm_queue_destroy_crypto_profile(md->queue);
- blk_cleanup_disk(md->disk);
+ put_disk(md->disk);
}
if (md->pending_io) {
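The blk_cleanup_disk() to put_disk() change in the last hunk tracks the block layer dropping the separate cleanup helper around this series: as I understand that change, the final reference put on the gendisk now releases the disk together with the queue that hangs off it, so cleanup_mapped_device() only needs the put. A toy userspace model of that last-reference teardown pattern; none of these names exist in the kernel:

/*
 * Toy model of last-reference teardown: dropping the only reference releases
 * the disk and its queue together. Purely illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_queue { int unused; };

struct toy_disk {
	int refcount;
	struct toy_queue *queue;
};

static struct toy_disk *toy_alloc_disk(void)
{
	struct toy_disk *d = calloc(1, sizeof(*d));

	d->queue = calloc(1, sizeof(*d->queue));
	d->refcount = 1;
	return d;
}

static void toy_put_disk(struct toy_disk *d)
{
	if (--d->refcount == 0) {
		free(d->queue);	/* queue is released with the final disk put */
		free(d);
		puts("disk and queue released");
	}
}

int main(void)
{
	struct toy_disk *d = toy_alloc_disk();

	toy_put_disk(d);	/* only reference: everything is torn down */
	return 0;
}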