author	Linus Torvalds <torvalds@linux-foundation.org>	2022-08-02 13:46:35 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-08-02 13:46:35 -0700
commit	c013d0af81f60cc7dbe357c4e2a925fb6738dbfe
tree	171dfdf928d0450a3fa98a58b2297d857804bb35 /drivers
parent	42df1cbf6a4726934cc5dac12bf263aa73c49fa3
parent	8d9fdb6011b4d413271eba3a62e10f89efecc419
Merge tag 'for-5.20/block-2022-07-29' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:

 - Improve the type checking of request flags (Bart)

 - Ensure queue mapping for a single queue always picks the right queue (Bart)

 - Sanitize the io priority handling (Jan)

 - rq-qos race fix (Jinke)

 - Reserved tags handling improvements (John)

 - Separate memory alignment from file/disk offset alignment for O_DIRECT (Keith)

 - Add new ublk driver, a userspace block driver using io_uring for communication with the userspace backend (Ming)

 - Use try_cmpxchg() to clean up the code in various spots (Uros)

 - Finally remove bdevname() (Christoph)

 - Clean up the zoned device handling (Christoph)

 - Clean up independent access range support (Christoph)

 - Clean up and improve block sysfs handling (Christoph)

 - Clean up and improve teardown of block devices. This turns the usual two-step process into something that is simpler to implement and handle in block drivers (Christoph)

 - Clean up chunk size handling (Christoph)

 - Misc cleanups and fixes (Bart, Bo, Dan, GuoYong, Jason, Keith, Liu, Ming, Sebastian, Yang, Ying)

* tag 'for-5.20/block-2022-07-29' of git://git.kernel.dk/linux-block: (178 commits)
  ublk_drv: fix double shift bug
  ublk_drv: make sure that correct flags(features) returned to userspace
  ublk_drv: fix error handling of ublk_add_dev
  ublk_drv: fix lockdep warning
  block: remove __blk_get_queue
  block: call blk_mq_exit_queue from disk_release for never added disks
  blk-mq: fix error handling in __blk_mq_alloc_disk
  ublk: defer disk allocation
  ublk: rewrite ublk_ctrl_get_queue_affinity to not rely on hctx->cpumask
  ublk: fold __ublk_create_dev into ublk_ctrl_add_dev
  ublk: cleanup ublk_ctrl_uring_cmd
  ublk: simplify ublk_ch_open and ublk_ch_release
  ublk: remove the empty open and release block device operations
  ublk: remove UBLK_IO_F_PREFLUSH
  ublk: add a MAINTAINERS entry
  block: don't allow the same type rq_qos add more than once
  mmc: fix disk/queue leak in case of adding disk failure
  ublk_drv: fix an IS_ERR() vs NULL check
  ublk: remove UBLK_IO_F_INTEGRITY
  ublk_drv: remove unneeded semicolon
  ...
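Much of the per-driver churn in this pull is mechanical fallout from the teardown rework: blk_cleanup_disk() and blk_cleanup_queue() are gone, and drivers now pair add_disk()/del_gendisk() with a plain put_disk(), whose release path tears down the queue. A minimal before/after sketch of the conversion pattern repeated throughout the hunks below (illustrative driver code, not taken from the patch):

	/* before this series: two-step teardown */
	err = add_disk(disk);
	if (err) {
		blk_cleanup_disk(disk);		/* tear down queue, drop disk ref */
		blk_mq_free_tag_set(set);
	}

	/* after this series: queue teardown folded into disk release */
	err = add_disk(disk);
	if (err) {
		put_disk(disk);			/* final ref drop also exits the queue */
		blk_mq_free_tag_set(set);
	}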
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/block/Kconfig | 9
-rw-r--r-- drivers/block/Makefile | 2
-rw-r--r-- drivers/block/amiflop.c | 2
-rw-r--r-- drivers/block/aoe/aoeblk.c | 2
-rw-r--r-- drivers/block/aoe/aoedev.c | 2
-rw-r--r-- drivers/block/ataflop.c | 5
-rw-r--r-- drivers/block/brd.c | 8
-rw-r--r-- drivers/block/drbd/drbd_actlog.c | 9
-rw-r--r-- drivers/block/drbd/drbd_bitmap.c | 2
-rw-r--r-- drivers/block/drbd/drbd_int.h | 5
-rw-r--r-- drivers/block/drbd/drbd_main.c | 4
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 24
-rw-r--r-- drivers/block/drbd/drbd_req.c | 6
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 2
-rw-r--r-- drivers/block/floppy.c | 8
-rw-r--r-- drivers/block/loop.c | 3
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 307
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.h | 5
-rw-r--r-- drivers/block/n64cart.c | 2
-rw-r--r-- drivers/block/nbd.c | 9
-rw-r--r-- drivers/block/null_blk/main.c | 15
-rw-r--r-- drivers/block/null_blk/null_blk.h | 12
-rw-r--r-- drivers/block/null_blk/trace.h | 2
-rw-r--r-- drivers/block/null_blk/zoned.c | 12
-rw-r--r-- drivers/block/paride/pcd.c | 4
-rw-r--r-- drivers/block/paride/pd.c | 6
-rw-r--r-- drivers/block/paride/pf.c | 4
-rw-r--r-- drivers/block/pktcdvd.c | 14
-rw-r--r-- drivers/block/ps3disk.c | 4
-rw-r--r-- drivers/block/ps3vram.c | 4
-rw-r--r-- drivers/block/rbd.c | 2
-rw-r--r-- drivers/block/rnbd/rnbd-clt.c | 6
-rw-r--r-- drivers/block/rnbd/rnbd-proto.h | 7
-rw-r--r-- drivers/block/rnbd/rnbd-srv-dev.c | 1
-rw-r--r-- drivers/block/rnbd/rnbd-srv-dev.h | 1
-rw-r--r-- drivers/block/rnbd/rnbd-srv-sysfs.c | 5
-rw-r--r-- drivers/block/rnbd/rnbd-srv.c | 9
-rw-r--r-- drivers/block/rnbd/rnbd-srv.h | 3
-rw-r--r-- drivers/block/sunvdc.c | 4
-rw-r--r-- drivers/block/swim.c | 2
-rw-r--r-- drivers/block/swim3.c | 2
-rw-r--r-- drivers/block/sx8.c | 6
-rw-r--r-- drivers/block/ublk_drv.c | 1545
-rw-r--r-- drivers/block/virtio_blk.c | 3
-rw-r--r-- drivers/block/xen-blkback/blkback.c | 6
-rw-r--r-- drivers/block/xen-blkfront.c | 4
-rw-r--r-- drivers/block/z2ram.c | 3
-rw-r--r-- drivers/block/zram/zram_drv.c | 8
-rw-r--r-- drivers/cdrom/gdrom.c | 3
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 3
-rw-r--r-- drivers/md/bcache/super.c | 27
-rw-r--r-- drivers/md/dm-bufio.c | 26
-rw-r--r-- drivers/md/dm-ebs-target.c | 15
-rw-r--r-- drivers/md/dm-flakey.c | 8
-rw-r--r-- drivers/md/dm-integrity.c | 76
-rw-r--r-- drivers/md/dm-io.c | 38
-rw-r--r-- drivers/md/dm-kcopyd.c | 26
-rw-r--r-- drivers/md/dm-log.c | 8
-rw-r--r-- drivers/md/dm-raid.c | 2
-rw-r--r-- drivers/md/dm-raid1.c | 12
-rw-r--r-- drivers/md/dm-snap-persistent.c | 25
-rw-r--r-- drivers/md/dm-table.c | 6
-rw-r--r-- drivers/md/dm-writecache.c | 12
-rw-r--r-- drivers/md/dm-zone.c | 88
-rw-r--r-- drivers/md/dm-zoned-metadata.c | 5
-rw-r--r-- drivers/md/dm-zoned-target.c | 25
-rw-r--r-- drivers/md/dm-zoned.h | 2
-rw-r--r-- drivers/md/dm.c | 33
-rw-r--r-- drivers/md/md-bitmap.c | 6
-rw-r--r-- drivers/md/md.c | 16
-rw-r--r-- drivers/md/md.h | 3
-rw-r--r-- drivers/md/raid1.c | 14
-rw-r--r-- drivers/md/raid10.c | 22
-rw-r--r-- drivers/md/raid5-cache.c | 12
-rw-r--r-- drivers/md/raid5-ppl.c | 12
-rw-r--r-- drivers/md/raid5.c | 3
-rw-r--r-- drivers/memstick/core/ms_block.c | 3
-rw-r--r-- drivers/memstick/core/mspro_block.c | 3
-rw-r--r-- drivers/mmc/core/block.c | 6
-rw-r--r-- drivers/mmc/core/queue.c | 4
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 4
-rw-r--r-- drivers/mtd/ubi/block.c | 4
-rw-r--r-- drivers/nvdimm/btt.c | 8
-rw-r--r-- drivers/nvdimm/pmem.c | 6
-rw-r--r-- drivers/nvme/host/apple.c | 5
-rw-r--r-- drivers/nvme/host/core.c | 5
-rw-r--r-- drivers/nvme/host/fc.c | 18
-rw-r--r-- drivers/nvme/host/ioctl.c | 4
-rw-r--r-- drivers/nvme/host/multipath.c | 4
-rw-r--r-- drivers/nvme/host/nvme.h | 4
-rw-r--r-- drivers/nvme/host/pci.c | 4
-rw-r--r-- drivers/nvme/host/rdma.c | 15
-rw-r--r-- drivers/nvme/host/tcp.c | 15
-rw-r--r-- drivers/nvme/host/zns.c | 6
-rw-r--r-- drivers/nvme/target/io-cmd-bdev.c | 17
-rw-r--r-- drivers/nvme/target/loop.c | 12
-rw-r--r-- drivers/nvme/target/zns.c | 24
-rw-r--r-- drivers/s390/block/dasd.c | 4
-rw-r--r-- drivers/s390/block/dasd_genhd.c | 4
-rw-r--r-- drivers/s390/block/dasd_int.h | 2
-rw-r--r-- drivers/s390/block/dcssblk.c | 8
-rw-r--r-- drivers/s390/block/scm_blk.c | 4
-rw-r--r-- drivers/scsi/aacraid/comminit.c | 2
-rw-r--r-- drivers/scsi/aacraid/linit.c | 2
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 4
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_emc.c | 2
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_hp_sw.c | 4
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_rdac.c | 2
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 14
-rw-r--r-- drivers/scsi/hosts.c | 14
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_os.c | 16
-rw-r--r-- drivers/scsi/scsi_error.c | 22
-rw-r--r-- drivers/scsi/scsi_lib.c | 26
-rw-r--r-- drivers/scsi/scsi_priv.h | 4
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 2
-rw-r--r-- drivers/scsi/sd.c | 10
-rw-r--r-- drivers/scsi/sd_zbc.c | 12
-rw-r--r-- drivers/scsi/sr.c | 4
-rw-r--r-- drivers/target/target_core_iblock.c | 4
-rw-r--r-- drivers/ufs/core/ufshcd.c | 4
-rw-r--r-- drivers/ufs/core/ufshpb.c | 7
121 files changed, 2130 insertions(+), 856 deletions(-)
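Another recurring pattern below is the stricter typing of request flags: the operation proper becomes enum req_op, operation-plus-modifier flags become blk_opf_t, and call sites combine the two with |. A short sketch of the convention the converted drivers follow (hypothetical helper modeled on the drbd conversion below and on the blkdev_issue_flush() style; not part of the patch):

	static int example_flush(struct block_device *bdev)
	{
		enum req_op op = REQ_OP_WRITE;			/* the operation itself */
		blk_opf_t op_flags = REQ_PREFLUSH | REQ_FUA;	/* modifier flags */
		struct bio *bio;
		int ret;

		/* bio_alloc() now takes the combined value as a blk_opf_t */
		bio = bio_alloc(bdev, 0, op | op_flags, GFP_KERNEL);
		ret = submit_bio_wait(bio);	/* an empty PREFLUSH write flushes the cache */
		bio_put(bio);
		return ret;
	}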
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index fdb81f2794cd..e19fcab016ba 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -408,6 +408,15 @@ config BLK_DEV_RBD
If unsure, say N.
+config BLK_DEV_UBLK
+ tristate "Userspace block driver (Experimental)"
+ select IO_URING
+ help
+ io_uring based userspace block driver. Together with the ublk server,
+ ublk has been working well, but the interface with userspace and the
+ command data definitions aren't finalized yet and might change according
+ to future requirements, so mark it as experimental for now.
+
source "drivers/block/rnbd/Kconfig"
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 934a9c7c3a7c..be631352567e 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -39,4 +39,6 @@ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
+obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
+
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 5a566f2fd533..4c8b2ba579ee 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1802,7 +1802,7 @@ static int fd_alloc_disk(int drive, int system)
unit[drive].gendisk[system] = disk;
err = add_disk(disk);
if (err)
- blk_cleanup_disk(disk);
+ put_disk(disk);
return err;
}
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 348adf335217..12b3ca8f6f4a 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -427,7 +427,7 @@ aoeblk_gdalloc(void *vp)
return;
out_disk_cleanup:
- blk_cleanup_disk(gd);
+ put_disk(gd);
err_tagset:
blk_mq_free_tag_set(set);
err_mempool:
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index b381d1c3ef32..3523dd82d7a0 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -277,7 +277,7 @@ freedev(struct aoedev *d)
if (d->gd) {
aoedisk_rm_debugfs(d);
del_gendisk(d->gd);
- blk_cleanup_disk(d->gd);
+ put_disk(d->gd);
blk_mq_free_tag_set(&d->tag_set);
}
t = d->targets;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index e232cc4fd444..9deb4df6bdb8 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -2031,7 +2031,7 @@ static void ataflop_probe(dev_t dev)
return;
cleanup_disk:
- blk_cleanup_disk(unit[drive].disk[type]);
+ put_disk(unit[drive].disk[type]);
unit[drive].disk[type] = NULL;
}
@@ -2045,7 +2045,6 @@ static void atari_floppy_cleanup(void)
if (!unit[i].disk[type])
continue;
del_gendisk(unit[i].disk[type]);
- blk_cleanup_queue(unit[i].disk[type]->queue);
put_disk(unit[i].disk[type]);
}
blk_mq_free_tag_set(&unit[i].tag_set);
@@ -2064,7 +2063,7 @@ static void atari_cleanup_floppy_disk(struct atari_floppy_struct *fs)
continue;
if (fs->registered[type])
del_gendisk(fs->disk[type]);
- blk_cleanup_disk(fs->disk[type]);
+ put_disk(fs->disk[type]);
}
blk_mq_free_tag_set(&fs->tag_set);
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 6e3f2f0d2352..859499cd1ff8 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -256,7 +256,7 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, unsigned int op,
+ unsigned int len, unsigned int off, enum req_op op,
sector_t sector)
{
void *mem;
@@ -310,7 +310,7 @@ static void brd_submit_bio(struct bio *bio)
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, enum req_op op)
{
struct brd_device *brd = bdev->bd_disk->private_data;
int err;
@@ -419,7 +419,7 @@ static int brd_alloc(int i)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_free_dev:
list_del(&brd->brd_list);
kfree(brd);
@@ -439,7 +439,7 @@ static void brd_cleanup(void)
list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
del_gendisk(brd->brd_disk);
- blk_cleanup_disk(brd->brd_disk);
+ put_disk(brd->brd_disk);
brd_free_pages(brd);
list_del(&brd->brd_list);
kfree(brd);
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index f5bcded3640d..e27478ae579c 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -124,12 +124,13 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b
static int _drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev,
- sector_t sector, int op)
+ sector_t sector, enum req_op op)
{
struct bio *bio;
/* we do all our meta data IO in aligned 4k blocks. */
const int size = 4096;
- int err, op_flags = 0;
+ int err;
+ blk_opf_t op_flags = 0;
device->md_io.done = 0;
device->md_io.error = -ENODEV;
@@ -174,7 +175,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
}
int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
- sector_t sector, int op)
+ sector_t sector, enum req_op op)
{
int err;
D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);
@@ -385,7 +386,7 @@ static int __al_write_transaction(struct drbd_device *device, struct al_transact
write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
rcu_read_unlock();
if (write_al_updates) {
- if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
err = -EIO;
drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
} else {
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 9e060e49b3f8..603f6828dd79 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -977,7 +977,7 @@ static void drbd_bm_endio(struct bio *bio)
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
struct drbd_device *device = ctx->device;
- unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
+ enum req_op op = ctx->flags & BM_AIO_READ ? REQ_OP_READ : REQ_OP_WRITE;
struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
GFP_NOIO, &drbd_md_io_bio_set);
struct drbd_bitmap *b = device->bitmap;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 4d3efaa20b7b..f15f2f041596 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1495,7 +1495,7 @@ extern int drbd_resync_finished(struct drbd_device *device);
extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
- struct drbd_backing_dev *bdev, sector_t sector, int op);
+ struct drbd_backing_dev *bdev, sector_t sector, enum req_op op);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
struct drbd_backing_dev *bdev, unsigned int *done);
@@ -1547,8 +1547,7 @@ extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
- struct drbd_peer_request *, const unsigned,
- const unsigned, const int);
+ struct drbd_peer_request *, blk_opf_t, int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
sector_t, unsigned int,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2887350ae010..f3e4db16fd07 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2207,7 +2207,7 @@ void drbd_destroy_device(struct kref *kref)
if (device->bitmap) /* should no longer be there. */
drbd_bm_cleanup(device);
__free_page(device->md_io.page);
- blk_cleanup_disk(device->vdisk);
+ put_disk(device->vdisk);
kfree(device->rs_plan_s);
/* not for_each_connection(connection, resource):
@@ -2807,7 +2807,7 @@ out_no_minor_idr:
out_no_bitmap:
__free_page(device->md_io.page);
out_no_io_page:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_no_disk:
kref_put(&resource->kref, drbd_destroy_resource);
kfree(device);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6762be53f409..af4c7d65490b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1621,8 +1621,7 @@ static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, stru
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_device *device,
struct drbd_peer_request *peer_req,
- const unsigned op, const unsigned op_flags,
- const int fault_type)
+ const blk_opf_t opf, const int fault_type)
{
struct bio *bios = NULL;
struct bio *bio;
@@ -1668,8 +1667,7 @@ int drbd_submit_peer_request(struct drbd_device *device,
* generated bio, but a bio allocated on behalf of the peer.
*/
next_bio:
- bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags,
- GFP_NOIO);
+ bio = bio_alloc(device->ldev->backing_bdev, nr_pages, opf, GFP_NOIO);
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
bio->bi_private = peer_req;
@@ -2060,7 +2058,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
spin_unlock_irq(&device->resource->req_lock);
atomic_add(pi->size >> 9, &device->rs_sect_ev);
- if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE,
DRBD_FAULT_RS_WR) == 0)
return 0;
@@ -2383,14 +2381,14 @@ static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, co
/* see also bio_flags_to_wire()
* DRBD_REQ_*, because we need to semantically map the flags to data packet
* flags and back. We may replicate to other kernel versions. */
-static unsigned long wire_flags_to_bio_flags(u32 dpf)
+static blk_opf_t wire_flags_to_bio_flags(u32 dpf)
{
return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) |
(dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
}
-static unsigned long wire_flags_to_bio_op(u32 dpf)
+static enum req_op wire_flags_to_bio_op(u32 dpf)
{
if (dpf & DP_ZEROES)
return REQ_OP_WRITE_ZEROES;
@@ -2543,7 +2541,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
struct drbd_peer_request *peer_req;
struct p_data *p = pi->data;
u32 peer_seq = be32_to_cpu(p->seq_num);
- int op, op_flags;
+ enum req_op op;
+ blk_opf_t op_flags;
u32 dp_flags;
int err, tp;
@@ -2681,7 +2680,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
}
- err = drbd_submit_peer_request(device, peer_req, op, op_flags,
+ err = drbd_submit_peer_request(device, peer_req, op | op_flags,
DRBD_FAULT_DT_WR);
if (!err)
return 0;
@@ -2979,7 +2978,7 @@ submit_for_resync:
submit:
update_receiver_timing_details(connection, drbd_submit_peer_request);
inc_unacked(device);
- if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ,
fault_type) == 0)
return 0;
@@ -4951,7 +4950,7 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac
if (get_ldev(device)) {
struct drbd_peer_request *peer_req;
- const int op = REQ_OP_WRITE_ZEROES;
+ const enum req_op op = REQ_OP_WRITE_ZEROES;
peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
size, 0, GFP_NOIO);
@@ -4969,7 +4968,8 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac
spin_unlock_irq(&device->resource->req_lock);
atomic_add(pi->size >> 9, &device->rs_sect_ev);
- err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
+ err = drbd_submit_peer_request(device, peer_req, op,
+ DRBD_FAULT_RS_WR);
if (err) {
spin_lock_irq(&device->resource->req_lock);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index e64bcfba30ef..6d8dd14458c6 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -523,16 +523,14 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
- char b[BDEVNAME_SIZE];
-
if (!__ratelimit(&drbd_ratelimit_state))
return;
- drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
+ drbd_warn(device, "local %s IO error sector %llu+%u on %pg\n",
(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
(unsigned long long)req->i.sector,
req->i.size >> 9,
- bdevname(device->ldev->backing_bdev, b));
+ device->ldev->backing_bdev);
}
/* Helper for HANDED_OVER_TO_NETWORK.
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index af3051dd8912..0bb1a900c2d5 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -405,7 +405,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
spin_unlock_irq(&device->resource->req_lock);
atomic_add(size >> 9, &device->rs_sect_ev);
- if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ,
DRBD_FAULT_RS_RD) == 0)
return 0;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 015841f50f4e..ccad3d7b3ddd 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2859,7 +2859,7 @@ static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
if (WARN(atomic_read(&usage_count) == 0,
"warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
current_req, (long)blk_rq_pos(current_req),
- (unsigned long long) current_req->cmd_flags))
+ (__force unsigned long long) current_req->cmd_flags))
return BLK_STS_IOERR;
if (test_and_set_bit(0, &fdc_busy)) {
@@ -4557,7 +4557,7 @@ out:
return;
cleanup_disk:
- blk_cleanup_disk(disks[drive][type]);
+ put_disk(disks[drive][type]);
disks[drive][type] = NULL;
mutex_unlock(&floppy_probe_lock);
}
@@ -4753,7 +4753,7 @@ out_put_disk:
if (!disks[drive][0])
break;
del_timer_sync(&motor_off_timer[drive]);
- blk_cleanup_disk(disks[drive][0]);
+ put_disk(disks[drive][0]);
blk_mq_free_tag_set(&tag_sets[drive]);
}
return err;
@@ -4985,7 +4985,7 @@ static void __exit floppy_module_exit(void)
}
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
if (disks[drive][i])
- blk_cleanup_disk(disks[drive][i]);
+ put_disk(disks[drive][i]);
}
blk_mq_free_tag_set(&tag_sets[drive]);
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 084f9b8a0ba3..e3c0ba93c1a3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -2040,7 +2040,7 @@ static int loop_add(int i)
return i;
out_cleanup_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_cleanup_tags:
blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
@@ -2057,7 +2057,6 @@ static void loop_remove(struct loop_device *lo)
{
/* Make this loop device unreachable from pathname. */
del_gendisk(lo->lo_disk);
- blk_cleanup_queue(lo->lo_disk->queue);
blk_mq_free_tag_set(&lo->tag_set);
mutex_lock(&loop_ctl_mutex);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 27386a572ba4..562725d222a7 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -94,17 +94,12 @@
/* Device instance number, incremented each time a device is probed. */
static int instance;
-static LIST_HEAD(online_list);
-static LIST_HEAD(removing_list);
-static DEFINE_SPINLOCK(dev_lock);
-
/*
* Global variable used to hold the major block device number
* allocated in mtip_init().
*/
static int mtip_major;
static struct dentry *dfs_parent;
-static struct dentry *dfs_device_status;
static u32 cpu_use[NR_CPUS];
@@ -146,11 +141,8 @@ static bool mtip_check_surprise_removal(struct driver_data *dd)
pci_read_config_word(dd->pdev, 0x00, &vendor_id);
if (vendor_id == 0xFFFF) {
dd->sr = true;
- if (dd->queue)
- blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue);
- else
- dev_warn(&dd->pdev->dev,
- "%s: dd->queue is NULL\n", __func__);
+ if (dd->disk)
+ blk_mark_disk_dead(dd->disk);
return true; /* device removed */
}
@@ -2170,106 +2162,6 @@ static const struct attribute_group *mtip_disk_attr_groups[] = {
NULL,
};
-/* debugsfs entries */
-
-static ssize_t show_device_status(struct device_driver *drv, char *buf)
-{
- int size = 0;
- struct driver_data *dd, *tmp;
- unsigned long flags;
- char id_buf[42];
- u16 status = 0;
-
- spin_lock_irqsave(&dev_lock, flags);
- size += sprintf(&buf[size], "Devices Present:\n");
- list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
- if (dd->pdev) {
- if (dd->port &&
- dd->port->identify &&
- dd->port->identify_valid) {
- strlcpy(id_buf,
- (char *) (dd->port->identify + 10), 21);
- status = *(dd->port->identify + 141);
- } else {
- memset(id_buf, 0, 42);
- status = 0;
- }
-
- if (dd->port &&
- test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
- size += sprintf(&buf[size],
- " device %s %s (ftl rebuild %d %%)\n",
- dev_name(&dd->pdev->dev),
- id_buf,
- status);
- } else {
- size += sprintf(&buf[size],
- " device %s %s\n",
- dev_name(&dd->pdev->dev),
- id_buf);
- }
- }
- }
-
- size += sprintf(&buf[size], "Devices Being Removed:\n");
- list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
- if (dd->pdev) {
- if (dd->port &&
- dd->port->identify &&
- dd->port->identify_valid) {
- strlcpy(id_buf,
- (char *) (dd->port->identify+10), 21);
- status = *(dd->port->identify + 141);
- } else {
- memset(id_buf, 0, 42);
- status = 0;
- }
-
- if (dd->port &&
- test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
- size += sprintf(&buf[size],
- " device %s %s (ftl rebuild %d %%)\n",
- dev_name(&dd->pdev->dev),
- id_buf,
- status);
- } else {
- size += sprintf(&buf[size],
- " device %s %s\n",
- dev_name(&dd->pdev->dev),
- id_buf);
- }
- }
- }
- spin_unlock_irqrestore(&dev_lock, flags);
-
- return size;
-}
-
-static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
- size_t len, loff_t *offset)
-{
- int size = *offset;
- char *buf;
- int rv = 0;
-
- if (!len || *offset)
- return 0;
-
- buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- size += show_device_status(NULL, buf);
-
- *offset = size <= len ? size : len;
- size = copy_to_user(ubuf, buf, *offset);
- if (size)
- rv = -EFAULT;
-
- kfree(buf);
- return rv ? rv : *offset;
-}
-
static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
@@ -2363,13 +2255,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
return rv ? rv : *offset;
}
-static const struct file_operations mtip_device_status_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = mtip_hw_read_device_status,
- .llseek = no_llseek,
-};
-
static const struct file_operations mtip_regs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -2556,7 +2441,7 @@ static void mtip_softirq_done_fn(struct request *rq)
blk_mq_end_request(rq, cmd->status);
}
-static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_abort_cmd(struct request *req, void *data)
{
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
struct driver_data *dd = data;
@@ -2569,7 +2454,7 @@ static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
return true;
}
-static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_queue_cmd(struct request *req, void *data)
{
struct driver_data *dd = data;
@@ -3297,26 +3182,12 @@ static int mtip_block_getgeo(struct block_device *dev,
return 0;
}
-static int mtip_block_open(struct block_device *dev, fmode_t mode)
+static void mtip_block_free_disk(struct gendisk *disk)
{
- struct driver_data *dd;
+ struct driver_data *dd = disk->private_data;
- if (dev && dev->bd_disk) {
- dd = (struct driver_data *) dev->bd_disk->private_data;
-
- if (dd) {
- if (test_bit(MTIP_DDF_REMOVAL_BIT,
- &dd->dd_flag)) {
- return -ENODEV;
- }
- return 0;
- }
- }
- return -ENODEV;
-}
-
-static void mtip_block_release(struct gendisk *disk, fmode_t mode)
-{
+ ida_free(&rssd_index_ida, dd->index);
+ kfree(dd);
}
/*
@@ -3326,13 +3197,12 @@ static void mtip_block_release(struct gendisk *disk, fmode_t mode)
* layer.
*/
static const struct block_device_operations mtip_block_ops = {
- .open = mtip_block_open,
- .release = mtip_block_release,
.ioctl = mtip_block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtip_block_compat_ioctl,
#endif
.getgeo = mtip_block_getgeo,
+ .free_disk = mtip_block_free_disk,
.owner = THIS_MODULE
};
@@ -3487,12 +3357,11 @@ static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
return 0;
}
-static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
- bool reserved)
+static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req)
{
struct driver_data *dd = req->q->queuedata;
- if (reserved) {
+ if (blk_mq_is_reserved_rq(req)) {
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
cmd->status = BLK_STS_TIMEOUT;
@@ -3664,7 +3533,7 @@ init_hw_cmds_error:
disk_index_error:
ida_free(&rssd_index_ida, index);
ida_get_error:
- blk_cleanup_disk(dd->disk);
+ put_disk(dd->disk);
block_queue_alloc_init_error:
blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_tag_error:
@@ -3673,72 +3542,6 @@ protocol_init_error:
return rv;
}
-static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
-{
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
- cmd->status = BLK_STS_IOERR;
- blk_mq_complete_request(rq);
- return true;
-}
-
-/*
- * Block layer deinitialization function.
- *
- * Called by the PCI layer as each P320 device is removed.
- *
- * @dd Pointer to the driver data structure.
- *
- * return value
- * 0
- */
-static int mtip_block_remove(struct driver_data *dd)
-{
- mtip_hw_debugfs_exit(dd);
-
- if (dd->mtip_svc_handler) {
- set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
- wake_up_interruptible(&dd->port->svc_wait);
- kthread_stop(dd->mtip_svc_handler);
- }
-
- if (!dd->sr) {
- /*
- * Explicitly wait here for IOs to quiesce,
- * as mtip_standby_drive usually won't wait for IOs.
- */
- if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
- mtip_standby_drive(dd);
- }
- else
- dev_info(&dd->pdev->dev, "device %s surprise removal\n",
- dd->disk->disk_name);
-
- blk_freeze_queue_start(dd->queue);
- blk_mq_quiesce_queue(dd->queue);
- blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
- blk_mq_unquiesce_queue(dd->queue);
-
- if (dd->disk) {
- if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
- del_gendisk(dd->disk);
- if (dd->disk->queue) {
- blk_cleanup_queue(dd->queue);
- blk_mq_free_tag_set(&dd->tags);
- dd->queue = NULL;
- }
- put_disk(dd->disk);
- }
- dd->disk = NULL;
-
- ida_free(&rssd_index_ida, dd->index);
-
- /* De-initialize the protocol layer. */
- mtip_hw_exit(dd);
-
- return 0;
-}
-
/*
* Function called by the PCI layer when just before the
* machine shuts down.
@@ -3755,23 +3558,14 @@ static int mtip_block_shutdown(struct driver_data *dd)
{
mtip_hw_shutdown(dd);
- /* Delete our gendisk structure, and cleanup the blk queue. */
- if (dd->disk) {
- dev_info(&dd->pdev->dev,
- "Shutting down %s ...\n", dd->disk->disk_name);
+ dev_info(&dd->pdev->dev,
+ "Shutting down %s ...\n", dd->disk->disk_name);
- if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
- del_gendisk(dd->disk);
- if (dd->disk->queue) {
- blk_cleanup_queue(dd->queue);
- blk_mq_free_tag_set(&dd->tags);
- }
- put_disk(dd->disk);
- dd->disk = NULL;
- dd->queue = NULL;
- }
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
- ida_free(&rssd_index_ida, dd->index);
+ blk_mq_free_tag_set(&dd->tags);
+ put_disk(dd->disk);
return 0;
}
@@ -3905,7 +3699,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
const struct cpumask *node_mask;
int cpu, i = 0, j = 0;
int my_node = NUMA_NO_NODE;
- unsigned long flags;
/* Allocate memory for this devices private data. */
my_node = pcibus_to_node(pdev->bus);
@@ -3952,9 +3745,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
dd->pdev = pdev;
dd->numa_node = my_node;
- INIT_LIST_HEAD(&dd->online_list);
- INIT_LIST_HEAD(&dd->remove_list);
-
memset(dd->workq_name, 0, 32);
snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
@@ -4047,11 +3837,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
else
rv = 0; /* device in rebuild state, return 0 from probe */
- /* Add to online list even if in ftl rebuild */
- spin_lock_irqsave(&dev_lock, flags);
- list_add(&dd->online_list, &online_list);
- spin_unlock_irqrestore(&dev_lock, flags);
-
goto done;
block_initialize_err:
@@ -4085,14 +3870,7 @@ done:
static void mtip_pci_remove(struct pci_dev *pdev)
{
struct driver_data *dd = pci_get_drvdata(pdev);
- unsigned long flags, to;
-
- set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
-
- spin_lock_irqsave(&dev_lock, flags);
- list_del_init(&dd->online_list);
- list_add(&dd->remove_list, &removing_list);
- spin_unlock_irqrestore(&dev_lock, flags);
+ unsigned long to;
mtip_check_surprise_removal(dd);
synchronize_irq(dd->pdev->irq);
@@ -4109,11 +3887,35 @@ static void mtip_pci_remove(struct pci_dev *pdev)
"Completion workers still active!\n");
}
- blk_mark_disk_dead(dd->disk);
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
- /* Clean up the block layer. */
- mtip_block_remove(dd);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
+
+ mtip_hw_debugfs_exit(dd);
+
+ if (dd->mtip_svc_handler) {
+ set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
+ wake_up_interruptible(&dd->port->svc_wait);
+ kthread_stop(dd->mtip_svc_handler);
+ }
+
+ if (!dd->sr) {
+ /*
+ * Explicitly wait here for IOs to quiesce,
+ * as mtip_standby_drive usually won't wait for IOs.
+ */
+ if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
+ mtip_standby_drive(dd);
+ }
+ else
+ dev_info(&dd->pdev->dev, "device %s surprise removal\n",
+ dd->disk->disk_name);
+
+ blk_mq_free_tag_set(&dd->tags);
+
+ /* De-initialize the protocol layer. */
+ mtip_hw_exit(dd);
if (dd->isr_workq) {
destroy_workqueue(dd->isr_workq);
@@ -4124,14 +3926,10 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev);
- spin_lock_irqsave(&dev_lock, flags);
- list_del_init(&dd->remove_list);
- spin_unlock_irqrestore(&dev_lock, flags);
-
- kfree(dd);
-
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
pci_set_drvdata(pdev, NULL);
+
+ put_disk(dd->disk);
}
/*
@@ -4250,15 +4048,6 @@ static int __init mtip_init(void)
pr_warn("Error creating debugfs parent\n");
dfs_parent = NULL;
}
- if (dfs_parent) {
- dfs_device_status = debugfs_create_file("device_status",
- 0444, dfs_parent, NULL,
- &mtip_device_status_fops);
- if (IS_ERR_OR_NULL(dfs_device_status)) {
- pr_err("Error creating device_status node\n");
- dfs_device_status = NULL;
- }
- }
/* Register our PCI operations. */
error = pci_register_driver(&mtip_pci_driver);
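The mtip32xx conversion above also shows the new ->free_disk hook: instead of guarding open() with a removal bit, the driver frees its private data from a free_disk callback that runs once the last disk reference is dropped. A sketch of the pattern (hypothetical driver, not from the patch):

	static void mydrv_free_disk(struct gendisk *disk)
	{
		struct mydrv_data *dd = disk->private_data;	/* hypothetical type */

		/* called when the final put_disk() drops the last reference */
		kfree(dd);
	}

	static const struct block_device_operations mydrv_ops = {
		.owner     = THIS_MODULE,
		.free_disk = mydrv_free_disk,
	};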
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 6816beb45352..f7328f19ac5c 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -149,7 +149,6 @@ enum {
MTIP_DDF_RESUME_BIT = 6,
MTIP_DDF_INIT_DONE_BIT = 7,
MTIP_DDF_REBUILD_FAILED_BIT = 8,
- MTIP_DDF_REMOVAL_BIT = 9,
MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
(1 << MTIP_DDF_SEC_LOCK_BIT) |
@@ -462,10 +461,6 @@ struct driver_data {
int isr_binding;
- struct list_head online_list; /* linkage for online list */
-
- struct list_head remove_list; /* linkage for removing list */
-
int unal_qdepth; /* qdepth of unaligned IO queue */
};
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index e094d2b8b5a9..d914156db2d8 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -157,7 +157,7 @@ static int __init n64cart_probe(struct platform_device *pdev)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out:
return err;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 07f3c139a3d7..f5d098a148cb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -250,7 +250,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
struct gendisk *disk = nbd->disk;
del_gendisk(disk);
- blk_cleanup_disk(disk);
+ put_disk(disk);
blk_mq_free_tag_set(&nbd->tag_set);
/*
@@ -393,8 +393,7 @@ static u32 req_to_nbd_cmd_type(struct request *req)
}
}
-static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
- bool reserved)
+static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
struct nbd_device *nbd = cmd->nbd;
@@ -880,7 +879,7 @@ static void recv_work(struct work_struct *work)
kfree(args);
}
-static bool nbd_clear_req(struct request *req, void *data, bool reserved)
+static bool nbd_clear_req(struct request *req, void *data)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -1833,7 +1832,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
out_free_work:
destroy_workqueue(nbd->recv_workq);
out_err_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_free_idr:
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, index);
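The nbd and mtip32xx hunks also reflect the reserved-tag rework: blk-mq timeout and tagset-iterator callbacks lose their bool reserved argument, and drivers that still care ask the request itself via blk_mq_is_reserved_rq(). A sketch of the updated callback signatures (hypothetical driver and pdu type, modeled on the mtip32xx changes; not from the patch):

	static enum blk_eh_timer_return mydrv_timeout(struct request *rq)
	{
		if (blk_mq_is_reserved_rq(rq))
			return BLK_EH_RESET_TIMER;	/* give internal commands more time */
		return BLK_EH_DONE;
	}

	static bool mydrv_cancel(struct request *rq, void *data)
	{
		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* hypothetical pdu */

		cmd->status = BLK_STS_IOERR;
		blk_mq_complete_request(rq);
		return true;	/* keep iterating */
	}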
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 6b67088f4ea7..8b224ede2e33 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1310,7 +1310,7 @@ static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
}
static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
- enum req_opf op,
+ enum req_op op,
sector_t sector,
sector_t nr_sectors)
{
@@ -1381,9 +1381,8 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
}
}
-blk_status_t null_process_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector,
- unsigned int nr_sectors)
+blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
blk_status_t ret;
@@ -1401,7 +1400,7 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd,
}
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
- sector_t nr_sectors, enum req_opf op)
+ sector_t nr_sectors, enum req_op op)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
@@ -1578,7 +1577,7 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
return nr;
}
-static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
@@ -1737,7 +1736,7 @@ static void null_del_dev(struct nullb *nullb)
null_restart_queue_async(nullb);
}
- blk_cleanup_disk(nullb->disk);
+ put_disk(nullb->disk);
if (dev->queue_mode == NULL_Q_MQ &&
nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
@@ -2082,7 +2081,7 @@ static int null_add_dev(struct nullb_device *dev)
out_cleanup_zone:
null_free_zoned_dev(dev);
out_cleanup_disk:
- blk_cleanup_disk(nullb->disk);
+ put_disk(nullb->disk);
out_cleanup_tags:
if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 8359b43842f2..6fbf0a1b2622 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -136,9 +136,8 @@ struct nullb {
blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
sector_t nr_sectors);
-blk_status_t null_process_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector,
- unsigned int nr_sectors);
+blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, unsigned int nr_sectors);
#ifdef CONFIG_BLK_DEV_ZONED
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
@@ -146,9 +145,8 @@ int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
-blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector,
- sector_t nr_sectors);
+blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len);
#else
@@ -164,7 +162,7 @@ static inline int null_register_zoned_dev(struct nullb *nullb)
}
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector, sector_t nr_sectors)
+ enum req_op op, sector_t sector, sector_t nr_sectors)
{
return BLK_STS_NOTSUPP;
}
diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h
index 86d6c12c603c..6b2b370e786f 100644
--- a/drivers/block/null_blk/trace.h
+++ b/drivers/block/null_blk/trace.h
@@ -36,7 +36,7 @@ TRACE_EVENT(nullb_zone_op,
TP_ARGS(cmd, zone_no, zone_cond),
TP_STRUCT__entry(
__array(char, disk, DISK_NAME_LEN)
- __field(enum req_opf, op)
+ __field(enum req_op, op)
__field(unsigned int, zone_no)
__field(unsigned int, zone_cond)
),
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 2fdd7b20c224..55a69e48ef8b 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -159,7 +159,7 @@ int null_register_zoned_dev(struct nullb *nullb)
struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
- blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
+ disk_set_zoned(nullb->disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
@@ -170,12 +170,12 @@ int null_register_zoned_dev(struct nullb *nullb)
return ret;
} else {
blk_queue_chunk_sectors(q, dev->zone_size_sects);
- q->nr_zones = blkdev_nr_zones(nullb->disk);
+ nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
}
blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
- blk_queue_max_open_zones(q, dev->zone_max_open);
- blk_queue_max_active_zones(q, dev->zone_max_active);
+ disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
+ disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
return 0;
}
@@ -600,7 +600,7 @@ static blk_status_t null_reset_zone(struct nullb_device *dev,
return BLK_STS_OK;
}
-static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
+static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
@@ -653,7 +653,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
return ret;
}
-blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
+blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, sector_t nr_sectors)
{
struct nullb_device *dev;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index f462ad67931a..a5ab40784119 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -956,7 +956,7 @@ out_unreg_cdrom:
out_pi_release:
pi_release(cd->pi);
out_free_disk:
- blk_cleanup_disk(cd->disk);
+ put_disk(cd->disk);
out_free_tag_set:
blk_mq_free_tag_set(&cd->tag_set);
return ret;
@@ -1029,7 +1029,7 @@ static void __exit pcd_exit(void)
unregister_cdrom(&cd->info);
del_gendisk(cd->disk);
pi_release(cd->pi);
- blk_cleanup_disk(cd->disk);
+ put_disk(cd->disk);
blk_mq_free_tag_set(&cd->tag_set);
}
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 3637c38c72f9..f8a75bc90f70 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -501,6 +501,8 @@ static enum action do_pd_io_start(void)
return do_pd_read_start();
else
return do_pd_write_start();
+ default:
+ break;
}
return Fail;
}
@@ -943,7 +945,7 @@ static int pd_probe_drive(struct pd_unit *disk, int autoprobe, int port,
goto cleanup_disk;
return 0;
cleanup_disk:
- blk_cleanup_disk(disk->gd);
+ put_disk(disk->gd);
put_disk:
put_disk(p);
disk->gd = NULL;
@@ -1018,7 +1020,7 @@ static void __exit pd_exit(void)
if (p) {
disk->gd = NULL;
del_gendisk(p);
- blk_cleanup_disk(p);
+ put_disk(p);
blk_mq_free_tag_set(&disk->tag_set);
pi_release(disk->pi);
}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 292e9a4ce1b9..eec1b9fde245 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -975,7 +975,7 @@ static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port,
out_pi_release:
pi_release(pf->pi);
out_free_disk:
- blk_cleanup_disk(pf->disk);
+ put_disk(pf->disk);
out_free_tag_set:
blk_mq_free_tag_set(&pf->tag_set);
return ret;
@@ -1044,7 +1044,7 @@ static void __exit pf_exit(void)
if (!pf->present)
continue;
del_gendisk(pf->disk);
- blk_cleanup_disk(pf->disk);
+ put_disk(pf->disk);
blk_mq_free_tag_set(&pf->tag_set);
pi_release(pf->pi);
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 789093375344..01a15dbd9cde 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2460,11 +2460,9 @@ static int pkt_seq_show(struct seq_file *m, void *p)
{
struct pktcdvd_device *pd = m->private;
char *msg;
- char bdev_buf[BDEVNAME_SIZE];
int states[PACKET_NUM_STATES];
- seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
- bdevname(pd->bdev, bdev_buf));
+ seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev);
seq_printf(m, "\nSettings:\n");
seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
@@ -2521,7 +2519,6 @@ static int pkt_seq_show(struct seq_file *m, void *p)
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
int i;
- char b[BDEVNAME_SIZE];
struct block_device *bdev;
struct scsi_device *sdev;
@@ -2534,8 +2531,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
if (!pd2)
continue;
if (pd2->bdev->bd_dev == dev) {
- pkt_err(pd, "%s already setup\n",
- bdevname(pd2->bdev, b));
+ pkt_err(pd, "%pg already setup\n", pd2->bdev);
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
@@ -2570,7 +2566,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
- pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
+ pkt_dbg(1, pd, "writer mapped to %pg\n", bdev);
return 0;
out_mem:
@@ -2733,7 +2729,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
return 0;
out_mem2:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_mem:
mempool_exit(&pd->rb_pool);
kfree(pd);
@@ -2783,7 +2779,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_dbg(1, pd, "writer unmapped\n");
del_gendisk(pd->disk);
- blk_cleanup_disk(pd->disk);
+ put_disk(pd->disk);
mempool_exit(&pd->rb_pool);
kfree(pd);
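The drbd, pktcdvd, and rnbd hunks retire bdevname() in favor of the %pg printk specifier, which formats a block_device name directly and removes the need for a BDEVNAME_SIZE stack buffer. A minimal sketch of the conversion (illustrative, not from the patch):

	/* old style, removed by this series */
	char b[BDEVNAME_SIZE];
	pr_info("mapped to %s\n", bdevname(bdev, b));

	/* new style */
	pr_info("mapped to %pg\n", bdev);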
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 3054adf77460..36d7b36c60c7 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -473,7 +473,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
return 0;
fail_cleanup_disk:
- blk_cleanup_disk(gendisk);
+ put_disk(gendisk);
fail_free_tag_set:
blk_mq_free_tag_set(&priv->tag_set);
fail_teardown:
@@ -500,7 +500,7 @@ static void ps3disk_remove(struct ps3_system_bus_device *_dev)
&ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
del_gendisk(priv->gendisk);
- blk_cleanup_disk(priv->gendisk);
+ put_disk(priv->gendisk);
blk_mq_free_tag_set(&priv->tag_set);
dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
ps3disk_sync_cache(dev);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 4f90819e245e..d1e0fefec90b 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -761,7 +761,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(gendisk);
+ put_disk(gendisk);
out_cache_cleanup:
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
@@ -792,7 +792,7 @@ static void ps3vram_remove(struct ps3_system_bus_device *dev)
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
del_gendisk(priv->gendisk);
- blk_cleanup_disk(priv->gendisk);
+ put_disk(priv->gendisk);
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
iounmap(priv->reports);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index ef9bc62e9afd..0d8ec2fe5740 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4729,7 +4729,7 @@ static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
- blk_cleanup_disk(rbd_dev->disk);
+ put_disk(rbd_dev->disk);
blk_mq_free_tag_set(&rbd_dev->tag_set);
rbd_dev->disk = NULL;
}
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 409c76b81aed..b8d9e2824d9c 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1408,7 +1408,7 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
err = add_disk(dev->gd);
if (err)
- blk_cleanup_disk(dev->gd);
+ put_disk(dev->gd);
return err;
}
@@ -1630,7 +1630,7 @@ put_sess:
static void destroy_gen_disk(struct rnbd_clt_dev *dev)
{
del_gendisk(dev->gd);
- blk_cleanup_disk(dev->gd);
+ put_disk(dev->gd);
}
static void destroy_sysfs(struct rnbd_clt_dev *dev,
@@ -1755,7 +1755,7 @@ static void rnbd_destroy_sessions(void)
list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
/*
* Here unmap happens in parallel for only one reason:
- * blk_cleanup_queue() takes around half a second, so
+ * del_gendisk() takes around half a second, so
* on huge amount of devices the whole module unload
* procedure takes minutes.
*/
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index bfb08dd434d1..ea7ac8bca63c 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -229,9 +229,9 @@ static inline bool rnbd_flags_supported(u32 flags)
return true;
}
-static inline u32 rnbd_to_bio_flags(u32 rnbd_opf)
+static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
{
- u32 bio_opf;
+ blk_opf_t bio_opf;
switch (rnbd_op(rnbd_opf)) {
case RNBD_OP_READ:
@@ -286,7 +286,8 @@ static inline u32 rq_to_rnbd_flags(struct request *rq)
break;
default:
WARN(1, "Unknown request type %d (flags %llu)\n",
- req_op(rq), (unsigned long long)rq->cmd_flags);
+ (__force u32)req_op(rq),
+ (__force unsigned long long)rq->cmd_flags);
rnbd_opf = 0;
}
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
index c5d0a0391165..c63017f6e421 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.c
+++ b/drivers/block/rnbd/rnbd-srv-dev.c
@@ -28,7 +28,6 @@ struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags)
goto err;
dev->blk_open_flags = flags;
- bdevname(dev->bdev, dev->name);
return dev;
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
index 4309e5252469..8407d12f70af 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.h
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -15,7 +15,6 @@
struct rnbd_dev {
struct block_device *bdev;
fmode_t blk_open_flags;
- char name[BDEVNAME_SIZE];
};
/**
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
index feaa76c5a342..297a6924ff4e 100644
--- a/drivers/block/rnbd/rnbd-srv-sysfs.c
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -38,14 +38,13 @@ static struct kobj_type dev_ktype = {
};
int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
- struct block_device *bdev,
- const char *dev_name)
+ struct block_device *bdev)
{
struct kobject *bdev_kobj;
int ret;
ret = kobject_init_and_add(&dev->dev_kobj, &dev_ktype,
- rnbd_devs_kobj, dev_name);
+ rnbd_devs_kobj, "%pg", bdev);
if (ret) {
kobject_put(&dev->dev_kobj);
return ret;
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index beaef43a67b9..0713014bf423 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -419,7 +419,7 @@ static struct rnbd_srv_sess_dev
return sess_dev;
}
-static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id)
+static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(struct block_device *bdev)
{
struct rnbd_srv_dev *dev;
@@ -427,7 +427,7 @@ static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id)
if (!dev)
return ERR_PTR(-ENOMEM);
- strscpy(dev->id, id, sizeof(dev->id));
+ snprintf(dev->id, sizeof(dev->id), "%pg", bdev);
kref_init(&dev->kref);
INIT_LIST_HEAD(&dev->sess_dev_list);
mutex_init(&dev->lock);
@@ -512,7 +512,7 @@ rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev,
int ret;
struct rnbd_srv_dev *new_dev, *dev;
- new_dev = rnbd_srv_init_srv_dev(rnbd_dev->name);
+ new_dev = rnbd_srv_init_srv_dev(rnbd_dev->bdev);
if (IS_ERR(new_dev))
return new_dev;
@@ -758,8 +758,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
*/
mutex_lock(&srv_dev->lock);
if (!srv_dev->dev_kobj.state_in_sysfs) {
- ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev,
- rnbd_dev->name);
+ ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev);
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index be2ae486d407..6926f9069dc4 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -68,8 +68,7 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev,
/* rnbd-srv-sysfs.c */
int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
- struct block_device *bdev,
- const char *dir_name);
+ struct block_device *bdev);
void rnbd_srv_destroy_dev_sysfs(struct rnbd_srv_dev *dev);
int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index dd0a1a6fed29..fb855da971ee 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -886,7 +886,7 @@ static int probe_disk(struct vdc_port *port)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(g);
+ put_disk(g);
out_free_tag:
blk_mq_free_tag_set(&port->tag_set);
return err;
@@ -1070,7 +1070,7 @@ static void vdc_port_remove(struct vio_dev *vdev)
del_timer_sync(&port->vio.timer);
del_gendisk(port->disk);
- blk_cleanup_disk(port->disk);
+ put_disk(port->disk);
blk_mq_free_tag_set(&port->tag_set);
vdc_free_tx_ring(port);
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index fef65a18d56f..42b4b6828690 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -783,7 +783,7 @@ static void swim_cleanup_floppy_disk(struct floppy_state *fs)
if (fs->registered)
del_gendisk(fs->disk);
- blk_cleanup_disk(disk);
+ put_disk(disk);
blk_mq_free_tag_set(&fs->tag_set);
}
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 6c39f2c9f806..da811a7da03f 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1238,7 +1238,7 @@ static int swim3_attach(struct macio_dev *mdev,
return 0;
out_cleanup_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_free_tag_set:
blk_mq_free_tag_set(&fs->tag_set);
out_unregister:
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 63b4f6431d2e..0e1a484cab0b 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1377,7 +1377,7 @@ static void carm_free_disk(struct carm_host *host, unsigned int port_no)
if (host->state > HST_DEV_ACTIVATE)
del_gendisk(disk);
- blk_cleanup_disk(disk);
+ put_disk(disk);
}
static int carm_init_shm(struct carm_host *host)
@@ -1536,7 +1536,7 @@ err_out_free_majors:
clear_bit(0, &carm_major_alloc);
else if (host->major == 161)
clear_bit(1, &carm_major_alloc);
- blk_cleanup_queue(host->oob_q);
+ blk_mq_destroy_queue(host->oob_q);
blk_mq_free_tag_set(&host->tag_set);
err_out_dma_free:
dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
@@ -1570,7 +1570,7 @@ static void carm_remove_one (struct pci_dev *pdev)
clear_bit(0, &carm_major_alloc);
else if (host->major == 161)
clear_bit(1, &carm_major_alloc);
- blk_cleanup_queue(host->oob_q);
+ blk_mq_destroy_queue(host->oob_q);
blk_mq_free_tag_set(&host->tag_set);
dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
iounmap(host->mmio);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
new file mode 100644
index 000000000000..3f1906965ac8
--- /dev/null
+++ b/drivers/block/ublk_drv.c
@@ -0,0 +1,1545 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Userspace block device - a block device whose IO is handled from userspace
+ *
+ * Makes full use of the io_uring passthrough command for communicating with
+ * the ublk userspace daemon (ublksrvd) to handle basic IO requests.
+ *
+ * Copyright 2022 Ming Lei <ming.lei@redhat.com>
+ *
+ * (part of code stolen from loop.c)
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/major.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/mutex.h>
+#include <linux/writeback.h>
+#include <linux/completion.h>
+#include <linux/highmem.h>
+#include <linux/sysfs.h>
+#include <linux/miscdevice.h>
+#include <linux/falloc.h>
+#include <linux/uio.h>
+#include <linux/ioprio.h>
+#include <linux/sched/mm.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/io_uring.h>
+#include <linux/blk-mq.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <linux/task_work.h>
+#include <uapi/linux/ublk_cmd.h>
+
+#define UBLK_MINORS (1U << MINORBITS)
+
+/* All UBLK_F_* have to be included into UBLK_F_ALL */
+#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_URING_CMD_COMP_IN_TASK)
+
+struct ublk_rq_data {
+ struct callback_head work;
+};
+
+struct ublk_uring_cmd_pdu {
+ struct request *req;
+};
+
+/*
+ * io command is active: the sqe cmd has been received, and its cqe isn't done
+ *
+ * If the flag is set, the io command is owned by the ublk driver and is
+ * waiting for an incoming blk-mq request from the ublk block device.
+ *
+ * If the flag is cleared, the io command has been completed and is owned
+ * by the ublk server.
+ */
+#define UBLK_IO_FLAG_ACTIVE 0x01
+
+/*
+ * IO command has been completed via cqe, is being handled by ublksrv,
+ * and has not been committed yet
+ *
+ * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be
+ * used for cross verification
+ */
+#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
+
+/*
+ * IO command has been aborted; this flag is only set while
+ * !UBLK_IO_FLAG_ACTIVE.
+ *
+ * After this flag is observed, any pending or new incoming request
+ * associated with this io command will be failed immediately
+ */
+#define UBLK_IO_FLAG_ABORTED 0x04
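+
+/*
+ * Lifecycle of the flags above, as driven by the handlers below (an
+ * illustrative summary for the reader, not extra driver state):
+ *
+ *	FETCH_REQ            : 0            -> ACTIVE
+ *	dispatch to server   : ACTIVE       -> OWNED_BY_SRV
+ *	COMMIT_AND_FETCH_REQ : OWNED_BY_SRV -> ACTIVE
+ *	daemon dies          : !ACTIVE      -> ABORTED
+ */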
+
+struct ublk_io {
+ /* userspace buffer address from io cmd */
+ __u64 addr;
+ unsigned int flags;
+ int res;
+
+ struct io_uring_cmd *cmd;
+};
+
+struct ublk_queue {
+ int q_id;
+ int q_depth;
+
+ unsigned long flags;
+ struct task_struct *ubq_daemon;
+ char *io_cmd_buf;
+
+ unsigned long io_addr; /* mapped vm address */
+ unsigned int max_io_sz;
+ bool abort_work_pending;
+	unsigned short nr_io_ready;	/* how many ios are set up */
+ struct ublk_device *dev;
+	struct ublk_io ios[];
+};
+
+#define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ)
+
+struct ublk_device {
+ struct gendisk *ub_disk;
+
+ char *__queues;
+
+ unsigned short queue_size;
+ unsigned short bs_shift;
+ struct ublksrv_ctrl_dev_info dev_info;
+
+ struct blk_mq_tag_set tag_set;
+
+ struct cdev cdev;
+ struct device cdev_dev;
+
+#define UB_STATE_OPEN 0
+#define UB_STATE_USED 1
+ unsigned long state;
+ int ub_number;
+
+ struct mutex mutex;
+
+ spinlock_t mm_lock;
+ struct mm_struct *mm;
+
+ struct completion completion;
+ unsigned int nr_queues_ready;
+ atomic_t nr_aborted_queues;
+
+ /*
+	 * Our ubq->ubq_daemon may be killed without any notification, so
+	 * monitor each queue's daemon periodically
+ */
+ struct delayed_work monitor_work;
+ struct work_struct stop_work;
+};
+
+static dev_t ublk_chr_devt;
+static struct class *ublk_chr_class;
+
+static DEFINE_IDR(ublk_index_idr);
+static DEFINE_SPINLOCK(ublk_idr_lock);
+static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
+
+static DEFINE_MUTEX(ublk_ctl_mutex);
+
+static struct miscdevice ublk_misc;
+
+static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
+{
+ if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
+ !(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK))
+ return true;
+ return false;
+}
+
+static struct ublk_device *ublk_get_device(struct ublk_device *ub)
+{
+ if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
+ return ub;
+ return NULL;
+}
+
+static void ublk_put_device(struct ublk_device *ub)
+{
+ put_device(&ub->cdev_dev);
+}
+
+static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
+ int qid)
+{
+ return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
+}
+
+static inline bool ublk_rq_has_data(const struct request *rq)
+{
+ return rq->bio && bio_has_data(rq->bio);
+}
+
+static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
+ int tag)
+{
+ return (struct ublksrv_io_desc *)
+ &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
+}
+
+static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
+{
+ return ublk_get_queue(ub, q_id)->io_cmd_buf;
+}
+
+static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
+{
+ struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+
+ return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
+ PAGE_SIZE);
+}
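+
+/*
+ * For example, a queue depth of 128 with the 24-byte ublksrv_io_desc
+ * needs 3072 bytes of descriptors, which the round_up() above turns
+ * into a single 4K page.
+ */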
+
+static void ublk_free_disk(struct gendisk *disk)
+{
+ struct ublk_device *ub = disk->private_data;
+
+ clear_bit(UB_STATE_USED, &ub->state);
+ put_device(&ub->cdev_dev);
+}
+
+static const struct block_device_operations ub_fops = {
+ .owner = THIS_MODULE,
+ .free_disk = ublk_free_disk,
+};
+
+#define UBLK_MAX_PIN_PAGES 32
+
+struct ublk_map_data {
+ const struct ublk_queue *ubq;
+ const struct request *rq;
+ const struct ublk_io *io;
+ unsigned max_bytes;
+};
+
+struct ublk_io_iter {
+ struct page *pages[UBLK_MAX_PIN_PAGES];
+ unsigned pg_off; /* offset in the 1st page in pages */
+ int nr_pages; /* how many page pointers in pages */
+ struct bio *bio;
+ struct bvec_iter iter;
+};
+
+static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
+ unsigned max_bytes, bool to_vm)
+{
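+	/*
+	 * Bound the copy by both the caller's byte budget (max_bytes) and
+	 * the bytes actually covered by the pinned pages[] array.
+	 */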
+ const unsigned total = min_t(unsigned, max_bytes,
+ PAGE_SIZE - data->pg_off +
+ ((data->nr_pages - 1) << PAGE_SHIFT));
+ unsigned done = 0;
+ unsigned pg_idx = 0;
+
+ while (done < total) {
+ struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
+ const unsigned int bytes = min3(bv.bv_len, total - done,
+ (unsigned)(PAGE_SIZE - data->pg_off));
+ void *bv_buf = bvec_kmap_local(&bv);
+ void *pg_buf = kmap_local_page(data->pages[pg_idx]);
+
+ if (to_vm)
+ memcpy(pg_buf + data->pg_off, bv_buf, bytes);
+ else
+ memcpy(bv_buf, pg_buf + data->pg_off, bytes);
+
+ kunmap_local(pg_buf);
+ kunmap_local(bv_buf);
+
+ /* advance page array */
+ data->pg_off += bytes;
+ if (data->pg_off == PAGE_SIZE) {
+ pg_idx += 1;
+ data->pg_off = 0;
+ }
+
+ done += bytes;
+
+ /* advance bio */
+ bio_advance_iter_single(data->bio, &data->iter, bytes);
+ if (!data->iter.bi_size) {
+ data->bio = data->bio->bi_next;
+ if (data->bio == NULL)
+ break;
+ data->iter = data->bio->bi_iter;
+ }
+ }
+
+ return done;
+}
+
+static inline int ublk_copy_user_pages(struct ublk_map_data *data,
+ bool to_vm)
+{
+ const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
+ const unsigned long start_vm = data->io->addr;
+ unsigned int done = 0;
+ struct ublk_io_iter iter = {
+ .pg_off = start_vm & (PAGE_SIZE - 1),
+ .bio = data->rq->bio,
+ .iter = data->rq->bio->bi_iter,
+ };
+ const unsigned int nr_pages = round_up(data->max_bytes +
+ (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
+
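+	/*
+	 * Pin at most UBLK_MAX_PIN_PAGES pages per round and copy through
+	 * them; nr_pages counts from the start of start_vm's page, hence
+	 * the in-page offset added above.
+	 */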
+ while (done < nr_pages) {
+ const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES,
+ nr_pages - done);
+ unsigned i, len;
+
+ iter.nr_pages = get_user_pages_fast(start_vm +
+ (done << PAGE_SHIFT), to_pin, gup_flags,
+ iter.pages);
+ if (iter.nr_pages <= 0)
+ return done == 0 ? iter.nr_pages : done;
+ len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm);
+ for (i = 0; i < iter.nr_pages; i++) {
+ if (to_vm)
+ set_page_dirty(iter.pages[i]);
+ put_page(iter.pages[i]);
+ }
+ data->max_bytes -= len;
+ done += iter.nr_pages;
+ }
+
+ return done;
+}
+
+static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
+ struct ublk_io *io)
+{
+ const unsigned int rq_bytes = blk_rq_bytes(req);
+	/*
+	 * No zero copy: the copy of WRITE request data is deferred into
+	 * the ublksrv context, and the big benefit is that pinning the
+	 * pages in the current context is pretty fast; see
+	 * ublk_copy_user_pages().
+	 */
+ if (req_op(req) != REQ_OP_WRITE && req_op(req) != REQ_OP_FLUSH)
+ return rq_bytes;
+
+ if (ublk_rq_has_data(req)) {
+ struct ublk_map_data data = {
+ .ubq = ubq,
+ .rq = req,
+ .io = io,
+ .max_bytes = rq_bytes,
+ };
+
+ ublk_copy_user_pages(&data, true);
+
+ return rq_bytes - data.max_bytes;
+ }
+ return rq_bytes;
+}
+
+static int ublk_unmap_io(const struct ublk_queue *ubq,
+ const struct request *req,
+ struct ublk_io *io)
+{
+ const unsigned int rq_bytes = blk_rq_bytes(req);
+
+ if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) {
+ struct ublk_map_data data = {
+ .ubq = ubq,
+ .rq = req,
+ .io = io,
+ .max_bytes = io->res,
+ };
+
+ WARN_ON_ONCE(io->res > rq_bytes);
+
+ ublk_copy_user_pages(&data, false);
+
+ return io->res - data.max_bytes;
+ }
+ return rq_bytes;
+}
+
+static inline unsigned int ublk_req_build_flags(struct request *req)
+{
+ unsigned flags = 0;
+
+ if (req->cmd_flags & REQ_FAILFAST_DEV)
+ flags |= UBLK_IO_F_FAILFAST_DEV;
+
+ if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
+ flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
+
+ if (req->cmd_flags & REQ_FAILFAST_DRIVER)
+ flags |= UBLK_IO_F_FAILFAST_DRIVER;
+
+ if (req->cmd_flags & REQ_META)
+ flags |= UBLK_IO_F_META;
+
+ if (req->cmd_flags & REQ_FUA)
+ flags |= UBLK_IO_F_FUA;
+
+ if (req->cmd_flags & REQ_NOUNMAP)
+ flags |= UBLK_IO_F_NOUNMAP;
+
+ if (req->cmd_flags & REQ_SWAP)
+ flags |= UBLK_IO_F_SWAP;
+
+ return flags;
+}
+
+static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
+{
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
+ struct ublk_io *io = &ubq->ios[req->tag];
+ u32 ublk_op;
+
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ ublk_op = UBLK_IO_OP_READ;
+ break;
+ case REQ_OP_WRITE:
+ ublk_op = UBLK_IO_OP_WRITE;
+ break;
+ case REQ_OP_FLUSH:
+ ublk_op = UBLK_IO_OP_FLUSH;
+ break;
+ case REQ_OP_DISCARD:
+ ublk_op = UBLK_IO_OP_DISCARD;
+ break;
+ case REQ_OP_WRITE_ZEROES:
+ ublk_op = UBLK_IO_OP_WRITE_ZEROES;
+ break;
+ default:
+ return BLK_STS_IOERR;
+ }
+
+	/* need to translate since in-kernel req_op values may change */
+ iod->op_flags = ublk_op | ublk_req_build_flags(req);
+ iod->nr_sectors = blk_rq_sectors(req);
+ iod->start_sector = blk_rq_pos(req);
+ iod->addr = io->addr;
+
+ return BLK_STS_OK;
+}
+
+static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
+ struct io_uring_cmd *ioucmd)
+{
+ return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
+}
+
+static bool ubq_daemon_is_dying(struct ublk_queue *ubq)
+{
+ return ubq->ubq_daemon->flags & PF_EXITING;
+}
+
+/* todo: handle partial completion */
+static void ublk_complete_rq(struct request *req)
+{
+ struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ struct ublk_io *io = &ubq->ios[req->tag];
+ unsigned int unmapped_bytes;
+
+	/* fail a read IO if nothing was read */
+ if (!io->res && req_op(req) == REQ_OP_READ)
+ io->res = -EIO;
+
+ if (io->res < 0) {
+ blk_mq_end_request(req, errno_to_blk_status(io->res));
+ return;
+ }
+
+ /*
+	 * FLUSH and DISCARD don't return any data, so end them
+	 * directly.
+	 *
+	 * Neither of the two needs unmapping.
+ */
+ if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE) {
+ blk_mq_end_request(req, BLK_STS_OK);
+ return;
+ }
+
+	/* for a READ request, copy the data at iod->addr into the rq buffers */
+ unmapped_bytes = ublk_unmap_io(ubq, req, io);
+
+ /*
+	 * This should be next to impossible, since the data was filled
+	 * in just before.
+	 *
+	 * Simply truncate io->res for this unlikely case.
+ */
+ if (unlikely(unmapped_bytes < io->res))
+ io->res = unmapped_bytes;
+
+ if (blk_update_request(req, BLK_STS_OK, io->res))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+}
+
+/*
+ * __ublk_fail_req() may be called from the abort context or the
+ * ->ubq_daemon context during exit, so locking is required.
+ *
+ * Also, aborting may not have started yet; keep in mind that one failed
+ * request may be issued by the block layer again.
+ */
+static void __ublk_fail_req(struct ublk_io *io, struct request *req)
+{
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
+
+ if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
+ io->flags |= UBLK_IO_FLAG_ABORTED;
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ }
+}
+
+#define UBLK_REQUEUE_DELAY_MS 3
+
+static inline void __ublk_rq_task_work(struct request *req)
+{
+ struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ struct ublk_device *ub = ubq->dev;
+ int tag = req->tag;
+ struct ublk_io *io = &ubq->ios[tag];
+ bool task_exiting = current != ubq->ubq_daemon ||
+ (current->flags & PF_EXITING);
+ unsigned int mapped_bytes;
+
+ pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
+ __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+
+ if (unlikely(task_exiting)) {
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ mod_delayed_work(system_wq, &ub->monitor_work, 0);
+ return;
+ }
+
+ mapped_bytes = ublk_map_io(ubq, req, io);
+
+ /* partially mapped, update io descriptor */
+ if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
+ /*
+ * Nothing mapped, retry until we succeed.
+ *
+ * We may never succeed in mapping any bytes here because
+		 * of OOM. TODO: reserve one buffer with a single page
+		 * pinned to guarantee forward progress.
+ */
+ if (unlikely(!mapped_bytes)) {
+ blk_mq_requeue_request(req, false);
+ blk_mq_delay_kick_requeue_list(req->q,
+ UBLK_REQUEUE_DELAY_MS);
+ return;
+ }
+
+ ublk_get_iod(ubq, req->tag)->nr_sectors =
+ mapped_bytes >> 9;
+ }
+
+ /* mark this cmd owned by ublksrv */
+ io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
+
+ /*
+	 * Clear ACTIVE since we are done with this sqe/cmd slot.
+	 * We can only accept an io cmd while it is not active.
+ */
+ io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+
+ /* tell ublksrv one io request is coming */
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_OK, 0);
+}
+
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+ __ublk_rq_task_work(pdu->req);
+}
+
+static void ublk_rq_task_work_fn(struct callback_head *work)
+{
+ struct ublk_rq_data *data = container_of(work,
+ struct ublk_rq_data, work);
+ struct request *req = blk_mq_rq_from_pdu(data);
+
+ __ublk_rq_task_work(req);
+}
+
+static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct ublk_queue *ubq = hctx->driver_data;
+ struct request *rq = bd->rq;
+ blk_status_t res;
+
+	/* fill the iod into its slot in the io cmd buffer */
+ res = ublk_setup_iod(ubq, rq);
+ if (unlikely(res != BLK_STS_OK))
+ return BLK_STS_IOERR;
+
+ blk_mq_start_request(bd->rq);
+
+ if (unlikely(ubq_daemon_is_dying(ubq))) {
+ fail:
+ mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
+ return BLK_STS_IOERR;
+ }
+
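+	/*
+	 * Hand the request over to the queue's daemon: via task_work_add()
+	 * when the driver is built in and UBLK_F_URING_CMD_COMP_IN_TASK is
+	 * not requested, otherwise via the io_uring task-work path of the
+	 * pending fetch command.
+	 */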
+ if (ublk_can_use_task_work(ubq)) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+ enum task_work_notify_mode notify_mode = bd->last ?
+ TWA_SIGNAL_NO_IPI : TWA_NONE;
+
+ if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
+ goto fail;
+ } else {
+ struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+ pdu->req = rq;
+ io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+ }
+
+ return BLK_STS_OK;
+}
+
+static void ublk_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct ublk_queue *ubq = hctx->driver_data;
+
+ if (ublk_can_use_task_work(ubq))
+ __set_notify_signal(ubq->ubq_daemon);
+}
+
+static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
+ unsigned int hctx_idx)
+{
+ struct ublk_device *ub = driver_data;
+ struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
+
+ hctx->driver_data = ubq;
+ return 0;
+}
+
+static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ init_task_work(&data->work, ublk_rq_task_work_fn);
+ return 0;
+}
+
+static const struct blk_mq_ops ublk_mq_ops = {
+ .queue_rq = ublk_queue_rq,
+ .commit_rqs = ublk_commit_rqs,
+ .init_hctx = ublk_init_hctx,
+ .init_request = ublk_init_rq,
+};
+
+static int ublk_ch_open(struct inode *inode, struct file *filp)
+{
+ struct ublk_device *ub = container_of(inode->i_cdev,
+ struct ublk_device, cdev);
+
+ if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
+ return -EBUSY;
+ filp->private_data = ub;
+ return 0;
+}
+
+static int ublk_ch_release(struct inode *inode, struct file *filp)
+{
+ struct ublk_device *ub = filp->private_data;
+
+ clear_bit(UB_STATE_OPEN, &ub->state);
+ return 0;
+}
+
+/* map pre-allocated per-queue cmd buffer to ublksrv daemon */
+static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct ublk_device *ub = filp->private_data;
+ size_t sz = vma->vm_end - vma->vm_start;
+ unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
+ unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
+ int q_id, ret = 0;
+
+ spin_lock(&ub->mm_lock);
+ if (!ub->mm)
+ ub->mm = current->mm;
+ if (current->mm != ub->mm)
+ ret = -EINVAL;
+ spin_unlock(&ub->mm_lock);
+
+ if (ret)
+ return ret;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
+ if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
+ return -EINVAL;
+
+ q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
+ pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
+ __func__, q_id, current->pid, vma->vm_start,
+ phys_off, (unsigned long)sz);
+
+ if (sz != ublk_queue_cmd_buf_size(ub, q_id))
+ return -EINVAL;
+
+ pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
+ return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
+}
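+
+/*
+ * A hedged userspace sketch of the mapping above (cdev_fd and q_id are
+ * assumptions; buf_sz must match ublk_queue_cmd_buf_size()):
+ *
+ *	off_t max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
+ *	off_t off = UBLKSRV_CMD_BUF_OFFSET + q_id * max_sz;
+ *	struct ublksrv_io_desc *descs = mmap(NULL, buf_sz, PROT_READ,
+ *					     MAP_SHARED, cdev_fd, off);
+ *
+ * The mapping must be read-only, since VM_WRITE is rejected above.
+ */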
+
+static void ublk_commit_completion(struct ublk_device *ub,
+ struct ublksrv_io_cmd *ub_cmd)
+{
+ u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
+ struct ublk_queue *ubq = ublk_get_queue(ub, qid);
+ struct ublk_io *io = &ubq->ios[tag];
+ struct request *req;
+
+	/* now this cmd slot is owned by the ublk driver */
+ io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
+ io->res = ub_cmd->result;
+
+	/* find the io request and complete it */
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
+
+ if (req && likely(!blk_should_fake_timeout(req->q)))
+ ublk_complete_rq(req);
+}
+
+/*
+ * When ->ubq_daemon is exiting, either a new request is ended immediately,
+ * or a queued io command is drained, so it is safe to abort the queue
+ * locklessly
+ */
+static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+ int i;
+
+ if (!ublk_get_device(ub))
+ return;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+ if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
+ struct request *rq;
+
+ /*
+ * Either we fail the request or ublk_rq_task_work_fn
+ * will do it
+ */
+ rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
+ if (rq)
+ __ublk_fail_req(io, rq);
+ }
+ }
+ ublk_put_device(ub);
+}
+
+static void ublk_daemon_monitor_work(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, monitor_work.work);
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ if (ubq_daemon_is_dying(ubq)) {
+ schedule_work(&ub->stop_work);
+
+			/* aborting the queue ensures forward progress */
+ ublk_abort_queue(ub, ubq);
+ }
+ }
+
+ /*
+	 * We can't schedule monitor work after ublk_remove() has started.
+	 *
+	 * No need for ub->mutex: the monitor work is canceled after the
+	 * state is marked as DEAD, so the DEAD state is observed reliably.
+ */
+ if (ub->dev_info.state != UBLK_S_DEV_DEAD)
+ schedule_delayed_work(&ub->monitor_work,
+ UBLK_DAEMON_MONITOR_PERIOD);
+}
+
+static void ublk_cancel_queue(struct ublk_queue *ubq)
+{
+ int i;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+ if (io->flags & UBLK_IO_FLAG_ACTIVE)
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
+ }
+}
+
+/* Cancel all pending commands, must be called after del_gendisk() returns */
+static void ublk_cancel_dev(struct ublk_device *ub)
+{
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_cancel_queue(ublk_get_queue(ub, i));
+}
+
+static void ublk_stop_dev(struct ublk_device *ub)
+{
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state != UBLK_S_DEV_LIVE)
+ goto unlock;
+
+ del_gendisk(ub->ub_disk);
+ ub->dev_info.state = UBLK_S_DEV_DEAD;
+ ub->dev_info.ublksrv_pid = -1;
+ ublk_cancel_dev(ub);
+ put_disk(ub->ub_disk);
+ ub->ub_disk = NULL;
+ unlock:
+ mutex_unlock(&ub->mutex);
+ cancel_delayed_work_sync(&ub->monitor_work);
+}
+
+static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+{
+ return ubq->nr_io_ready == ubq->q_depth;
+}
+
+/* device can only be started after all IOs are ready */
+static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+ mutex_lock(&ub->mutex);
+ ubq->nr_io_ready++;
+ if (ublk_queue_ready(ubq)) {
+ ubq->ubq_daemon = current;
+ get_task_struct(ubq->ubq_daemon);
+ ub->nr_queues_ready++;
+ }
+ if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
+ complete_all(&ub->completion);
+ mutex_unlock(&ub->mutex);
+}
+
+static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
+ struct ublk_device *ub = cmd->file->private_data;
+ struct ublk_queue *ubq;
+ struct ublk_io *io;
+ u32 cmd_op = cmd->cmd_op;
+ unsigned tag = ub_cmd->tag;
+ int ret = -EINVAL;
+
+ pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
+ __func__, cmd->cmd_op, ub_cmd->q_id, tag,
+ ub_cmd->result);
+
+ if (!(issue_flags & IO_URING_F_SQE128))
+ goto out;
+
+ if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
+ goto out;
+
+ ubq = ublk_get_queue(ub, ub_cmd->q_id);
+ if (!ubq || ub_cmd->q_id != ubq->q_id)
+ goto out;
+
+ if (ubq->ubq_daemon && ubq->ubq_daemon != current)
+ goto out;
+
+ if (tag >= ubq->q_depth)
+ goto out;
+
+ io = &ubq->ios[tag];
+
+	/* there is a pending io cmd, so something must be wrong */
+ if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ switch (cmd_op) {
+ case UBLK_IO_FETCH_REQ:
+		/* UBLK_IO_FETCH_REQ is only allowed before the queue is set up */
+ if (ublk_queue_ready(ubq)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /*
+		 * The io is being handled by the server, so
+		 * COMMIT_AND_FETCH_REQ is expected instead of FETCH_REQ
+ */
+ if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
+ goto out;
+		/* FETCH_REQ has to provide an IO buffer */
+ if (!ub_cmd->addr)
+ goto out;
+ io->cmd = cmd;
+ io->flags |= UBLK_IO_FLAG_ACTIVE;
+ io->addr = ub_cmd->addr;
+
+ ublk_mark_io_ready(ub, ubq);
+ break;
+ case UBLK_IO_COMMIT_AND_FETCH_REQ:
+		/* COMMIT_AND_FETCH_REQ has to provide an IO buffer */
+ if (!ub_cmd->addr)
+ goto out;
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ goto out;
+ io->addr = ub_cmd->addr;
+ io->flags |= UBLK_IO_FLAG_ACTIVE;
+ io->cmd = cmd;
+ ublk_commit_completion(ub, ub_cmd);
+ break;
+ default:
+ goto out;
+ }
+ return -EIOCBQUEUED;
+
+ out:
+ io_uring_cmd_done(cmd, ret, 0);
+ pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
+ __func__, cmd_op, tag, ret, io->flags);
+ return -EIOCBQUEUED;
+}
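+
+/*
+ * A minimal sketch of the userspace side of the io protocol above,
+ * assuming liburing, an open /dev/ublkcN fd (cdev_fd) and a ring set up
+ * with IORING_SETUP_SQE128; the variable names here are made up:
+ *
+ *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
+ *	struct ublksrv_io_cmd *uc = (struct ublksrv_io_cmd *)sqe->cmd;
+ *
+ *	sqe->opcode = IORING_OP_URING_CMD;
+ *	sqe->fd = cdev_fd;
+ *	sqe->cmd_op = UBLK_IO_FETCH_REQ;
+ *	uc->q_id = q_id;
+ *	uc->tag = tag;
+ *	uc->addr = (__u64)(uintptr_t)io_buf;
+ *
+ * The matching cqe is only posted once a request arrives for the
+ * (q_id, tag) slot; after handling it, the server queues
+ * UBLK_IO_COMMIT_AND_FETCH_REQ with ->result filled in.
+ */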
+
+static const struct file_operations ublk_ch_fops = {
+ .owner = THIS_MODULE,
+ .open = ublk_ch_open,
+ .release = ublk_ch_release,
+ .llseek = no_llseek,
+ .uring_cmd = ublk_ch_uring_cmd,
+ .mmap = ublk_ch_mmap,
+};
+
+static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
+{
+ int size = ublk_queue_cmd_buf_size(ub, q_id);
+ struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+
+ if (ubq->ubq_daemon)
+ put_task_struct(ubq->ubq_daemon);
+ if (ubq->io_cmd_buf)
+ free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
+}
+
+static int ublk_init_queue(struct ublk_device *ub, int q_id)
+{
+ struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+ void *ptr;
+ int size;
+
+ ubq->flags = ub->dev_info.flags;
+ ubq->q_id = q_id;
+ ubq->q_depth = ub->dev_info.queue_depth;
+ size = ublk_queue_cmd_buf_size(ub, q_id);
+
+ ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
+ if (!ptr)
+ return -ENOMEM;
+
+ ubq->io_cmd_buf = ptr;
+ ubq->dev = ub;
+ return 0;
+}
+
+static void ublk_deinit_queues(struct ublk_device *ub)
+{
+ int nr_queues = ub->dev_info.nr_hw_queues;
+ int i;
+
+ if (!ub->__queues)
+ return;
+
+ for (i = 0; i < nr_queues; i++)
+ ublk_deinit_queue(ub, i);
+ kfree(ub->__queues);
+}
+
+static int ublk_init_queues(struct ublk_device *ub)
+{
+ int nr_queues = ub->dev_info.nr_hw_queues;
+ int depth = ub->dev_info.queue_depth;
+ int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
+ int i, ret = -ENOMEM;
+
+ ub->queue_size = ubq_size;
+ ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
+ if (!ub->__queues)
+ return ret;
+
+ for (i = 0; i < nr_queues; i++) {
+ if (ublk_init_queue(ub, i))
+ goto fail;
+ }
+
+ init_completion(&ub->completion);
+ return 0;
+
+ fail:
+ ublk_deinit_queues(ub);
+ return ret;
+}
+
+static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
+{
+ int i = idx;
+ int err;
+
+ spin_lock(&ublk_idr_lock);
+	/* allocate id; if @idx >= 0, we're requesting that specific id */
+ if (i >= 0) {
+ err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
+ if (err == -ENOSPC)
+ err = -EEXIST;
+ } else {
+ err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT);
+ }
+ spin_unlock(&ublk_idr_lock);
+
+ if (err >= 0)
+ ub->ub_number = err;
+
+ return err;
+}
+
+static void ublk_free_dev_number(struct ublk_device *ub)
+{
+ spin_lock(&ublk_idr_lock);
+ idr_remove(&ublk_index_idr, ub->ub_number);
+ wake_up_all(&ublk_idr_wq);
+ spin_unlock(&ublk_idr_lock);
+}
+
+static void ublk_cdev_rel(struct device *dev)
+{
+ struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
+
+ blk_mq_free_tag_set(&ub->tag_set);
+ ublk_deinit_queues(ub);
+ ublk_free_dev_number(ub);
+ mutex_destroy(&ub->mutex);
+ kfree(ub);
+}
+
+static int ublk_add_chdev(struct ublk_device *ub)
+{
+ struct device *dev = &ub->cdev_dev;
+ int minor = ub->ub_number;
+ int ret;
+
+ dev->parent = ublk_misc.this_device;
+ dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
+ dev->class = ublk_chr_class;
+ dev->release = ublk_cdev_rel;
+ device_initialize(dev);
+
+ ret = dev_set_name(dev, "ublkc%d", minor);
+ if (ret)
+ goto fail;
+
+ cdev_init(&ub->cdev, &ublk_ch_fops);
+ ret = cdev_device_add(&ub->cdev, dev);
+ if (ret)
+ goto fail;
+ return 0;
+ fail:
+ put_device(dev);
+ return ret;
+}
+
+static void ublk_stop_work_fn(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, stop_work);
+
+ ublk_stop_dev(ub);
+}
+
+/* align maximum I/O size to PAGE_SIZE */
+static void ublk_align_max_io_size(struct ublk_device *ub)
+{
+ unsigned int max_rq_bytes = ub->dev_info.rq_max_blocks << ub->bs_shift;
+
+ ub->dev_info.rq_max_blocks =
+ round_down(max_rq_bytes, PAGE_SIZE) >> ub->bs_shift;
+}
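+
+/*
+ * For example, with a 512-byte block size (bs_shift == 9) and
+ * rq_max_blocks == 1023, max_rq_bytes is 523776, which rounds down to
+ * 520192 (127 4K pages), i.e. rq_max_blocks becomes 1016.
+ */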
+
+static int ublk_add_tag_set(struct ublk_device *ub)
+{
+ ub->tag_set.ops = &ublk_mq_ops;
+ ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
+ ub->tag_set.queue_depth = ub->dev_info.queue_depth;
+ ub->tag_set.numa_node = NUMA_NO_NODE;
+ ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
+ ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ub->tag_set.driver_data = ub;
+ return blk_mq_alloc_tag_set(&ub->tag_set);
+}
+
+static void ublk_remove(struct ublk_device *ub)
+{
+ ublk_stop_dev(ub);
+ cancel_work_sync(&ub->stop_work);
+ cdev_device_del(&ub->cdev, &ub->cdev_dev);
+ put_device(&ub->cdev_dev);
+}
+
+static struct ublk_device *ublk_get_device_from_id(int idx)
+{
+ struct ublk_device *ub = NULL;
+
+ if (idx < 0)
+ return NULL;
+
+ spin_lock(&ublk_idr_lock);
+ ub = idr_find(&ublk_index_idr, idx);
+ if (ub)
+ ub = ublk_get_device(ub);
+ spin_unlock(&ublk_idr_lock);
+
+ return ub;
+}
+
+static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ int ublksrv_pid = (int)header->data[0];
+ unsigned long dev_blocks = header->data[1];
+ struct ublk_device *ub;
+ struct gendisk *disk;
+ int ret = -EINVAL;
+
+ if (ublksrv_pid <= 0)
+ return -EINVAL;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
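+	/*
+	 * Wait until every io slot of every queue has received FETCH_REQ;
+	 * see ublk_mark_io_ready().
+	 */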
+ wait_for_completion_interruptible(&ub->completion);
+
+ schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
+
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
+ test_bit(UB_STATE_USED, &ub->state)) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
+	/* we may get an updated disk size */
+ if (dev_blocks)
+ ub->dev_info.dev_blocks = dev_blocks;
+
+ disk = blk_mq_alloc_disk(&ub->tag_set, ub);
+ if (IS_ERR(disk)) {
+ ret = PTR_ERR(disk);
+ goto out_unlock;
+ }
+ sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
+ disk->fops = &ub_fops;
+ disk->private_data = ub;
+
+ blk_queue_logical_block_size(disk->queue, ub->dev_info.block_size);
+ blk_queue_physical_block_size(disk->queue, ub->dev_info.block_size);
+ blk_queue_io_min(disk->queue, ub->dev_info.block_size);
+ blk_queue_max_hw_sectors(disk->queue,
+ ub->dev_info.rq_max_blocks << (ub->bs_shift - 9));
+ disk->queue->limits.discard_granularity = PAGE_SIZE;
+ blk_queue_max_discard_sectors(disk->queue, UINT_MAX >> 9);
+ blk_queue_max_write_zeroes_sectors(disk->queue, UINT_MAX >> 9);
+
+ set_capacity(disk, ub->dev_info.dev_blocks << (ub->bs_shift - 9));
+
+ ub->dev_info.ublksrv_pid = ublksrv_pid;
+ ub->ub_disk = disk;
+ get_device(&ub->cdev_dev);
+ ret = add_disk(disk);
+ if (ret) {
+ put_disk(disk);
+ goto out_unlock;
+ }
+ set_bit(UB_STATE_USED, &ub->state);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
+out_unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_put_device(ub);
+ return ret;
+}
+
+static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublk_device *ub;
+ cpumask_var_t cpumask;
+ unsigned long queue;
+ unsigned int retlen;
+ unsigned int i;
+ int ret = -EINVAL;
+
+ if (header->len * BITS_PER_BYTE < nr_cpu_ids)
+ return -EINVAL;
+ if (header->len & (sizeof(unsigned long)-1))
+ return -EINVAL;
+ if (!header->addr)
+ return -EINVAL;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ queue = header->data[0];
+ if (queue >= ub->dev_info.nr_hw_queues)
+ goto out_put_device;
+
+ ret = -ENOMEM;
+ if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
+ goto out_put_device;
+
+ for_each_possible_cpu(i) {
+ if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
+ cpumask_set_cpu(i, cpumask);
+ }
+
+ ret = -EFAULT;
+ retlen = min_t(unsigned short, header->len, cpumask_size());
+ if (copy_to_user(argp, cpumask, retlen))
+ goto out_free_cpumask;
+ if (retlen != header->len &&
+ clear_user(argp + retlen, header->len - retlen))
+ goto out_free_cpumask;
+
+ ret = 0;
+out_free_cpumask:
+ free_cpumask_var(cpumask);
+out_put_device:
+ ublk_put_device(ub);
+ return ret;
+}
+
+static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
+{
+ pr_devel("%s: dev id %d flags %llx\n", __func__,
+ info->dev_id, info->flags);
+ pr_devel("\t nr_hw_queues %d queue_depth %d block size %d dev_capacity %lld\n",
+ info->nr_hw_queues, info->queue_depth,
+ info->block_size, info->dev_blocks);
+}
+
+static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublksrv_ctrl_dev_info info;
+ struct ublk_device *ub;
+ int ret = -EINVAL;
+
+ if (header->len < sizeof(info) || !header->addr)
+ return -EINVAL;
+ if (header->queue_id != (u16)-1) {
+ pr_warn("%s: queue_id is wrong %x\n",
+ __func__, header->queue_id);
+ return -EINVAL;
+ }
+ if (copy_from_user(&info, argp, sizeof(info)))
+ return -EFAULT;
+ ublk_dump_dev_info(&info);
+ if (header->dev_id != info.dev_id) {
+ pr_warn("%s: dev id not match %u %u\n",
+ __func__, header->dev_id, info.dev_id);
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_killable(&ublk_ctl_mutex);
+ if (ret)
+ return ret;
+
+ ret = -ENOMEM;
+ ub = kzalloc(sizeof(*ub), GFP_KERNEL);
+ if (!ub)
+ goto out_unlock;
+ mutex_init(&ub->mutex);
+ spin_lock_init(&ub->mm_lock);
+ INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
+ INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);
+
+ ret = ublk_alloc_dev_number(ub, header->dev_id);
+ if (ret < 0)
+ goto out_free_ub;
+
+ memcpy(&ub->dev_info, &info, sizeof(info));
+
+ /* update device id */
+ ub->dev_info.dev_id = ub->ub_number;
+
+ /*
+	 * The 64-bit flags will be copied back to userspace as the
+	 * feature negotiation result, so clear the flags which the
+	 * driver doesn't support yet; userspace then gets the correct
+	 * flags (features) to handle.
+ */
+ ub->dev_info.flags &= UBLK_F_ALL;
+
+ /* We are not ready to support zero copy */
+ ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
+
+ ub->bs_shift = ilog2(ub->dev_info.block_size);
+ ub->dev_info.nr_hw_queues = min_t(unsigned int,
+ ub->dev_info.nr_hw_queues, nr_cpu_ids);
+ ublk_align_max_io_size(ub);
+
+ ret = ublk_init_queues(ub);
+ if (ret)
+ goto out_free_dev_number;
+
+ ret = ublk_add_tag_set(ub);
+ if (ret)
+ goto out_deinit_queues;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
+ goto out_free_tag_set;
+
+ /*
+	 * Add the char dev so that the ublksrv daemon can be set up.
+	 * ublk_add_chdev() will clean up everything if it fails.
+ */
+ ret = ublk_add_chdev(ub);
+ goto out_unlock;
+
+out_free_tag_set:
+ blk_mq_free_tag_set(&ub->tag_set);
+out_deinit_queues:
+ ublk_deinit_queues(ub);
+out_free_dev_number:
+ ublk_free_dev_number(ub);
+out_free_ub:
+ mutex_destroy(&ub->mutex);
+ kfree(ub);
+out_unlock:
+ mutex_unlock(&ublk_ctl_mutex);
+ return ret;
+}
+
+static inline bool ublk_idr_freed(int id)
+{
+ void *ptr;
+
+ spin_lock(&ublk_idr_lock);
+ ptr = idr_find(&ublk_index_idr, id);
+ spin_unlock(&ublk_idr_lock);
+
+ return ptr == NULL;
+}
+
+static int ublk_ctrl_del_dev(int idx)
+{
+ struct ublk_device *ub;
+ int ret;
+
+ ret = mutex_lock_killable(&ublk_ctl_mutex);
+ if (ret)
+ return ret;
+
+ ub = ublk_get_device_from_id(idx);
+ if (ub) {
+ ublk_remove(ub);
+ ublk_put_device(ub);
+ ret = 0;
+ } else {
+ ret = -ENODEV;
+ }
+
+ /*
+	 * Wait until the idr entry is removed, so the index can be
+	 * reused once the DEL_DEV command has returned.
+ */
+ if (!ret)
+ wait_event(ublk_idr_wq, ublk_idr_freed(idx));
+ mutex_unlock(&ublk_ctl_mutex);
+
+ return ret;
+}
+
+static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+
+ pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
+ __func__, cmd->cmd_op, header->dev_id, header->queue_id,
+ header->data[0], header->addr, header->len);
+}
+
+static int ublk_ctrl_stop_dev(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ struct ublk_device *ub;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ ublk_stop_dev(ub);
+ cancel_work_sync(&ub->stop_work);
+
+ ublk_put_device(ub);
+ return 0;
+}
+
+static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublk_device *ub;
+ int ret = 0;
+
+ if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
+ return -EINVAL;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
+ ret = -EFAULT;
+ ublk_put_device(ub);
+
+ return ret;
+}
+
+static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ int ret = -EINVAL;
+
+ ublk_ctrl_cmd_dump(cmd);
+
+ if (!(issue_flags & IO_URING_F_SQE128))
+ goto out;
+
+ ret = -EPERM;
+ if (!capable(CAP_SYS_ADMIN))
+ goto out;
+
+ ret = -ENODEV;
+ switch (cmd->cmd_op) {
+ case UBLK_CMD_START_DEV:
+ ret = ublk_ctrl_start_dev(cmd);
+ break;
+ case UBLK_CMD_STOP_DEV:
+ ret = ublk_ctrl_stop_dev(cmd);
+ break;
+ case UBLK_CMD_GET_DEV_INFO:
+ ret = ublk_ctrl_get_dev_info(cmd);
+ break;
+ case UBLK_CMD_ADD_DEV:
+ ret = ublk_ctrl_add_dev(cmd);
+ break;
+ case UBLK_CMD_DEL_DEV:
+ ret = ublk_ctrl_del_dev(header->dev_id);
+ break;
+ case UBLK_CMD_GET_QUEUE_AFFINITY:
+ ret = ublk_ctrl_get_queue_affinity(cmd);
+ break;
+ default:
+ break;
+ }
+ out:
+ io_uring_cmd_done(cmd, ret, 0);
+ pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
+ __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
+ return -EIOCBQUEUED;
+}
+
+static const struct file_operations ublk_ctl_fops = {
+ .open = nonseekable_open,
+ .uring_cmd = ublk_ctrl_uring_cmd,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice ublk_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ublk-control",
+ .fops = &ublk_ctl_fops,
+};
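+
+/*
+ * Putting the pieces together, the expected bring-up sequence for one
+ * device, as driven from userspace (a summary sketch, not driver code):
+ *
+ *	1. UBLK_CMD_ADD_DEV on /dev/ublk-control -> creates /dev/ublkcN
+ *	2. mmap() each queue's descriptor buffer from /dev/ublkcN
+ *	3. queue UBLK_IO_FETCH_REQ for every (q_id, tag)
+ *	4. UBLK_CMD_START_DEV, which waits for step 3 -> adds /dev/ublkbN
+ *	5. serve IO via UBLK_IO_COMMIT_AND_FETCH_REQ until STOP_DEV/DEL_DEV
+ */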
+
+static int __init ublk_init(void)
+{
+ int ret;
+
+ init_waitqueue_head(&ublk_idr_wq);
+
+ ret = misc_register(&ublk_misc);
+ if (ret)
+ return ret;
+
+ ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
+ if (ret)
+		goto unregister_misc;
+
+ ublk_chr_class = class_create(THIS_MODULE, "ublk-char");
+ if (IS_ERR(ublk_chr_class)) {
+ ret = PTR_ERR(ublk_chr_class);
+ goto free_chrdev_region;
+ }
+ return 0;
+
+free_chrdev_region:
+ unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
+unregister_misc:
+ misc_deregister(&ublk_misc);
+ return ret;
+}
+
+static void __exit ublk_exit(void)
+{
+ struct ublk_device *ub;
+ int id;
+
+ class_destroy(ublk_chr_class);
+
+ misc_deregister(&ublk_misc);
+
+ idr_for_each_entry(&ublk_index_idr, ub, id)
+ ublk_remove(ub);
+
+ idr_destroy(&ublk_index_idr);
+ unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
+}
+
+module_init(ublk_init);
+module_exit(ublk_exit);
+
+MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6fc7850c2b0a..d7d72e8f6e55 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -1089,7 +1089,7 @@ static int virtblk_probe(struct virtio_device *vdev)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(vblk->disk);
+ put_disk(vblk->disk);
out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
@@ -1111,7 +1111,6 @@ static void virtblk_remove(struct virtio_device *vdev)
flush_work(&vblk->config_work);
del_gendisk(vblk->disk);
- blk_cleanup_queue(vblk->disk->queue);
blk_mq_free_tag_set(&vblk->tag_set);
mutex_lock(&vblk->vdev_mutex);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index a97f2bf5b01b..a5cf7f1e871c 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -442,7 +442,7 @@ static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
* Routines for managing virtual block devices (vbds).
*/
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
- int operation)
+ enum req_op operation)
{
struct xen_vbd *vbd = &blkif->vbd;
int rc = -EACCES;
@@ -1193,8 +1193,8 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct bio *bio = NULL;
struct bio **biolist = pending_req->biolist;
int i, nbio = 0;
- int operation;
- int operation_flags = 0;
+ enum req_op operation;
+ blk_opf_t operation_flags = 0;
struct blk_plug plug;
bool drain = false;
struct grant_page **pages = pending_req->segments;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3646c0cae672..dc48298225a6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2397,7 +2397,7 @@ static void blkfront_connect(struct blkfront_info *info)
err = device_add_disk(&info->xbdev->dev, info->gd, NULL);
if (err) {
- blk_cleanup_disk(info->gd);
+ put_disk(info->gd);
blk_mq_free_tag_set(&info->tag_set);
info->rq = NULL;
goto fail;
@@ -2482,7 +2482,7 @@ static int blkfront_remove(struct xenbus_device *xbdev)
blkif_free(info, 0);
if (info->gd) {
xlbd_release_minors(info->gd->first_minor, info->gd->minors);
- blk_cleanup_disk(info->gd);
+ put_disk(info->gd);
blk_mq_free_tag_set(&info->tag_set);
}
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 7a6ed83481b8..c1e85f356e4d 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -337,7 +337,7 @@ static int z2ram_register_disk(int minor)
z2ram_gendisk[minor] = disk;
err = add_disk(disk);
if (err)
- blk_cleanup_disk(disk);
+ put_disk(disk);
return err;
}
@@ -384,7 +384,6 @@ static void __exit z2_exit(void)
for (i = 0; i < Z2MINOR_COUNT; i++) {
del_gendisk(z2ram_gendisk[i]);
- blk_cleanup_queue(z2ram_gendisk[i]->queue);
put_disk(z2ram_gendisk[i]);
}
blk_mq_free_tag_set(&tag_set);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index b8549c61ff2c..4abeb261b833 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1523,7 +1523,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
* Returns 1 if IO request was successfully submitted.
*/
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, unsigned int op, struct bio *bio)
+ int offset, enum req_op op, struct bio *bio)
{
int ret;
@@ -1631,7 +1631,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, enum req_op op)
{
int offset, ret;
u32 index;
@@ -1957,7 +1957,7 @@ static int zram_add(void)
return device_id;
out_cleanup_disk:
- blk_cleanup_disk(zram->disk);
+ put_disk(zram->disk);
out_free_idr:
idr_remove(&zram_index_idr, device_id);
out_free_dev:
@@ -2008,7 +2008,7 @@ static int zram_remove(struct zram *zram)
*/
zram_reset_device(zram);
- blk_cleanup_disk(zram->disk);
+ put_disk(zram->disk);
kfree(zram);
return 0;
}
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 8e78b37d0f6a..ceded5772aac 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -817,7 +817,7 @@ probe_fail_free_irqs:
free_irq(HW_EVENT_GDROM_DMA, &gd);
free_irq(HW_EVENT_GDROM_CMD, &gd);
probe_fail_cleanup_disk:
- blk_cleanup_disk(gd.disk);
+ put_disk(gd.disk);
probe_fail_free_tag_set:
blk_mq_free_tag_set(&gd.tag_set);
probe_fail_free_cd_info:
@@ -831,7 +831,6 @@ probe_fail_no_mem:
static int remove_gdrom(struct platform_device *devptr)
{
- blk_cleanup_queue(gd.gdrom_rq);
blk_mq_free_tag_set(&gd.tag_set);
free_irq(HW_EVENT_GDROM_CMD, &gd);
free_irq(HW_EVENT_GDROM_DMA, &gd);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 6058abf42ba7..7720ea270ed8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1282,8 +1282,7 @@ struct srp_terminate_context {
int scsi_result;
};
-static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr,
- bool reserved)
+static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
{
struct srp_terminate_context *context = context_ptr;
struct srp_target_port *target = context->srp_target;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3563d15dbaf2..ba3909bb6bea 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -414,8 +414,8 @@ static void uuid_io_unlock(struct closure *cl)
up(&c->uuid_write_mutex);
}
-static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
- struct bkey *k, struct closure *parent)
+static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k,
+ struct closure *parent)
{
struct closure *cl = &c->uuid_write;
struct uuid_entry *u;
@@ -429,22 +429,22 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
for (i = 0; i < KEY_PTRS(k); i++) {
struct bio *bio = bch_bbio_alloc(c);
- bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
+ bio->bi_opf = opf | REQ_SYNC | REQ_META;
bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio->bi_end_io = uuid_endio;
bio->bi_private = cl;
- bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
bch_bio_map(bio, c->uuids);
bch_submit_bbio(bio, c, k, i);
- if (op != REQ_OP_WRITE)
+ if ((opf & REQ_OP_MASK) != REQ_OP_WRITE)
break;
}
bch_extent_to_text(buf, sizeof(buf), k);
- pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);
+ pr_debug("%s UUIDs at %s\n", (opf & REQ_OP_MASK) == REQ_OP_WRITE ?
+ "wrote" : "read", buf);
for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
if (!bch_is_zero(u->uuid, 16))
@@ -463,7 +463,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
- uuid_io(c, REQ_OP_READ, 0, k, cl);
+ uuid_io(c, REQ_OP_READ, k, cl);
if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -511,7 +511,7 @@ static int __uuid_write(struct cache_set *c)
size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
SET_KEY_SIZE(&k.key, size);
- uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
+ uuid_io(c, REQ_OP_WRITE, &k.key, &cl);
closure_sync(&cl);
/* Only one bucket used for uuid write */
@@ -587,8 +587,7 @@ static void prio_endio(struct bio *bio)
closure_put(&ca->prio);
}
-static void prio_io(struct cache *ca, uint64_t bucket, int op,
- unsigned long op_flags)
+static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf)
{
struct closure *cl = &ca->prio;
struct bio *bio = bch_bbio_alloc(ca->set);
@@ -601,7 +600,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
bio->bi_end_io = prio_endio;
bio->bi_private = ca;
- bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
+ bio->bi_opf = opf | REQ_SYNC | REQ_META;
bch_bio_map(bio, ca->disk_buckets);
closure_bio_submit(ca->set, bio, &ca->prio);
@@ -661,7 +660,7 @@ int bch_prio_write(struct cache *ca, bool wait)
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
- prio_io(ca, bucket, REQ_OP_WRITE, 0);
+ prio_io(ca, bucket, REQ_OP_WRITE);
mutex_lock(&ca->set->bucket_lock);
ca->prio_buckets[i] = bucket;
@@ -705,7 +704,7 @@ static int prio_read(struct cache *ca, uint64_t bucket)
ca->prio_last_buckets[bucket_nr] = bucket;
bucket_nr++;
- prio_io(ca, bucket, REQ_OP_READ, 0);
+ prio_io(ca, bucket, REQ_OP_READ);
if (p->csum !=
bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
@@ -884,7 +883,7 @@ static void bcache_device_free(struct bcache_device *d)
if (disk) {
ida_simple_remove(&bcache_device_idx,
first_minor_to_idx(disk->first_minor));
- blk_cleanup_disk(disk);
+ put_disk(disk);
}
bioset_exit(&d->bio_split);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5ffa1dcf84cf..dc01ce33265b 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -577,13 +577,12 @@ static void dmio_complete(unsigned long error, void *context)
b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}
-static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
unsigned n_sectors, unsigned offset)
{
int r;
struct dm_io_request io_req = {
- .bi_op = rw,
- .bi_op_flags = 0,
+ .bi_opf = op,
.notify.fn = dmio_complete,
.notify.context = b,
.client = b->c->dm_io,
@@ -616,7 +615,7 @@ static void bio_complete(struct bio *bio)
b->end_io(b, status);
}
-static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
unsigned n_sectors, unsigned offset)
{
struct bio *bio;
@@ -630,10 +629,10 @@ static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
if (!bio) {
dmio:
- use_dmio(b, rw, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset);
return;
}
- bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw);
+ bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_complete;
bio->bi_private = b;
@@ -669,7 +668,8 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
return sector;
}
-static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
+static void submit_io(struct dm_buffer *b, enum req_op op,
+ void (*end_io)(struct dm_buffer *, blk_status_t))
{
unsigned n_sectors;
sector_t sector;
@@ -679,7 +679,7 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
sector = block_to_sector(b->c, b->block);
- if (rw != REQ_OP_WRITE) {
+ if (op != REQ_OP_WRITE) {
n_sectors = b->c->block_size >> SECTOR_SHIFT;
offset = 0;
} else {
@@ -698,9 +698,9 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
}
if (b->data_mode != DATA_MODE_VMALLOC)
- use_bio(b, rw, sector, n_sectors, offset);
+ use_bio(b, op, sector, n_sectors, offset);
else
- use_dmio(b, rw, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset);
}
/*----------------------------------------------------------------
@@ -1341,8 +1341,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
- .bi_op = REQ_OP_WRITE,
- .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+ .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
@@ -1365,8 +1364,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{
struct dm_io_request io_req = {
- .bi_op = REQ_OP_DISCARD,
- .bi_op_flags = REQ_SYNC,
+ .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 0221fa63f888..223e8e1a7a13 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -61,7 +61,8 @@ static inline bool __ebs_check_bs(unsigned int bs)
*
* copy blocks between bufio blocks and bio vector's (partial/overlapping) pages.
*/
-static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bvec_iter *iter)
+static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv,
+ struct bvec_iter *iter)
{
int r = 0;
unsigned char *ba, *pa;
@@ -81,7 +82,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len);
/* Avoid reading for writes in case bio vector's page overwrites block completely. */
- if (rw == READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
+ if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
ba = dm_bufio_read(ec->bufio, block, &b);
else
ba = dm_bufio_new(ec->bufio, block, &b);
@@ -95,7 +96,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
} else {
/* Copy data to/from bio to buffer if read/new was successful above. */
ba += buf_off;
- if (rw == READ) {
+ if (op == REQ_OP_READ) {
memcpy(pa, ba, cur_len);
flush_dcache_page(bv->bv_page);
} else {
@@ -117,14 +118,14 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
}
/* READ/WRITE: iterate bio vector's copying between (partial) pages and bufio blocks. */
-static int __ebs_rw_bio(struct ebs_c *ec, int rw, struct bio *bio)
+static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio)
{
int r = 0, rr;
struct bio_vec bv;
struct bvec_iter iter;
bio_for_each_bvec(bv, bio, iter) {
- rr = __ebs_rw_bvec(ec, rw, &bv, &iter);
+ rr = __ebs_rw_bvec(ec, op, &bv, &iter);
if (rr)
r = rr;
}
@@ -205,10 +206,10 @@ static void __ebs_process_bios(struct work_struct *ws)
bio_list_for_each(bio, &bios) {
r = -EIO;
if (bio_op(bio) == REQ_OP_READ)
- r = __ebs_rw_bio(ec, READ, bio);
+ r = __ebs_rw_bio(ec, REQ_OP_READ, bio);
else if (bio_op(bio) == REQ_OP_WRITE) {
write = true;
- r = __ebs_rw_bio(ec, WRITE, bio);
+ r = __ebs_rw_bio(ec, REQ_OP_WRITE, bio);
} else if (bio_op(bio) == REQ_OP_DISCARD) {
__ebs_forget_bio(ec, bio);
r = __ebs_discard_bio(ec, bio);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index f2305eb758a2..89fa7a68c6c4 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -32,7 +32,7 @@ struct flakey_c {
unsigned corrupt_bio_byte;
unsigned corrupt_bio_rw;
unsigned corrupt_bio_value;
- unsigned corrupt_bio_flags;
+ blk_opf_t corrupt_bio_flags;
};
enum feature_flag_bits {
@@ -145,7 +145,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
/*
* Only corrupt bios with these flags set.
*/
- r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
+ BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
+ sizeof(unsigned int));
+ r = dm_read_arg(_args + 3, as,
+ (__force unsigned *)&fc->corrupt_bio_flags,
+ &ti->error);
if (r)
return r;
argc--;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 3d5a0ce123c9..c60f9b2ece2d 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -298,7 +298,7 @@ struct dm_integrity_io {
struct work_struct work;
struct dm_integrity_c *ic;
- enum req_opf op;
+ enum req_op op;
bool fua;
struct dm_integrity_range range;
@@ -551,14 +551,14 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
return 0;
}
-static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
+static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
+ const enum req_op op = opf & REQ_OP_MASK;
int r;
- io_req.bi_op = op;
- io_req.bi_op_flags = op_flags;
+ io_req.bi_opf = opf;
io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = ic->sb;
io_req.notify.fn = NULL;
@@ -1050,8 +1050,9 @@ static void complete_journal_io(unsigned long error, void *context)
complete_journal_op(comp);
}
-static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
- unsigned sector, unsigned n_sectors, struct journal_completion *comp)
+static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
+ unsigned sector, unsigned n_sectors,
+ struct journal_completion *comp)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -1067,8 +1068,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
- io_req.bi_op = op;
- io_req.bi_op_flags = op_flags;
+ io_req.bi_opf = opf;
io_req.mem.type = DM_IO_PAGE_LIST;
if (ic->journal_io)
io_req.mem.ptr.pl = &ic->journal_io[pl_index];
@@ -1088,7 +1088,8 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
r = dm_io(&io_req, 1, &io_loc, NULL);
if (unlikely(r)) {
- dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
+ dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
+ "reading journal" : "writing journal", r);
if (comp) {
WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
complete_journal_io(-1UL, comp);
@@ -1096,15 +1097,16 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
}
}
-static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
- unsigned n_sections, struct journal_completion *comp)
+static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
+ unsigned section, unsigned n_sections,
+ struct journal_completion *comp)
{
unsigned sector, n_sectors;
sector = section * ic->journal_section_sectors;
n_sectors = n_sections * ic->journal_section_sectors;
- rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
+ rw_journal_sectors(ic, opf, sector, n_sectors, comp);
}
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
@@ -1129,7 +1131,7 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
for (i = 0; i < commit_sections; i++)
rw_section_mac(ic, commit_start + i, true);
}
- rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
+ rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
commit_sections, &io_comp);
} else {
unsigned to_end;
@@ -1141,7 +1143,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
if (try_wait_for_completion(&crypt_comp_1.comp)) {
- rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
+ rw_journal(ic, REQ_OP_WRITE | REQ_FUA,
+ commit_start, to_end, &io_comp);
reinit_completion(&crypt_comp_1.comp);
crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
@@ -1152,17 +1155,17 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
wait_for_completion_io(&crypt_comp_1.comp);
- rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
+ rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
wait_for_completion_io(&crypt_comp_2.comp);
}
} else {
for (i = 0; i < to_end; i++)
rw_section_mac(ic, commit_start + i, true);
- rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
+ rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
for (i = 0; i < commit_sections - to_end; i++)
rw_section_mac(ic, i, true);
}
- rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
+ rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp);
}
wait_for_completion_io(&io_comp.comp);
@@ -1188,8 +1191,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
- io_req.bi_op = REQ_OP_WRITE;
- io_req.bi_op_flags = 0;
+ io_req.bi_opf = REQ_OP_WRITE;
io_req.mem.type = DM_IO_PAGE_LIST;
io_req.mem.ptr.pl = &ic->journal[pl_index];
io_req.mem.offset = pl_offset;
@@ -1516,8 +1518,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
if (!ic->meta_dev)
flush_data = false;
if (flush_data) {
- fr.io_req.bi_op = REQ_OP_WRITE,
- fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+ fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
fr.io_req.mem.type = DM_IO_KMEM,
fr.io_req.mem.ptr.addr = NULL,
fr.io_req.notify.fn = flush_notify,
@@ -2626,7 +2627,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
if (dm_integrity_failed(ic))
return;
- r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
+ r = sync_rw_sb(ic, REQ_OP_WRITE);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
}
@@ -2706,8 +2707,7 @@ next_chunk:
if (unlikely(dm_integrity_failed(ic)))
goto err;
- io_req.bi_op = REQ_OP_READ;
- io_req.bi_op_flags = 0;
+ io_req.bi_opf = REQ_OP_READ;
io_req.mem.type = DM_IO_VMA;
io_req.mem.ptr.addr = ic->recalc_buffer;
io_req.notify.fn = NULL;
@@ -2800,7 +2800,7 @@ static void bitmap_block_work(struct work_struct *w)
if (bio_list_empty(&waiting))
return;
- rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
+ rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC,
bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
@@ -2846,7 +2846,7 @@ static void bitmap_flush_work(struct work_struct *work)
block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
- rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
spin_lock_irq(&ic->endio_wait.lock);
@@ -2918,7 +2918,7 @@ static void replay_journal(struct dm_integrity_c *ic)
if (!ic->just_formatted) {
DEBUG_print("reading journal\n");
- rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
+ rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL);
if (ic->journal_io)
DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
if (ic->journal_io) {
@@ -3113,7 +3113,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
/* set to 0 to test bitmap replay code */
init_journal(ic, 0, ic->journal_sections, 0);
ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
- r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
#endif
@@ -3136,23 +3136,23 @@ static void dm_integrity_resume(struct dm_target *ti)
if (ic->provided_data_sectors > old_provided_data_sectors &&
ic->mode == 'B' &&
ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
- rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
+ rw_journal_sectors(ic, REQ_OP_READ, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
- rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
}
ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
- r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
}
if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
DEBUG_print("resume dirty_bitmap\n");
- rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
+ rw_journal_sectors(ic, REQ_OP_READ, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
if (ic->mode == 'B') {
if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
@@ -3171,7 +3171,7 @@ static void dm_integrity_resume(struct dm_target *ti)
block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
- rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
ic->sb->recalc_sector = cpu_to_le64(0);
@@ -3187,7 +3187,7 @@ static void dm_integrity_resume(struct dm_target *ti)
replay_journal(ic);
ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
}
- r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
} else {
@@ -3199,7 +3199,7 @@ static void dm_integrity_resume(struct dm_target *ti)
if (ic->mode == 'B') {
ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
- r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
@@ -3215,7 +3215,7 @@ static void dm_integrity_resume(struct dm_target *ti)
block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
}
- rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
}
}
@@ -4256,7 +4256,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- r = sync_rw_sb(ic, REQ_OP_READ, 0);
+ r = sync_rw_sb(ic, REQ_OP_READ);
if (r) {
ti->error = "Error reading superblock";
goto bad;
@@ -4500,7 +4500,7 @@ try_smaller_buffer:
ti->error = "Error initializing journal";
goto bad;
}
- r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
if (r) {
ti->error = "Error initializing superblock";
goto bad;
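
The dm-integrity hunks above are all the same mechanical rewrite: the separate bi_op/bi_op_flags pair in struct dm_io_request collapses into the single blk_opf_t bi_opf field, and callers OR the flags into the op. A minimal sketch of the new calling convention (write_sb_sketch is a hypothetical helper, not part of the patch):

#include <linux/blk_types.h>
#include <linux/dm-io.h>

/* Issue one synchronous FUA write through dm-io; op and flags now
 * travel together in the single blk_opf_t bi_opf field. */
static int write_sb_sketch(struct dm_io_request *io_req,
			   struct dm_io_region *where)
{
	io_req->bi_opf = REQ_OP_WRITE | REQ_FUA | REQ_SYNC;
	return dm_io(io_req, 1, where, NULL);
}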
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index e4b95eaeec8c..783564533459 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -293,7 +293,7 @@ static void km_dp_init(struct dpages *dp, void *data)
/*-----------------------------------------------------------------
* IO routines that accept a list of pages.
*---------------------------------------------------------------*/
-static void do_region(int op, int op_flags, unsigned region,
+static void do_region(const blk_opf_t opf, unsigned region,
struct dm_io_region *where, struct dpages *dp,
struct io *io)
{
@@ -306,6 +306,7 @@ static void do_region(int op, int op_flags, unsigned region,
struct request_queue *q = bdev_get_queue(where->bdev);
sector_t num_sectors;
unsigned int special_cmd_max_sectors;
+ const enum req_op op = opf & REQ_OP_MASK;
/*
* Reject unsupported discard and write same requests.
@@ -339,8 +340,8 @@ static void do_region(int op, int op_flags, unsigned region,
(PAGE_SIZE >> SECTOR_SHIFT)));
}
- bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
- GFP_NOIO, &io->client->bios);
+ bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
+ &io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_end_io = endio;
store_io_and_region_in_bio(bio, io, region);
@@ -368,7 +369,7 @@ static void do_region(int op, int op_flags, unsigned region,
} while (remaining);
}
-static void dispatch_io(int op, int op_flags, unsigned int num_regions,
+static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
struct dm_io_region *where, struct dpages *dp,
struct io *io, int sync)
{
@@ -378,7 +379,7 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
BUG_ON(num_regions > DM_IO_MAX_REGIONS);
if (sync)
- op_flags |= REQ_SYNC;
+ opf |= REQ_SYNC;
/*
* For multiple regions we need to be careful to rewind
@@ -386,8 +387,8 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
*/
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
- if (where[i].count || (op_flags & REQ_PREFLUSH))
- do_region(op, op_flags, i, where + i, dp, io);
+ if (where[i].count || (opf & REQ_PREFLUSH))
+ do_region(opf, i, where + i, dp, io);
}
/*
@@ -411,13 +412,13 @@ static void sync_io_complete(unsigned long error, void *context)
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
- struct dm_io_region *where, int op, int op_flags,
- struct dpages *dp, unsigned long *error_bits)
+ struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
+ unsigned long *error_bits)
{
struct io *io;
struct sync_io sio;
- if (num_regions > 1 && !op_is_write(op)) {
+ if (num_regions > 1 && !op_is_write(opf)) {
WARN_ON(1);
return -EIO;
}
@@ -434,7 +435,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(op, op_flags, num_regions, where, dp, io, 1);
+ dispatch_io(opf, num_regions, where, dp, io, 1);
wait_for_completion_io(&sio.wait);
@@ -445,12 +446,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
- struct dm_io_region *where, int op, int op_flags,
+ struct dm_io_region *where, blk_opf_t opf,
struct dpages *dp, io_notify_fn fn, void *context)
{
struct io *io;
- if (num_regions > 1 && !op_is_write(op)) {
+ if (num_regions > 1 && !op_is_write(opf)) {
WARN_ON(1);
fn(1, context);
return -EIO;
@@ -466,7 +467,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
+ dispatch_io(opf, num_regions, where, dp, io, 0);
return 0;
}
@@ -489,7 +490,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
case DM_IO_VMA:
flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
- if (io_req->bi_op == REQ_OP_READ) {
+ if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
dp->vma_invalidate_address = io_req->mem.ptr.vma;
dp->vma_invalidate_size = size;
}
@@ -519,11 +520,10 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
if (!io_req->notify.fn)
return sync_io(io_req->client, num_regions, where,
- io_req->bi_op, io_req->bi_op_flags, &dp,
- sync_error_bits);
+ io_req->bi_opf, &dp, sync_error_bits);
- return async_io(io_req->client, num_regions, where, io_req->bi_op,
- io_req->bi_op_flags, &dp, io_req->notify.fn,
+ return async_io(io_req->client, num_regions, where,
+ io_req->bi_opf, &dp, io_req->notify.fn,
io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
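
dm-io now receives one blk_opf_t and recovers the bare operation by masking, exactly as do_region() does with opf & REQ_OP_MASK above. A sketch of the two idioms the file leans on (helper names are illustrative):

#include <linux/blk_types.h>

/* The op occupies the low REQ_OP_BITS of the combined value. */
static inline enum req_op opf_to_op(blk_opf_t opf)
{
	return opf & REQ_OP_MASK;
}

/* op_is_write() also accepts the combined value, since it only
 * inspects the op bits. */
static inline bool opf_is_read(blk_opf_t opf)
{
	return opf_to_op(opf) == REQ_OP_READ;
}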
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 37b03ab7e5c9..9c8f3544e99d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -350,9 +350,9 @@ struct kcopyd_job {
unsigned long write_err;
/*
- * Either READ or WRITE
+ * REQ_OP_READ, REQ_OP_WRITE or REQ_OP_WRITE_ZEROES.
*/
- int rw;
+ enum req_op op;
struct dm_io_region source;
/*
@@ -418,7 +418,8 @@ static struct kcopyd_job *pop_io_job(struct list_head *jobs,
* constraint and sequential writes that are at the right position.
*/
list_for_each_entry(job, jobs, list) {
- if (job->rw == READ || !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
+ if (job->op == REQ_OP_READ ||
+ !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
list_del(&job->list);
return job;
}
@@ -518,7 +519,7 @@ static void complete_io(unsigned long error, void *context)
io_job_finish(kc->throttle);
if (error) {
- if (op_is_write(job->rw))
+ if (op_is_write(job->op))
job->write_err |= error;
else
job->read_err = 1;
@@ -530,11 +531,11 @@ static void complete_io(unsigned long error, void *context)
}
}
- if (op_is_write(job->rw))
+ if (op_is_write(job->op))
push(&kc->complete_jobs, job);
else {
- job->rw = WRITE;
+ job->op = REQ_OP_WRITE;
push(&kc->io_jobs, job);
}
@@ -549,8 +550,7 @@ static int run_io_job(struct kcopyd_job *job)
{
int r;
struct dm_io_request io_req = {
- .bi_op = job->rw,
- .bi_op_flags = 0,
+ .bi_opf = job->op,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
.mem.offset = 0,
@@ -571,7 +571,7 @@ static int run_io_job(struct kcopyd_job *job)
io_job_start(job->kc->throttle);
- if (job->rw == READ)
+ if (job->op == REQ_OP_READ)
r = dm_io(&io_req, 1, &job->source, NULL);
else
r = dm_io(&io_req, job->num_dests, job->dests, NULL);
@@ -614,7 +614,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
if (r < 0) {
/* error this rogue job */
- if (op_is_write(job->rw))
+ if (op_is_write(job->op))
job->write_err = (unsigned long) -1L;
else
job->read_err = 1;
@@ -817,7 +817,7 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
if (from) {
job->source = *from;
job->pages = NULL;
- job->rw = READ;
+ job->op = REQ_OP_READ;
} else {
memset(&job->source, 0, sizeof job->source);
job->source.count = job->dests[0].count;
@@ -826,10 +826,10 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
/*
* Use WRITE ZEROES to optimize zeroing if all dests support it.
*/
- job->rw = REQ_OP_WRITE_ZEROES;
+ job->op = REQ_OP_WRITE_ZEROES;
for (i = 0; i < job->num_dests; i++)
if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
- job->rw = WRITE;
+ job->op = REQ_OP_WRITE;
break;
}
}
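
kcopyd's job->rw int becomes a typed enum req_op, and the write-zeroes path above selects REQ_OP_WRITE_ZEROES only when every destination supports it. A condensed sketch of that selection loop (pick_zero_op is illustrative):

#include <linux/blkdev.h>
#include <linux/dm-io.h>

static enum req_op pick_zero_op(struct dm_io_region *dests, unsigned n)
{
	unsigned i;

	/* Fall back to plain writes if any destination lacks native
	 * write-zeroes support. */
	for (i = 0; i < n; i++)
		if (!bdev_write_zeroes_sectors(dests[i].bdev))
			return REQ_OP_WRITE;
	return REQ_OP_WRITE_ZEROES;
}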
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 0c6620e7b7bf..cf10fa667797 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -291,10 +291,9 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis
core->nr_regions = le64_to_cpu(disk->nr_regions);
}
-static int rw_header(struct log_c *lc, int op)
+static int rw_header(struct log_c *lc, enum req_op op)
{
- lc->io_req.bi_op = op;
- lc->io_req.bi_op_flags = 0;
+ lc->io_req.bi_opf = op;
return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
@@ -307,8 +306,7 @@ static int flush_header(struct log_c *lc)
.count = 0,
};
- lc->io_req.bi_op = REQ_OP_WRITE;
- lc->io_req.bi_op_flags = REQ_PREFLUSH;
+ lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
return dm_io(&lc->io_req, 1, &null_location, NULL);
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 80c9f7134e9b..66486b14ec33 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2038,7 +2038,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
rdev->sb_loaded = 0;
- if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) {
DMERR("Failed to read superblock of device at position %d",
rdev->raid_disk);
md_error(rdev->mddev, rdev);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 8811d484fdd1..06a38dc32025 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -260,8 +260,7 @@ static int mirror_flush(struct dm_target *ti)
struct dm_io_region io[MAX_NR_MIRRORS];
struct mirror *m;
struct dm_io_request io_req = {
- .bi_op = REQ_OP_WRITE,
- .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+ .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = ms->io_client,
@@ -535,8 +534,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
{
struct dm_io_region io;
struct dm_io_request io_req = {
- .bi_op = REQ_OP_READ,
- .bi_op_flags = 0,
+ .bi_opf = REQ_OP_READ,
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = read_callback,
@@ -648,9 +646,9 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
unsigned int i;
struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
struct mirror *m;
+ blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
struct dm_io_request io_req = {
- .bi_op = REQ_OP_WRITE,
- .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
+ .bi_opf = REQ_OP_WRITE | op_flags,
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = write_callback,
@@ -659,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
};
if (bio_op(bio) == REQ_OP_DISCARD) {
- io_req.bi_op = REQ_OP_DISCARD;
+ io_req.bi_opf = REQ_OP_DISCARD | op_flags;
io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = NULL;
}
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3bb5cff5d6fc..f46f930eedf9 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -226,8 +226,8 @@ static void do_metadata(struct work_struct *work)
/*
* Read or write a chunk aligned and sized block of data from a device.
*/
-static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
- int op_flags, int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
+ int metadata)
{
struct dm_io_region where = {
.bdev = dm_snap_cow(ps->store->snap)->bdev,
@@ -235,8 +235,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
.count = ps->store->chunk_size,
};
struct dm_io_request io_req = {
- .bi_op = op,
- .bi_op_flags = op_flags,
+ .bi_opf = opf,
.mem.type = DM_IO_VMA,
.mem.ptr.vma = area,
.client = ps->io_client,
@@ -282,11 +281,11 @@ static void skip_metadata(struct pstore *ps)
* Read or write a metadata area. Remembering to skip the first
* chunk which holds the header.
*/
-static int area_io(struct pstore *ps, int op, int op_flags)
+static int area_io(struct pstore *ps, blk_opf_t opf)
{
chunk_t chunk = area_location(ps, ps->current_area);
- return chunk_io(ps, ps->area, chunk, op, op_flags, 0);
+ return chunk_io(ps, ps->area, chunk, opf, 0);
}
static void zero_memory_area(struct pstore *ps)
@@ -297,7 +296,7 @@ static void zero_memory_area(struct pstore *ps)
static int zero_disk_area(struct pstore *ps, chunk_t area)
{
return chunk_io(ps, ps->zero_area, area_location(ps, area),
- REQ_OP_WRITE, 0, 0);
+ REQ_OP_WRITE, 0);
}
static int read_header(struct pstore *ps, int *new_snapshot)
@@ -329,7 +328,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
if (r)
return r;
- r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1);
+ r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);
if (r)
goto bad;
@@ -390,7 +389,7 @@ static int write_header(struct pstore *ps)
dh->version = cpu_to_le32(ps->version);
dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
- return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1);
+ return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1);
}
/*
@@ -734,8 +733,8 @@ static void persistent_commit_exception(struct dm_exception_store *store,
/*
* Commit exceptions to disk.
*/
- if (ps->valid && area_io(ps, REQ_OP_WRITE,
- REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
+ if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
+ REQ_SYNC))
ps->valid = 0;
/*
@@ -775,7 +774,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
return 0;
ps->current_area--;
- r = area_io(ps, REQ_OP_READ, 0);
+ r = area_io(ps, REQ_OP_READ);
if (r < 0)
return r;
ps->current_committed = ps->exceptions_per_area;
@@ -812,7 +811,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
for (i = 0; i < nr_merged; i++)
clear_exception(ps, ps->current_committed - 1 - i);
- r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
+ r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
if (r < 0)
return r;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index bd539afbfe88..df904b7e95ce 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1620,13 +1620,11 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
unsigned int *zone_sectors = data;
- if (!blk_queue_is_zoned(q))
+ if (!bdev_is_zoned(dev->bdev))
return 0;
-
- return blk_queue_zone_sectors(q) != *zone_sectors;
+ return bdev_zone_sectors(dev->bdev) != *zone_sectors;
}
/*
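
dm-table now reads zone properties straight off the block_device, dropping the bdev_get_queue() detour. A sketch of the equivalent check (zone_sectors_match is illustrative):

#include <linux/blkdev.h>

static bool zone_sectors_match(struct block_device *bdev,
			       unsigned int zone_sectors)
{
	/* Non-zoned devices impose no zone-size constraint. */
	if (!bdev_is_zoned(bdev))
		return true;
	return bdev_zone_sectors(bdev) == zone_sectors;
}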
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index d74c5a7a0ab4..2b994b3e22a7 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -523,8 +523,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
region.sector += wc->start_sector;
atomic_inc(&endio.count);
- req.bi_op = REQ_OP_WRITE;
- req.bi_op_flags = REQ_SYNC;
+ req.bi_opf = REQ_OP_WRITE | REQ_SYNC;
req.mem.type = DM_IO_VMA;
req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
req.client = wc->dm_io;
@@ -562,8 +561,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
region.sector += wc->start_sector;
- req.bi_op = REQ_OP_WRITE;
- req.bi_op_flags = REQ_SYNC | REQ_FUA;
+ req.bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
req.mem.type = DM_IO_VMA;
req.mem.ptr.vma = (char *)wc->memory_map;
req.client = wc->dm_io;
@@ -592,8 +590,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
region.bdev = dev->bdev;
region.sector = 0;
region.count = 0;
- req.bi_op = REQ_OP_WRITE;
- req.bi_op_flags = REQ_PREFLUSH;
+ req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
req.mem.type = DM_IO_KMEM;
req.mem.ptr.addr = NULL;
req.client = wc->dm_io;
@@ -981,8 +978,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors
region.bdev = wc->ssd_dev->bdev;
region.sector = wc->start_sector;
region.count = n_sectors;
- req.bi_op = REQ_OP_READ;
- req.bi_op_flags = REQ_SYNC;
+ req.bi_opf = REQ_OP_READ | REQ_SYNC;
req.mem.type = DM_IO_VMA;
req.mem.ptr.vma = (char *)wc->memory_map;
req.client = wc->dm_io;
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 3e7b1fe1580b..4d10f302c62e 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -139,13 +139,11 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
void dm_cleanup_zoned_dev(struct mapped_device *md)
{
- struct request_queue *q = md->queue;
-
- if (q) {
- kfree(q->conv_zones_bitmap);
- q->conv_zones_bitmap = NULL;
- kfree(q->seq_zones_wlock);
- q->seq_zones_wlock = NULL;
+ if (md->disk) {
+ kfree(md->disk->conv_zones_bitmap);
+ md->disk->conv_zones_bitmap = NULL;
+ kfree(md->disk->seq_zones_wlock);
+ md->disk->seq_zones_wlock = NULL;
}
kvfree(md->zwp_offset);
@@ -179,31 +177,31 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
void *data)
{
struct mapped_device *md = data;
- struct request_queue *q = md->queue;
+ struct gendisk *disk = md->disk;
switch (zone->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
- if (!q->conv_zones_bitmap) {
- q->conv_zones_bitmap =
- kcalloc(BITS_TO_LONGS(q->nr_zones),
+ if (!disk->conv_zones_bitmap) {
+ disk->conv_zones_bitmap =
+ kcalloc(BITS_TO_LONGS(disk->nr_zones),
sizeof(unsigned long), GFP_NOIO);
- if (!q->conv_zones_bitmap)
+ if (!disk->conv_zones_bitmap)
return -ENOMEM;
}
- set_bit(idx, q->conv_zones_bitmap);
+ set_bit(idx, disk->conv_zones_bitmap);
break;
case BLK_ZONE_TYPE_SEQWRITE_REQ:
case BLK_ZONE_TYPE_SEQWRITE_PREF:
- if (!q->seq_zones_wlock) {
- q->seq_zones_wlock =
- kcalloc(BITS_TO_LONGS(q->nr_zones),
+ if (!disk->seq_zones_wlock) {
+ disk->seq_zones_wlock =
+ kcalloc(BITS_TO_LONGS(disk->nr_zones),
sizeof(unsigned long), GFP_NOIO);
- if (!q->seq_zones_wlock)
+ if (!disk->seq_zones_wlock)
return -ENOMEM;
}
if (!md->zwp_offset) {
md->zwp_offset =
- kvcalloc(q->nr_zones, sizeof(unsigned int),
+ kvcalloc(disk->nr_zones, sizeof(unsigned int),
GFP_KERNEL);
if (!md->zwp_offset)
return -ENOMEM;
@@ -228,7 +226,7 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
*/
static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
{
- struct request_queue *q = md->queue;
+ struct gendisk *disk = md->disk;
unsigned int noio_flag;
int ret;
@@ -236,7 +234,7 @@ static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
* Check if something changed. If yes, cleanup the current resources
* and reallocate everything.
*/
- if (!q->nr_zones || q->nr_zones != md->nr_zones)
+ if (!disk->nr_zones || disk->nr_zones != md->nr_zones)
dm_cleanup_zoned_dev(md);
if (md->nr_zones)
return 0;
@@ -246,17 +244,17 @@ static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
* operations in this context are done as if GFP_NOIO was specified.
*/
noio_flag = memalloc_noio_save();
- ret = dm_blk_do_report_zones(md, t, 0, q->nr_zones,
+ ret = dm_blk_do_report_zones(md, t, 0, disk->nr_zones,
dm_zone_revalidate_cb, md);
memalloc_noio_restore(noio_flag);
if (ret < 0)
goto err;
- if (ret != q->nr_zones) {
+ if (ret != disk->nr_zones) {
ret = -EIO;
goto err;
}
- md->nr_zones = q->nr_zones;
+ md->nr_zones = disk->nr_zones;
return 0;
@@ -270,7 +268,7 @@ static int device_not_zone_append_capable(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
- return !blk_queue_is_zoned(bdev_get_queue(dev->bdev));
+ return !bdev_is_zoned(dev->bdev);
}
static bool dm_table_supports_zone_append(struct dm_table *t)
@@ -301,7 +299,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
* correct value to be exposed in sysfs queue/nr_zones.
*/
WARN_ON_ONCE(queue_is_mq(q));
- q->nr_zones = blkdev_nr_zones(md->disk);
+ md->disk->nr_zones = bdev_nr_zones(md->disk->part0);
/* Check if zone append is natively supported */
if (dm_table_supports_zone_append(t)) {
@@ -334,7 +332,7 @@ static int dm_update_zone_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
unsigned int *wp_ofst)
{
- sector_t sector = zno * blk_queue_zone_sectors(md->queue);
+ sector_t sector = zno * bdev_zone_sectors(md->disk->part0);
unsigned int noio_flag;
struct dm_table *t;
int srcu_idx, ret;
@@ -361,7 +359,7 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
}
struct orig_bio_details {
- unsigned int op;
+ enum req_op op;
unsigned int nr_sectors;
};
@@ -373,7 +371,7 @@ struct orig_bio_details {
static bool dm_zone_map_bio_begin(struct mapped_device *md,
unsigned int zno, struct bio *clone)
{
- sector_t zsectors = blk_queue_zone_sectors(md->queue);
+ sector_t zsectors = bdev_zone_sectors(md->disk->part0);
unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
/*
@@ -443,7 +441,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int z
return BLK_STS_OK;
case REQ_OP_ZONE_FINISH:
WRITE_ONCE(md->zwp_offset[zno],
- blk_queue_zone_sectors(md->queue));
+ bdev_zone_sectors(md->disk->part0));
return BLK_STS_OK;
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE:
@@ -466,26 +464,26 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int z
}
}
-static inline void dm_zone_lock(struct request_queue *q,
- unsigned int zno, struct bio *clone)
+static inline void dm_zone_lock(struct gendisk *disk, unsigned int zno,
+ struct bio *clone)
{
if (WARN_ON_ONCE(bio_flagged(clone, BIO_ZONE_WRITE_LOCKED)))
return;
- wait_on_bit_lock_io(q->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE);
+ wait_on_bit_lock_io(disk->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE);
bio_set_flag(clone, BIO_ZONE_WRITE_LOCKED);
}
-static inline void dm_zone_unlock(struct request_queue *q,
- unsigned int zno, struct bio *clone)
+static inline void dm_zone_unlock(struct gendisk *disk, unsigned int zno,
+ struct bio *clone)
{
if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))
return;
- WARN_ON_ONCE(!test_bit(zno, q->seq_zones_wlock));
- clear_bit_unlock(zno, q->seq_zones_wlock);
+ WARN_ON_ONCE(!test_bit(zno, disk->seq_zones_wlock));
+ clear_bit_unlock(zno, disk->seq_zones_wlock);
smp_mb__after_atomic();
- wake_up_bit(q->seq_zones_wlock, zno);
+ wake_up_bit(disk->seq_zones_wlock, zno);
bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
}
@@ -520,7 +518,6 @@ int dm_zone_map_bio(struct dm_target_io *tio)
struct dm_io *io = tio->io;
struct dm_target *ti = tio->ti;
struct mapped_device *md = io->md;
- struct request_queue *q = md->queue;
struct bio *clone = &tio->clone;
struct orig_bio_details orig_bio_details;
unsigned int zno;
@@ -536,7 +533,7 @@ int dm_zone_map_bio(struct dm_target_io *tio)
/* Lock the target zone */
zno = bio_zone_no(clone);
- dm_zone_lock(q, zno, clone);
+ dm_zone_lock(md->disk, zno, clone);
orig_bio_details.nr_sectors = bio_sectors(clone);
orig_bio_details.op = bio_op(clone);
@@ -546,7 +543,7 @@ int dm_zone_map_bio(struct dm_target_io *tio)
* both valid, and if the bio is a zone append, remap it to a write.
*/
if (!dm_zone_map_bio_begin(md, zno, clone)) {
- dm_zone_unlock(q, zno, clone);
+ dm_zone_unlock(md->disk, zno, clone);
return DM_MAPIO_KILL;
}
@@ -570,12 +567,12 @@ int dm_zone_map_bio(struct dm_target_io *tio)
sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
*tio->len_ptr);
if (sts != BLK_STS_OK)
- dm_zone_unlock(q, zno, clone);
+ dm_zone_unlock(md->disk, zno, clone);
break;
case DM_MAPIO_REQUEUE:
case DM_MAPIO_KILL:
default:
- dm_zone_unlock(q, zno, clone);
+ dm_zone_unlock(md->disk, zno, clone);
sts = BLK_STS_IOERR;
break;
}
@@ -592,7 +589,7 @@ int dm_zone_map_bio(struct dm_target_io *tio)
void dm_zone_endio(struct dm_io *io, struct bio *clone)
{
struct mapped_device *md = io->md;
- struct request_queue *q = md->queue;
+ struct gendisk *disk = md->disk;
struct bio *orig_bio = io->orig_bio;
unsigned int zwp_offset;
unsigned int zno;
@@ -608,7 +605,8 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
*/
if (clone->bi_status == BLK_STS_OK &&
bio_op(clone) == REQ_OP_ZONE_APPEND) {
- sector_t mask = (sector_t)blk_queue_zone_sectors(q) - 1;
+ sector_t mask =
+ (sector_t)bdev_zone_sectors(disk->part0) - 1;
orig_bio->bi_iter.bi_sector +=
clone->bi_iter.bi_sector & mask;
@@ -649,5 +647,5 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
zwp_offset - bio_sectors(orig_bio);
}
- dm_zone_unlock(q, zno, clone);
+ dm_zone_unlock(disk, zno, clone);
}
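
dm-zone.c moves the zone bookkeeping (conv_zones_bitmap, seq_zones_wlock, nr_zones) from the request_queue onto the gendisk; the write-lock protocol itself is unchanged. A sketch of the lock/unlock pair against the relocated bitmap, mirroring dm_zone_lock()/dm_zone_unlock() above:

#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

static void zone_wlock_sketch(struct gendisk *disk, unsigned int zno)
{
	wait_on_bit_lock_io(disk->seq_zones_wlock, zno,
			    TASK_UNINTERRUPTIBLE);
	/* ... issue the sequential write for zone zno ... */
	clear_bit_unlock(zno, disk->seq_zones_wlock);
	smp_mb__after_atomic();
	wake_up_bit(disk->seq_zones_wlock, zno);
}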
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index d1ea66114d14..34db364c23a8 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -737,7 +737,7 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
/*
* Read/write a metadata block.
*/
-static int dmz_rdwr_block(struct dmz_dev *dev, int op,
+static int dmz_rdwr_block(struct dmz_dev *dev, enum req_op op,
sector_t block, struct page *page)
{
struct bio *bio;
@@ -2045,7 +2045,8 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
* allocated and used to map the chunk.
* The zone returned will be set to the active state.
*/
-struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
+struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
+ unsigned int chunk, enum req_op op)
{
struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 0ec5d8b9b1a4..95b132b52f33 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -764,8 +764,7 @@ static void dmz_put_zoned_device(struct dm_target *ti)
static int dmz_fixup_devices(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
- struct dmz_dev *reg_dev, *zoned_dev;
- struct request_queue *q;
+ struct dmz_dev *reg_dev = NULL;
sector_t zone_nr_sectors = 0;
int i;
@@ -780,32 +779,32 @@ static int dmz_fixup_devices(struct dm_target *ti)
return -EINVAL;
}
for (i = 1; i < dmz->nr_ddevs; i++) {
- zoned_dev = &dmz->dev[i];
+ struct dmz_dev *zoned_dev = &dmz->dev[i];
+ struct block_device *bdev = zoned_dev->bdev;
+
if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
ti->error = "Secondary disk is not a zoned device";
return -EINVAL;
}
- q = bdev_get_queue(zoned_dev->bdev);
if (zone_nr_sectors &&
- zone_nr_sectors != blk_queue_zone_sectors(q)) {
+ zone_nr_sectors != bdev_zone_sectors(bdev)) {
ti->error = "Zone nr sectors mismatch";
return -EINVAL;
}
- zone_nr_sectors = blk_queue_zone_sectors(q);
+ zone_nr_sectors = bdev_zone_sectors(bdev);
zoned_dev->zone_nr_sectors = zone_nr_sectors;
- zoned_dev->nr_zones =
- blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+ zoned_dev->nr_zones = bdev_nr_zones(bdev);
}
} else {
- reg_dev = NULL;
- zoned_dev = &dmz->dev[0];
+ struct dmz_dev *zoned_dev = &dmz->dev[0];
+ struct block_device *bdev = zoned_dev->bdev;
+
if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
ti->error = "Disk is not a zoned device";
return -EINVAL;
}
- q = bdev_get_queue(zoned_dev->bdev);
- zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
- zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+ zoned_dev->zone_nr_sectors = bdev_zone_sectors(bdev);
+ zoned_dev->nr_zones = bdev_nr_zones(bdev);
}
if (reg_dev) {
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index a02744a0846c..265494d3f711 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -248,7 +248,7 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
unsigned int dev_idx, bool idle);
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
- unsigned int chunk, int op);
+ unsigned int chunk, enum req_op op);
void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *zone);
struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
struct dm_zone *dzone);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2b75f1ef7386..54c2a23f4e55 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -716,7 +716,7 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
}
static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
- int *srcu_idx, unsigned bio_opf)
+ int *srcu_idx, blk_opf_t bio_opf)
{
if (bio_opf & REQ_NOWAIT)
return dm_get_live_table_fast(md);
@@ -725,7 +725,7 @@ static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
}
static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
- unsigned bio_opf)
+ blk_opf_t bio_opf)
{
if (bio_opf & REQ_NOWAIT)
dm_put_live_table_fast(md);
@@ -1033,7 +1033,7 @@ static void clone_endio(struct bio *bio)
}
if (static_branch_unlikely(&zoned_enabled) &&
- unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
+ unlikely(bdev_is_zoned(bio->bi_bdev)))
dm_zone_endio(io, bio);
if (endio) {
@@ -1086,23 +1086,18 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
sector_t target_offset = dm_target_offset(ti, sector);
sector_t len = max_io_len_target_boundary(ti, target_offset);
- sector_t max_len;
/*
* Does the target need to split IO even further?
* - varied (per target) IO splitting is a tenet of DM; this
* explains why stacked chunk_sectors based splitting via
- * blk_max_size_offset() isn't possible here. So pass in
- * ti->max_io_len to override stacked chunk_sectors.
+ * blk_queue_split() isn't possible here.
*/
- if (ti->max_io_len) {
- max_len = blk_max_size_offset(ti->table->md->queue,
- target_offset, ti->max_io_len);
- if (len > max_len)
- len = max_len;
- }
-
- return len;
+ if (!ti->max_io_len)
+ return len;
+ return min_t(sector_t, len,
+ min(queue_max_sectors(ti->table->md->queue),
+ blk_chunk_sectors_left(target_offset, ti->max_io_len)));
}
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
@@ -1516,7 +1511,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
static bool is_abnormal_io(struct bio *bio)
{
- unsigned int op = bio_op(bio);
+ enum req_op op = bio_op(bio);
if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
switch (op) {
@@ -1547,6 +1542,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
case REQ_OP_WRITE_ZEROES:
num_bios = ti->num_write_zeroes_bios;
break;
+ default:
+ break;
}
/*
@@ -1628,7 +1625,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
* Only support bio polling for normal IO, and the target io is
* exactly inside the dm_io instance (verified in dm_poll_dm_io)
*/
- ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;
+ ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
setup_split_accounting(ci, len);
@@ -1725,7 +1722,7 @@ static void dm_submit_bio(struct bio *bio)
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
int srcu_idx;
struct dm_table *map;
- unsigned bio_opf = bio->bi_opf;
+ blk_opf_t bio_opf = bio->bi_opf;
map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
@@ -1899,7 +1896,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
del_gendisk(md->disk);
}
dm_queue_destroy_crypto_profile(md->queue);
- blk_cleanup_disk(md->disk);
+ put_disk(md->disk);
}
if (md->pending_io) {
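
Two things happen in dm.c: bio_opf is carried as a typed blk_opf_t, and max_io_len() is rewritten around blk_chunk_sectors_left(). A sketch of the new length clamp (clamp_len is illustrative; max_io_len stands in for ti->max_io_len):

#include <linux/blkdev.h>

/* Limit len to the queue's max_sectors and to the room remaining
 * before the next max_io_len boundary past target_offset. */
static sector_t clamp_len(struct request_queue *q, sector_t len,
			  sector_t target_offset, unsigned int max_io_len)
{
	if (!max_io_len)
		return len;
	return min_t(sector_t, len,
		     min(queue_max_sectors(q),
			 blk_chunk_sectors_left(target_offset,
						max_io_len)));
}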
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index d87f674ab762..bf6dffadbe6f 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -165,7 +165,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
if (sync_page_io(rdev, target,
roundup(size, bdev_logical_block_size(rdev->bdev)),
- page, REQ_OP_READ, 0, true)) {
+ page, REQ_OP_READ, true)) {
page->index = index;
return 0;
}
@@ -302,7 +302,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh);
set_buffer_mapped(bh);
- submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+ submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
bh = bh->b_this_page;
}
@@ -394,7 +394,7 @@ static int read_page(struct file *file, unsigned long index,
atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh);
set_buffer_mapped(bh);
- submit_bh(REQ_OP_READ, 0, bh);
+ submit_bh(REQ_OP_READ, bh);
}
blk_cur++;
bh = bh->b_this_page;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c7ecb0bffda0..4df78e30b76a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -993,15 +993,15 @@ int md_super_wait(struct mddev *mddev)
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
- struct page *page, int op, int op_flags, bool metadata_op)
+ struct page *page, blk_opf_t opf, bool metadata_op)
{
struct bio bio;
struct bio_vec bvec;
if (metadata_op && rdev->meta_bdev)
- bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags);
+ bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
else
- bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags);
+ bio_init(&bio, rdev->bdev, &bvec, 1, opf);
if (metadata_op)
bio.bi_iter.bi_sector = sector + rdev->sb_start;
@@ -1024,7 +1024,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
if (rdev->sb_loaded)
return 0;
- if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true))
goto fail;
rdev->sb_loaded = 1;
return 0;
@@ -1722,7 +1722,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return -EINVAL;
bb_sector = (long long)offset;
if (!sync_page_io(rdev, bb_sector, sectors << 9,
- rdev->bb_page, REQ_OP_READ, 0, true))
+ rdev->bb_page, REQ_OP_READ, true))
return -EIO;
bbp = (__le64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift;
@@ -2438,7 +2438,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
mdname(mddev), mddev->max_disks);
return -EBUSY;
}
- bdevname(rdev->bdev,b);
+ snprintf(b, sizeof(b), "%pg", rdev->bdev);
strreplace(b, '/', '!');
rdev->mddev = mddev;
@@ -5579,7 +5579,7 @@ static void md_free(struct kobject *ko)
if (mddev->gendisk) {
del_gendisk(mddev->gendisk);
- blk_cleanup_disk(mddev->gendisk);
+ put_disk(mddev->gendisk);
}
percpu_ref_exit(&mddev->writes_pending);
@@ -5718,7 +5718,7 @@ static int md_alloc(dev_t dev, char *name)
out_del_gendisk:
del_gendisk(disk);
out_cleanup_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_unlock_disks_mutex:
mutex_unlock(&disks_mutex);
mddev_put(mddev);
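
md.c also picks up the tree-wide bdevname() removal: device names are formatted with the %pg printk specifier instead. A one-line sketch:

#include <linux/blkdev.h>

/* %pg prints the block_device name (e.g. "sda1") directly. */
static void log_bdev_sketch(struct block_device *bdev)
{
	pr_info("md: binding %pg\n", bdev);
}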
diff --git a/drivers/md/md.h b/drivers/md/md.h
index cf2cbb17acbd..b4f84b27bdef 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -738,8 +738,7 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
- struct page *page, int op, int op_flags,
- bool metadata_op);
+ struct page *page, blk_opf_t opf, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(void);
extern void md_allow_write(struct mddev *mddev);
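
With the prototype above, sync_page_io() callers now pass a single combined blk_opf_t; the old trailing flags argument is gone. A sketch of a caller under the new signature (read_sb_sketch is illustrative, compiled against drivers/md/md.h):

static int read_sb_sketch(struct md_rdev *rdev)
{
	if (!sync_page_io(rdev, 0, PAGE_SIZE, rdev->sb_page,
			  REQ_OP_READ, true))
		return -EIO;
	return 0;
}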
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 258d4eb2d63c..05d8438cfec8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1220,8 +1220,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
struct raid1_info *mirror;
struct bio *read_bio;
struct bitmap *bitmap = mddev->bitmap;
- const int op = bio_op(bio);
- const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ const enum req_op op = bio_op(bio);
+ const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
int max_sectors;
int rdisk;
bool r1bio_existed = !!r1_bio;
@@ -1240,7 +1240,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
rcu_read_lock();
rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
if (rdev)
- bdevname(rdev->bdev, b);
+ snprintf(b, sizeof(b), "%pg", rdev->bdev);
else
strcpy(b, "???");
rcu_read_unlock();
@@ -1988,9 +1988,9 @@ static void end_sync_write(struct bio *bio)
}
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
- int sectors, struct page *page, int rw)
+ int sectors, struct page *page, int rw)
{
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
if (rw == WRITE) {
@@ -2057,7 +2057,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
rdev = conf->mirrors[d].rdev;
if (sync_page_io(rdev, sect, s<<9,
pages[idx],
- REQ_OP_READ, 0, false)) {
+ REQ_OP_READ, false)) {
success = 1;
break;
}
@@ -2305,7 +2305,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
if (sync_page_io(rdev, sect, s<<9,
- conf->tmppage, REQ_OP_READ, 0, false))
+ conf->tmppage, REQ_OP_READ, false))
success = 1;
rdev_dec_pending(rdev, mddev);
if (success)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d589f823feb1..26545950ca42 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1136,8 +1136,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
{
struct r10conf *conf = mddev->private;
struct bio *read_bio;
- const int op = bio_op(bio);
- const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ const enum req_op op = bio_op(bio);
+ const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
int max_sectors;
struct md_rdev *rdev;
char b[BDEVNAME_SIZE];
@@ -1164,7 +1164,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
disk = r10_bio->devs[slot].devnum;
err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (err_rdev)
- bdevname(err_rdev->bdev, b);
+ snprintf(b, sizeof(b), "%pg", err_rdev->bdev);
else {
strcpy(b, "???");
/* This never gets dereferenced */
@@ -1230,9 +1230,9 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
struct bio *bio, bool replacement,
int n_copy)
{
- const int op = bio_op(bio);
- const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
- const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
+ const enum req_op op = bio_op(bio);
+ const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
+ const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
unsigned long flags;
struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL;
@@ -2512,7 +2512,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
addr,
s << 9,
pages[idx],
- REQ_OP_READ, 0, false);
+ REQ_OP_READ, false);
if (ok) {
rdev = conf->mirrors[dw].rdev;
addr = r10_bio->devs[1].addr + sect;
@@ -2520,7 +2520,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
addr,
s << 9,
pages[idx],
- REQ_OP_WRITE, 0, false);
+ REQ_OP_WRITE, false);
if (!ok) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement,
@@ -2644,7 +2644,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
&& (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
return -1;
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
if (rw == WRITE) {
@@ -2726,7 +2726,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
sect,
s<<9,
conf->tmppage,
- REQ_OP_READ, 0, false);
+ REQ_OP_READ, false);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
if (success)
@@ -5107,7 +5107,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
addr,
s << 9,
pages[idx],
- REQ_OP_READ, 0, false);
+ REQ_OP_READ, false);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
if (success)
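
raid1 and raid10 type their cached bio flags as blk_opf_t and the op as enum req_op, so the pieces OR back together without casts when cloning. A sketch of the extraction:

#include <linux/bio.h>

static blk_opf_t clone_opf_sketch(struct bio *bio)
{
	const enum req_op op = bio_op(bio);
	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
	const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;

	return op | do_sync | do_fua;
}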
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 83c184eddbda..6f2dd73128b0 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1788,7 +1788,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
mb = page_address(page);
mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
mb, PAGE_SIZE));
- if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
+ if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
REQ_SYNC | REQ_FUA, false)) {
__free_page(page);
return -EIO;
@@ -1898,7 +1898,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
sync_page_io(rdev, sh->sector, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+ sh->dev[disk_index].page, REQ_OP_WRITE,
false);
rdev_dec_pending(rdev, rdev->mddev);
rcu_read_lock();
@@ -1908,7 +1908,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,
atomic_inc(&rrdev->nr_pending);
rcu_read_unlock();
sync_page_io(rrdev, sh->sector, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+ sh->dev[disk_index].page, REQ_OP_WRITE,
false);
rdev_dec_pending(rrdev, rrdev->mddev);
rcu_read_lock();
@@ -2394,7 +2394,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
PAGE_SIZE));
kunmap_atomic(addr);
sync_page_io(log->rdev, write_pos, PAGE_SIZE,
- dev->page, REQ_OP_WRITE, 0, false);
+ dev->page, REQ_OP_WRITE, false);
write_pos = r5l_ring_add(log, write_pos,
BLOCK_SECTORS);
offset += sizeof(__le32) +
@@ -2406,7 +2406,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
mb, PAGE_SIZE));
sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
- REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
+ REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
sh->log_start = ctx->pos;
list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
atomic_inc(&log->stripe_in_journal_count);
@@ -2971,7 +2971,7 @@ static int r5l_load_log(struct r5l_log *log)
if (!page)
return -ENOMEM;
- if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
+ if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
ret = -EIO;
goto ioerr;
}
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 0a2e4806b1ec..98988cb26295 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -897,7 +897,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
__func__, indent, "", rdev->bdev,
(unsigned long long)sector);
if (!sync_page_io(rdev, sector, block_size, page2,
- REQ_OP_READ, 0, false)) {
+ REQ_OP_READ, false)) {
md_error(mddev, rdev);
pr_debug("%s:%*s read failed!\n", __func__,
indent, "");
@@ -919,7 +919,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
(unsigned long long)(ppl_sector + i));
if (!sync_page_io(log->rdev,
ppl_sector - log->rdev->data_offset + i,
- block_size, page2, REQ_OP_READ, 0,
+ block_size, page2, REQ_OP_READ,
false)) {
pr_debug("%s:%*s read failed!\n", __func__,
indent, "");
@@ -946,7 +946,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
(unsigned long long)parity_sector,
parity_rdev->bdev);
if (!sync_page_io(parity_rdev, parity_sector, block_size,
- page1, REQ_OP_WRITE, 0, false)) {
+ page1, REQ_OP_WRITE, false)) {
pr_debug("%s:%*s parity write error!\n", __func__,
indent, "");
md_error(mddev, parity_rdev);
@@ -998,7 +998,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
if (!sync_page_io(rdev, sector - rdev->data_offset,
- s, page, REQ_OP_READ, 0, false)) {
+ s, page, REQ_OP_READ, false)) {
md_error(mddev, rdev);
ret = -EIO;
goto out;
@@ -1062,7 +1062,7 @@ static int ppl_write_empty_header(struct ppl_log *log)
if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
- REQ_FUA, 0, false)) {
+ REQ_FUA, false)) {
md_error(rdev->mddev, rdev);
ret = -EIO;
}
@@ -1100,7 +1100,7 @@ static int ppl_load_distributed(struct ppl_log *log)
if (!sync_page_io(rdev,
rdev->ppl.sector - rdev->data_offset +
pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
- 0, false)) {
+ false)) {
md_error(mddev, rdev);
ret = -EIO;
/* if not able to read - don't recover any PPL */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c8539d0e12dd..5cabdbbac48b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1082,7 +1082,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
should_defer = conf->batch_bio_dispatch && conf->group_cnt;
for (i = disks; i--; ) {
- int op, op_flags = 0;
+ enum req_op op;
+ blk_opf_t op_flags = 0;
int replace_only = 0;
struct bio *bi, *rbi;
struct md_rdev *rdev, *rrdev = NULL;
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 3993bdd4b519..ed9a683b3ca8 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2129,7 +2129,7 @@ static int msb_init_disk(struct memstick_dev *card)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(msb->disk);
+ put_disk(msb->disk);
out_free_tag_set:
blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
@@ -2187,7 +2187,6 @@ static void msb_remove(struct memstick_dev *card)
/* Remove the disk */
del_gendisk(msb->disk);
- blk_cleanup_queue(msb->queue);
blk_mq_free_tag_set(&msb->tag_set);
msb->queue = NULL;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 725ba74ded30..61cf75d4a01e 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1209,7 +1209,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(msb->disk);
+ put_disk(msb->disk);
out_free_tag_set:
blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
@@ -1294,7 +1294,6 @@ static void mspro_block_remove(struct memstick_dev *card)
del_gendisk(msb->disk);
dev_dbg(&card->dev, "mspro block remove\n");
- blk_cleanup_queue(msb->queue);
blk_mq_free_tag_set(&msb->tag_set);
msb->queue = NULL;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index f4a1281658db..e08e22f0a7c5 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2505,11 +2505,11 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
dev_set_drvdata(&card->dev, md);
ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
if (ret)
- goto err_cleanup_queue;
+ goto err_put_disk;
return md;
- err_cleanup_queue:
- blk_cleanup_queue(md->disk->queue);
+ err_put_disk:
+ put_disk(md->disk);
blk_mq_free_tag_set(&md->queue.tag_set);
err_kfree:
kfree(md);
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index fa5324ceeebe..fefaa901b50f 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -116,8 +116,7 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
}
}
-static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
- bool reserved)
+static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
@@ -494,7 +493,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
if (blk_queue_quiesced(q))
blk_mq_unquiesce_queue(q);
- blk_cleanup_queue(q);
blk_mq_free_tag_set(&mq->tag_set);
/*
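
The mmc hunk reflects a blk-mq API change from this series: the ->timeout() hook loses its bool reserved parameter (the same change hits apple.c and fc.c below); the reserved status now travels on the request itself as part of this pull's reserved-tags rework. A sketch of a handler under the new prototype (my_timed_out/my_mq_ops are illustrative):

#include <linux/blk-mq.h>

static enum blk_eh_timer_return my_timed_out(struct request *req)
{
	/* Recovery is driver policy; here we simply rearm the timer. */
	return BLK_EH_RESET_TIMER;
}

static const struct blk_mq_ops my_mq_ops = {
	.timeout	= my_timed_out,
};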
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f73172111465..60b222799871 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -29,7 +29,7 @@ static void blktrans_dev_release(struct kref *kref)
struct mtd_blktrans_dev *dev =
container_of(kref, struct mtd_blktrans_dev, ref);
- blk_cleanup_disk(dev->disk);
+ put_disk(dev->disk);
blk_mq_free_tag_set(dev->tag_set);
kfree(dev->tag_set);
list_del(&dev->list);
@@ -398,7 +398,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(new->disk);
+ put_disk(new->disk);
out_free_tag_set:
blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index a78fdf3b30f7..4cf67a2a0d04 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -467,7 +467,7 @@ out_destroy_wq:
out_remove_minor:
idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
- blk_cleanup_disk(dev->gd);
+ put_disk(dev->gd);
out_free_tags:
blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
@@ -486,7 +486,7 @@ static void ubiblock_cleanup(struct ubiblock *dev)
destroy_workqueue(dev->wq);
/* Finally destroy the blk queue */
dev_info(disk_to_dev(dev->gd), "released");
- blk_cleanup_disk(dev->gd);
+ put_disk(dev->gd);
blk_mq_free_tag_set(&dev->tag_set);
idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
}
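
All the blk_cleanup_disk()/blk_cleanup_queue() removals in the memstick, mmc, mtd and ubi hunks come from the simplified teardown model in this pull: del_gendisk() now shuts the queue down, and put_disk() drops the final reference, taking the queue with it. The old two-step dance reduces to:

#include <linux/blkdev.h>

static void remove_disk_sketch(struct gendisk *disk)
{
	del_gendisk(disk);	/* quiesce and unregister */
	put_disk(disk);		/* final ref; queue freed with the disk */
}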
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 9613e54c7a67..0297b7882e33 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1422,7 +1422,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off,
- unsigned int op, sector_t sector)
+ enum req_op op, sector_t sector)
{
int ret;
@@ -1483,7 +1483,7 @@ static void btt_submit_bio(struct bio *bio)
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, enum req_op op)
{
struct btt *btt = bdev->bd_disk->private_data;
int rc;
@@ -1548,14 +1548,14 @@ static int btt_blk_init(struct btt *btt)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(btt->btt_disk);
+ put_disk(btt->btt_disk);
return rc;
}
static void btt_blk_cleanup(struct btt *btt)
{
del_gendisk(btt->btt_disk);
- blk_cleanup_disk(btt->btt_disk);
+ put_disk(btt->btt_disk);
}
/**
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 629d10fcf53b..f36efcc11f67 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -239,7 +239,7 @@ static void pmem_submit_bio(struct bio *bio)
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, enum req_op op)
{
struct pmem_device *pmem = bdev->bd_disk->private_data;
blk_status_t rc;
@@ -450,7 +450,7 @@ static void pmem_release_disk(void *__pmem)
put_dax(pmem->dax_dev);
del_gendisk(pmem->disk);
- blk_cleanup_disk(pmem->disk);
+ put_disk(pmem->disk);
}
static int pmem_attach_disk(struct device *dev,
@@ -596,7 +596,7 @@ out_cleanup_dax:
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
out:
- blk_cleanup_disk(pmem->disk);
+ put_disk(pmem->disk);
return rc;
}
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index d702d7d60235..5c352d5d8ee6 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -862,8 +862,7 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
}
}
-static enum blk_eh_timer_return apple_nvme_timeout(struct request *req,
- bool reserved)
+static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
{
struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct apple_nvme_queue *q = iod->q;
@@ -1502,7 +1501,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
if (!blk_get_queue(anv->ctrl.admin_q)) {
nvme_start_admin_queue(&anv->ctrl);
- blk_cleanup_queue(anv->ctrl.admin_q);
+ blk_mq_destroy_queue(anv->ctrl.admin_q);
anv->ctrl.admin_q = NULL;
ret = -ENODEV;
goto put_dev;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 6a12a906a11e..2533b88e66d5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -418,7 +418,7 @@ blk_status_t nvme_host_path_error(struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);
-bool nvme_cancel_request(struct request *req, void *data, bool reserved)
+bool nvme_cancel_request(struct request *req, void *data)
{
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
"Cancelling I/O %d", req->tag);
@@ -4061,7 +4061,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
mutex_unlock(&ctrl->subsys->lock);
nvme_put_ns_head(ns->head);
out_cleanup_disk:
- blk_cleanup_disk(disk);
+ put_disk(disk);
out_free_ns:
kfree(ns);
out_free_id:
@@ -4103,7 +4103,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
- blk_cleanup_queue(ns->queue);
down_write(&ns->ctrl->namespaces_rwsem);
list_del_init(&ns->list);
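
nvme_cancel_request() reflects the same cleanup on the iterator side: busy_tag_iter_fn callbacks are now bool (*)(struct request *, void *), with the return value still meaning "continue iterating". A sketch of such a callback and its caller (foo_* names hypothetical; driver-specific error-status setup before completion is omitted):

    static bool foo_cancel_request(struct request *req, void *data)
    {
            if (blk_mq_request_started(req))
                    blk_mq_complete_request(req);
            return true;    /* keep walking the tag set */
    }

    /* e.g. during controller teardown: */
    blk_mq_tagset_busy_iter(&foo->tag_set, foo_cancel_request, foo);
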
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 3c778bb0c294..9987797620b6 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2392,7 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
unsigned long flags;
if (ctrl->ctrl.tagset) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
blk_mq_free_tag_set(&ctrl->tag_set);
}
@@ -2402,8 +2402,8 @@ nvme_fc_ctrl_free(struct kref *ref)
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
nvme_start_admin_queue(&ctrl->ctrl);
- blk_cleanup_queue(ctrl->ctrl.admin_q);
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
kfree(ctrl->queues);
@@ -2456,8 +2456,7 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
* status. The done path will return the io request back to the block
* layer with an error status.
*/
-static bool
-nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+static bool nvme_fc_terminate_exchange(struct request *req, void *data)
{
struct nvme_ctrl *nctrl = data;
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
@@ -2565,8 +2564,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
nvme_reset_ctrl(&ctrl->ctrl);
}
-static enum blk_eh_timer_return
-nvme_fc_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
@@ -2953,7 +2951,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
out_delete_hw_queues:
nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
blk_mq_free_tag_set(&ctrl->tag_set);
nvme_fc_free_io_queues(ctrl);
@@ -3642,9 +3640,9 @@ fail_ctrl:
return ERR_PTR(-EIO);
out_cleanup_admin_q:
- blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
out_free_admin_tag_set:
blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index a2e89db1cd63..27614bee7380 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -68,7 +68,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, void **metap, unsigned timeout, bool vec,
- unsigned int rq_flags, blk_mq_req_flags_t blk_flags)
+ blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
@@ -407,7 +407,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_uring_data d;
struct nvme_command c;
struct request *req;
- unsigned int rq_flags = 0;
+ blk_opf_t rq_flags = 0;
blk_mq_req_flags_t blk_flags = 0;
void *meta = NULL;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d3e2440d8abb..f26640ccb955 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -830,7 +830,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
- ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
+ ns->head->disk->nr_zones = ns->disk->nr_zones;
#endif
}
@@ -853,7 +853,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
- blk_cleanup_disk(head->disk);
+ put_disk(head->disk);
}
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 5558f8812157..7e0a925bf3be 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -698,7 +698,7 @@ static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
}
blk_status_t nvme_host_path_error(struct request *req);
-bool nvme_cancel_request(struct request *req, void *data, bool reserved);
+bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
@@ -734,7 +734,7 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
-static inline unsigned int nvme_req_op(struct nvme_command *cmd)
+static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}
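
nvme_req_op() returning enum req_op illustrates the type split this series introduces: blk_opf_t is the full __bitwise operation-plus-flags word, enum req_op is the operation alone, and req_op() extracts the latter from a request. A small sketch (foo_* name hypothetical):

    /* blk_opf_t carries REQ_OP_* plus REQ_* flags; enum req_op is just
     * the operation. req_op() masks the flags off a request. */
    static bool foo_is_fua_write(struct request *rq)
    {
            return req_op(rq) == REQ_OP_WRITE &&
                    (rq->cmd_flags & REQ_FUA);
    }
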
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 73d9fcba3b1c..7e7d4802ac6b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1344,7 +1344,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
"Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
}
-static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
+static enum blk_eh_timer_return nvme_timeout(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = iod->nvmeq;
@@ -1760,7 +1760,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
* queue to flush these to completion.
*/
nvme_start_admin_queue(&dev->ctrl);
- blk_cleanup_queue(dev->ctrl.admin_q);
+ blk_mq_destroy_queue(dev->ctrl.admin_q);
blk_mq_free_tag_set(&dev->admin_tagset);
}
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 46c2dcf72f7e..4665aebd944d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -840,8 +840,8 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
if (remove) {
- blk_cleanup_queue(ctrl->ctrl.admin_q);
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
}
if (ctrl->async_event_sqe.data) {
@@ -935,10 +935,10 @@ out_stop_queue:
nvme_cancel_admin_tagset(&ctrl->ctrl);
out_cleanup_queue:
if (new)
- blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
if (new)
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
if (new)
blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
@@ -957,7 +957,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
if (remove) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
blk_mq_free_tag_set(ctrl->ctrl.tagset);
}
nvme_rdma_free_io_queues(ctrl);
@@ -1012,7 +1012,7 @@ out_wait_freeze_timed_out:
out_cleanup_connect_q:
nvme_cancel_tagset(&ctrl->ctrl);
if (new)
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
if (new)
blk_mq_free_tag_set(ctrl->ctrl.tagset);
@@ -2021,8 +2021,7 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
nvmf_complete_timed_out_request(rq);
}
-static enum blk_eh_timer_return
-nvme_rdma_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7a9e6ffa2342..b95ee85053e3 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1884,7 +1884,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_io_queues(ctrl);
if (remove) {
- blk_cleanup_queue(ctrl->connect_q);
+ blk_mq_destroy_queue(ctrl->connect_q);
blk_mq_free_tag_set(ctrl->tagset);
}
nvme_tcp_free_io_queues(ctrl);
@@ -1939,7 +1939,7 @@ out_wait_freeze_timed_out:
out_cleanup_connect_q:
nvme_cancel_tagset(ctrl);
if (new)
- blk_cleanup_queue(ctrl->connect_q);
+ blk_mq_destroy_queue(ctrl->connect_q);
out_free_tag_set:
if (new)
blk_mq_free_tag_set(ctrl->tagset);
@@ -1952,8 +1952,8 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_queue(ctrl, 0);
if (remove) {
- blk_cleanup_queue(ctrl->admin_q);
- blk_cleanup_queue(ctrl->fabrics_q);
+ blk_mq_destroy_queue(ctrl->admin_q);
+ blk_mq_destroy_queue(ctrl->fabrics_q);
blk_mq_free_tag_set(ctrl->admin_tagset);
}
nvme_tcp_free_admin_queue(ctrl);
@@ -2011,10 +2011,10 @@ out_stop_queue:
nvme_cancel_admin_tagset(ctrl);
out_cleanup_queue:
if (new)
- blk_cleanup_queue(ctrl->admin_q);
+ blk_mq_destroy_queue(ctrl->admin_q);
out_cleanup_fabrics_q:
if (new)
- blk_cleanup_queue(ctrl->fabrics_q);
+ blk_mq_destroy_queue(ctrl->fabrics_q);
out_free_tagset:
if (new)
blk_mq_free_tag_set(ctrl->admin_tagset);
@@ -2323,8 +2323,7 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
nvmf_complete_timed_out_request(rq);
}
-static enum blk_eh_timer_return
-nvme_tcp_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
{
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 9f81beb4df4e..12316ab51bda 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -109,10 +109,10 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
goto free_data;
}
- blk_queue_set_zoned(ns->disk, BLK_ZONED_HM);
+ disk_set_zoned(ns->disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1);
- blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1);
+ disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
+ disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
free_data:
kfree(id);
return status;
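
The zns.c hunk shows the zoned-device limits migrating from the request queue to the gendisk. A sketch of the new-style setup for a host-managed device (the numeric limits are hypothetical):

    disk_set_zoned(disk, BLK_ZONED_HM);
    blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, disk->queue);
    /* open/active zone limits now live on the gendisk */
    disk_set_max_open_zones(disk, 128);
    disk_set_max_active_zones(disk, 128);
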
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 27a72504d31c..2dc1c1035626 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -246,7 +246,8 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
struct scatterlist *sg;
struct blk_plug plug;
sector_t sector;
- int op, i, rc;
+ blk_opf_t opf;
+ int i, rc;
struct sg_mapping_iter prot_miter;
unsigned int iter_flags;
unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
@@ -260,26 +261,26 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
if (req->cmd->rw.opcode == nvme_cmd_write) {
- op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+ opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
- op |= REQ_FUA;
+ opf |= REQ_FUA;
iter_flags = SG_MITER_TO_SG;
} else {
- op = REQ_OP_READ;
+ opf = REQ_OP_READ;
iter_flags = SG_MITER_FROM_SG;
}
if (is_pci_p2pdma_page(sg_page(req->sg)))
- op |= REQ_NOMERGE;
+ opf |= REQ_NOMERGE;
sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
bio_init(bio, req->ns->bdev, req->inline_bvec,
- ARRAY_SIZE(req->inline_bvec), op);
+ ARRAY_SIZE(req->inline_bvec), opf);
} else {
- bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op,
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
GFP_KERNEL);
}
bio->bi_iter.bi_sector = sector;
@@ -306,7 +307,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
- op, GFP_KERNEL);
+ opf, GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
bio_chain(bio, prev);
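
Using blk_opf_t for the assembled operation word, as nvmet_bdev_execute_rw() now does, keeps the op/flags combination type-checked all the way into bio_alloc(). A sketch of the same pattern in isolation (foo_* names hypothetical):

    static struct bio *foo_alloc_write_bio(struct block_device *bdev,
                                           unsigned short nr_vecs, bool fua)
    {
            blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

            if (fua)
                    opf |= REQ_FUA;
            return bio_alloc(bdev, nr_vecs, opf, GFP_KERNEL);
    }
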
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 59024af2da2e..0f5c77e22a0a 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -266,8 +266,8 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
return;
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
- blk_cleanup_queue(ctrl->ctrl.admin_q);
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
}
@@ -283,7 +283,7 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
mutex_unlock(&nvme_loop_ctrl_mutex);
if (nctrl->tagset) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
blk_mq_free_tag_set(&ctrl->tag_set);
}
kfree(ctrl->queues);
@@ -410,9 +410,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
out_cleanup_queue:
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
- blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
@@ -554,7 +554,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
return 0;
out_cleanup_connect_q:
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
out_free_tagset:
blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 82b61acf7a72..c7ef69f29fe4 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -57,10 +57,10 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
* zones, reject the device. Otherwise, use report zones to detect if
* the device has conventional zones.
*/
- if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
+ if (ns->bdev->bd_disk->conv_zones_bitmap)
return false;
- ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
+ ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
validate_conv_zones_cb, NULL);
if (ret < 0)
return false;
@@ -241,7 +241,7 @@ static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
- return blkdev_nr_zones(req->ns->bdev->bd_disk) -
+ return bdev_nr_zones(req->ns->bdev) -
(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
}
@@ -308,7 +308,7 @@ void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
queue_work(zbd_wq, &req->z.zmgmt_work);
}
-static inline enum req_opf zsa_req_op(u8 zsa)
+static inline enum req_op zsa_req_op(u8 zsa)
{
switch (zsa) {
case NVME_ZONE_OPEN:
@@ -386,7 +386,7 @@ static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
struct block_device *bdev = req->ns->bdev;
- unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
+ unsigned int nr_zones = bdev_nr_zones(bdev);
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = NULL;
sector_t sector = 0;
@@ -413,8 +413,8 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
ret = 0;
}
- while (sector < get_capacity(bdev->bd_disk)) {
- if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
+ while (sector < bdev_nr_sectors(bdev)) {
+ if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) {
bio = blk_next_bio(bio, bdev, 0,
zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
GFP_KERNEL);
@@ -422,7 +422,7 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
/* This may take a while, so be nice to others */
cond_resched();
}
- sector += blk_queue_zone_sectors(q);
+ sector += bdev_zone_sectors(bdev);
}
if (bio) {
@@ -465,7 +465,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
- enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
+ enum req_op op = zsa_req_op(req->cmd->zms.zsa);
struct block_device *bdev = req->ns->bdev;
sector_t zone_sectors = bdev_zone_sectors(bdev);
u16 status = NVME_SC_SUCCESS;
@@ -525,7 +525,7 @@ static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
- const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+ const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
u16 status = NVME_SC_SUCCESS;
unsigned int total_len = 0;
struct scatterlist *sg;
@@ -556,9 +556,9 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (nvmet_use_inline_bvec(req)) {
bio = &req->z.inline_bio;
bio_init(bio, req->ns->bdev, req->inline_bvec,
- ARRAY_SIZE(req->inline_bvec), op);
+ ARRAY_SIZE(req->inline_bvec), opf);
} else {
- bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
+ bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
}
bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
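
The blkdev_nr_zones(disk) -> bdev_nr_zones(bdev) switch above means zone geometry queries now take the block_device directly. A sketch of a full-device report using the new helper (foo_* name hypothetical):

    /* report every zone of the device; cb/data as for
     * blkdev_report_zones() */
    static int foo_report_all_zones(struct block_device *bdev,
                                    report_zones_cb cb, void *data)
    {
            return blkdev_report_zones(bdev, 0, bdev_nr_zones(bdev),
                                       cb, data);
    }
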
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ba6d78789660..4df8bf6505fc 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3145,7 +3145,7 @@ out:
* BLK_EH_DONE if the request is handled or terminated
* by the driver.
*/
-enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
+enum blk_eh_timer_return dasd_times_out(struct request *req)
{
struct dasd_block *block = req->q->queuedata;
struct dasd_device *device;
@@ -3280,7 +3280,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
static void dasd_free_queue(struct dasd_block *block)
{
if (block->request_queue) {
- blk_cleanup_queue(block->request_queue);
+ blk_mq_destroy_queue(block->request_queue);
blk_mq_free_tag_set(&block->tag_set);
block->request_queue = NULL;
}
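
dasd_free_queue() shows the replacement for blk_cleanup_queue() on queues that never gained a gendisk: blk_mq_destroy_queue() tears the queue down, after which the tag set can be freed. A sketch of the pairing (foo_* names hypothetical):

    static void foo_destroy_admin_queue(struct foo_ctrl *ctrl)
    {
            /* queue has no gendisk, so no del_gendisk()/put_disk() */
            blk_mq_destroy_queue(ctrl->admin_q);
            blk_mq_free_tag_set(&ctrl->admin_tag_set);
    }
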
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index a7a33ebf4bbe..5a83f0a39901 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -41,8 +41,8 @@ int dasd_gendisk_alloc(struct dasd_block *block)
if (base->devindex >= DASD_PER_MAJOR)
return -EBUSY;
- gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE,
- &dasd_bio_compl_lkclass);
+ gdp = blk_mq_alloc_disk_for_queue(block->request_queue,
+ &dasd_bio_compl_lkclass);
if (!gdp)
return -ENOMEM;
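
blk_mq_alloc_disk_for_queue() is the new helper for drivers such as dasd, sd and sr that receive a request queue from the midlayer and attach a gendisk to it later, replacing the open-coded __alloc_disk_node() calls. A sketch (foo_* names hypothetical):

    static struct lock_class_key foo_bio_compl_lkclass;

    gdp = blk_mq_alloc_disk_for_queue(queue, &foo_bio_compl_lkclass);
    if (!gdp)
            return -ENOMEM;
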
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 83b918b84b4a..333a399f754e 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -795,7 +795,7 @@ void dasd_free_device(struct dasd_device *);
struct dasd_block *dasd_alloc_block(void);
void dasd_free_block(struct dasd_block *);
-enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved);
+enum blk_eh_timer_return dasd_times_out(struct request *req);
void dasd_enable_device(struct dasd_device *);
void dasd_set_target_state(struct dasd_device *, int);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 8d0d0eaa3059..4d8d1759775a 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -414,7 +414,7 @@ removeseg:
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
- blk_cleanup_disk(dev_info->gd);
+ put_disk(dev_info->gd);
up_write(&dcssblk_devices_sem);
if (device_remove_file_self(dev, attr)) {
@@ -712,7 +712,7 @@ out_dax:
put_dax(dev_info->dax_dev);
put_dev:
list_del(&dev_info->lh);
- blk_cleanup_disk(dev_info->gd);
+ put_disk(dev_info->gd);
list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
segment_unload(seg_info->segment_name);
}
@@ -722,7 +722,7 @@ put_dev:
dev_list_del:
list_del(&dev_info->lh);
release_gd:
- blk_cleanup_disk(dev_info->gd);
+ put_disk(dev_info->gd);
up_write(&dcssblk_devices_sem);
seg_list_del:
if (dev_info == NULL)
@@ -790,7 +790,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
- blk_cleanup_disk(dev_info->gd);
+ put_disk(dev_info->gd);
/* unload all related segments */
list_for_each_entry(entry, &dev_info->seg_list, lh)
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 2a9c0ddcade5..0c1df1d5f1ac 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -501,7 +501,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(bdev->gendisk);
+ put_disk(bdev->gendisk);
out_tag:
blk_mq_free_tag_set(&bdev->tag_set);
out:
@@ -512,7 +512,7 @@ out:
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
del_gendisk(bdev->gendisk);
- blk_cleanup_disk(bdev->gendisk);
+ put_disk(bdev->gendisk);
blk_mq_free_tag_set(&bdev->tag_set);
}
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 940a6deab38f..bd99c5492b7d 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -272,7 +272,7 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->entries = qsize;
}
-static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data, bool rsvd)
+static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data)
{
int *active = data;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 9c27bc37e5de..5ba5c18b77b4 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -633,7 +633,7 @@ struct fib_count_data {
int krlcnt;
};
-static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data, bool reserved)
+static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data)
{
struct fib_count_data *fib_count = data;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 1d9be771f3ee..610a51538f03 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -127,7 +127,7 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
u8 cdb[MAX_COMMAND_SIZE];
- int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
/* Prepare the command. */
@@ -157,7 +157,7 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
u8 cdb[MAX_COMMAND_SIZE];
unsigned char stpg_data[8];
int stpg_len = 8;
- int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
/* Prepare the data buffer */
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index bd28ec6cfb72..2e21ab447873 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -239,7 +239,7 @@ static int send_trespass_cmd(struct scsi_device *sdev,
unsigned char cdb[MAX_COMMAND_SIZE];
int err, res = SCSI_DH_OK, len;
struct scsi_sense_hdr sshdr;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
if (csdev->flags & CLARIION_SHORT_TRESPASS) {
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 4a3f7831a2d6..0d2cfa60aa06 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -83,7 +83,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
unsigned char cmd[6] = { TEST_UNIT_READY };
struct scsi_sense_hdr sshdr;
int ret = SCSI_DH_OK, res;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
retry:
@@ -121,7 +121,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
struct scsi_device *sdev = h->sdev;
int res, rc = SCSI_DH_OK;
int retry_cnt = HP_SW_RETRIES;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
retry:
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 66652ab409cc..bf8754741f85 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -536,7 +536,7 @@ static void send_mode_select(struct work_struct *work)
unsigned char cdb[MAX_COMMAND_SIZE];
struct scsi_sense_hdr sshdr;
unsigned int data_size;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
spin_lock(&ctlr->ms_lock);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 3d64877bda8d..77a4d9f8aa83 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1350,8 +1350,7 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
return wq_work_done;
}
-static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
- bool reserved)
+static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
{
const int tag = scsi_cmd_to_rq(sc)->tag;
struct fnic *fnic = data;
@@ -1548,8 +1547,7 @@ struct fnic_rport_abort_io_iter_data {
int term_cnt;
};
-static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
- bool reserved)
+static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_rport_abort_io_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
@@ -2003,8 +2001,7 @@ struct fnic_pending_aborts_iter_data {
int ret;
};
-static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
- void *data, bool reserved)
+static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
@@ -2019,8 +2016,6 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
if (sc == iter_data->lr_sc || sc->device != lun_dev)
return true;
- if (reserved)
- return true;
io_lock = fnic_io_lock_tag(fnic, abt_tag);
spin_lock_irqsave(io_lock, flags);
@@ -2670,8 +2665,7 @@ call_fc_exch_mgr_reset:
}
-static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
- bool reserved)
+static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 8352f90d997d..315c7ac730e9 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -566,8 +566,7 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
-static bool scsi_host_check_in_flight(struct request *rq, void *data,
- bool reserved)
+static bool scsi_host_check_in_flight(struct request *rq, void *data)
{
int *count = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -662,7 +661,7 @@ void scsi_flush_work(struct Scsi_Host *shost)
}
EXPORT_SYMBOL_GPL(scsi_flush_work);
-static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
+static bool complete_all_cmds_iter(struct request *rq, void *data)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
enum scsi_host_status status = *(enum scsi_host_status *)data;
@@ -693,17 +692,16 @@ void scsi_host_complete_all_commands(struct Scsi_Host *shost,
EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
struct scsi_host_busy_iter_data {
- bool (*fn)(struct scsi_cmnd *, void *, bool);
+ bool (*fn)(struct scsi_cmnd *, void *);
void *priv;
};
-static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
- bool reserved)
+static bool __scsi_host_busy_iter_fn(struct request *req, void *priv)
{
struct scsi_host_busy_iter_data *iter_data = priv;
struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);
- return iter_data->fn(sc, iter_data->priv, reserved);
+ return iter_data->fn(sc, iter_data->priv);
}
/**
@@ -716,7 +714,7 @@ static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
* it has to be provided by the caller
**/
void scsi_host_busy_iter(struct Scsi_Host *shost,
- bool (*fn)(struct scsi_cmnd *, void *, bool),
+ bool (*fn)(struct scsi_cmnd *, void *),
void *priv)
{
struct scsi_host_busy_iter_data iter_data = {
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index d8c195b7ca57..59a18769a4fe 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -381,14 +381,12 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
* mpi3mr_print_scmd - print individual SCSI command
* @rq: Block request
* @data: Adapter instance reference
- * @reserved: N/A. Currently not used
*
* Print the SCSI command details if it is in LLD scope.
*
* Return: true always.
*/
-static bool mpi3mr_print_scmd(struct request *rq,
- void *data, bool reserved)
+static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -412,7 +410,6 @@ out:
* mpi3mr_flush_scmd - Flush individual SCSI command
* @rq: Block request
* @data: Adapter instance reference
- * @reserved: N/A. Currently not used
*
* Return the SCSI command to the upper layers if it is in LLD
* scope.
@@ -420,8 +417,7 @@ out:
* Return: true always.
*/
-static bool mpi3mr_flush_scmd(struct request *rq,
- void *data, bool reserved)
+static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -451,7 +447,6 @@ out:
* mpi3mr_count_dev_pending - Count commands pending for a lun
* @rq: Block request
* @data: SCSI device reference
- * @reserved: Unused
*
* This is an iterator function called for each SCSI command in
* a host and if the command is pending in the LLD for the
@@ -461,8 +456,7 @@ out:
* Return: true always.
*/
-static bool mpi3mr_count_dev_pending(struct request *rq,
- void *data, bool reserved)
+static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
struct scsi_device *sdev = (struct scsi_device *)data;
struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
@@ -485,7 +479,6 @@ out:
* mpi3mr_count_tgt_pending - Count commands pending for target
* @rq: Block request
* @data: SCSI target reference
- * @reserved: Unused
*
* This is an iterator function called for each SCSI command in
* a host and if the command is pending in the LLD for the
@@ -495,8 +488,7 @@ out:
* Return: true always.
*/
-static bool mpi3mr_count_tgt_pending(struct request *rq,
- void *data, bool reserved)
+static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
struct scsi_target *starget = (struct scsi_target *)data;
struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 49ef864df581..b776cefc7cda 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -139,7 +139,7 @@ static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd)
*
* Note: this function must be called only for a command that has timed out.
* Because the block layer marks a request as complete before it calls
- * scsi_times_out(), a .scsi_done() call from the LLD for a command that has
+ * scsi_timeout(), a .scsi_done() call from the LLD for a command that has
* timed out do not have any effect. Hence it is safe to call
* scsi_finish_command() from this function.
*/
@@ -316,7 +316,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
}
/**
- * scsi_times_out - Timeout function for normal scsi commands.
+ * scsi_timeout - Timeout function for normal scsi commands.
* @req: request that is timing out.
*
* Notes:
@@ -325,7 +325,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
* normal completion function determines that the timer has already
* fired, then it mustn't do anything.
*/
-enum blk_eh_timer_return scsi_times_out(struct request *req)
+enum blk_eh_timer_return scsi_timeout(struct request *req)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
enum blk_eh_timer_return rtn = BLK_EH_DONE;
@@ -1779,7 +1779,7 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
* scsi_noretry_cmd - determine if command should be failed fast
* @scmd: SCSI cmd to examine.
*/
-int scsi_noretry_cmd(struct scsi_cmnd *scmd)
+bool scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
struct request *req = scsi_cmd_to_rq(scmd);
@@ -1789,19 +1789,19 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
case DID_TIME_OUT:
goto check_type;
case DID_BUS_BUSY:
- return req->cmd_flags & REQ_FAILFAST_TRANSPORT;
+ return !!(req->cmd_flags & REQ_FAILFAST_TRANSPORT);
case DID_PARITY:
- return req->cmd_flags & REQ_FAILFAST_DEV;
+ return !!(req->cmd_flags & REQ_FAILFAST_DEV);
case DID_ERROR:
if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
- return 0;
+ return false;
fallthrough;
case DID_SOFT_ERROR:
- return req->cmd_flags & REQ_FAILFAST_DRIVER;
+ return !!(req->cmd_flags & REQ_FAILFAST_DRIVER);
}
if (!scsi_status_is_check_condition(scmd->result))
- return 0;
+ return false;
check_type:
/*
@@ -1809,9 +1809,9 @@ check_type:
* the check condition was retryable.
*/
if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req))
- return 1;
+ return true;
- return 0;
+ return false;
}
/**
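
With req->cmd_flags now a __bitwise blk_opf_t, boolean predicates built from flag masks use !! so sparse does not warn about the restricted type degrading to a plain integer, which is presumably why the scsi_noretry_cmd() conversion above writes them that way. A sketch of the pattern (foo_* name hypothetical):

    /* !! collapses the masked blk_opf_t value to a plain bool */
    static bool foo_failfast_transport(struct request *req)
    {
            return !!(req->cmd_flags & REQ_FAILFAST_TRANSPORT);
    }
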
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6ffc9e4258a8..17a617db9ae0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -163,7 +163,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
* Requeue this command. It will go before all other commands
* that are already in the queue. Schedule requeue work under
* lock such that the kblockd_schedule_work() call happens
- * before blk_cleanup_queue() finishes.
+ * before blk_mq_destroy_queue() finishes.
*/
cmd->result = 0;
@@ -209,8 +209,8 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, struct scsi_sense_hdr *sshdr,
- int timeout, int retries, u64 flags, req_flags_t rq_flags,
- int *resid)
+ int timeout, int retries, blk_opf_t flags,
+ req_flags_t rq_flags, int *resid)
{
struct request *req;
struct scsi_cmnd *scmd;
@@ -424,9 +424,9 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
* it and the queue. Mitigate by taking a reference to the
* queue and never touching the sdev again after we drop the
* host lock. Note: if __scsi_remove_device() invokes
- * blk_cleanup_queue() before the queue is run from this
+ * blk_mq_destroy_queue() before the queue is run from this
* function then blk_run_queue() will return immediately since
- * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
+ * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
*/
slq = sdev->request_queue;
if (!blk_get_queue(slq))
@@ -633,7 +633,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
*/
static unsigned int scsi_rq_err_bytes(const struct request *rq)
{
- unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
unsigned int bytes = 0;
struct bio *bio;
@@ -1125,12 +1125,12 @@ static void scsi_initialize_rq(struct request *rq)
cmd->retries = 0;
}
-struct request *scsi_alloc_request(struct request_queue *q,
- unsigned int op, blk_mq_req_flags_t flags)
+struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
+ blk_mq_req_flags_t flags)
{
struct request *rq;
- rq = blk_mq_alloc_request(q, op, flags);
+ rq = blk_mq_alloc_request(q, opf, flags);
if (!IS_ERR(rq))
scsi_initialize_rq(rq);
return rq;
@@ -1790,14 +1790,6 @@ out_put_budget:
return ret;
}
-static enum blk_eh_timer_return scsi_timeout(struct request *req,
- bool reserved)
-{
- if (reserved)
- return BLK_EH_RESET_TIMER;
- return scsi_times_out(req);
-}
-
static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 5c4786310a31..429663bd78ec 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -72,7 +72,7 @@ extern void scsi_exit_devinfo(void);
/* scsi_error.c */
extern void scmd_eh_abort_handler(struct work_struct *work);
-extern enum blk_eh_timer_return scsi_times_out(struct request *req);
+extern enum blk_eh_timer_return scsi_timeout(struct request *req);
extern int scsi_error_handler(void *host);
extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
extern void scsi_eh_wakeup(struct Scsi_Host *shost);
@@ -82,7 +82,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *done_q);
int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q);
-int scsi_noretry_cmd(struct scsi_cmnd *scmd);
+bool scsi_noretry_cmd(struct scsi_cmnd *scmd);
void scsi_eh_done(struct scsi_cmnd *scmd);
/* scsi_lib.c */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 43949798a2e4..aa70d9282161 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1475,7 +1475,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
scsi_device_set_state(sdev, SDEV_DEL);
mutex_unlock(&sdev->state_mutex);
- blk_cleanup_queue(sdev->request_queue);
+ blk_mq_destroy_queue(sdev->request_queue);
cancel_work_sync(&sdev->requeue_work);
if (sdev->host->hostt->slave_destroy)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a1a2ac09066f..eb02d939dd44 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2934,15 +2934,15 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
if (sdkp->device->type == TYPE_ZBC) {
/* Host-managed */
- blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
+ disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
} else {
sdkp->zoned = zoned;
if (sdkp->zoned == 1) {
/* Host-aware */
- blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
+ disk_set_zoned(sdkp->disk, BLK_ZONED_HA);
} else {
/* Regular disk or drive managed disk */
- blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE);
+ disk_set_zoned(sdkp->disk, BLK_ZONED_NONE);
}
}
@@ -3440,8 +3440,8 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
- gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
- &sd_bio_compl_lkclass);
+ gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
+ &sd_bio_compl_lkclass);
if (!gd)
goto out_free;
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 6acc4f406eb8..bd15624c6322 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -529,7 +529,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
unsigned int zno = blk_rq_zone_no(rq);
- enum req_opf op = req_op(rq);
+ enum req_op op = req_op(rq);
unsigned long flags;
/*
@@ -855,7 +855,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
if (sdkp->zone_info.zone_blocks == zone_blocks &&
sdkp->zone_info.nr_zones == nr_zones &&
- disk->queue->nr_zones == nr_zones)
+ disk->nr_zones == nr_zones)
goto unlock;
flags = memalloc_noio_save();
@@ -929,7 +929,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
/*
* This can happen for a host aware disk with partitions.
* The block device zone model was already cleared by
- * blk_queue_set_zoned(). Only free the scsi disk zone
+ * disk_set_zoned(). Only free the scsi disk zone
* information and exit early.
*/
sd_zbc_free_zone_info(sdkp);
@@ -950,10 +950,10 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
if (sdkp->zones_max_open == U32_MAX)
- blk_queue_max_open_zones(q, 0);
+ disk_set_max_open_zones(disk, 0);
else
- blk_queue_max_open_zones(q, sdkp->zones_max_open);
- blk_queue_max_active_zones(q, 0);
+ disk_set_max_open_zones(disk, sdkp->zones_max_open);
+ disk_set_max_active_zones(disk, 0);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
/*
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 32d3b8274f14..a278b739d0c5 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -624,8 +624,8 @@ static int sr_probe(struct device *dev)
if (!cd)
goto fail;
- disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE,
- &sr_bio_compl_lkclass);
+ disk = blk_mq_alloc_disk_for_queue(sdev->request_queue,
+ &sr_bio_compl_lkclass);
if (!disk)
goto fail_free;
mutex_init(&cd->lock);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 1ed9381751e6..30712a12b151 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -343,7 +343,7 @@ static void iblock_bio_done(struct bio *bio)
}
static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
- unsigned int opf)
+ blk_opf_t opf)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
struct bio *bio;
@@ -723,7 +723,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
- unsigned int opf;
+ blk_opf_t opf;
unsigned bio_cnt;
int i, rc;
struct sg_mapping_iter prot_miter;
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 3d367be71728..2b40174e93ac 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -9509,7 +9509,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufs_bsg_remove(hba);
ufshpb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
- blk_cleanup_queue(hba->tmf_queue);
+ blk_mq_destroy_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
scsi_remove_host(hba->host);
/* disable interrupts */
@@ -9805,7 +9805,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
return 0;
free_tmf_queue:
- blk_cleanup_queue(hba->tmf_queue);
+ blk_mq_destroy_queue(hba->tmf_queue);
free_tmf_tag_set:
blk_mq_free_tag_set(&hba->tmf_tag_set);
out_remove_scsi_host:
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index de2bb8401bc4..a1a7a1175a5a 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -433,9 +433,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return 0;
}
-static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
- int rgn_idx, enum req_opf dir,
- bool atomic)
+static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, int rgn_idx,
+ enum req_op op, bool atomic)
{
struct ufshpb_req *rq;
struct request *req;
@@ -446,7 +445,7 @@ static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
return NULL;
retry:
- req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
+ req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, op,
BLK_MQ_REQ_NOWAIT);
if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {