path: root/include/linux/blkdev.h
author    James Morris <james.l.morris@oracle.com>  2017-11-29 12:47:41 +1100
committer James Morris <james.l.morris@oracle.com>  2017-11-29 12:47:41 +1100
commit    cf40a76e7d5874bb25f4404eecc58a2e033af885 (patch)
tree      8fd81cbea03c87b3d41d7ae5b1d11eadd35d6ef5 /include/linux/blkdev.h
parent    ab5348c9c23cd253f5902980d2d8fe067dc24c82 (diff)
parent    4fbd8d194f06c8a3fd2af1ce560ddb31f7ec8323 (diff)
Merge tag 'v4.15-rc1' into next-seccomp
Linux 4.15-rc1
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 99
1 file changed, 48 insertions(+), 51 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 25f6a0cb27d3..8089ca17db9a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
@@ -134,7 +135,7 @@ typedef __u32 __bitwise req_flags_t;
struct request {
struct list_head queuelist;
union {
- struct call_single_data csd;
+ call_single_data_t csd;
u64 fifo_time;
};
@@ -266,6 +267,7 @@ struct blk_queue_ctx;
typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
@@ -408,6 +410,7 @@ struct request_queue {
request_fn_proc *request_fn;
make_request_fn *make_request_fn;
+ poll_q_fn *poll_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
softirq_done_fn *softirq_done_fn;
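
[Note: the new poll_q_fn hook gives both the legacy and blk-mq paths a common polling entry point. A minimal sketch of how a caller might dispatch through it; illustrative only, the real dispatch lives in blk_poll() in block/blk-core.c, and poll_for_completion() here is a hypothetical name.]

	static bool poll_for_completion(struct request_queue *q, blk_qc_t cookie)
	{
		/* polling must be enabled and the queue must provide a hook */
		if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags) || !q->poll_fn)
			return false;
		return q->poll_fn(q, cookie);	/* true if a completion was reaped */
	}
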
@@ -551,6 +554,7 @@ struct request_queue {
int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
struct blk_trace *blk_trace;
+ struct mutex blk_trace_mutex;
#endif
/*
* for flush operations
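
[Note: the added blk_trace_mutex serializes blk_trace setup and teardown on a queue. A hedged sketch of the locking pattern; the actual users are the blktrace ioctl paths in kernel/trace/blktrace.c, and toggle_blk_trace() is a hypothetical name.]

	static void toggle_blk_trace(struct request_queue *q)
	{
		mutex_lock(&q->blk_trace_mutex);
		/* ... attach or detach q->blk_trace here, races excluded ... */
		mutex_unlock(&q->blk_trace_mutex);
	}
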
@@ -568,7 +572,6 @@ struct request_queue {
#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
- int bsg_job_size;
struct bsg_class_device bsg_dev;
#endif
@@ -601,46 +604,42 @@ struct request_queue {
u64 write_hints[BLK_MAX_WRITE_HINTS];
};
-#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
-#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
-#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
-#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
-#define QUEUE_FLAG_DYING 5 /* queue being torn down */
-#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
-#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
-#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
-#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
-#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */
+#define QUEUE_FLAG_STOPPED 1 /* queue is stopped */
+#define QUEUE_FLAG_DYING 2 /* queue being torn down */
+#define QUEUE_FLAG_BYPASS 3 /* act as dumb FIFO queue */
+#define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */
+#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */
+#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
-#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE 17 /* supports secure erase */
-#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
-#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
-#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
-#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
-#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */
-#define QUEUE_FLAG_WC 23 /* Write back caching */
-#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
-#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
-#define QUEUE_FLAG_DAX 26 /* device supports DAX */
-#define QUEUE_FLAG_STATS 27 /* track rq completion times */
-#define QUEUE_FLAG_POLL_STATS 28 /* collecting stats for hybrid polling */
-#define QUEUE_FLAG_REGISTERED 29 /* queue has been registered to a disk */
-#define QUEUE_FLAG_SCSI_PASSTHROUGH 30 /* queue supports SCSI commands */
-#define QUEUE_FLAG_QUIESCED 31 /* queue has been quiesced */
+#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */
+#define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */
+#define QUEUE_FLAG_SECERASE 14 /* supports secure erase */
+#define QUEUE_FLAG_SAME_FORCE 15 /* force complete on same CPU */
+#define QUEUE_FLAG_DEAD 16 /* queue tear-down finished */
+#define QUEUE_FLAG_INIT_DONE 17 /* queue is initialized */
+#define QUEUE_FLAG_NO_SG_MERGE 18 /* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_POLL 19 /* IO polling enabled if set */
+#define QUEUE_FLAG_WC 20 /* Write back caching */
+#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */
+#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */
+#define QUEUE_FLAG_DAX 23 /* device supports DAX */
+#define QUEUE_FLAG_STATS 24 /* track rq completion times */
+#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */
+#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
+#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
+#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
+#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_POLL))
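
[Note: with QUEUE_FLAG_STACKABLE gone, the remaining flags are renumbered; each is a plain bit index into q->queue_flags. Illustrative use of the helpers this header already defines; example_flag_usage() itself is hypothetical.]

	static void example_flag_usage(struct request_queue *q)
	{
		if (blk_queue_nonrot(q))	/* test_bit() wrapper from this header */
			pr_debug("queue is backed by non-rotational media\n");
		queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, q);
	}
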
@@ -724,8 +723,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_stackable(q) \
- test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
@@ -737,6 +734,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
+#define blk_queue_preempt_only(q) \
+ test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+
+extern int blk_set_preempt_only(struct request_queue *q);
+extern void blk_clear_preempt_only(struct request_queue *q);
static inline bool blk_account_rq(struct request *rq)
{
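
[Note: blk_set_preempt_only() and blk_clear_preempt_only() bracket a window in which only REQ_PREEMPT requests are admitted; in this series SCSI quiesce is the intended user. A hedged sketch of the pattern, with quiesce_window() as a hypothetical name.]

	static void quiesce_window(struct request_queue *q)
	{
		blk_set_preempt_only(q);	/* returns the previous flag state */
		/* ... only BLK_MQ_REQ_PREEMPT allocations are admitted here ... */
		blk_clear_preempt_only(q);
	}
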
@@ -924,24 +926,17 @@ static inline void rq_flush_dcache_pages(struct request *rq)
}
#endif
-#ifdef CONFIG_PRINTK
-#define vfs_msg(sb, level, fmt, ...) \
- __vfs_msg(sb, level, fmt, ##__VA_ARGS__)
-#else
-#define vfs_msg(sb, level, fmt, ...) \
-do { \
- no_printk(fmt, ##__VA_ARGS__); \
- __vfs_msg(sb, "", " "); \
-} while (0)
-#endif
-
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
+extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
+extern struct request *blk_get_request_flags(struct request_queue *,
+ unsigned int op,
+ blk_mq_req_flags_t flags);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
gfp_t gfp_mask);
extern void blk_requeue_request(struct request_queue *, struct request *);
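
[Note: blk_get_request_flags() is a variant of blk_get_request() that takes blk_mq_req_flags_t (e.g. BLK_MQ_REQ_NOWAIT or BLK_MQ_REQ_PREEMPT) instead of a gfp_t. A hypothetical caller, sketching the new signature only.]

	static int issue_preempt_request(struct request_queue *q)
	{
		struct request *rq;

		rq = blk_get_request_flags(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_PREEMPT);
		if (IS_ERR(rq))
			return PTR_ERR(rq);
		/* ... fill in and execute rq ... */
		blk_put_request(rq);
		return 0;
	}
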
@@ -965,7 +960,7 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
-extern int blk_queue_enter(struct request_queue *q, bool nowait);
+extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
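
[Note: blk_queue_enter() now takes blk_mq_req_flags_t; BLK_MQ_REQ_NOWAIT replaces the old bool nowait, and BLK_MQ_REQ_PREEMPT lets a caller enter a preempt-only queue. A minimal sketch of the enter/exit pairing; enter_nowait() is a hypothetical name.]

	static int enter_nowait(struct request_queue *q)
	{
		if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
			return -EAGAIN;	/* queue dying, or entering would have blocked */
		/* ... safe to submit against q here ... */
		blk_queue_exit(q);
		return 0;
	}
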
@@ -992,7 +987,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
-bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+bool blk_poll(struct request_queue *q, blk_qc_t cookie);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
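
[Note: blk_mq_poll() is renamed to blk_poll() now that it also covers the legacy path via q->poll_fn. An illustrative synchronous-polling loop; real callers such as the direct-I/O path also check for signals and fall back to sleeping when blk_poll() cannot make progress, which this sketch omits.]

	static void submit_and_poll(struct request_queue *q, struct bio *bio)
	{
		blk_qc_t cookie = generic_make_request(bio);

		while (!blk_poll(q, cookie))
			cpu_relax();	/* spin until a completion is reaped */
	}
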
@@ -1111,6 +1106,8 @@ extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);
+void blk_steal_bios(struct bio_list *list, struct request *rq);
+
/*
* Request completion related functions.
*
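
[Note: blk_steal_bios() moves every bio from a request onto a caller-supplied list so they can be reissued elsewhere, a multipath-style failover pattern. Sketch under that assumption; steal_and_requeue() is a hypothetical name.]

	static void steal_and_requeue(struct request *rq)
	{
		struct bio_list bios;

		bio_list_init(&bios);
		blk_steal_bios(&bios, rq);	/* rq no longer owns its bios */
		/* ... resubmit the stolen bios on another path/queue ... */
	}
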
@@ -1373,7 +1370,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
gfp_mask, 0);
}
-extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
+extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,