Diffstat (limited to 'drivers/md/dm-table.c')
 drivers/md/dm-table.c | 56 +++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 33 insertions(+), 23 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 03541cfc2317..0e833a154b31 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -719,6 +719,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
dm_device_name(t->md), type);
+ if (tgt->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
+ static_branch_enable(&swap_bios_enabled);
+
return 0;
bad:
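Note: the hunk above flips a jump label the first time a target with limit_swap_bios is added to a table; checking static_key_enabled() first skips the locking inside static_branch_enable() when the key is already on, and the key is never turned back off. A minimal sketch of the consumer side, assuming kernel context: the key name is real (it is declared in dm.c), but example_fast_path() and its body are hypothetical.

#include <linux/jump_label.h>
#include <linux/bio.h>
#include <linux/printk.h>

DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);     /* really lives in dm.c */

/* Hypothetical hot-path consumer: while the key is disabled the branch
 * is patched out to a no-op, so tables without limit_swap_bios targets
 * pay no per-bio cost for the swap throttling check. */
static void example_fast_path(struct bio *bio)
{
        if (static_branch_unlikely(&swap_bios_enabled) &&
            unlikely(bio->bi_opf & REQ_SWAP))
                pr_debug("dm: swap bio would be throttled here\n");
}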
@@ -1002,6 +1005,8 @@ bool dm_table_request_based(struct dm_table *t)
return __table_type_request_based(dm_table_get_type(t));
}
+static bool dm_table_supports_poll(struct dm_table *t);
+
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
enum dm_queue_mode type = dm_table_get_type(t);
@@ -1009,21 +1014,24 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)

unsigned min_pool_size = 0;
struct dm_target *ti;
unsigned i;
+ bool poll_supported = false;
if (unlikely(type == DM_TYPE_NONE)) {
DMWARN("no table type is set, can't allocate mempools");
return -EINVAL;
}
- if (__table_type_bio_based(type))
+ if (__table_type_bio_based(type)) {
for (i = 0; i < t->num_targets; i++) {
ti = t->targets + i;
per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
min_pool_size = max(min_pool_size, ti->num_flush_bios);
}
+ poll_supported = dm_table_supports_poll(t);
+ }
- t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
- per_io_data_size, min_pool_size);
+ t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size,
+ t->integrity_supported, poll_supported);
if (!t->mempools)
return -ENOMEM;
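Note: the forward declaration added above exists because dm_table_supports_poll() is defined further down with the other capability checks, but its answer is now needed at mempool-allocation time: whether the table can poll affects how the bio sets are created, and dm_alloc_md_mempools() grows a poll_supported flag (with the two booleans moved to the end of the argument list). A sketch of the callee side, assuming this is what the flag feeds; BIOSET_PERCPU_CACHE and bioset_init() are real block-layer APIs, but the function below is illustrative rather than the actual dm.c implementation.

#include <linux/bio.h>

static int example_init_io_bioset(struct bio_set *bs, unsigned int pool_size,
                                  unsigned int front_pad, bool poll_supported)
{
        int flags = 0;

        /* The per-cpu bio cache speeds up polled I/O, so request it
         * only when every target in the table supports polling. */
        if (poll_supported)
                flags |= BIOSET_PERCPU_CACHE;

        return bioset_init(bs, pool_size, front_pad, flags);
}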
@@ -1539,9 +1547,20 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
-static int dm_table_supports_poll(struct dm_table *t)
+static bool dm_table_supports_poll(struct dm_table *t)
{
- return !dm_table_any_dev_attr(t, device_not_poll_capable, NULL);
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->type->iterate_devices ||
+ ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
+ return false;
+ }
+
+ return true;
}
/*
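Note: the rewrite above is not just an int-to-bool conversion. dm_table_any_dev_attr() treats a target without an ->iterate_devices hook as if the attribute were absent, which for polling would have counted as "capable". Polling must be opt-in, so the open-coded loop also rejects any target that cannot enumerate its devices. The callback follows this file's usual veto convention (return nonzero for an incapable device); the body below is an assumption modeled on the neighbouring device_not_*_capable() helpers.

static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
                                   sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        /* Nonzero return = this underlying queue cannot be polled. */
        return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
}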
@@ -1820,9 +1839,7 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_nonrot(q);
+ return !bdev_nonrot(dev->bdev);
}
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
@@ -1890,9 +1907,7 @@ static bool dm_table_supports_nowait(struct dm_table *t)
static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_discard(q);
+ return !bdev_max_discard_sectors(dev->bdev);
}
static bool dm_table_supports_discards(struct dm_table *t)
@@ -1924,9 +1939,7 @@ static int device_not_secure_erase_capable(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_secure_erase(q);
+ return !bdev_max_secure_erase_sectors(dev->bdev);
}
static bool dm_table_supports_secure_erase(struct dm_table *t)
@@ -1952,9 +1965,7 @@ static int device_requires_stable_pages(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return blk_queue_stable_writes(q);
+ return bdev_stable_writes(dev->bdev);
}
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
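Note: the four hunks above are one mechanical conversion: rather than fetching the request_queue with bdev_get_queue() and poking at queue flags, the callbacks use the block layer's bdev_*() accessors. For discard and secure erase the accessor returns the max-sectors limit, so a zero return doubles as "not supported". An illustrative caller, using only real helper names; example_report_caps() itself is hypothetical.

#include <linux/blkdev.h>

static void example_report_caps(struct block_device *bdev)
{
        bool nonrot  = bdev_nonrot(bdev);              /* non-rotational media */
        bool stable  = bdev_stable_writes(bdev);       /* pages pinned while in flight */
        bool discard = bdev_max_discard_sectors(bdev) != 0;
        bool serase  = bdev_max_secure_erase_sectors(bdev) != 0;

        pr_info("nonrot=%d stable=%d discard=%d secure_erase=%d\n",
                nonrot, stable, discard, serase);
}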
@@ -1974,18 +1985,15 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
if (!dm_table_supports_discards(t)) {
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
- /* Must also clear discard limits... */
q->limits.max_discard_sectors = 0;
q->limits.max_hw_discard_sectors = 0;
q->limits.discard_granularity = 0;
q->limits.discard_alignment = 0;
q->limits.discard_misaligned = 0;
- } else
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ }
- if (dm_table_supports_secure_erase(t))
- blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
+ if (!dm_table_supports_secure_erase(t))
+ q->limits.max_secure_erase_sectors = 0;
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
wc = true;
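Note: both capability flags disappeared from the block layer around this series, which is why the hunk above stops setting and clearing QUEUE_FLAG_DISCARD and QUEUE_FLAG_SECERASE: support is now expressed solely through the queue limits, and the old "Must also clear discard limits" comment is moot once zeroing the limits is the whole mechanism. Callers probe support the same way; a hedged sketch follows, where example_probe_discard() is hypothetical but the helper is real.

#include <linux/blkdev.h>
#include <linux/errno.h>

static int example_probe_discard(struct block_device *bdev)
{
        /* A zero max_discard_sectors limit is the only "unsupported" signal. */
        if (!bdev_max_discard_sectors(bdev))
                return -EOPNOTSUPP;
        return 0;
}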
@@ -2046,6 +2054,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
r = dm_set_zones_restrictions(t, q);
if (r)
return r;
+ if (!static_key_enabled(&zoned_enabled.key))
+ static_branch_enable(&zoned_enabled);
}
dm_update_crypto_profile(q, t);
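Note: the zoned_enabled hunk above is the same jump-label idiom as swap_bios_enabled at the top of this diff: flip the key the first time a zoned table passes dm_set_zones_restrictions(), guard with static_key_enabled() so reloads skip the locking in static_branch_enable(), and let hot paths test static_branch_unlikely(&zoned_enabled) so non-zoned systems pay nothing. See the sketch after the first hunk for the consumer side.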