summaryrefslogtreecommitdiff
path: root/fs/bcachefs/zone.c
blob: 6f48f58a6c500a62992211708d4394856fa1c0a8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "eytzinger.h"
#include "zone.h"

#include <linux/blkdev.h>

/*
 * report_zones callback: copy the single reported zone into the
 * caller-supplied struct blk_zone pointed to by @data.
 */
static int zone_report_cb(struct blk_zone *src, unsigned idx, void *data)
{
	struct blk_zone *out = data;

	*out = *src;
	return 0;
}

/*
 * Fetch the single zone containing @sector into *@zone.
 *
 * blkdev_report_zones() returns the number of zones reported (here 0 or
 * 1) on success, or a negative error code on failure — only a negative
 * return is an error. Normalize success to 0 for callers; an empty
 * report (no zone at @sector) is surfaced as -ENOENT.
 */
static int bch2_zone_report(struct block_device *bdev, sector_t sector, struct blk_zone *zone)
{
	int ret = blkdev_report_zones(bdev, sector, 1, zone_report_cb, zone);

	if (ret < 0) {
		pr_err("error getting zone at sector %llu: %i",
		       (u64) sector, ret);
		return ret;
	}

	/* No zone reported at @sector (e.g. past end of device): */
	if (!ret)
		return -ENOENT;

	return 0;
}

/*
 * Discard the on-disk contents of bucket @b on device @ca.
 *
 * Honours the nochanges mount option. When discard is enabled and the
 * device supports it, a discard is issued for the bucket's sector
 * range; on zoned devices the backing zone is additionally reset.
 */
void bch2_bucket_discard(struct bch_dev *ca, u64 b)
{
	struct block_device *bdev = ca->disk_sb.bdev;
	sector_t sector;

	if (ca->fs->opts.nochanges)
		return;

	sector = bucket_to_sector(ca, b);

	if (ca->mi.discard && bdev_max_discard_sectors(bdev))
		blkdev_issue_discard(bdev, sector, ca->mi.bucket_size,
				     GFP_NOFS);

	if (ca->zoned)
		blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, sector,
				 ca->mi.bucket_size, GFP_NOFS);
}

/*
 * Transition the zone backing bucket @b to the full state
 * (REQ_OP_ZONE_FINISH). No-op on non-zoned devices and when the
 * nochanges mount option is set.
 */
void bch2_bucket_finish(struct bch_dev *ca, u64 b)
{
	if (!ca->zoned || ca->fs->opts.nochanges)
		return;

	blkdev_zone_mgmt(ca->disk_sb.bdev, REQ_OP_ZONE_FINISH,
			 bucket_to_sector(ca, b),
			 ca->mi.bucket_size, GFP_KERNEL);
}

void bch2_dev_zones_exit(struct bch_dev *ca)
{
	kfree(ca->buckets.d);
}

static int zone_report_capacity(struct blk_zone *src, unsigned idx, void *data)
{
	struct bucket_capacities *b = data;

	if (b->nr &&
	    b->d[b->nr - 1].sectors == src->capacity)
		return 0;

	if (b->nr == b->size) {
		size_t new_size = min(b->size * 2, 8U);
		struct bucket_capacity *d =
			krealloc_array(b->d, new_size, sizeof(*d), GFP_KERNEL);
		if (!d)
			return -ENOMEM;

		b->d	= d;
		b->size = new_size;
	}

	b->d[b->nr++] = (struct bucket_capacity) {
		.start		= idx,
		.sectors	= src->capacity,
	};

	return 0;
}

/*
 * Initialize per-device zone information.
 *
 * Conventional (non-zoned) devices: usable capacity is simply
 * nbuckets * bucket_size.
 *
 * Zoned devices: the zone size must equal the bucket size and the
 * device must have at least nbuckets zones. We build a run-length
 * encoded table of zone capacities (a zone's usable capacity may be
 * smaller than its size), sum it to get total usable capacity, and
 * sort the table into eytzinger order for later lookups.
 *
 * Returns 0 on success, a negative error code on failure.
 */
int bch2_dev_zones_init(struct bch_dev *ca, struct bch_sb_handle *sb)
{
	struct bucket_capacities *b = &ca->buckets;
	struct blk_zone zone;
	unsigned i;
	int ret;

	ca->zoned = bdev_nr_zones(sb->bdev) != 0;
	if (!ca->zoned) {
		ca->capacity = ca->mi.bucket_size * ca->mi.nbuckets;
		return 0;
	}

	/*
	 * Report-zones helpers return a negative error code on failure;
	 * a non-negative return (number of zones reported) is success.
	 */
	ret = bch2_zone_report(sb->bdev, 0, &zone);
	if (ret < 0)
		return ret;

	if (zone.len != ca->mi.bucket_size) {
		bch_err(ca, "zone size doesn't match bucket size");
		return -EINVAL;
	}

	if (bdev_nr_zones(sb->bdev) < ca->mi.nbuckets) {
		bch_err(ca, "member info nbuckets (%llu) greater than number of zones (%u)",
			ca->mi.nbuckets,
			bdev_nr_zones(sb->bdev));
		return -EINVAL;
	}

	b->nr = 0;
	ret = blkdev_report_zones(sb->bdev, 0, ca->mi.nbuckets,
				  zone_report_capacity, &ca->buckets);
	/* Positive return = zones reported; only negative is an error: */
	if (ret < 0) {
		bch_err(ca, "error getting zone capacities");
		return -EINVAL;
	}

	/*
	 * Each table entry covers buckets [start, next entry's start)
	 * at a uniform per-bucket usable capacity:
	 */
	ca->capacity = 0;
	for (i = 0; i < b->nr; i++) {
		u64 next = i + 1 < b->nr
			? b->d[i + 1].start
			: ca->mi.nbuckets;

		ca->capacity += (next - b->d[i].start) * b->d[i].sectors;
	}

	/* Usable capacity can never exceed raw device capacity: */
	BUG_ON(ca->capacity > ca->mi.bucket_size * ca->mi.nbuckets);

	eytzinger0_sort(b->d, b->nr, sizeof(*b->d), bucket_capacity_cmp, NULL);

	return 0;
}