author    Kent Overstreet <kent.overstreet@linux.dev>    2023-04-10 14:39:18 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>    2023-04-15 14:41:11 -0400
commit    807b250927d9c6e829ddd1ff7eac57bf9b056dab
tree      c8c30549383c3b1950ac22fb927729bdd65234cf    libbcachefs/fs-io.c
parent    7f102ee83d83fd918783ca542fac1574f9b2c623
Update bcachefs sources to 504729f99c bcachefs: Allow answering y or n to all fsck errors of given type
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'libbcachefs/fs-io.c')
 -rw-r--r--  libbcachefs/fs-io.c  |  28  +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)
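
The rationale for the type change below: loff_t is a signed 64-bit type, so when a folio covers the top of the supported mapping range, folio_pos() + folio_size() exceeds what loff_t can hold. A minimal userspace sketch of that failure mode, using illustrative stand-in constants rather than the kernel's actual limits:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative values: a 2MB folio placed so that its end offset
	 * lands exactly at 2^63, one past the largest signed 64-bit value. */
	uint64_t folio_size = 2ULL << 20;
	int64_t  folio_pos  = (int64_t)((1ULL << 63) - folio_size);

	/* Do the addition in u64 so this demo avoids signed-overflow UB,
	 * then convert back to show what a signed loff_t would hold. */
	uint64_t end_u64    = (uint64_t)folio_pos + folio_size;	/* 2^63 */
	int64_t  end_signed = (int64_t)end_u64;	/* negative on two's complement */

	/* Range checks like `pos < end` keep working on the u64 value but
	 * fail once the signed value has gone negative. */
	printf("end as u64: %llu\n", (unsigned long long)end_u64);
	printf("end as s64: %lld\n", (long long)end_signed);
	return 0;
}

With u64 helpers, loops in the buffered write path of the form `while (pos < end)` stay well defined right up to the end of the mapping.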
diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c
index db138570..f706a99a 100644
--- a/libbcachefs/fs-io.c
+++ b/libbcachefs/fs-io.c
@@ -35,7 +35,13 @@
 #include <trace/events/bcachefs.h>
 #include <trace/events/writeback.h>
 
-static inline loff_t folio_end_pos(struct folio *folio)
+/*
+ * Use u64 for the end pos and sector helpers because if the folio covers the
+ * max supported range of the mapping, the start offset of the next folio
+ * overflows loff_t. This breaks much of the range based processing in the
+ * buffered write path.
+ */
+static inline u64 folio_end_pos(struct folio *folio)
 {
 	return folio_pos(folio) + folio_size(folio);
 }
@@ -50,7 +56,7 @@ static inline loff_t folio_sector(struct folio *folio)
 	return folio_pos(folio) >> 9;
 }
 
-static inline loff_t folio_end_sector(struct folio *folio)
+static inline u64 folio_end_sector(struct folio *folio)
 {
 	return folio_end_pos(folio) >> 9;
 }
@@ -58,12 +64,12 @@ static inline loff_t folio_end_sector(struct folio *folio)
 typedef DARRAY(struct folio *) folios;
 
 static int filemap_get_contig_folios_d(struct address_space *mapping,
-				       loff_t start, loff_t end,
+				       loff_t start, u64 end,
 				       int fgp_flags, gfp_t gfp,
 				       folios *folios)
 {
 	struct folio *f;
-	loff_t pos = start;
+	u64 pos = start;
 	int ret = 0;
 
 	while (pos < end) {
@@ -1819,7 +1825,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
 	folios folios;
 	struct folio **fi, *f;
 	unsigned copied = 0, f_offset;
-	loff_t end = pos + len, f_pos;
+	u64 end = pos + len, f_pos;
 	loff_t last_folio_pos = inode->v.i_size;
 	int ret = 0;
 
@@ -1861,7 +1867,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
 	f_offset = pos - folio_pos(darray_first(folios));
 	darray_for_each(folios, fi) {
 		struct folio *f = *fi;
-		unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
+		u64 f_len = min(end, folio_end_pos(f)) - f_pos;
 
 		if (!bch2_folio_create(f, __GFP_NOFAIL)->uptodate) {
 			ret = bch2_folio_set(c, inode_inum(inode), fi,
@@ -1900,7 +1906,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
 	f_offset = pos - folio_pos(darray_first(folios));
 	darray_for_each(folios, fi) {
 		struct folio *f = *fi;
-		unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
+		u64 f_len = min(end, folio_end_pos(f)) - f_pos;
 		unsigned f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
 
 		if (!f_copied) {
@@ -1942,7 +1948,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
 	f_offset = pos - folio_pos(darray_first(folios));
 	darray_for_each(folios, fi) {
 		struct folio *f = *fi;
-		unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
+		u64 f_len = min(end, folio_end_pos(f)) - f_pos;
 
 		if (!folio_test_uptodate(f))
 			folio_mark_uptodate(f);
@@ -2774,7 +2780,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
 	struct folio *folio;
 	s64 i_sectors_delta = 0;
 	int ret = 0;
-	loff_t end_pos;
+	u64 end_pos;
 
 	folio = filemap_lock_folio(mapping, index);
 	if (!folio) {
@@ -2800,7 +2806,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
 	BUG_ON(end <= folio_pos(folio));
 
 	start_offset = max(start, folio_pos(folio)) - folio_pos(folio);
-	end_offset = min(end, folio_end_pos(folio)) - folio_pos(folio);
+	end_offset = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);
 
 	/* Folio boundary? Nothing to do */
 	if (start_offset == 0 &&
@@ -2851,7 +2857,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
 	WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
 	end_pos = folio_end_pos(folio);
 	if (inode->v.i_size > folio_pos(folio))
-		end_pos = min_t(u64, inode->v.i_size, end_pos);
 	ret = s->s[(end_pos - folio_pos(folio) - 1) >> 9].state >= SECTOR_dirty;
 
 	folio_zero_segment(folio, start_offset, end_offset);
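
The min() to min_t(u64, ...) substitutions in __bch2_truncate_folio() follow from the same widening: folio_end_pos() now returns u64 while `end` and i_size are still loff_t, and the kernel's min() deliberately refuses operands of mismatched type, whereas min_t() casts both sides to the named type before comparing. A rough userspace approximation of that casting behaviour (simplified; the real macro lives in include/linux/minmax.h and carries extra type-checking machinery):

#include <stdio.h>
#include <stdint.h>

/* Simplified take on the kernel's min_t(): cast both operands to one
 * explicit type before comparing, so a signed offset and a u64 end
 * position are compared in a single, well-defined domain. Relies on
 * GCC/clang statement expressions, as the kernel macro does. */
#define min_t(type, x, y) ({		\
	type __x = (type)(x);		\
	type __y = (type)(y);		\
	__x < __y ? __x : __y;		\
})

int main(void)
{
	int64_t  i_size  = 4096;	/* signed, standing in for loff_t */
	uint64_t end_pos = 1ULL << 62;	/* u64, as after this patch */

	/* The kernel's min() would reject this signed/unsigned mix at
	 * compile time; min_t(u64, ...) makes the comparison type explicit. */
	uint64_t clamped = min_t(uint64_t, i_size, end_pos);

	printf("clamped end_pos: %llu\n", (unsigned long long)clamped);
	return 0;
}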