author     Christian Brauner <brauner@kernel.org>  2025-03-19 10:04:29 +0100
committer  Christian Brauner <brauner@kernel.org>  2025-03-19 10:04:29 +0100
commit     613218fc74b32095152275fc11d5ab8e3f05d5e8
tree       5109724f0b9226e4ed063d4f8c5ebb1023cadd95
parent     d9ecc77193cad25402ff5517fb26fb22b4db0e10
parent     07c574eb53d4cc9aa7b985bc8bfcb302e5dc4694
Merge patch series "netfs: Miscellaneous fixes"
David Howells <dhowells@redhat.com> says:
Here are some miscellaneous fixes and changes for netfslib:
(1) Fix the collection of results during a pause in transmission.
(2) Call ->invalidate_cache() only if provided.
(3) Fix the rolling buffer to not hammer atomic bit clears when loading
from readahead.
(4) Fix netfs_unbuffered_read() to return ssize_t.
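
As an aside on (4): on a 64-bit build a direct-I/O request can transfer more than
INT_MAX bytes, and a byte count funnelled through an int return value wraps. The
userspace sketch below is not part of the series; the helper name and the 3 GiB
figure are purely illustrative, assuming a 64-bit ssize_t.

/*
 * Standalone sketch (not kernel code) of why the int -> ssize_t change
 * matters: a large transfer count survives in ssize_t but is mangled
 * when returned through an int.
 */
#include <stdio.h>
#include <sys/types.h>

/* Hypothetical helper that reports a transfer length as int. */
static int transferred_as_int(ssize_t transferred)
{
	return transferred;	/* silently truncates on 64-bit ssize_t */
}

int main(void)
{
	ssize_t big_read = 3LL * 1024 * 1024 * 1024;	/* e.g. a 3 GiB DIO read */

	printf("ssize_t result: %zd\n", big_read);
	printf("int result:     %d\n", transferred_as_int(big_read));
	return 0;
}

Carrying ssize_t end to end keeps both large positive counts and negative error
codes such as -EIOCBQUEUED intact.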
* patches from https://lore.kernel.org/r/20250314164201.1993231-1-dhowells@redhat.com:
netfs: Fix netfs_unbuffered_read() to return ssize_t rather than int
netfs: Fix rolling_buffer_load_from_ra() to not clear mark bits
netfs: Call `invalidate_cache` only if implemented
netfs: Fix collection of results during pause when collection offloaded
Link: https://lore.kernel.org/r/20250314164201.1993231-1-dhowells@redhat.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
-rw-r--r--  fs/netfs/direct_read.c    |  6
-rw-r--r--  fs/netfs/read_collect.c   | 18
-rw-r--r--  fs/netfs/rolling_buffer.c |  4
-rw-r--r--  fs/netfs/write_collect.c  |  3
4 files changed, 15 insertions(+), 16 deletions(-)
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index 0bf3c2f5a710..5e3f0aeb51f3 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -125,9 +125,9 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
  * Perform a read to an application buffer, bypassing the pagecache and the
  * local disk cache.
  */
-static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
+static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
 {
-	int ret;
+	ssize_t ret;
 
 	_enter("R=%x %llx-%llx", rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
 
@@ -155,7 +155,7 @@ static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
 	else
 		ret = -EIOCBQUEUED;
 out:
-	_leave(" = %d", ret);
+	_leave(" = %zd", ret);
 	return ret;
 }
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index 636cc5a98ef5..23c75755ad4e 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -682,14 +682,16 @@ void netfs_wait_for_pause(struct netfs_io_request *rreq)
 		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
 		prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
 
-		subreq = list_first_entry_or_null(&stream->subrequests,
-						  struct netfs_io_subrequest, rreq_link);
-		if (subreq &&
-		    (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
-		     test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
-			__set_current_state(TASK_RUNNING);
-			netfs_read_collection(rreq);
-			continue;
+		if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+			subreq = list_first_entry_or_null(&stream->subrequests,
+							  struct netfs_io_subrequest, rreq_link);
+			if (subreq &&
+			    (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
+			     test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
+				__set_current_state(TASK_RUNNING);
+				netfs_read_collection(rreq);
+				continue;
+			}
 		}
 
 		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) ||
diff --git a/fs/netfs/rolling_buffer.c b/fs/netfs/rolling_buffer.c
index 75d97af14b4a..207b6a326651 100644
--- a/fs/netfs/rolling_buffer.c
+++ b/fs/netfs/rolling_buffer.c
@@ -146,10 +146,6 @@ ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll,
 
 	/* Store the counter after setting the slot. */
 	smp_store_release(&roll->next_head_slot, to);
-
-	for (; ix < folioq_nr_slots(fq); ix++)
-		folioq_clear(fq, ix);
-
 	return size;
 }
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 294f67795f79..3fca59e6475d 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -400,7 +400,8 @@ void netfs_write_collection_worker(struct work_struct *work)
 	trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
 
 	if (wreq->io_streams[1].active &&
-	    wreq->io_streams[1].failed) {
+	    wreq->io_streams[1].failed &&
+	    ictx->ops->invalidate_cache) {
		/* Cache write failure doesn't prevent writeback completion
		 * unless we're in disconnected mode.
		 */
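
The write_collect.c hunk is the guard for fix (2): ->invalidate_cache() is an
optional hook in the netfs ops table, so the cache-failure branch may only run
when the filesystem actually supplies it. Below is a standalone sketch of that
optional-hook pattern; the struct and function names are stand-ins for
illustration, not the real netfs types.

/*
 * Sketch of "call the hook only if implemented": dereference an optional
 * function pointer only after checking that the ops table provides it.
 */
#include <stdio.h>

struct cache_ops {
	/* Optional: a filesystem may leave this NULL. */
	void (*invalidate_cache)(void *cookie);
};

static void example_invalidate(void *cookie)
{
	printf("invalidating cache for %p\n", cookie);
}

static void complete_write(const struct cache_ops *ops, void *cookie,
			   int cache_write_failed)
{
	/* Only take the failure path through the hook when it exists. */
	if (cache_write_failed && ops->invalidate_cache)
		ops->invalidate_cache(cookie);
}

int main(void)
{
	struct cache_ops with_hook = { .invalidate_cache = example_invalidate };
	struct cache_ops without_hook = { 0 };	/* would oops without the check */

	complete_write(&with_hook, &with_hook, 1);
	complete_write(&without_hook, &without_hook, 1);
	return 0;
}

The added ictx->ops->invalidate_cache test in the hunk has the same shape: the
call made inside that branch can never go through a NULL pointer for
filesystems that don't provide the op.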