Diffstat (limited to 'fs/netfs')
-rw-r--r--  fs/netfs/internal.h       2
-rw-r--r--  fs/netfs/objects.c        1
-rw-r--r--  fs/netfs/read_helper.c  144
3 files changed, 73 insertions, 74 deletions
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 54c761bcc8e6..54faf0c72297 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -39,7 +39,7 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
*/
extern unsigned int netfs_debug;
-void netfs_rreq_work(struct work_struct *work);
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
/*
* stats.c
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 657b19e60118..e86107b30ba4 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -35,7 +35,6 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
rreq->i_size = i_size_read(inode);
rreq->debug_id = atomic_inc_return(&debug_ids);
INIT_LIST_HEAD(&rreq->subrequests);
- INIT_WORK(&rreq->work, netfs_rreq_work);
refcount_set(&rreq->ref, 1);
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
if (rreq->netfs_ops->init_request) {
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index c048cd328ce5..8a4cebf5eb89 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -443,7 +443,7 @@ again:
netfs_rreq_completed(rreq, was_async);
}
-void netfs_rreq_work(struct work_struct *work)
+static void netfs_rreq_work(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
@@ -688,6 +688,69 @@ subreq_failed:
return false;
}
+/*
+ * Begin the process of reading in a chunk of data, where that data may be
+ * stitched together from multiple sources, including multiple servers and the
+ * local cache.
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
+{
+ unsigned int debug_index = 0;
+ int ret;
+
+ _enter("R=%x %llx-%llx",
+ rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+
+ if (rreq->len == 0) {
+ pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
+ return -EIO;
+ }
+
+ INIT_WORK(&rreq->work, netfs_rreq_work);
+
+ if (sync)
+ netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+
+ /* Chop the read into slices according to what the cache and the netfs
+ * want and submit each one.
+ */
+ atomic_set(&rreq->nr_outstanding, 1);
+ do {
+ if (!netfs_rreq_submit_slice(rreq, &debug_index))
+ break;
+
+ } while (rreq->submitted < rreq->len);
+
+ if (sync) {
+ /* Keep nr_outstanding incremented so that the ref always belongs to
+ * us, and the service code isn't punted off to a random thread pool to
+ * process.
+ */
+ for (;;) {
+ wait_var_event(&rreq->nr_outstanding,
+ atomic_read(&rreq->nr_outstanding) == 1);
+ netfs_rreq_assess(rreq, false);
+ if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+ break;
+ cond_resched();
+ }
+
+ ret = rreq->error;
+ if (ret == 0 && rreq->submitted < rreq->len) {
+ trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+ ret = -EIO;
+ }
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+ } else {
+ /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+ if (atomic_dec_and_test(&rreq->nr_outstanding))
+ netfs_rreq_assess(rreq, false);
+ ret = 0;
+ }
+ return ret;
+}
+
static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
loff_t *_start, size_t *_len, loff_t i_size)
{
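
The synchronous branch of netfs_begin_read() above depends on nr_outstanding being seeded at 1: subrequest completions can only ever bring the count down to 1, never 0, while the caller waits, so the request reference stays with the caller and the assessment runs in its context instead of being punted to a workqueue. Below is a minimal userspace sketch of that counting scheme, assuming C11 atomics and a pthread condition variable in place of the kernel's wait_var_event()/wake_up_var(); all names here are illustrative, not kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int nr_outstanding;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Completion path: one call per finished subrequest.  If the old value
 * was 2, the new value is 1 and only the caller's share remains. */
static void subreq_done(void)
{
	if (atomic_fetch_sub(&nr_outstanding, 1) == 2) {
		pthread_mutex_lock(&lock);
		pthread_cond_broadcast(&cond);	/* wake_up_var() analogue */
		pthread_mutex_unlock(&lock);
	}
}

static void *subreq_thread(void *arg)
{
	(void)arg;
	/* ... perform the I/O for one slice here ... */
	subreq_done();
	return NULL;
}

int main(void)
{
	pthread_t t[3];

	atomic_store(&nr_outstanding, 1);	/* the caller's share */
	for (int i = 0; i < 3; i++) {
		atomic_fetch_add(&nr_outstanding, 1);
		pthread_create(&t[i], NULL, subreq_thread, NULL);
	}

	/* wait_var_event() analogue: sleep until only our share is left. */
	pthread_mutex_lock(&lock);
	while (atomic_load(&nr_outstanding) != 1)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("all subrequests complete, reference still ours\n");
	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}
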
@@ -750,7 +813,6 @@ void netfs_readahead(struct readahead_control *ractl)
{
struct netfs_io_request *rreq;
struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
- unsigned int debug_index = 0;
int ret;
_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
@@ -777,22 +839,13 @@ void netfs_readahead(struct readahead_control *ractl)
netfs_rreq_expand(rreq, ractl);
- atomic_set(&rreq->nr_outstanding, 1);
- do {
- if (!netfs_rreq_submit_slice(rreq, &debug_index))
- break;
-
- } while (rreq->submitted < rreq->len);
-
/* Drop the refs on the folios here rather than in the cache or
* filesystem. The locks will be dropped in netfs_rreq_unlock().
*/
while (readahead_folio(ractl))
;
- /* If we decrement nr_outstanding to 0, the ref belongs to us. */
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- netfs_rreq_assess(rreq, false);
+ netfs_begin_read(rreq, false);
return;
cleanup_free:
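
On the asynchronous side, the logic deleted here survives inside netfs_begin_read(rreq, false): whichever path takes nr_outstanding to zero inherits the request reference and must run the assessment itself. A hedged sketch of that hand-off, with a C11 atomic standing in for atomic_dec_and_test() and hypothetical names throughout:

#include <stdatomic.h>
#include <stdio.h>

struct request {
	atomic_int nr_outstanding;
};

static void assess_and_free(struct request *rq)
{
	(void)rq;
	/* ...unlock folios, drop the inherited request reference... */
	printf("count hit zero: this path owns the cleanup\n");
}

/* Called by the submitting path and by each completion path alike. */
static void put_outstanding(struct request *rq)
{
	/* atomic_dec_and_test() analogue: old value 1 means we hit zero. */
	if (atomic_fetch_sub(&rq->nr_outstanding, 1) == 1)
		assess_and_free(rq);
}

int main(void)
{
	struct request rq;

	atomic_store(&rq.nr_outstanding, 1);	/* submitter's share */
	atomic_fetch_add(&rq.nr_outstanding, 1);	/* one subrequest */
	put_outstanding(&rq);	/* subrequest completes */
	put_outstanding(&rq);	/* submitter drops its share: hits zero */
	return 0;
}
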
@@ -821,7 +874,6 @@ int netfs_readpage(struct file *file, struct page *subpage)
struct address_space *mapping = folio->mapping;
struct netfs_io_request *rreq;
struct netfs_i_context *ctx = netfs_i_context(mapping->host);
- unsigned int debug_index = 0;
int ret;
_enter("%lx", folio_index(folio));
@@ -836,42 +888,16 @@ int netfs_readpage(struct file *file, struct page *subpage)
if (ctx->ops->begin_cache_operation) {
ret = ctx->ops->begin_cache_operation(rreq);
- if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
- folio_unlock(folio);
- goto out;
- }
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto discard;
}
netfs_stat(&netfs_n_rh_readpage);
trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
+ return netfs_begin_read(rreq, true);
- netfs_get_request(rreq, netfs_rreq_trace_get_hold);
-
- atomic_set(&rreq->nr_outstanding, 1);
- do {
- if (!netfs_rreq_submit_slice(rreq, &debug_index))
- break;
-
- } while (rreq->submitted < rreq->len);
-
- /* Keep nr_outstanding incremented so that the ref always belongs to us, and
- * the service code isn't punted off to a random thread pool to
- * process.
- */
- do {
- wait_var_event(&rreq->nr_outstanding,
- atomic_read(&rreq->nr_outstanding) == 1);
- netfs_rreq_assess(rreq, false);
- } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
-
- ret = rreq->error;
- if (ret == 0 && rreq->submitted < rreq->len) {
- trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
- ret = -EIO;
- }
-out:
- netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
- return ret;
+discard:
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
alloc_error:
folio_unlock(folio);
return ret;
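
The reshaped error path in netfs_readpage() is a goto ladder: a cache-setup failure now jumps to a discard label that puts the request and falls through to the folio unlock shared with the allocation-failure path, replacing the old duplicated unlock-and-out exit. A compilable toy of that shape, with malloc/free as a hypothetical stand-in for the request and a message in place of folio_unlock():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int readpage_like(int cache_fails)
{
	void *rreq;
	int ret;

	rreq = malloc(64);		/* netfs_alloc_request() stand-in */
	if (!rreq) {
		ret = -ENOMEM;
		goto alloc_error;
	}

	if (cache_fails) {		/* begin_cache_operation() failed */
		ret = -ENOMEM;
		goto discard;
	}

	/* success path would be: return netfs_begin_read(rreq, true); */
	free(rreq);
	return 0;

discard:
	free(rreq);			/* netfs_put_request() stand-in */
alloc_error:
	puts("folio_unlock() would run here");	/* shared unlock */
	return ret;
}

int main(void)
{
	printf("ok=%d fail=%d\n", readpage_like(0), readpage_like(1));
	return 0;
}
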
@@ -966,7 +992,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
struct netfs_io_request *rreq;
struct netfs_i_context *ctx = netfs_i_context(file_inode(file ));
struct folio *folio;
- unsigned int debug_index = 0, fgp_flags;
+ unsigned int fgp_flags;
pgoff_t index = pos >> PAGE_SHIFT;
int ret;
@@ -1029,39 +1055,13 @@ retry:
*/
ractl._nr_pages = folio_nr_pages(folio);
netfs_rreq_expand(rreq, &ractl);
- netfs_get_request(rreq, netfs_rreq_trace_get_hold);
/* We hold the folio locks, so we can drop the references */
folio_get(folio);
while (readahead_folio(&ractl))
;
- atomic_set(&rreq->nr_outstanding, 1);
- do {
- if (!netfs_rreq_submit_slice(rreq, &debug_index))
- break;
-
- } while (rreq->submitted < rreq->len);
-
- /* Keep nr_outstanding incremented so that the ref always belongs to
- * us, and the service code isn't punted off to a random thread pool to
- * process.
- */
- for (;;) {
- wait_var_event(&rreq->nr_outstanding,
- atomic_read(&rreq->nr_outstanding) == 1);
- netfs_rreq_assess(rreq, false);
- if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
- break;
- cond_resched();
- }
-
- ret = rreq->error;
- if (ret == 0 && rreq->submitted < rreq->len) {
- trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
- ret = -EIO;
- }
- netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+ ret = netfs_begin_read(rreq, true);
if (ret < 0)
goto error;
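
All three converted call sites now share one contract: the asynchronous caller (readahead) always gets 0 back and cleanup runs from the completion side, while the synchronous callers (readpage and write_begin) get the request's final error, with a clean run that nevertheless failed to submit the whole span reported as -EIO. A small sketch of that final-result rule, assuming a trimmed-down request struct with only the fields the rule reads:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct req { size_t submitted, len; int error; };

/* Mirrors the tail of netfs_begin_read() for sync callers: no recorded
 * error but an incomplete submission still fails the read with -EIO. */
static int read_result(const struct req *rq)
{
	int ret = rq->error;

	if (ret == 0 && rq->submitted < rq->len)
		ret = -EIO;
	return ret;
}

int main(void)
{
	struct req full = { .submitted = 4096, .len = 4096, .error = 0 };
	struct req shrt = { .submitted = 2048, .len = 4096, .error = 0 };

	printf("full=%d short=%d\n", read_result(&full), read_result(&shrt));
	return 0;
}
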