From f18a378580a761c8559b7d90afaa157269559c05 Mon Sep 17 00:00:00 2001
From: David Howells <dhowells@redhat.com>
Date: Thu, 17 Feb 2022 10:14:32 +0000
Subject: netfs: Finish off rename of netfs_read_request to netfs_io_request

Adjust helper function names and comments after mass rename of struct
netfs_read_*request to struct netfs_io_*request.

Changes
=======
ver #2)
 - Make the changes in the docs also.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
Link: https://lore.kernel.org/r/164622992433.3564931.6684311087845150271.stgit@warthog.procyon.org.uk/ # v1
Link: https://lore.kernel.org/r/164678196111.1200972.5001114956865989528.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/164692892567.2099075.13895804222087028813.stgit@warthog.procyon.org.uk/ # v3
---
 fs/netfs/read_helper.c | 83 +++++++++++++++++++++++++-------------------------
 1 file changed, 42 insertions(+), 41 deletions(-)

diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 50035d93f1dc..26d54055b17e 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -37,7 +37,7 @@ static void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
 	__netfs_put_subrequest(subreq, was_async);
 }
 
-static struct netfs_io_request *netfs_alloc_read_request(
+static struct netfs_io_request *netfs_alloc_request(
 	const struct netfs_request_ops *ops, void *netfs_priv,
 	struct file *file)
 {
@@ -63,13 +63,12 @@ static struct netfs_io_request *netfs_alloc_read_request(
 	return rreq;
 }
 
-static void netfs_get_read_request(struct netfs_io_request *rreq)
+static void netfs_get_request(struct netfs_io_request *rreq)
 {
 	refcount_inc(&rreq->usage);
 }
 
-static void netfs_rreq_clear_subreqs(struct netfs_io_request *rreq,
-				     bool was_async)
+static void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
 {
 	struct netfs_io_subrequest *subreq;
 
@@ -81,11 +80,11 @@ static void netfs_rreq_clear_subreqs(struct netfs_io_request *rreq,
 	}
 }
 
-static void netfs_free_read_request(struct work_struct *work)
+static void netfs_free_request(struct work_struct *work)
 {
 	struct netfs_io_request *rreq =
 		container_of(work, struct netfs_io_request, work);
-	netfs_rreq_clear_subreqs(rreq, false);
+	netfs_clear_subrequests(rreq, false);
 	if (rreq->netfs_priv)
 		rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
 	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
@@ -95,15 +94,15 @@ static void netfs_free_read_request(struct work_struct *work)
 	netfs_stat_d(&netfs_n_rh_rreq);
 }
 
-static void netfs_put_read_request(struct netfs_io_request *rreq, bool was_async)
+static void netfs_put_request(struct netfs_io_request *rreq, bool was_async)
 {
 	if (refcount_dec_and_test(&rreq->usage)) {
 		if (was_async) {
-			rreq->work.func = netfs_free_read_request;
+			rreq->work.func = netfs_free_request;
 			if (!queue_work(system_unbound_wq, &rreq->work))
 				BUG();
 		} else {
-			netfs_free_read_request(&rreq->work);
+			netfs_free_request(&rreq->work);
 		}
 	}
 }
@@ -121,14 +120,14 @@ static struct netfs_io_subrequest *netfs_alloc_subrequest(
 		INIT_LIST_HEAD(&subreq->rreq_link);
 		refcount_set(&subreq->usage, 2);
 		subreq->rreq = rreq;
-		netfs_get_read_request(rreq);
+		netfs_get_request(rreq);
 		netfs_stat(&netfs_n_rh_sreq);
 	}
 
 	return subreq;
 }
 
-static void netfs_get_read_subrequest(struct netfs_io_subrequest *subreq)
+static void netfs_get_subrequest(struct netfs_io_subrequest *subreq)
 {
 	refcount_inc(&subreq->usage);
 }
@@ -141,7 +140,7 @@ static void __netfs_put_subrequest(struct netfs_io_subrequest *subreq,
 	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
 	kfree(subreq);
 	netfs_stat_d(&netfs_n_rh_sreq);
-	netfs_put_read_request(rreq, was_async);
+	netfs_put_request(rreq, was_async);
 }
 
 /*
@@ -216,7 +215,7 @@ static void netfs_read_from_server(struct netfs_io_request *rreq,
 				   struct netfs_io_subrequest *subreq)
 {
 	netfs_stat(&netfs_n_rh_download);
-	rreq->netfs_ops->issue_op(subreq);
+	rreq->netfs_ops->issue_read(subreq);
 }
 
 /*
@@ -225,8 +224,8 @@ static void netfs_read_from_server(struct netfs_io_request *rreq,
 static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
 {
 	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
-	netfs_rreq_clear_subreqs(rreq, was_async);
-	netfs_put_read_request(rreq, was_async);
+	netfs_clear_subrequests(rreq, was_async);
+	netfs_put_request(rreq, was_async);
 }
 
 /*
@@ -306,7 +305,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
 	atomic_inc(&rreq->nr_copy_ops);
 
 	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
-		if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
+		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
 			list_del_init(&subreq->rreq_link);
 			netfs_put_subrequest(subreq, false);
 		}
@@ -336,7 +335,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
 
 		atomic_inc(&rreq->nr_copy_ops);
 		netfs_stat(&netfs_n_rh_write);
-		netfs_get_read_subrequest(subreq);
+		netfs_get_subrequest(subreq);
 		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
 		cres->ops->write(cres, subreq->start, &iter,
 				 netfs_rreq_copy_terminated, subreq);
@@ -378,9 +377,9 @@ static void netfs_rreq_unlock(struct netfs_io_request *rreq)
 	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
 
 	if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
-		__clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
+		__clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
 		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-			__clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+			__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
 		}
 	}
 
@@ -408,7 +407,7 @@ static void netfs_rreq_unlock(struct netfs_io_request *rreq)
 				pg_failed = true;
 				break;
 			}
-			if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
+			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
 				folio_start_fscache(folio);
 			pg_failed |= subreq_failed;
 			if (pgend < iopos + subreq->len)
@@ -453,13 +452,13 @@ static void netfs_rreq_unlock(struct netfs_io_request *rreq)
 static void netfs_rreq_short_read(struct netfs_io_request *rreq,
 				  struct netfs_io_subrequest *subreq)
 {
-	__clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
+	__clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
 	__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);
 
 	netfs_stat(&netfs_n_rh_short_read);
 	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
 
-	netfs_get_read_subrequest(subreq);
+	netfs_get_subrequest(subreq);
 	atomic_inc(&rreq->nr_outstanding);
 	if (subreq->source == NETFS_READ_FROM_CACHE)
 		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
@@ -493,10 +492,10 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
 			subreq->error = 0;
 			netfs_stat(&netfs_n_rh_download_instead);
 			trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
-			netfs_get_read_subrequest(subreq);
+			netfs_get_subrequest(subreq);
 			atomic_inc(&rreq->nr_outstanding);
 			netfs_read_from_server(rreq, subreq);
-		} else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) {
+		} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
 			netfs_rreq_short_read(rreq, subreq);
 		}
 	}
@@ -553,7 +552,7 @@ again:
 	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
-	if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
+	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
 		return netfs_rreq_write_to_cache(rreq);
 
 	netfs_rreq_completed(rreq, was_async);
@@ -642,8 +641,8 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
 
 complete:
 	__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
-	if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
-		set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
+	if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
+		set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
 
 out:
 	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
@@ -674,7 +673,7 @@ incomplete:
 		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
 	}
 
-	__set_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
+	__set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
 	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
 	goto out;
 
@@ -878,7 +877,7 @@ void netfs_readahead(struct readahead_control *ractl,
 	if (readahead_count(ractl) == 0)
 		goto cleanup;
 
-	rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file);
+	rreq = netfs_alloc_request(ops, netfs_priv, ractl->file);
 	if (!rreq)
 		goto cleanup;
 	rreq->mapping = ractl->mapping;
@@ -916,7 +915,7 @@ void netfs_readahead(struct readahead_control *ractl,
 	return;
 
 cleanup_free:
-	netfs_put_read_request(rreq, false);
+	netfs_put_request(rreq, false);
 	return;
 cleanup:
 	if (netfs_priv)
@@ -953,7 +952,7 @@ int netfs_readpage(struct file *file,
 
 	_enter("%lx", folio_index(folio));
 
-	rreq = netfs_alloc_read_request(ops, netfs_priv, file);
+	rreq = netfs_alloc_request(ops, netfs_priv, file);
 	if (!rreq) {
 		if (netfs_priv)
 			ops->cleanup(folio_file_mapping(folio), netfs_priv);
@@ -975,7 +974,7 @@ int netfs_readpage(struct file *file,
 	netfs_stat(&netfs_n_rh_readpage);
 	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
 
-	netfs_get_read_request(rreq);
+	netfs_get_request(rreq);
 
 	atomic_set(&rreq->nr_outstanding, 1);
 	do {
@@ -989,7 +988,8 @@ int netfs_readpage(struct file *file,
 	 * process.
 	 */
 	do {
-		wait_var_event(&rreq->nr_outstanding, atomic_read(&rreq->nr_outstanding) == 1);
+		wait_var_event(&rreq->nr_outstanding,
+			       atomic_read(&rreq->nr_outstanding) == 1);
 		netfs_rreq_assess(rreq, false);
 	} while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
 
@@ -999,7 +999,7 @@ int netfs_readpage(struct file *file,
 		ret = -EIO;
 	}
 out:
-	netfs_put_read_request(rreq, false);
+	netfs_put_request(rreq, false);
 	return ret;
 }
 EXPORT_SYMBOL(netfs_readpage);
@@ -1122,7 +1122,7 @@ retry:
 	}
 
 	ret = -ENOMEM;
-	rreq = netfs_alloc_read_request(ops, netfs_priv, file);
+	rreq = netfs_alloc_request(ops, netfs_priv, file);
 	if (!rreq)
 		goto error;
 	rreq->mapping = folio_file_mapping(folio);
@@ -1146,7 +1146,7 @@ retry:
 	 */
 	ractl._nr_pages = folio_nr_pages(folio);
 	netfs_rreq_expand(rreq, &ractl);
-	netfs_get_read_request(rreq);
+	netfs_get_request(rreq);
 
 	/* We hold the folio locks, so we can drop the references */
 	folio_get(folio);
@@ -1160,12 +1160,13 @@ retry:
 
 	} while (rreq->submitted < rreq->len);
 
-	/* Keep nr_outstanding incremented so that the ref always belongs to us, and
-	 * the service code isn't punted off to a random thread pool to
+	/* Keep nr_outstanding incremented so that the ref always belongs to
+	 * us, and the service code isn't punted off to a random thread pool to
 	 * process.
 	 */
 	for (;;) {
-		wait_var_event(&rreq->nr_outstanding, atomic_read(&rreq->nr_outstanding) == 1);
+		wait_var_event(&rreq->nr_outstanding,
+			       atomic_read(&rreq->nr_outstanding) == 1);
 		netfs_rreq_assess(rreq, false);
 		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
 			break;
@@ -1177,7 +1178,7 @@ retry:
 		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
 		ret = -EIO;
 	}
-	netfs_put_read_request(rreq, false);
+	netfs_put_request(rreq, false);
 	if (ret < 0)
 		goto error;
 
@@ -1193,7 +1194,7 @@ have_folio_no_wait:
 	return 0;
 
 error_put:
-	netfs_put_read_request(rreq, false);
+	netfs_put_request(rreq, false);
 error:
 	folio_unlock(folio);
 	folio_put(folio);
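
For reference, the ->issue_read() hook (renamed here from ->issue_op()) is the
point where a network filesystem starts the I/O for one subrequest and later
hands the result back with netfs_subreq_terminated(), which read_helper.c
exports.  Below is a minimal filesystem-side sketch, not part of this patch:
my_fs_issue_read() and my_fs_do_read() are hypothetical names, while the netfs
structures, fields and calls are those used in the code above.

/* Sketch of a network filesystem's ->issue_read() implementation.
 * my_fs_do_read() is an assumed transport helper that reads into the
 * iterator and returns a byte count or a negative error.
 */
static void my_fs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct iov_iter iter;
	ssize_t ret;

	/* Map the slice of the pagecache this subrequest covers. */
	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len - subreq->transferred);

	/* Fetch the data from the server (synchronously, for simplicity). */
	ret = my_fs_do_read(rreq->inode, subreq->start, &iter);

	/* Report bytes transferred (or error); false = not in async context. */
	netfs_subreq_terminated(subreq, ret, false);
}

A short or failed read reported this way is what drives the resubmission
machinery above: netfs_subreq_terminated() marks the subrequest with
NETFS_SREQ_SHORT_IO or falls back from the cache to the server, and
netfs_rreq_perform_resubmissions() reissues it.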