@@ -443,7 +443,7 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
 	netfs_rreq_completed(rreq, was_async);
 }
 
-void netfs_rreq_work(struct work_struct *work)
+static void netfs_rreq_work(struct work_struct *work)
 {
 	struct netfs_io_request *rreq =
 		container_of(work, struct netfs_io_request, work);
@@ -688,6 +688,69 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
 	return false;
 }
 
+/*
+ * Begin the process of reading in a chunk of data, where that data may be
+ * stitched together from multiple sources, including multiple servers and the
+ * local cache.
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
+{
+	unsigned int debug_index = 0;
+	int ret;
+
+	_enter("R=%x %llx-%llx",
+	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+
+	if (rreq->len == 0) {
+		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+		netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
+		return -EIO;
+	}
+
+	rreq->work.func = netfs_rreq_work;
+
+	if (sync)
+		netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+
+	/* Chop the read into slices according to what the cache and the netfs
+	 * want and submit each one.
+	 */
+	atomic_set(&rreq->nr_outstanding, 1);
+	do {
+		if (!netfs_rreq_submit_slice(rreq, &debug_index))
+			break;
+
+	} while (rreq->submitted < rreq->len);
+
+	if (sync) {
+		/* Keep nr_outstanding incremented so that the ref always belongs to
+		 * us, and the service code isn't punted off to a random thread pool to
+		 * process.
+		 */
+		for (;;) {
+			wait_var_event(&rreq->nr_outstanding,
+				       atomic_read(&rreq->nr_outstanding) == 1);
+			netfs_rreq_assess(rreq, false);
+			if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+				break;
+			cond_resched();
+		}
+
+		ret = rreq->error;
+		if (ret == 0 && rreq->submitted < rreq->len) {
+			trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+			ret = -EIO;
+		}
+		netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+	} else {
+		/* If we decrement nr_outstanding to 0, the ref belongs to us. */
+		if (atomic_dec_and_test(&rreq->nr_outstanding))
+			netfs_rreq_assess(rreq, false);
+		ret = 0;
+	}
+	return ret;
+}
+
 static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
 					 loff_t *_start, size_t *_len, loff_t i_size)
 {
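The new helper centralises the submit-and-wait logic that the later hunks remove from netfs_readpage() and netfs_write_begin(). A minimal sketch of a synchronous caller, purely for illustration (myfs_issue_read() is hypothetical and not part of this patch; it assumes a request that has already been allocated and had its cache operation begun, as the real callers below do):

/* Hypothetical sketch, not part of the patch: drive a prepared request
 * synchronously.  netfs_begin_read(rreq, true) submits each slice, waits
 * for the outstanding count to fall back to 1, assesses the result and
 * returns rreq->error (or -EIO on a short read).  As in the patch's own
 * callers, rreq must not be touched again after the call returns.
 */
static int myfs_issue_read(struct netfs_io_request *rreq)
{
	return netfs_begin_read(rreq, true);
}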
@@ -750,7 +813,6 @@ void netfs_readahead(struct readahead_control *ractl)
 {
 	struct netfs_io_request *rreq;
 	struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
-	unsigned int debug_index = 0;
 	int ret;
 
 	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
@@ -777,22 +839,13 @@ void netfs_readahead(struct readahead_control *ractl)
 
 	netfs_rreq_expand(rreq, ractl);
 
-	atomic_set(&rreq->nr_outstanding, 1);
-	do {
-		if (!netfs_rreq_submit_slice(rreq, &debug_index))
-			break;
-
-	} while (rreq->submitted < rreq->len);
-
 	/* Drop the refs on the folios here rather than in the cache or
 	 * filesystem.  The locks will be dropped in netfs_rreq_unlock().
 	 */
 	while (readahead_folio(ractl))
 		;
 
-	/* If we decrement nr_outstanding to 0, the ref belongs to us. */
-	if (atomic_dec_and_test(&rreq->nr_outstanding))
-		netfs_rreq_assess(rreq, false);
+	netfs_begin_read(rreq, false);
 	return;
 
 cleanup_free:
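netfs_readahead() above uses the asynchronous convention: with sync == false, netfs_begin_read() returns 0 immediately after submitting the slices, and the request is assessed and released once the outstanding count drops to zero, either inside netfs_begin_read() itself or from the I/O completion path (possibly via netfs_rreq_work() on a workqueue). A hypothetical fire-and-forget caller would look like this (myfs_start_readahead() is illustrative only):

/* Hypothetical sketch, not part of the patch: hand a prepared request off
 * for asynchronous reading.  Ownership of rreq effectively passes with the
 * call; completion and cleanup happen from the assessment path.
 */
static void myfs_start_readahead(struct netfs_io_request *rreq)
{
	netfs_begin_read(rreq, false);
}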
@@ -821,7 +874,6 @@ int netfs_readpage(struct file *file, struct page *subpage)
 	struct address_space *mapping = folio->mapping;
 	struct netfs_io_request *rreq;
 	struct netfs_i_context *ctx = netfs_i_context(mapping->host);
-	unsigned int debug_index = 0;
 	int ret;
 
 	_enter("%lx", folio_index(folio));
@@ -836,42 +888,16 @@ int netfs_readpage(struct file *file, struct page *subpage)
 
 	if (ctx->ops->begin_cache_operation) {
 		ret = ctx->ops->begin_cache_operation(rreq);
-		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
-			folio_unlock(folio);
-			goto out;
-		}
+		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+			goto discard;
 	}
 
 	netfs_stat(&netfs_n_rh_readpage);
 	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
+	return netfs_begin_read(rreq, true);
 
-	netfs_get_request(rreq, netfs_rreq_trace_get_hold);
-
-	atomic_set(&rreq->nr_outstanding, 1);
-	do {
-		if (!netfs_rreq_submit_slice(rreq, &debug_index))
-			break;
-
-	} while (rreq->submitted < rreq->len);
-
-	/* Keep nr_outstanding incremented so that the ref always belongs to us, and
-	 * the service code isn't punted off to a random thread pool to
-	 * process.
-	 */
-	do {
-		wait_var_event(&rreq->nr_outstanding,
-			       atomic_read(&rreq->nr_outstanding) == 1);
-		netfs_rreq_assess(rreq, false);
-	} while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
-
-	ret = rreq->error;
-	if (ret == 0 && rreq->submitted < rreq->len) {
-		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
-		ret = -EIO;
-	}
-out:
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
-	return ret;
+discard:
+	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
 alloc_error:
 	folio_unlock(folio);
 	return ret;
@@ -966,7 +992,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 	struct netfs_io_request *rreq;
 	struct netfs_i_context *ctx = netfs_i_context(file_inode(file));
 	struct folio *folio;
-	unsigned int debug_index = 0, fgp_flags;
+	unsigned int fgp_flags;
 	pgoff_t index = pos >> PAGE_SHIFT;
 	int ret;
 
@@ -1029,39 +1055,13 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 	 */
 	ractl._nr_pages = folio_nr_pages(folio);
 	netfs_rreq_expand(rreq, &ractl);
-	netfs_get_request(rreq, netfs_rreq_trace_get_hold);
 
 	/* We hold the folio locks, so we can drop the references */
 	folio_get(folio);
 	while (readahead_folio(&ractl))
 		;
 
-	atomic_set(&rreq->nr_outstanding, 1);
-	do {
-		if (!netfs_rreq_submit_slice(rreq, &debug_index))
-			break;
-
-	} while (rreq->submitted < rreq->len);
-
-	/* Keep nr_outstanding incremented so that the ref always belongs to
-	 * us, and the service code isn't punted off to a random thread pool to
-	 * process.
-	 */
-	for (;;) {
-		wait_var_event(&rreq->nr_outstanding,
-			       atomic_read(&rreq->nr_outstanding) == 1);
-		netfs_rreq_assess(rreq, false);
-		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-			break;
-		cond_resched();
-	}
-
-	ret = rreq->error;
-	if (ret == 0 && rreq->submitted < rreq->len) {
-		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
-		ret = -EIO;
-	}
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+	ret = netfs_begin_read(rreq, true);
 	if (ret < 0)
 		goto error;
 