@@ -88,6 +88,8 @@ static void netfs_free_read_request(struct work_struct *work)
 	if (rreq->netfs_priv)
 		rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
 	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+	if (rreq->cache_resources.ops)
+		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
 	kfree(rreq);
 	netfs_stat_d(&netfs_n_rh_rreq);
 }
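Note on the hunk above: end_operation() is the release half of the ->begin_cache_operation() hook that later hunks call from netfs_readahead(), netfs_readpage() and netfs_write_begin(). A minimal sketch of the backend side, assuming the backend parked a pinned object pointer in the cache_resources at begin time; the mycache_* names are illustrative, not part of this patch:

static void mycache_end_operation(struct netfs_cache_resources *cres)
{
	/* Release whatever begin_cache_operation() pinned (hypothetical
	 * helper and private-pointer use; netfs itself only dereferences
	 * cres->ops).
	 */
	mycache_put_object(cres->cache_priv);
	memset(cres, 0, sizeof(*cres));		/* leaves ->ops NULL */
}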
@@ -154,6 +156,34 @@ static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
 	iov_iter_zero(iov_iter_count(&iter), &iter);
 }
 
+static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
+					bool was_async)
+{
+	struct netfs_read_subrequest *subreq = priv;
+
+	netfs_subreq_terminated(subreq, transferred_or_error, was_async);
+}
+
+/*
+ * Issue a read against the cache.
+ * - Eats the caller's ref on subreq.
+ */
+static void netfs_read_from_cache(struct netfs_read_request *rreq,
+				  struct netfs_read_subrequest *subreq,
+				  bool seek_data)
+{
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
+	struct iov_iter iter;
+
+	netfs_stat(&netfs_n_rh_read);
+	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
+			subreq->start + subreq->transferred,
+			subreq->len - subreq->transferred);
+
+	cres->ops->read(cres, subreq->start, &iter, seek_data,
+			netfs_cache_read_terminated, subreq);
+}
+
 /*
  * Fill a subrequest region with zeroes.
  */
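The iterator built above spans the inode's pagecache (i_pages), so the cache backend copies data directly into the pages being read, and offsetting by subreq->transferred lets a short read be resumed mid-subrequest. On the other side of the call, a backend ->read() might look like the sketch below; the mycache_* names are invented, and the netfs_io_terminated_t callback type is inferred from how netfs_cache_read_terminated() is passed here:

static int mycache_read(struct netfs_cache_resources *cres,
			loff_t start_pos,
			struct iov_iter *iter,
			bool seek_data,
			netfs_io_terminated_t term_func,
			void *term_func_priv)
{
	ssize_t ret;

	/* seek_data is set on the short-read resubmission path: skip a
	 * leading hole in the cache file much as SEEK_DATA would.
	 */
	ret = mycache_backend_read(cres->cache_priv, start_pos, iter, seek_data);

	/* Reports into netfs_subreq_terminated() via the wrapper above. */
	term_func(term_func_priv, ret, false);
	return ret < 0 ? ret : 0;
}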
@@ -198,6 +228,141 @@ static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async
 	netfs_put_read_request(rreq, was_async);
 }
 
+/*
+ * Deal with the completion of writing the data to the cache. We have to clear
+ * the PG_fscache bits on the pages involved and release the caller's ref.
+ *
+ * May be called in softirq mode and we inherit a ref from the caller.
+ */
+static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
+					  bool was_async)
+{
+	struct netfs_read_subrequest *subreq;
+	struct page *page;
+	pgoff_t unlocked = 0;
+	bool have_unlocked = false;
+
+	rcu_read_lock();
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
+
+		xas_for_each(&xas, page, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+			/* We might have multiple writes from the same huge
+			 * page, but we mustn't unlock a page more than once.
+			 */
+			if (have_unlocked && page->index <= unlocked)
+				continue;
+			unlocked = page->index;
+			end_page_fscache(page);
+			have_unlocked = true;
+		}
+	}
+
+	rcu_read_unlock();
+	netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
+				       bool was_async)
+{
+	struct netfs_read_subrequest *subreq = priv;
+	struct netfs_read_request *rreq = subreq->rreq;
+
+	if (IS_ERR_VALUE(transferred_or_error)) {
+		netfs_stat(&netfs_n_rh_write_failed);
+	} else {
+		netfs_stat(&netfs_n_rh_write_done);
+	}
+
+	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
+
+	/* If we decrement nr_wr_ops to 0, the ref belongs to us. */
+	if (atomic_dec_and_test(&rreq->nr_wr_ops))
+		netfs_rreq_unmark_after_write(rreq, was_async);
+
+	netfs_put_subrequest(subreq, was_async);
+}
+
+/*
+ * Perform any outstanding writes to the cache. We inherit a ref from the
+ * caller.
+ */
+static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
+{
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
+	struct netfs_read_subrequest *subreq, *next, *p;
+	struct iov_iter iter;
+	int ret;
+
+	trace_netfs_rreq(rreq, netfs_rreq_trace_write);
+
+	/* We don't want terminating writes trying to wake us up whilst we're
+	 * still going through the list.
+	 */
+	atomic_inc(&rreq->nr_wr_ops);
+
+	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
+		if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
+			list_del_init(&subreq->rreq_link);
+			netfs_put_subrequest(subreq, false);
+		}
+	}
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		/* Amalgamate adjacent writes */
+		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+			next = list_next_entry(subreq, rreq_link);
+			if (next->start != subreq->start + subreq->len)
+				break;
+			subreq->len += next->len;
+			list_del_init(&next->rreq_link);
+			netfs_put_subrequest(next, false);
+		}
+
+		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+					       rreq->i_size);
+		if (ret < 0) {
+			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+			continue;
+		}
+
+		iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
+				subreq->start, subreq->len);
+
+		atomic_inc(&rreq->nr_wr_ops);
+		netfs_stat(&netfs_n_rh_write);
+		netfs_get_read_subrequest(subreq);
+		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
+		cres->ops->write(cres, subreq->start, &iter,
+				 netfs_rreq_copy_terminated, subreq);
+	}
+
+	/* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */
+	if (atomic_dec_and_test(&rreq->nr_wr_ops))
+		netfs_rreq_unmark_after_write(rreq, false);
+}
+
+static void netfs_rreq_write_to_cache_work(struct work_struct *work)
+{
+	struct netfs_read_request *rreq =
+		container_of(work, struct netfs_read_request, work);
+
+	netfs_rreq_do_write_to_cache(rreq);
+}
+
+static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
+				      bool was_async)
+{
+	if (was_async) {
+		rreq->work.func = netfs_rreq_write_to_cache_work;
+		if (!queue_work(system_unbound_wq, &rreq->work))
+			BUG();
+	} else {
+		netfs_rreq_do_write_to_cache(rreq);
+	}
+}
+
 /*
  * Unlock the pages in a read operation. We need to set PG_fscache on any
  * pages we're going to write back before we unlock them.
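The nr_wr_ops handling in netfs_rreq_do_write_to_cache() above is a bias-reference pattern: one extra count is held across submission so that a write which completes quickly cannot drop the counter to zero, and thereby trigger netfs_rreq_unmark_after_write(), while the subrequest list is still being walked. The same idea in a runnable userspace miniature (all names here are invented for illustration):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int nr_ops;

static void complete_one(void)
{
	/* Mirrors atomic_dec_and_test(): last one out does the cleanup. */
	if (atomic_fetch_sub(&nr_ops, 1) == 1)
		puts("all ops done: unmark pages / finish request here");
}

int main(void)
{
	atomic_fetch_add(&nr_ops, 1);		/* bias ref: blocks completion */
	for (int i = 0; i < 3; i++)
		atomic_fetch_add(&nr_ops, 1);	/* one ref per submitted write */
	for (int i = 0; i < 3; i++)
		complete_one();			/* completions may race in */
	complete_one();				/* drop the bias ref last */
	return 0;
}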
@@ -299,7 +464,10 @@ static void netfs_rreq_short_read(struct netfs_read_request *rreq,
 
 	netfs_get_read_subrequest(subreq);
 	atomic_inc(&rreq->nr_rd_ops);
-	netfs_read_from_server(rreq, subreq);
+	if (subreq->source == NETFS_READ_FROM_CACHE)
+		netfs_read_from_cache(rreq, subreq, true);
+	else
+		netfs_read_from_server(rreq, subreq);
 }
 
 /*
@@ -344,6 +512,25 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq)
 	return false;
 }
 
+/*
+ * Check to see if the data read is still valid.
+ */
+static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq)
+{
+	struct netfs_read_subrequest *subreq;
+
+	if (!rreq->netfs_ops->is_still_valid ||
+	    rreq->netfs_ops->is_still_valid(rreq))
+		return;
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		if (subreq->source == NETFS_READ_FROM_CACHE) {
+			subreq->error = -ESTALE;
+			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+		}
+	}
+}
+
 /*
  * Assess the state of a read request and decide what to do next.
  *
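->is_still_valid lets the netfs revoke cache reads after the fact: if the consistency data changed while the operation was in flight, every slice served from the cache is failed with -ESTALE and NETFS_RREQ_INCOMPLETE_IO pushes those slices back through the resubmission path. A hypothetical implementation, assuming the filesystem tracks a per-inode data version (the myfs_* names and fields are invented):

static bool myfs_is_still_valid(struct netfs_read_request *rreq)
{
	struct myfs_inode *mi = MYFS_I(rreq->inode);

	/* Cached data is usable only if the server hasn't announced a
	 * newer version since this operation began.
	 */
	return mi->version_at_begin == mi->data_version;
}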
@@ -355,6 +542,8 @@ static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
 	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
 
 again:
+	netfs_rreq_is_still_valid(rreq);
+
 	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
 	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
 		if (netfs_rreq_perform_resubmissions(rreq))
@@ -367,6 +556,9 @@ static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
 	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
+	if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
+		return netfs_rreq_write_to_cache(rreq, was_async);
+
 	netfs_rreq_completed(rreq, was_async);
 }
 
@@ -504,7 +696,10 @@ static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequ
 						       loff_t i_size)
 {
 	struct netfs_read_request *rreq = subreq->rreq;
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
 
+	if (cres->ops)
+		return cres->ops->prepare_read(subreq, i_size);
 	if (subreq->start >= rreq->i_size)
 		return NETFS_FILL_WITH_ZEROES;
 	return NETFS_DOWNLOAD_FROM_SERVER;
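Once cache resources are attached, source selection is delegated to the cache's ->prepare_read(). A sketch of what a backend might do; mycache_has_data() is an invented helper that reports whether a uniform extent is present and trims subreq->len to it, while the enum values are the ones this file already dispatches on:

static enum netfs_read_source
mycache_prepare_read(struct netfs_read_subrequest *subreq, loff_t i_size)
{
	struct netfs_cache_resources *cres = &subreq->rreq->cache_resources;

	if (subreq->start >= i_size)
		return NETFS_FILL_WITH_ZEROES;
	if (mycache_has_data(cres->cache_priv, subreq->start, &subreq->len))
		return NETFS_READ_FROM_CACHE;
	return NETFS_DOWNLOAD_FROM_SERVER;
}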
@@ -595,6 +790,9 @@ static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
 	case NETFS_DOWNLOAD_FROM_SERVER:
 		netfs_read_from_server(rreq, subreq);
 		break;
+	case NETFS_READ_FROM_CACHE:
+		netfs_read_from_cache(rreq, subreq, false);
+		break;
 	default:
 		BUG();
 	}
@@ -607,9 +805,23 @@ static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
 	return false;
 }
 
+static void netfs_cache_expand_readahead(struct netfs_read_request *rreq,
+					 loff_t *_start, size_t *_len, loff_t i_size)
+{
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+	if (cres->ops && cres->ops->expand_readahead)
+		cres->ops->expand_readahead(cres, _start, _len, i_size);
+}
+
 static void netfs_rreq_expand(struct netfs_read_request *rreq,
 			      struct readahead_control *ractl)
 {
+	/* Give the cache a chance to change the request parameters. The
+	 * resultant request must contain the original region.
+	 */
+	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
+
 	/* Give the netfs a chance to change the request parameters. The
 	 * resultant request must contain the original region.
 	 */
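A cache typically stores data in fixed-size granules and would need a read-modify-write to fill a partial one, which is what the new expansion hook lets it avoid. A sketch under that assumption (the granule size and the mycache_* name are illustrative); per the comment above, the hook should only ever grow the window so the result still contains the original region:

#define MYCACHE_GRANULE	262144	/* illustrative 256KiB cache block */

static void mycache_expand_readahead(struct netfs_cache_resources *cres,
				     loff_t *_start, size_t *_len,
				     loff_t i_size)
{
	loff_t start = round_down(*_start, MYCACHE_GRANULE);
	loff_t end = round_up(*_start + *_len, MYCACHE_GRANULE);

	/* A real backend would additionally bound this by i_size and by
	 * its own limits before committing to it.
	 */
	*_start = start;
	*_len = end - start;
}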
@@ -661,6 +873,7 @@ void netfs_readahead(struct readahead_control *ractl,
 	struct netfs_read_request *rreq;
 	struct page *page;
 	unsigned int debug_index = 0;
+	int ret;
 
 	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
 
@@ -674,6 +887,12 @@ void netfs_readahead(struct readahead_control *ractl,
 	rreq->start = readahead_pos(ractl);
 	rreq->len = readahead_length(ractl);
 
+	if (ops->begin_cache_operation) {
+		ret = ops->begin_cache_operation(rreq);
+		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+			goto cleanup_free;
+	}
+
 	netfs_stat(&netfs_n_rh_readahead);
 	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
 			 netfs_read_trace_readahead);
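The error filter above is deliberate: only -ENOMEM, -EINTR and -ERESTARTSYS abort the operation, so a cache refusing service (say with -ENOBUFS) simply leaves rreq->cache_resources.ops NULL and the request falls through to the server. A hypothetical hook under that reading (the myfs_*/mycache_* names are invented):

static int myfs_begin_cache_operation(struct netfs_read_request *rreq)
{
	struct myfs_inode *mi = MYFS_I(rreq->inode);

	if (!mi->cache_cookie)
		return -ENOBUFS;	/* uncached: not fatal to the read */

	/* On success this fills in rreq->cache_resources.ops and pins the
	 * state that ->end_operation() will later release.
	 */
	return mycache_begin_read(rreq, mi->cache_cookie);
}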
@@ -698,6 +917,9 @@ void netfs_readahead(struct readahead_control *ractl,
 	netfs_rreq_assess(rreq, false);
 	return;
 
+cleanup_free:
+	netfs_put_read_request(rreq, false);
+	return;
 cleanup:
 	if (netfs_priv)
 		ops->cleanup(ractl->mapping, netfs_priv);
@@ -744,6 +966,14 @@ int netfs_readpage(struct file *file,
 	rreq->start = page_index(page) * PAGE_SIZE;
 	rreq->len = thp_size(page);
 
+	if (ops->begin_cache_operation) {
+		ret = ops->begin_cache_operation(rreq);
+		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
+			unlock_page(page);
+			goto out;
+		}
+	}
+
 	netfs_stat(&netfs_n_rh_readpage);
 	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
 
@@ -768,6 +998,7 @@ int netfs_readpage(struct file *file,
 	ret = rreq->error;
 	if (ret == 0 && rreq->submitted < rreq->len)
 		ret = -EIO;
+out:
 	netfs_put_read_request(rreq, false);
 	return ret;
 }
@@ -873,6 +1104,12 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 	__set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags);
 	netfs_priv = NULL;
 
+	if (ops->begin_cache_operation) {
+		ret = ops->begin_cache_operation(rreq);
+		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+			goto error_put;
+	}
+
 	netfs_stat(&netfs_n_rh_write_begin);
 	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
 