@@ -772,3 +772,167 @@ int netfs_readpage(struct file *file,
 	return ret;
 }
 EXPORT_SYMBOL(netfs_readpage);
+
+static void netfs_clear_thp(struct page *page)
+{
+	unsigned int i;
+
+	for (i = 0; i < thp_nr_pages(page); i++)
+		clear_highpage(page + i);
+}
+
+/**
+ * netfs_write_begin - Helper to prepare for writing
+ * @file: The file to read from
+ * @mapping: The mapping to read from
+ * @pos: File position at which the write will begin
+ * @len: The length of the write in this page
+ * @flags: AOP_* flags
+ * @_page: Where to put the resultant page
+ * @_fsdata: Place for the netfs to store a cookie
+ * @ops: The network filesystem's operations for the helper to use
+ * @netfs_priv: Private netfs data to be retained in the request
+ *
+ * Pre-read data for a write-begin request by drawing data from the cache if
+ * possible, or from the netfs if not.  Space beyond the EOF is zero-filled.
+ * Multiple I/O requests from different sources will get munged together.  If
+ * necessary, the readahead window can be expanded in either direction to a
+ * more convenient alignment for RPC efficiency or to make storage in the cache
+ * feasible.
+ *
+ * The calling netfs must provide a table of operations, only one of which,
+ * issue_op, is mandatory.
+ *
+ * The check_write_begin() operation can be provided to check for and flush
+ * conflicting writes once the page is grabbed and locked.  It is passed a
+ * pointer to the fsdata cookie that gets returned to the VM to be passed to
+ * write_end.  It is permitted to sleep.  It should return 0 if the request
+ * should go ahead; it may unlock the page and return -EAGAIN to cause the
+ * page to be re-grabbed; or it may return an error.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+int netfs_write_begin(struct file *file, struct address_space *mapping,
+		      loff_t pos, unsigned int len, unsigned int flags,
+		      struct page **_page, void **_fsdata,
+		      const struct netfs_read_request_ops *ops,
+		      void *netfs_priv)
+{
+	struct netfs_read_request *rreq;
+	struct page *page, *xpage;
+	struct inode *inode = file_inode(file);
+	unsigned int debug_index = 0;
+	pgoff_t index = pos >> PAGE_SHIFT;
+	int pos_in_page = pos & ~PAGE_MASK;
+	loff_t size;
+	int ret;
+
+	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
+
+retry:
+	page = grab_cache_page_write_begin(mapping, index, 0);
+	if (!page)
+		return -ENOMEM;
+
+	if (ops->check_write_begin) {
+		/* Allow the netfs (eg. ceph) to flush conflicts. */
+		ret = ops->check_write_begin(file, pos, len, page, _fsdata);
+		if (ret < 0) {
+			if (ret == -EAGAIN)
+				goto retry;
+			goto error;
+		}
+	}
+
+	if (PageUptodate(page))
+		goto have_page;
+
+	/* If the page is beyond the EOF, we want to clear it - unless it's
+	 * within the cache granule containing the EOF, in which case we need
+	 * to preload the granule.
+	 */
+	size = i_size_read(inode);
+	if (!ops->is_cache_enabled(inode) &&
+	    ((pos_in_page == 0 && len == thp_size(page)) ||
+	     (pos >= size) ||
+	     (pos_in_page == 0 && (pos + len) >= size))) {
+		netfs_clear_thp(page);
+		SetPageUptodate(page);
+		netfs_stat(&netfs_n_rh_write_zskip);
+		goto have_page_no_wait;
+	}
+
+	ret = -ENOMEM;
+	rreq = netfs_alloc_read_request(ops, netfs_priv, file);
+	if (!rreq)
+		goto error;
+	rreq->mapping		= page->mapping;
+	rreq->start		= page->index * PAGE_SIZE;
+	rreq->len		= thp_size(page);
+	rreq->no_unlock_page	= page->index;
+	__set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags);
+	netfs_priv = NULL;
+
+	netfs_stat(&netfs_n_rh_write_begin);
+	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
+
+	/* Expand the request to meet caching requirements and download
+	 * preferences.
+	 */
+	ractl._nr_pages = thp_nr_pages(page);
+	netfs_rreq_expand(rreq, &ractl);
+	netfs_get_read_request(rreq);
+
+	/* We hold the page locks, so we can drop the references */
+	while ((xpage = readahead_page(&ractl)))
+		if (xpage != page)
+			put_page(xpage);
+
+	atomic_set(&rreq->nr_rd_ops, 1);
+	do {
+		if (!netfs_rreq_submit_slice(rreq, &debug_index))
+			break;
+
+	} while (rreq->submitted < rreq->len);
+
+	/* Keep nr_rd_ops incremented so that the ref always belongs to us, and
+	 * the service code isn't punted off to a random thread pool to
+	 * process.
+	 */
+	for (;;) {
+		wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
+		netfs_rreq_assess(rreq, false);
+		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+			break;
+		cond_resched();
+	}
+
+	ret = rreq->error;
+	if (ret == 0 && rreq->submitted < rreq->len)
+		ret = -EIO;
+	netfs_put_read_request(rreq, false);
+	if (ret < 0)
+		goto error;
+
+have_page:
+	ret = wait_on_page_fscache_killable(page);
+	if (ret < 0)
+		goto error;
+have_page_no_wait:
+	if (netfs_priv)
+		ops->cleanup(netfs_priv, mapping);
+	*_page = page;
+	_leave(" = 0");
+	return 0;
+
+error_put:
+	netfs_put_read_request(rreq, false);
+error:
+	unlock_page(page);
+	put_page(page);
+	if (netfs_priv)
+		ops->cleanup(netfs_priv, mapping);
+	_leave(" = %d", ret);
+	return ret;
+}
+EXPORT_SYMBOL(netfs_write_begin);
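
For context, the sketch below shows how a network filesystem might wire this helper into its ->write_begin address_space operation. It is not part of this commit: the myfs_* names are hypothetical, only ops that this patch exercises or documents (is_cache_enabled, issue_op) are filled in, and the field signatures plus netfs_subreq_terminated() are assumed to match the netfs header added earlier in this series.

#include <linux/fs.h>
#include <linux/netfs.h>

/* Issue a read for one subrequest (the only mandatory op).  A real
 * implementation would send an RPC covering subreq->start/subreq->len and
 * call netfs_subreq_terminated() from its completion handler.
 */
static void myfs_issue_op(struct netfs_read_subrequest *subreq)
{
	netfs_subreq_terminated(subreq, -EIO, false);	/* placeholder only */
}

/* Tell the helper whether fscache is in use for this inode. */
static bool myfs_is_cache_enabled(struct inode *inode)
{
	return false;	/* assume no local cache in this sketch */
}

static const struct netfs_read_request_ops myfs_req_ops = {
	.is_cache_enabled	= myfs_is_cache_enabled,
	.issue_op		= myfs_issue_op,
};

/* ->write_begin for myfs: let the helper pre-read or zero-fill the page
 * before the buffered write modifies it.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int flags,
			    struct page **pagep, void **fsdata)
{
	return netfs_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, &myfs_req_ops, NULL);
}

Because is_cache_enabled() returns false and a whole-page or beyond-EOF write is detected by the helper, such a filesystem still benefits from the zero-fill fast path above without issuing any read RPCs.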
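The optional check_write_begin() hook described in the kerneldoc can also be used simply to plant the fsdata cookie that later arrives at ->write_end. The sketch below is illustrative, not from this commit: myfs_check_write_begin() is hypothetical, it records whether the page was already up to date, and it leaves the conflict-flush and -EAGAIN handling to a comment, since the exact page lock and reference handling on that path is filesystem-specific.

#include <linux/pagemap.h>

/* Optional op: called with the target page grabbed and locked, before any
 * pre-read is issued.  It may sleep.  Returning 0 lets the write go ahead;
 * per the kerneldoc above, an unresolvable conflict would instead unlock
 * the page and return -EAGAIN so the helper re-grabs it, or return an error.
 */
static int myfs_check_write_begin(struct file *file, loff_t pos,
				  unsigned int len, struct page *page,
				  void **_fsdata)
{
	/* Stash a cookie for ->write_end: here, just whether the page was
	 * already up to date before the helper (possibly) filled it.  The
	 * value comes back as write_end's fsdata argument.
	 */
	*_fsdata = (void *)(unsigned long)PageUptodate(page);
	return 0;
}

With this in place, the ops table in the previous sketch would also set .check_write_begin = myfs_check_write_begin, and the filesystem's ->write_end could branch on the fsdata value it receives.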