@@ -291,9 +291,15 @@ static void bch2_page_state_release(struct page *page)
291
291
__bch2_page_state_release (page );
292
292
}
293
293
294
- /* for newly allocated pages: */
295
- static struct bch_page_state * __bch2_page_state_create (struct page * page ,
296
- gfp_t gfp )
294
+ /**
295
+ * __bch2_page_state_alloc - allocate the bcachefs page state
296
+ * @page: page to attach the bcachefs page state to
297
+ * @gfp: additional memory allocation flags
298
+ *
299
+ * Allocate the bcachefs page private data for the given page.
300
+ */
301
+ static struct bch_page_state * __bch2_page_state_alloc (struct page * page ,
302
+ gfp_t gfp )
297
303
{
298
304
struct bch_page_state * s ;
299
305
@@ -306,10 +312,79 @@ static struct bch_page_state *__bch2_page_state_create(struct page *page,
306
312
return s ;
307
313
}
308
314
309
- static struct bch_page_state * bch2_page_state_create (struct page * page ,
315
+ /**
316
+ * __bch2_page_state_create - internal create function for bcachefs page state
317
+ * @c: bcachefs filesystem of the backing sectors
318
+ * @page: page to attach the bcachefs page state to
319
+ * @gfp: additional memory allocation flags
320
+ *
321
+ * Allocate the bcachefs ptge private data for the given page and set the
322
+ * page state to that of the corresponding sectors in the btree.
323
+ */
324
+ static struct bch_page_state * __bch2_page_state_create (struct bch_fs * c ,
325
+ struct page * page ,
326
+ gfp_t gfp )
327
+ {
328
+ struct bch_inode_info * inode = to_bch_ei (page -> mapping -> host );
329
+ struct bch_page_state * s ;
330
+ struct btree_iter * iter ;
331
+ struct btree_trans trans ;
332
+ struct bkey_s_c k ;
333
+ loff_t start = page -> index << PAGE_SECTOR_SHIFT ;
334
+ int i , ret ;
335
+
336
+ s = __bch2_page_state_alloc (page , gfp );
337
+ if (!s )
338
+ return NULL ;
339
+
340
+ bch2_trans_init (& trans , c , BTREE_ITER_MAX , 0 );
341
+
342
+ iter = bch2_trans_get_iter (& trans , BTREE_ID_extents ,
343
+ POS (inode -> v .i_ino , start ),
344
+ BTREE_ITER_SLOTS | BTREE_ITER_INTENT );
345
+
346
+ ret = PTR_ERR_OR_ZERO (iter );
347
+
348
+ if (ret ) {
349
+ kfree (s );
350
+ return NULL ;
351
+ }
352
+
353
+ for (i = 0 ; i < PAGE_SECTORS ; ++ i ) {
354
+ bch2_btree_iter_set_pos (iter , POS (inode -> v .i_ino , start + i ));
355
+ k = bch2_btree_iter_peek_slot (iter );
356
+ ret = bkey_err (k );
357
+
358
+ if (!ret && bkey_extent_is_allocation (k .k )) {
359
+ s -> s [i ].nr_replicas = k .k -> type == KEY_TYPE_reflink_v
360
+ ? 0 : bch2_bkey_nr_ptrs_fully_allocated (k );
361
+ s -> s [i ].state = k .k -> type == KEY_TYPE_reservation
362
+ ? SECTOR_RESERVED
363
+ : SECTOR_ALLOCATED ;
364
+ }
365
+ }
366
+
367
+ bch2_trans_iter_put (& trans , iter );
368
+ bch2_trans_exit (& trans );
369
+
370
+ return s ;
371
+ }
372
+
373
/**
 * bch2_page_state_create - create bcachefs page state if not present
 * @c: bcachefs filesystem of the backing sectors
 * @page: page to attach the bcachefs page state to
 * @gfp: additional memory allocation flags
 *
 * Allocate the bcachefs page private data for the given page if none
 * currently exists. If the page state already exists, return the current
 * page state; otherwise delegate to __bch2_page_state_create(), which may
 * return NULL on failure.
 */
static struct bch_page_state *bch2_page_state_create(struct bch_fs *c,
						     struct page *page,
						     gfp_t gfp)
{
	return bch2_page_state(page) ?: __bch2_page_state_create(c, page, gfp);
}
314
389
315
390
static inline unsigned inode_nr_replicas (struct bch_fs * c , struct bch_inode_info * inode )
@@ -332,7 +407,7 @@ static int bch2_get_page_disk_reservation(struct bch_fs *c,
332
407
struct bch_inode_info * inode ,
333
408
struct page * page , bool check_enospc )
334
409
{
335
- struct bch_page_state * s = bch2_page_state_create (page , 0 );
410
+ struct bch_page_state * s = bch2_page_state_create (c , page , 0 );
336
411
unsigned nr_replicas = inode_nr_replicas (c , inode );
337
412
struct disk_reservation disk_res = { 0 };
338
413
unsigned i , disk_res_sectors = 0 ;
@@ -389,7 +464,7 @@ static int bch2_page_reservation_get(struct bch_fs *c,
389
464
struct bch2_page_reservation * res ,
390
465
unsigned offset , unsigned len , bool check_enospc )
391
466
{
392
- struct bch_page_state * s = bch2_page_state_create (page , 0 );
467
+ struct bch_page_state * s = bch2_page_state_create (c , page , 0 );
393
468
unsigned i , disk_sectors = 0 , quota_sectors = 0 ;
394
469
int ret ;
395
470
@@ -470,7 +545,7 @@ static void bch2_set_page_dirty(struct bch_fs *c,
470
545
unsigned offset , unsigned len )
471
546
{
472
547
struct bch_page_state * s = bch2_page_state (page );
473
- unsigned i , dirty_sectors = 0 ;
548
+ unsigned int i , unallocated_sectors = 0 ;
474
549
475
550
WARN_ON ((u64 ) page_offset (page ) + offset + len >
476
551
round_up ((u64 ) i_size_read (& inode -> v ), block_bytes (c )));
@@ -492,16 +567,26 @@ static void bch2_set_page_dirty(struct bch_fs *c,
492
567
s -> s [i ].replicas_reserved += sectors ;
493
568
res -> disk .sectors -= sectors ;
494
569
495
- if (s -> s [i ].state == SECTOR_UNALLOCATED )
496
- dirty_sectors ++ ;
497
-
498
- s -> s [i ].state = max_t (unsigned , s -> s [i ].state , SECTOR_DIRTY );
570
+ switch (s -> s [i ].state ) {
571
+ case SECTOR_UNALLOCATED :
572
+ unallocated_sectors ++ ;
573
+ s -> s [i ].state = SECTOR_DIRTY ;
574
+ break ;
575
+ case SECTOR_RESERVED :
576
+ s -> s [i ].state = SECTOR_ALLOCATED ;
577
+ break ;
578
+ case SECTOR_DIRTY :
579
+ case SECTOR_ALLOCATED :
580
+ continue ;
581
+ default :
582
+ BUG ();
583
+ }
499
584
}
500
585
501
586
spin_unlock (& s -> lock );
502
587
503
- if (dirty_sectors )
504
- i_sectors_acct (c , inode , & res -> quota , dirty_sectors );
588
+ if (unallocated_sectors )
589
+ i_sectors_acct (c , inode , & res -> quota , unallocated_sectors );
505
590
506
591
if (!PageDirty (page ))
507
592
__set_page_dirty_nobuffers (page );
@@ -687,7 +772,7 @@ static int readpages_iter_init(struct readpages_iter *iter,
687
772
688
773
nr_pages = __readahead_batch (ractl , iter -> pages , nr_pages );
689
774
for (i = 0 ; i < nr_pages ; i ++ ) {
690
- __bch2_page_state_create (iter -> pages [i ], __GFP_NOFAIL );
775
+ __bch2_page_state_alloc (iter -> pages [i ], __GFP_NOFAIL );
691
776
put_page (iter -> pages [i ]);
692
777
}
693
778
@@ -767,7 +852,7 @@ static void readpage_bio_extend(struct readpages_iter *iter,
767
852
if (!page )
768
853
break ;
769
854
770
- if (!__bch2_page_state_create (page , 0 )) {
855
+ if (!__bch2_page_state_alloc (page , 0 )) {
771
856
put_page (page );
772
857
break ;
773
858
}
@@ -922,7 +1007,7 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
922
1007
struct btree_trans trans ;
923
1008
struct btree_iter * iter ;
924
1009
925
- bch2_page_state_create (page , __GFP_NOFAIL );
1010
+ bch2_page_state_create (c , page , __GFP_NOFAIL );
926
1011
927
1012
bio_set_op_attrs (& rbio -> bio , REQ_OP_READ , REQ_SYNC );
928
1013
rbio -> bio .bi_iter .bi_sector =
@@ -1156,7 +1241,7 @@ static int __bch2_writepage(struct page *page,
1156
1241
*/
1157
1242
zero_user_segment (page , offset , PAGE_SIZE );
1158
1243
do_io :
1159
- s = bch2_page_state_create (page , __GFP_NOFAIL );
1244
+ s = bch2_page_state_create (c , page , __GFP_NOFAIL );
1160
1245
1161
1246
ret = bch2_get_page_disk_reservation (c , inode , page , true);
1162
1247
if (ret ) {
@@ -2199,7 +2284,7 @@ static int __bch2_truncate_page(struct bch_inode_info *inode,
2199
2284
}
2200
2285
}
2201
2286
2202
- s = bch2_page_state_create (page , 0 );
2287
+ s = bch2_page_state_create (c , page , 0 );
2203
2288
if (!s ) {
2204
2289
ret = - ENOMEM ;
2205
2290
goto unlock ;
0 commit comments