@@ -42,15 +42,15 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
 {
 	__be16 data;
-	// optimize later...
+	/* TODO: optimize later... */
 	hfs_bnode_read(node, &data, off, 2);
 	return be16_to_cpu(data);
 }
 
 u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
 {
 	u8 data;
-	// optimize later...
+	/* TODO: optimize later... */
 	hfs_bnode_read(node, &data, off, 1);
 	return data;
 }
@@ -96,7 +96,7 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
 void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
 {
 	__be16 v = cpu_to_be16(data);
-	// optimize later...
+	/* TODO: optimize later... */
 	hfs_bnode_write(node, &v, off, 2);
 }
 
@@ -212,7 +212,8 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 				dst_page--;
 			}
 			src -= len;
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, len);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, len);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
@@ -250,14 +251,16 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 
 		if (src == dst) {
 			l = min(len, (int)PAGE_CACHE_SIZE - src);
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, l);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
 
 			while ((len -= l) != 0) {
 				l = min(len, (int)PAGE_CACHE_SIZE);
-				memmove(kmap(*++dst_page), kmap(*++src_page), l);
+				memmove(kmap(*++dst_page),
+					kmap(*++src_page), l);
 				kunmap(*src_page);
 				set_page_dirty(*dst_page);
 				kunmap(*dst_page);
@@ -268,7 +271,8 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 			do {
 				src_ptr = kmap(*src_page) + src;
 				dst_ptr = kmap(*dst_page) + dst;
-				if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
+				if (PAGE_CACHE_SIZE - src <
+				    PAGE_CACHE_SIZE - dst) {
 					l = PAGE_CACHE_SIZE - src;
 					src = 0;
 					dst += l;
@@ -340,7 +344,8 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 			return;
 		tmp->next = node->next;
 		cnid = cpu_to_be32(tmp->next);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, next), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_head = node->next;
@@ -351,15 +356,15 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 			return;
 		tmp->prev = node->prev;
 		cnid = cpu_to_be32(tmp->prev);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, prev), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_tail = node->prev;
 
-	// move down?
-	if (!node->prev && !node->next) {
-		printk(KERN_DEBUG "hfs_btree_del_level\n");
-	}
+	/* move down? */
+	if (!node->prev && !node->next)
+		dprint(DBG_BNODE_MOD, "hfs_btree_del_level\n");
 	if (!node->parent) {
 		tree->root = 0;
 		tree->depth = 0;
@@ -379,16 +384,16 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
 	struct hfs_bnode *node;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
 	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
-	     node; node = node->next_hash) {
-		if (node->this == cnid) {
+	     node; node = node->next_hash)
+		if (node->this == cnid)
 			return node;
-		}
-	}
 	return NULL;
 }
 
@@ -402,7 +407,9 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	loff_t off;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
@@ -429,7 +436,8 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	} else {
 		spin_unlock(&tree->hash_lock);
 		kfree(node);
-		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
+		wait_event(node2->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node2->flags));
 		return node2;
 	}
 	spin_unlock(&tree->hash_lock);
@@ -483,7 +491,8 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 	if (node) {
 		hfs_bnode_get(node);
 		spin_unlock(&tree->hash_lock);
-		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
+		wait_event(node->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node->flags));
 		if (test_bit(HFS_BNODE_ERROR, &node->flags))
 			goto node_error;
 		return node;
@@ -497,7 +506,8 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 	if (!test_bit(HFS_BNODE_NEW, &node->flags))
 		return node;
 
-	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
+	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
+			node->page_offset);
 	node->prev = be32_to_cpu(desc->prev);
 	node->next = be32_to_cpu(desc->next);
 	node->num_recs = be16_to_cpu(desc->num_recs);
@@ -556,11 +566,13 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 
 void hfs_bnode_free(struct hfs_bnode *node)
 {
-	//int i;
+#if 0
+	int i;
 
-	//for (i = 0; i < node->tree->pages_per_bnode; i++)
-	//	if (node->page[i])
-	//		page_cache_release(node->page[i]);
+	for (i = 0; i < node->tree->pages_per_bnode; i++)
+		if (node->page[i])
+			page_cache_release(node->page[i]);
+#endif
 	kfree(node);
 }
 
@@ -607,7 +619,8 @@ void hfs_bnode_get(struct hfs_bnode *node)
 	if (node) {
 		atomic_inc(&node->refcnt);
 		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
-			node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 	}
 }
 
@@ -619,7 +632,8 @@ void hfs_bnode_put(struct hfs_bnode *node)
 		int i;
 
 		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
-			node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 		BUG_ON(!atomic_read(&node->refcnt));
 		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
 			return;