@@ -61,17 +61,20 @@ struct rhlist_head {
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
+ * @nest: Number of bits of first-level nested table.
  * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
  * @locks_mask: Mask to apply before accessing locks[]
  * @locks: Array of spinlocks protecting individual buckets
  * @walkers: List of active walkers
  * @rcu: RCU structure for freeing the table
  * @future_tbl: Table under construction during rehashing
+ * @ntbl: Nested table used when out of memory.
  * @buckets: size * hash buckets
  */
 struct bucket_table {
 	unsigned int		size;
+	unsigned int		nest;
 	unsigned int		rehash;
 	u32			hash_rnd;
 	unsigned int		locks_mask;
@@ -81,7 +84,7 @@ struct bucket_table {
 
 	struct bucket_table __rcu *future_tbl;
 
-	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
+	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
 /**
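The new @nest field is what the helpers below key off: when it is non-zero, the table could not get one flat allocation of @size bucket pointers, and the bucket array is instead reached through a small first level of 2^nest slots backed by page-sized lower levels. A minimal stand-alone sketch of the index arithmetic such a layout implies (the split below is illustrative, not the exact kernel layout):

	#include <stdio.h>

	/* Illustrative only: split a bucket index into a first-level slot
	 * (low 'nest' bits) and a remainder for the lower levels. */
	static void split_index(unsigned int hash, unsigned int nest)
	{
		unsigned int level0 = hash & ((1u << nest) - 1);
		unsigned int rest = hash >> nest;

		printf("hash=%#x -> level0=%u rest=%#x\n", hash, level0, rest);
	}

	int main(void)
	{
		split_index(0x12345678, 3);	/* level0=0, rest=0x2468acf */
		return 0;
	}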
@@ -374,6 +377,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
 				 void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+					    unsigned int hash);
+struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+						   struct bucket_table *tbl,
+						   unsigned int hash);
+
 #define rht_dereference(p, ht) \
 	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
 
@@ -389,6 +398,27 @@ void rhashtable_destroy(struct rhashtable *ht);
 #define rht_entry(tpos, pos, member) \
 	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
+static inline struct rhash_head __rcu *const *rht_bucket(
+	const struct bucket_table *tbl, unsigned int hash)
+{
+	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+				     &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_var(
+	struct bucket_table *tbl, unsigned int hash)
+{
+	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+				     &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_insert(
+	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
+{
+	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
+				     &tbl->buckets[hash];
+}
+
 /**
  * rht_for_each_continue - continue iterating over hash chain
  * @pos:	the &struct rhash_head to use as a loop cursor.
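These three wrappers give every caller a single way to resolve a bucket slot whether the table is flat or nested: rht_bucket() returns a const slot for traversal, rht_bucket_var() a writable one, and rht_bucket_insert() is the only variant that can fail, since it may have to allocate missing nested levels. A usage sketch mirroring the insert path changed further down:

	struct rhash_head __rcu **pprev;

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);	/* nested level allocation failed */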
@@ -408,7 +438,7 @@ void rhashtable_destroy(struct rhashtable *ht);
  * @hash:	the hash value / bucket index
  */
 #define rht_for_each(pos, tbl, hash) \
-	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+	rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
 
 /**
  * rht_for_each_entry_continue - continue iterating over hash chain
@@ -433,7 +463,7 @@ void rhashtable_destroy(struct rhashtable *ht);
  * @member:	name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
-	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],	\
+	rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash),	\
 				    tbl, hash, member)
 
 /**
@@ -448,13 +478,13 @@ void rhashtable_destroy(struct rhashtable *ht);
  * This hash chain list-traversal primitive allows for the looped code to
  * remove the loop cursor from the list.
  */
-#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	    \
-	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
-	     next = !rht_is_a_nulls(pos) ?				    \
-		       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
-	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
-	     pos = next,						    \
-	     next = !rht_is_a_nulls(pos) ?				    \
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
+	for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+	     next = !rht_is_a_nulls(pos) ?				      \
+		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
+	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
+	     pos = next,						      \
+	     next = !rht_is_a_nulls(pos) ?				      \
 		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
 
 /**
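A usage sketch of the safe variant (struct test_obj and its rhash_head member 'node' are made-up names for illustration): because 'next' is loaded before the loop body runs, the body may unlink and even free the cursor entry.

	struct test_obj *obj;
	struct rhash_head *pos, *next;

	rht_for_each_entry_safe(obj, pos, next, tbl, hash, node)
		kfree(obj);	/* safe: 'next' was fetched beforehand */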
@@ -485,7 +515,7 @@ void rhashtable_destroy(struct rhashtable *ht);
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_rcu(pos, tbl, hash) \
-	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+	rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
 
 /**
  * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
@@ -518,8 +548,8 @@ void rhashtable_destroy(struct rhashtable *ht);
  * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
-	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],   \
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
+	rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
 					tbl, hash, member)
 
 /**
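On the read side, the _rcu iterators now also route through rht_bucket(), so the same reader code covers flat and nested tables. A sketch (same made-up test_obj/'node' names as above), valid only under rcu_read_lock():

	struct test_obj *obj;
	struct rhash_head *pos;

	rcu_read_lock();
	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
		pr_info("walked %p\n", obj);
	rcu_read_unlock();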
@@ -565,7 +595,7 @@ static inline struct rhash_head *__rhashtable_lookup(
 		.ht = ht,
 		.key = key,
 	};
-	const struct bucket_table *tbl;
+	struct bucket_table *tbl;
 	struct rhash_head *he;
 	unsigned int hash;
 
@@ -697,8 +727,12 @@ static inline void *__rhashtable_insert_fast(
 	}
 
 	elasticity = ht->elasticity;
-	pprev = &tbl->buckets[hash];
-	rht_for_each(head, tbl, hash) {
+	pprev = rht_bucket_insert(ht, tbl, hash);
+	data = ERR_PTR(-ENOMEM);
+	if (!pprev)
+		goto out;
+
+	rht_for_each_continue(head, *pprev, tbl, hash) {
 		struct rhlist_head *plist;
 		struct rhlist_head *list;
 
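Note the new failure mode this adds to insertion: if a nested level cannot be allocated, rht_bucket_insert() returns NULL and the function bails out with ERR_PTR(-ENOMEM) before touching the chain. From a caller's perspective (obj and params are illustrative names):

	int err = rhashtable_insert_fast(ht, &obj->node, params);
	if (err == -ENOMEM)
		;	/* can now happen even without a pending resize */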
@@ -736,7 +770,7 @@ static inline void *__rhashtable_insert_fast(
 	if (unlikely(rht_grow_above_100(ht, tbl)))
 		goto slow_path;
 
-	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+	head = rht_dereference_bucket(*pprev, tbl, hash);
 
 	RCU_INIT_POINTER(obj->next, head);
 	if (rhlist) {
@@ -746,7 +780,7 @@ static inline void *__rhashtable_insert_fast(
 		RCU_INIT_POINTER(list->next, NULL);
 	}
 
-	rcu_assign_pointer(tbl->buckets[hash], obj);
+	rcu_assign_pointer(*pprev, obj);
 
 	atomic_inc(&ht->nelems);
 	if (rht_grow_above_75(ht, tbl))
@@ -955,8 +989,8 @@ static inline int __rhashtable_remove_fast_one(
 
 	spin_lock_bh(lock);
 
-	pprev = &tbl->buckets[hash];
-	rht_for_each(he, tbl, hash) {
+	pprev = rht_bucket_var(tbl, hash);
+	rht_for_each_continue(he, *pprev, tbl, hash) {
 		struct rhlist_head *list;
 
 		list = container_of(he, struct rhlist_head, rhead);
@@ -1107,8 +1141,8 @@ static inline int __rhashtable_replace_fast(
 
 	spin_lock_bh(lock);
 
-	pprev = &tbl->buckets[hash];
-	rht_for_each(he, tbl, hash) {
+	pprev = rht_bucket_var(tbl, hash);
+	rht_for_each_continue(he, *pprev, tbl, hash) {
 		if (he != obj_old) {
 			pprev = &he->next;
 			continue;
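Both write-side paths above follow the same pattern: take the bucket lock first, then resolve the slot with rht_bucket_var(), which only walks nested levels that already exist and never allocates. A condensed sketch of that pattern:

	spinlock_t *lock = rht_bucket_lock(tbl, hash);
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;

	spin_lock_bh(lock);
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		/* unlink with rcu_assign_pointer(*pprev, ...) or advance pprev */
	}
	spin_unlock_bh(lock);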