@@ -62,7 +62,7 @@ struct request_sock {
         u32                             window_clamp; /* window clamp at creation time */
         u32                             rcv_wnd;      /* rcv_wnd offered first time */
         u32                             ts_recent;
-        unsigned long                   expires;
+        struct timer_list               rsk_timer;
         const struct request_sock_ops   *rsk_ops;
         struct sock                     *sk;
         u32                             secid;
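The fixed deadline formerly stored in expires is replaced by a timer_list embedded in each request: the deadline now lives in rsk_timer.expires and the kernel timer wheel fires per request, instead of a central listener timer periodically scanning the whole SYN table (the removal of clock_hand from struct listen_sock below is the other half of that change).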
@@ -110,9 +110,6 @@ static inline void reqsk_free(struct request_sock *req)
 
 static inline void reqsk_put(struct request_sock *req)
 {
-        /* temporary debugging, until req sock are put into ehash table */
-        WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 1);
-
         if (atomic_dec_and_test(&req->rsk_refcnt))
                 reqsk_free(req);
 }
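Dropping the WARN_ON_ONCE() is a consequence of the timer change: the check assumed a request's refcount was always exactly 1 when put, but once the hash table and the pending timer each hold their own reference (see the del_timer()/reqsk_put() pairing in reqsk_queue_unlink() below), rsk_refcnt is legitimately greater than 1 and reqsk_put() reduces to a plain decrement-and-test.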
@@ -124,12 +121,16 @@ extern int sysctl_max_syn_backlog;
  * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
  */
 struct listen_sock {
-        u8                      max_qlen_log;
+        int                     qlen_inc; /* protected by listener lock */
+        int                     young_inc;/* protected by listener lock */
+
+        /* following fields can be updated by timer */
+        atomic_t                qlen_dec; /* qlen = qlen_inc - qlen_dec */
+        atomic_t                young_dec;
+
+        u8                      max_qlen_log ____cacheline_aligned_in_smp;
         u8                      synflood_warned;
         /* 2 bytes hole, try to use */
-        int                     qlen;
-        int                     qlen_young;
-        int                     clock_hand;
         u32                     hash_rnd;
         u32                     nr_table_entries;
         struct request_sock     *syn_table[0];
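The qlen/qlen_young pair is split into inc/dec halves: the _inc counters are plain ints written only under the listener lock, while the _dec counters are atomic_t so the per-request timers can retire entries without taking that lock. The real values are recovered by subtraction (qlen = qlen_inc - qlen_dec, as the new listen_sock_qlen() below computes). Placing ____cacheline_aligned_in_smp on max_qlen_log plausibly isolates the read-mostly tail of the struct (max_qlen_log, hash_rnd, nr_table_entries, syn_table) from the cache line the timers keep dirtying.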
@@ -182,16 +183,17 @@ struct fastopen_queue {
 struct request_sock_queue {
         struct request_sock     *rskq_accept_head;
         struct request_sock     *rskq_accept_tail;
-        rwlock_t                syn_wait_lock;
         u8                      rskq_defer_accept;
-        /* 3 bytes hole, try to pack */
         struct listen_sock      *listen_opt;
         struct fastopen_queue   *fastopenq; /* This is non-NULL iff TFO has been
                                              * enabled on this listener. Check
                                              * max_qlen != 0 in fastopen_queue
                                              * to determine if TFO is enabled
                                              * right at this moment.
                                              */
+
+        /* temporary alignment, our goal is to get rid of this lock */
+        rwlock_t                syn_wait_lock ____cacheline_aligned_in_smp;
 };
 
 int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -223,11 +225,15 @@ static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
         struct request_sock **prev;
 
         write_lock(&queue->syn_wait_lock);
+
         prev = &lopt->syn_table[req->rsk_hash];
         while (*prev != req)
                 prev = &(*prev)->dl_next;
         *prev = req->dl_next;
+
         write_unlock(&queue->syn_wait_lock);
+        if (del_timer(&req->rsk_timer))
+                reqsk_put(req);
 }
 
 static inline void reqsk_queue_add(struct request_sock_queue *queue,
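The del_timer()/reqsk_put() pairing added here encodes the ownership rule: a pending rsk_timer holds one reference on its request. del_timer() returns nonzero only when it deactivates a timer that had not yet fired, in which case the unlinking CPU must drop the timer's reference; if the handler already ran (or is running), the handler side is responsible for that reference. A minimal sketch of what the matching handler might look like; reqsk_timer_handler, its retransmit limit, and its backoff are hypothetical, not defined in this header:

static void reqsk_timer_handler(unsigned long data)
{
        struct request_sock *req = (struct request_sock *)data;

        if (req->num_timeout < 5 /* hypothetical retransmit limit */) {
                /* ... retransmit the SYNACK ... */
                req->num_timeout++;
                mod_timer(&req->rsk_timer, jiffies + HZ); /* timer keeps its reference */
        } else {
                /* give up on this request: the timer's reference is dropped here */
                reqsk_put(req);
        }
}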
@@ -260,64 +266,53 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
         return req;
 }
 
-static inline int reqsk_queue_removed(struct request_sock_queue *queue,
-                                      struct request_sock *req)
+static inline void reqsk_queue_removed(struct request_sock_queue *queue,
+                                       const struct request_sock *req)
 {
         struct listen_sock *lopt = queue->listen_opt;
 
         if (req->num_timeout == 0)
-                --lopt->qlen_young;
-
-        return --lopt->qlen;
+                atomic_inc(&lopt->young_dec);
+        atomic_inc(&lopt->qlen_dec);
 }
 
-static inline int reqsk_queue_added(struct request_sock_queue *queue)
+static inline void reqsk_queue_added(struct request_sock_queue *queue)
 {
         struct listen_sock *lopt = queue->listen_opt;
-        const int prev_qlen = lopt->qlen;
 
-        lopt->qlen_young++;
-        lopt->qlen++;
-        return prev_qlen;
+        lopt->young_inc++;
+        lopt->qlen_inc++;
 }
 
-static inline int reqsk_queue_len(const struct request_sock_queue *queue)
+static inline int listen_sock_qlen(const struct listen_sock *lopt)
 {
-        return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+        return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
 }
 
-static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
+static inline int listen_sock_young(const struct listen_sock *lopt)
 {
-        return queue->listen_opt->qlen_young;
+        return lopt->young_inc - atomic_read(&lopt->young_dec);
 }
 
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
-        return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+        const struct listen_sock *lopt = queue->listen_opt;
+
+        return lopt ? listen_sock_qlen(lopt) : 0;
 }
 
-static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
-                                        u32 hash, struct request_sock *req,
-                                        unsigned long timeout)
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
-        struct listen_sock *lopt = queue->listen_opt;
-
-        req->expires = jiffies + timeout;
-        req->num_retrans = 0;
-        req->num_timeout = 0;
-        req->sk = NULL;
-
-        /* before letting lookups find us, make sure all req fields
-         * are committed to memory and refcnt initialized.
-         */
-        smp_wmb();
-        atomic_set(&req->rsk_refcnt, 1);
+        return listen_sock_young(queue->listen_opt);
+}
 
-        req->rsk_hash = hash;
-        write_lock(&queue->syn_wait_lock);
-        req->dl_next = lopt->syn_table[hash];
-        lopt->syn_table[hash] = req;
-        write_unlock(&queue->syn_wait_lock);
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+{
+        return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
 }
 
+void reqsk_queue_hash_req(struct request_sock_queue *queue,
+                          u32 hash, struct request_sock *req,
+                          unsigned long timeout);
+
 #endif /* _REQUEST_SOCK_H */
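With its body removed from the header, reqsk_queue_hash_req() becomes an out-of-line function (net/core/request_sock.c would be the natural home). A plausible sketch of the definition, reassembled from the inline code deleted above plus the new timer; the initial refcount of 2 (one reference for the hash table, one for the timer) and the reqsk_timer_handler name are inferred from the del_timer()/reqsk_put() pairing in reqsk_queue_unlink(), not stated in this header:

void reqsk_queue_hash_req(struct request_sock_queue *queue,
                          u32 hash, struct request_sock *req,
                          unsigned long timeout)
{
        struct listen_sock *lopt = queue->listen_opt;

        req->num_retrans = 0;
        req->num_timeout = 0;
        req->sk = NULL;

        /* before letting lookups find us, make sure all req fields
         * are committed to memory and refcnt initialized.
         */
        smp_wmb();
        atomic_set(&req->rsk_refcnt, 2);

        setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
        req->rsk_hash = hash;

        write_lock(&queue->syn_wait_lock);
        req->dl_next = lopt->syn_table[hash];
        lopt->syn_table[hash] = req;
        write_unlock(&queue->syn_wait_lock);

        /* arm the timer only after the request is visible in the table */
        mod_timer(&req->rsk_timer, jiffies + timeout);
}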