
Commit a0a4619

[NET]: Add NAPI_STATE_DISABLE.
Create a bit to signal that a napi_disable() is in progress.

This sets up infrastructure such that net_rx_action() can generically break out of the ->poll() loop on a NAPI context that has a pending napi_disable() yet is being bombed with packets (and thus would otherwise poll endlessly and not allow the napi_disable() to finish).

Now, what napi_disable() does is first set the NAPI_STATE_DISABLE bit (to indicate that a disable is pending), then it polls for the NAPI_STATE_SCHED bit, and once the NAPI_STATE_SCHED bit is acquired the NAPI_STATE_DISABLE bit is cleared. Here, the test_and_set_bit() provides the necessary memory barrier between the various bitops.

napi_schedule_prep() now tests for a pending disable as its first action and won't try to obtain the NAPI_STATE_SCHED bit if a disable is pending.

As a result, we can remove the netif_running() check in netif_rx_schedule_prep(), because the NAPI disable-pending state serves this purpose. And it does so in a NAPI-centric manner, which is what we really want.

Signed-off-by: David S. Miller <davem@davemloft.net>
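Not part of this patch, but as a hedged illustration of the behaviour the commit message describes: a ->poll() routine (or the generic net_rx_action() loop) can consult the new pending-disable state and stop polling early, letting napi_disable() acquire NAPI_STATE_SCHED even under heavy load. The sketch below assumes hypothetical names my_poll() and my_rx_one(); only napi_disable_pending(), napi_complete() and the ->poll() signature come from the kernel of this era.

/* Hedged sketch, not from this commit.  Needs <linux/netdevice.h>.
 * my_poll() and my_rx_one() are hypothetical driver helpers. */
static int my_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        while (work_done < budget && !napi_disable_pending(napi)) {
                if (!my_rx_one(napi))   /* hypothetical: handle one packet */
                        break;          /* RX ring is empty */
                work_done++;
        }

        if (work_done < budget)
                napi_complete(napi);    /* clears NAPI_STATE_SCHED, so the
                                         * disabler's loop can finish */
        return work_done;
}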
1 parent: bdb95b1

File tree

1 file changed: +13, -3 lines
include/linux/netdevice.h

Lines changed: 13 additions & 3 deletions
@@ -319,21 +319,29 @@ struct napi_struct {
 enum
 {
        NAPI_STATE_SCHED,       /* Poll is scheduled */
+       NAPI_STATE_DISABLE,     /* Disable pending */
 };
 
 extern void FASTCALL(__napi_schedule(struct napi_struct *n));
 
+static inline int napi_disable_pending(struct napi_struct *n)
+{
+       return test_bit(NAPI_STATE_DISABLE, &n->state);
+}
+
 /**
  *     napi_schedule_prep - check if napi can be scheduled
  *     @n: napi context
  *
  * Test if NAPI routine is already running, and if not mark
  * it as running.  This is used as a condition variable
- * insure only one NAPI poll instance runs
+ * insure only one NAPI poll instance runs.  We also make
+ * sure there is no pending NAPI disable.
  */
 static inline int napi_schedule_prep(struct napi_struct *n)
 {
-       return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
+       return !napi_disable_pending(n) &&
+               !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
 }
 
 /**
@@ -389,8 +397,10 @@ static inline void napi_complete(struct napi_struct *n)
  */
 static inline void napi_disable(struct napi_struct *n)
 {
+       set_bit(NAPI_STATE_DISABLE, &n->state);
        while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
+       clear_bit(NAPI_STATE_DISABLE, &n->state);
 }
 
 /**
@@ -1268,7 +1278,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 static inline int netif_rx_schedule_prep(struct net_device *dev,
                                         struct napi_struct *napi)
 {
-       return netif_running(dev) && napi_schedule_prep(napi);
+       return napi_schedule_prep(napi);
 }
 
 /* Add interface to tail of rx poll list.  This assumes that _prep has
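
For context on the netif_rx_schedule_prep() hunk above: a typical driver interrupt handler of this era schedules NAPI roughly as in the hedged sketch below (my_dev_interrupt() and struct my_priv are hypothetical; netif_rx_schedule_prep() and __netif_rx_schedule() are the era's real helpers). With the disable-pending bit in place, the prep call itself refuses to schedule while a napi_disable() is in flight, which is why the old netif_running() test could be dropped.

/* Hedged sketch, not from this commit.  Needs <linux/interrupt.h> and
 * <linux/netdevice.h>.  my_dev_interrupt() and struct my_priv are
 * hypothetical driver names. */
static irqreturn_t my_dev_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct my_priv *priv = netdev_priv(dev);

        if (netif_rx_schedule_prep(dev, &priv->napi)) {
                /* Driver-specific: mask RX interrupts here, then let
                 * the poll routine do the work. */
                __netif_rx_schedule(dev, &priv->napi);
        }
        return IRQ_HANDLED;
}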
