  * a better scalability.
  */
 
-#define DEBUG_EPOLL 0
-
-#if DEBUG_EPOLL > 0
-#define DPRINTK(x) printk x
-#define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0)
-#else /* #if DEBUG_EPOLL > 0 */
-#define DPRINTK(x) (void) 0
-#define DNPRINTK(n, x) (void) 0
-#endif /* #if DEBUG_EPOLL > 0 */
-
-#define DEBUG_EPI 0
-
-#if DEBUG_EPI != 0
-#define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
-#else /* #if DEBUG_EPI != 0 */
-#define EPI_SLAB_DEBUG 0
-#endif /* #if DEBUG_EPI != 0 */
-
 /* Epoll private bits inside the event mask */
 #define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)
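Editor's note: the DPRINTK()/DNPRINTK() wrappers deleted above were a file-local way of compiling debug printk calls in or out. A minimal sketch of how the same effect is usually obtained with the kernel's standard pr_debug() helper; the helper function name is hypothetical and this code is not part of the patch:

#include <linux/printk.h>

/* Sketch only: pr_debug() expands to a no-op unless the file is built with
 * -DDEBUG or CONFIG_DYNAMIC_DEBUG enables the call site at run time, so no
 * hand-rolled DEBUG_EPOLL verbosity macros are needed. */
static void ep_debug_example(struct eventpoll *ep)
{
	pr_debug("eventpoll: ep=%p\n", ep);
}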
@@ -567,9 +549,6 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 
 	atomic_dec(&ep->user->epoll_watches);
 
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
-		     current, ep, file));
-
 	return 0;
 }
@@ -625,7 +604,6 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
 	if (ep)
 		ep_free(ep);
 
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
 	return 0;
 }
@@ -750,8 +728,6 @@ static int ep_alloc(struct eventpoll **pep)
 
 	*pep = ep;
 
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
-		     current, ep));
 	return 0;
 
 free_uid:
@@ -785,9 +761,6 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
 		}
 	}
 
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
-		     current, file, epir));
-
 	return epir;
 }
@@ -803,9 +776,6 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
 	struct epitem *epi = ep_item_from_wait(wait);
 	struct eventpoll *ep = epi->ep;
 
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
-		     current, epi->ffd.file, epi, ep));
-
 	spin_lock_irqsave(&ep->lock, flags);
 
 	/*
@@ -978,9 +948,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	if (pwake)
 		ep_poll_safewake(&ep->poll_wait);
 
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
-		     current, ep, tfile, fd));
-
 	return 0;
 
 error_unregister:
@@ -1197,41 +1164,30 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
  */
 SYSCALL_DEFINE1(epoll_create1, int, flags)
 {
-	int error, fd = -1;
-	struct eventpoll *ep;
+	int error;
+	struct eventpoll *ep = NULL;
 
 	/* Check the EPOLL_* constant for consistency. */
 	BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
 
 	if (flags & ~EPOLL_CLOEXEC)
 		return -EINVAL;
-
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
-		     current, flags));
-
 	/*
-	 * Create the internal data structure ( "struct eventpoll" ).
+	 * Create the internal data structure ("struct eventpoll").
 	 */
 	error = ep_alloc(&ep);
-	if (error < 0) {
-		fd = error;
-		goto error_return;
-	}
-
+	if (error < 0)
+		return error;
 	/*
 	 * Creates all the items needed to setup an eventpoll file. That is,
 	 * a file structure and a free file descriptor.
 	 */
-	fd = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
-			      flags & O_CLOEXEC);
-	if (fd < 0)
+	error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
+				 flags & O_CLOEXEC);
+	if (error < 0)
 		ep_free(ep);
 
-error_return:
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
-		     current, flags, fd));
-
-	return fd;
+	return error;
 }
 
 SYSCALL_DEFINE1(epoll_create, int, size)
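Editor's note: from user space the behaviour is unchanged by this cleanup; epoll_create1() still returns a new file descriptor on success and -1 with errno set on failure. A minimal, illustrative usage sketch of the syscalls implemented in this file (not part of the patch):

#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = 0 };
	int epfd = epoll_create1(EPOLL_CLOEXEC);	/* fd >= 0 on success */

	if (epfd < 0)
		return 1;
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, 0, &ev) == 0)	/* watch stdin */
		epoll_wait(epfd, &ev, 1, 1000);			/* wait up to 1 s */
	close(epfd);
	return 0;
}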
@@ -1256,9 +1212,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	struct epitem *epi;
 	struct epoll_event epds;
 
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
-		     current, epfd, op, fd, event));
-
 	error = -EFAULT;
 	if (ep_op_has_event(op) &&
 	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
@@ -1335,8 +1288,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 error_fput:
 	fput(file);
 error_return:
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n",
-		     current, epfd, op, fd, event, error));
 
 	return error;
 }
@@ -1352,9 +1303,6 @@ SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
 	struct file *file;
 	struct eventpoll *ep;
 
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n",
-		     current, epfd, events, maxevents, timeout));
-
 	/* The maximum number of event must be greater than zero */
 	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
 		return -EINVAL;
@@ -1391,8 +1339,6 @@ SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
 error_fput:
 	fput(file);
 error_return:
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n",
-		     current, epfd, events, maxevents, timeout, error));
 
 	return error;
 }
@@ -1464,13 +1410,11 @@ static int __init eventpoll_init(void)
 
 	/* Allocates slab cache used to allocate "struct epitem" items */
 	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
-			0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
-			NULL);
+			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
 	/* Allocates slab cache used to allocate "struct eppoll_entry" */
 	pwq_cache = kmem_cache_create("eventpoll_pwq",
-			sizeof(struct eppoll_entry), 0,
-			EPI_SLAB_DEBUG|SLAB_PANIC, NULL);
+			sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);
 
 	return 0;
 }
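Editor's note: with EPI_SLAB_DEBUG gone, the caches keep only their functional flags (SLAB_HWCACHE_ALIGN for epi_cache, plus SLAB_PANIC, which makes boot fail loudly if cache creation fails). A minimal sketch of how such a dedicated slab cache is typically used elsewhere in the file (illustrative only, not part of the patch):

	struct epitem *epi;

	epi = kmem_cache_alloc(epi_cache, GFP_KERNEL);	/* NULL on allocation failure */
	if (epi) {
		/* ... initialize and link the item ... */
		kmem_cache_free(epi_cache, epi);	/* return the object to the cache */
	}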