@@ -116,8 +116,8 @@ static int Consumer_traverse(Handle *self, visitproc visit, void *arg) {
  ****************************************************************************/
 
 
-static PyObject *Consumer_subscribe(Handle *self, PyObject *args,
-                                    PyObject *kwargs) {
+static PyObject *
+Consumer_subscribe(Handle *self, PyObject *args, PyObject *kwargs) {
 
         rd_kafka_topic_partition_list_t *topics;
         static char *kws[] = {"topics", "on_assign", "on_revoke", "on_lost",
@@ -970,7 +970,7 @@ Consumer_offsets_for_times(Handle *self, PyObject *args, PyObject *kwargs) {
 
 /**
  * @brief Poll for a single message from the subscribed topics.
- * 
+ *
  * Instead of a single blocking call to rd_kafka_consumer_poll() with the
  * full timeout, this function:
  * 1. Splits the timeout into 200ms chunks
@@ -988,14 +988,13 @@ Consumer_offsets_for_times(Handle *self, PyObject *args, PyObject *kwargs) {
  * @return PyObject* Message object, None if timeout, or NULL on error
  *         (raises KeyboardInterrupt if signal detected)
  */
-static PyObject *Consumer_poll(Handle *self, PyObject *args,
-                               PyObject *kwargs) {
-        double tmout = -1.0f;
-        static char *kws[] = {"timeout", NULL};
+static PyObject *Consumer_poll(Handle *self, PyObject *args, PyObject *kwargs) {
+        double tmout = -1.0f;
+        static char *kws[] = {"timeout", NULL};
         rd_kafka_message_t *rkm = NULL;
         PyObject *msgobj;
         CallState cs;
-        const int CHUNK_TIMEOUT_MS = 200; /* 200ms chunks for signal checking */
+        const int CHUNK_TIMEOUT_MS = 200; /* 200ms chunks for signal checking */
         int total_timeout_ms;
         int chunk_timeout_ms;
         int chunk_count = 0;
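The next hunk (and its counterpart in Consumer_consume further down) calls calculate_chunk_timeout(), whose definition is not part of this diff. Going only by the call sites — a running chunk_count is passed in, a return value of 0 is treated as "timeout expired", and -1.0 maps to an infinite timeout — a plausible sketch of the helper is shown below; the body, in particular the handling of the infinite case, is an assumption rather than code copied from the source.

/* Hypothetical sketch of calculate_chunk_timeout(); the real definition is
 * outside this diff, so the details below (especially the infinite-timeout
 * case) are inferred from the call sites, not copied from the source. */
static int calculate_chunk_timeout(int total_timeout_ms,
                                   int chunk_count,
                                   int chunk_timeout_ms) {
        int elapsed_ms, remaining_ms;

        if (total_timeout_ms < 0)
                return chunk_timeout_ms; /* infinite: always poll a full chunk */

        elapsed_ms   = chunk_count * chunk_timeout_ms;
        remaining_ms = total_timeout_ms - elapsed_ms;
        if (remaining_ms <= 0)
                return 0; /* callers treat 0 as "timeout expired" */

        return remaining_ms < chunk_timeout_ms ? remaining_ms
                                               : chunk_timeout_ms;
}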
@@ -1021,15 +1020,16 @@ static PyObject *Consumer_poll(Handle *self, PyObject *args,
         } else {
                 while (1) {
                         /* Calculate timeout for this chunk */
-                        chunk_timeout_ms = calculate_chunk_timeout(total_timeout_ms, chunk_count,
-                                                                   CHUNK_TIMEOUT_MS);
+                        chunk_timeout_ms = calculate_chunk_timeout(
+                            total_timeout_ms, chunk_count, CHUNK_TIMEOUT_MS);
                         if (chunk_timeout_ms == 0) {
                                 /* Timeout expired */
                                 break;
                         }
 
                         /* Poll with chunk timeout */
-                        rkm = rd_kafka_consumer_poll(self->rk, chunk_timeout_ms);
+                        rkm =
+                            rd_kafka_consumer_poll(self->rk, chunk_timeout_ms);
 
                         /* If we got a message, exit the loop */
                         if (rkm) {
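Steps 3 and 4 of the comment above — re-acquiring the GIL between chunks and calling PyErr_CheckSignals() — are not visible in the hunks shown here. Using only documented CPython calls, the per-chunk check presumably looks roughly like the sketch below; check_signals_between_chunks() is a hypothetical name, and the real function keeps this state in its CallState struct rather than a bare thread-state pointer.

#include <Python.h>

/* Illustrative only: how a chunked loop can surface Ctrl-C between blocking
 * librdkafka calls. Returns 0 to keep looping with the GIL released again,
 * or -1 if a signal handler raised (typically KeyboardInterrupt), in which
 * case the Python exception is set, the GIL is held, and the caller should
 * clean up and return NULL. */
static int check_signals_between_chunks(PyThreadState **thread_state) {
        /* Re-acquire the GIL so Python can run any pending signal handlers */
        PyEval_RestoreThread(*thread_state);

        if (PyErr_CheckSignals() != 0)
                return -1; /* exception set; keep holding the GIL */

        /* No signal: drop the GIL again before the next blocking call */
        *thread_state = PyEval_SaveThread();
        return 0;
}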
@@ -1099,7 +1099,7 @@ Consumer_memberid(Handle *self, PyObject *args, PyObject *kwargs) {
  * Instead of a single blocking call to rd_kafka_consume_batch_queue() with the
  * full timeout, this function:
  * 1. Splits the timeout into 200ms chunks
- * 2. Calls rd_kafka_consume_batch_queue() with chunk timeout 
+ * 2. Calls rd_kafka_consume_batch_queue() with chunk timeout
  * 3. Between chunks, re-acquires GIL and calls PyErr_CheckSignals()
  * 4. If signal detected, returns NULL (raises KeyboardInterrupt)
  * 5. Continues until messages received, timeout expired, or signal detected.
@@ -1111,11 +1111,11 @@ Consumer_memberid(Handle *self, PyObject *args, PyObject *kwargs) {
  *        consume per call. Default: 1. Maximum: 1000000.
  *      - timeout (float, optional): Timeout in seconds.
  *        Default: -1.0 (infinite timeout)
- * @return PyObject* List of Message objects, empty list if timeout, or NULL on error
- *         (raises KeyboardInterrupt if signal detected)
+ * @return PyObject* List of Message objects, empty list if timeout, or NULL on
+ *         error (raises KeyboardInterrupt if signal detected)
  */
-static PyObject *Consumer_consume(Handle *self, PyObject *args,
-                                  PyObject *kwargs) {
+static PyObject *
+Consumer_consume(Handle *self, PyObject *args, PyObject *kwargs) {
         unsigned int num_messages = 1;
         double tmout = -1.0f;
         static char *kws[] = {"num_messages", "timeout", NULL};
@@ -1124,7 +1124,7 @@ static PyObject *Consumer_consume(Handle *self, PyObject *args,
         rd_kafka_queue_t *rkqu = self->u.Consumer.rkqu;
         CallState cs;
         Py_ssize_t i, n = 0;
-        const int CHUNK_TIMEOUT_MS = 200; /* 200ms chunks for signal checking */
+        const int CHUNK_TIMEOUT_MS = 200; /* 200ms chunks for signal checking */
         int total_timeout_ms;
         int chunk_timeout_ms;
         int chunk_count = 0;
@@ -1160,37 +1160,40 @@ static PyObject *Consumer_consume(Handle *self, PyObject *args,
          * ThreadPool. Only use wakeable poll for
          * blocking calls that need to be interruptible. */
         if (total_timeout_ms >= 0 && total_timeout_ms < CHUNK_TIMEOUT_MS) {
-                n = (Py_ssize_t)rd_kafka_consume_batch_queue(rkqu, total_timeout_ms,
-                                                             rkmessages, num_messages);
+                n = (Py_ssize_t)rd_kafka_consume_batch_queue(
+                    rkqu, total_timeout_ms, rkmessages, num_messages);
 
                 if (n < 0) {
                         /* Error - need to restore GIL before setting error */
                         PyEval_RestoreThread(cs.thread_state);
                         free(rkmessages);
-                        cfl_PyErr_Format(rd_kafka_last_error(),
-                                         "%s", rd_kafka_err2str(rd_kafka_last_error()));
+                        cfl_PyErr_Format(
+                            rd_kafka_last_error(), "%s",
+                            rd_kafka_err2str(rd_kafka_last_error()));
                         return NULL;
                 }
         } else {
                 while (1) {
                         /* Calculate timeout for this chunk */
-                        chunk_timeout_ms = calculate_chunk_timeout(total_timeout_ms, chunk_count,
-                                                                   CHUNK_TIMEOUT_MS);
+                        chunk_timeout_ms = calculate_chunk_timeout(
+                            total_timeout_ms, chunk_count, CHUNK_TIMEOUT_MS);
                         if (chunk_timeout_ms == 0) {
                                 /* Timeout expired */
                                 break;
                         }
 
                         /* Consume with chunk timeout */
-                        n = (Py_ssize_t)rd_kafka_consume_batch_queue(rkqu, chunk_timeout_ms,
-                                                                     rkmessages, num_messages);
+                        n = (Py_ssize_t)rd_kafka_consume_batch_queue(
+                            rkqu, chunk_timeout_ms, rkmessages, num_messages);
 
                         if (n < 0) {
-                                /* Error - need to restore GIL before setting error */
+                                /* Error - need to restore GIL before setting
+                                 * error */
                                 PyEval_RestoreThread(cs.thread_state);
                                 free(rkmessages);
-                                cfl_PyErr_Format(rd_kafka_last_error(),
-                                                 "%s", rd_kafka_err2str(rd_kafka_last_error()));
+                                cfl_PyErr_Format(
+                                    rd_kafka_last_error(), "%s",
+                                    rd_kafka_err2str(rd_kafka_last_error()));
                                 return NULL;
                         }
 
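Pulling the fragments of this last hunk together: the batch path keeps issuing short rd_kafka_consume_batch_queue() reads until it receives messages, exhausts the total timeout, or a signal arrives. A condensed, illustrative sketch of that control flow — reusing the two helpers sketched earlier, not the literal body of Consumer_consume() — might read:

#include <Python.h>
#include <librdkafka/rdkafka.h>

/* Condensed, illustrative control flow of the chunked batch consume. Assumes
 * the calculate_chunk_timeout() and check_signals_between_chunks() sketches
 * above, and that the GIL has already been released around this loop.
 * Returns the number of messages read, 0 on timeout, a negative value on a
 * librdkafka error, or -1 with a Python exception set if a signal was
 * caught (the caller can tell the two apart via PyErr_Occurred()). */
static Py_ssize_t
chunked_batch_consume_sketch(rd_kafka_queue_t *rkqu,
                             rd_kafka_message_t **rkmessages,
                             size_t num_messages,
                             int total_timeout_ms,
                             int chunk_timeout_ms,
                             PyThreadState **thread_state) {
        Py_ssize_t n = 0;
        int chunk_count = 0;

        while (1) {
                int chunk_ms = calculate_chunk_timeout(
                    total_timeout_ms, chunk_count, chunk_timeout_ms);
                if (chunk_ms == 0)
                        break; /* total timeout expired: return what we have */

                n = (Py_ssize_t)rd_kafka_consume_batch_queue(
                    rkqu, chunk_ms, rkmessages, num_messages);
                if (n != 0)
                        break; /* messages received (n > 0) or error (n < 0) */

                if (check_signals_between_chunks(thread_state) != 0)
                        return -1; /* signal: GIL held, caller cleans up */

                chunk_count++;
        }
        return n;
}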