forked from linkedin/li-apache-kafka-clients
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Issue#43 Handle exception in the message processing (linkedin#44)
If no key is provided for a message, the splitter should not set the key to a UUID. Handle errors during message processing in the ConsumerRecordProcessor: 1. return all the messages received before the exception; 2. allow skipping messages that hit exceptions.
- Loading branch information
Showing
9 changed files
with
351 additions
and
34 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
67 changes: 67 additions & 0 deletions
67
src/main/java/com/linkedin/kafka/clients/largemessage/ConsumerRecordsProcessResult.java
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,67 @@ | ||
package com.linkedin.kafka.clients.largemessage; | ||
|
||
import com.linkedin.kafka.clients.largemessage.errors.ConsumerRecordsProcessingException; | ||
import com.linkedin.kafka.clients.largemessage.errors.RecordProcessingException; | ||
import java.util.ArrayList; | ||
import java.util.HashMap; | ||
import java.util.List; | ||
import java.util.Map; | ||
import org.apache.kafka.clients.consumer.ConsumerRecord; | ||
import org.apache.kafka.clients.consumer.ConsumerRecords; | ||
import org.apache.kafka.common.TopicPartition; | ||
|
||
|
||
/** | ||
* The process result of ConsumerRecords returned by the open source KafkaConsumer. | ||
* | ||
* It contains the following information: | ||
* 1. The processed consumer records. | ||
* 2. If there were exception in processing, the offsets to skip those problematic messages for each partition. | ||
* 3. The the exception thrown by the last problematic partition. (We just need to throw an exception to the user). | ||
*/ | ||
public class ConsumerRecordsProcessResult<K, V> { | ||
private final Map<TopicPartition, Long> _resumeOffsets; | ||
private final List<RecordProcessingException> _exceptions; | ||
private Map<TopicPartition, List<ConsumerRecord<K, V>>> _processedRecords; | ||
|
||
ConsumerRecordsProcessResult() { | ||
_processedRecords = new HashMap<>(); | ||
_resumeOffsets = new HashMap<>(); | ||
_exceptions = new ArrayList<>(); | ||
} | ||
|
||
void addRecord(TopicPartition tp, ConsumerRecord<K, V> record) { | ||
// Only put record into map if it is not null | ||
if (record != null) { | ||
List<ConsumerRecord<K, V>> list = _processedRecords.computeIfAbsent(tp, k -> new ArrayList<>()); | ||
list.add(record); | ||
} | ||
} | ||
|
||
void recordException(TopicPartition tp, long offset, RuntimeException e) { | ||
_exceptions.add(new RecordProcessingException(tp, offset, e)); | ||
// The resume offset is the error offset + 1. i.e. if user ignore the exception thrown and poll again, the resuming | ||
// offset should be this one. | ||
_resumeOffsets.putIfAbsent(tp, offset + 1); | ||
} | ||
|
||
public void clearRecords() { | ||
_processedRecords = null; | ||
} | ||
|
||
boolean hasError(TopicPartition tp) { | ||
return resumeOffsets().containsKey(tp); | ||
} | ||
|
||
public ConsumerRecordsProcessingException exception() { | ||
return _exceptions.isEmpty() ? null : new ConsumerRecordsProcessingException(_exceptions); | ||
} | ||
|
||
public ConsumerRecords<K, V> consumerRecords() { | ||
return new ConsumerRecords<>(_processedRecords); | ||
} | ||
|
||
public Map<TopicPartition, Long> resumeOffsets() { | ||
return _resumeOffsets; | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
25 changes: 25 additions & 0 deletions
25
...va/com/linkedin/kafka/clients/largemessage/errors/ConsumerRecordsProcessingException.java
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
package com.linkedin.kafka.clients.largemessage.errors; | ||
|
||
import java.util.Collections; | ||
import java.util.Iterator; | ||
import java.util.List; | ||
|
||
|
||
public class ConsumerRecordsProcessingException extends RuntimeException { | ||
private final List<RecordProcessingException> _recordProcessingExceptions; | ||
|
||
public ConsumerRecordsProcessingException(List<RecordProcessingException> exceptions) { | ||
super(String.format("Received exception when processing messages for %d partitions.", exceptions.size()), exceptions.get(0)); | ||
Iterator<RecordProcessingException> exceptionIterator = exceptions.iterator(); | ||
// skip the first exception. | ||
exceptionIterator.next(); | ||
while (exceptionIterator.hasNext()) { | ||
addSuppressed(exceptionIterator.next()); | ||
} | ||
_recordProcessingExceptions = exceptions; | ||
} | ||
|
||
public List<RecordProcessingException> recordProcessingExceptions() { | ||
return Collections.unmodifiableList(_recordProcessingExceptions); | ||
} | ||
} |
31 changes: 31 additions & 0 deletions
31
src/main/java/com/linkedin/kafka/clients/largemessage/errors/RecordProcessingException.java
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,31 @@ | ||
package com.linkedin.kafka.clients.largemessage.errors; | ||
|
||
import org.apache.kafka.common.TopicPartition; | ||
|
||
|
||
/** | ||
* An exception indicating a consumer record processing encountered error. | ||
*/ | ||
public class RecordProcessingException extends RuntimeException { | ||
private final TopicPartition _topicPartition; | ||
private final long _offset; | ||
|
||
public RecordProcessingException(TopicPartition tp, long offset, Throwable cause) { | ||
super(cause); | ||
_topicPartition = tp; | ||
_offset = offset; | ||
} | ||
|
||
public TopicPartition topicPartition() { | ||
return _topicPartition; | ||
} | ||
|
||
public long offset() { | ||
return _offset; | ||
} | ||
|
||
@Override | ||
public synchronized Throwable fillInStackTrace() { | ||
return this; | ||
} | ||
} |
Oops, something went wrong.