KAFKA-6120: RecordCollector should not retry sending
Author: Matthias J. Sax <matthias@confluent.io>
Reviewers: Damian Guy <damian.guy@gmail.com>, Guozhang Wang <wangguoz@gmail.com>
Closes #4148 from mjsax/kafka-6120-recordCollector
@@ -93,13 +93,11 @@ public class RecordCollectorImpl implements RecordCollector {
@@ -108,28 +106,57 @@ public class RecordCollectorImpl implements RecordCollector {
                offsets.put(tp, metadata.offset());
            } else {
                if (sendException == null) {
                    log.error("Error sending record (key {} value {} timestamp {}) to topic {} due to {}; " +
                        "No more records will be sent and no more offsets will be recorded for this task.",
                        key, value, timestamp, topic, exception);
                    if (exception instanceof ProducerFencedException) {
                        sendException = new ProducerFencedException(String.format(
                            "%sAbort sending since producer got fenced with a previous record (key %s value %s timestamp %d) to topic %s, error message: %s",
                            logPrefix, key, value, timestamp, topic, exception.getMessage()));
                    } else {
                        sendException = new StreamsException(String.format(
                            "%sAbort sending since an error caught with a previous record (key %s value %s timestamp %d) to topic %s due to %s.",
                            logPrefix, key, value, timestamp, topic, exception.getMessage()), exception);
                    }
                }
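For context, here is a minimal sketch of the fail-fast pattern this commit adopts. The FailFastSender class is hypothetical (it is not the actual RecordCollectorImpl code): the producer callback records the first send error instead of retrying, and the next send call rethrows it so the task aborts.

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.streams.errors.StreamsException;

    // Hypothetical wrapper illustrating the fail-fast send pattern.
    public class FailFastSender {
        private final KafkaProducer<byte[], byte[]> producer;
        private volatile Exception sendException; // first error wins; checked before each send

        FailFastSender(final KafkaProducer<byte[], byte[]> producer) {
            this.producer = producer;
        }

        void send(final ProducerRecord<byte[], byte[]> record) {
            // Surface a previously recorded error on the caller's thread
            // before accepting any new records.
            if (sendException != null) {
                throw new StreamsException("Abort sending due to previous error.", sendException);
            }
            producer.send(record, (metadata, exception) -> {
                if (exception != null && sendException == null) {
                    sendException = exception; // record the failure; do not retry
                }
            });
        }
    }

Storing the first exception and rethrowing it on the next call keeps the asynchronous callback simple while still surfacing the failure on the caller's thread, which is why the collector records sendException rather than attempting to resend from inside the callback.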