KAFKA-9131: Remove dead code for handling timeout exception (#7635)
Remove the timeout-exception handling from the catch clause and move it to the send callback.
Reviewers: John Roesler <john@confluent.io>, Matthias J. Sax <mjsax@apache.org>, Guozhang Wang <wangguoz@gmail.com>
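At a high level, the change swaps a try/catch around `producer.send()` for error handling inside the producer `Callback`. A minimal sketch of the resulting pattern (simplified, not the actual `RecordCollectorImpl` code; the class and method below are illustrative, though the `sendException` field mirrors the diff):

```java
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.streams.errors.StreamsException;

final class CallbackTimeoutSketch {
    private volatile KafkaException sendException;

    void send(final Producer<byte[], byte[]> producer,
              final ProducerRecord<byte[], byte[]> record) {
        // Surface any error recorded by an earlier callback before sending again.
        if (sendException != null) {
            throw sendException;
        }
        producer.send(record, (metadata, exception) -> {
            if (exception instanceof TimeoutException) {
                // Callbacks run on the producer's I/O thread, so the error is
                // recorded here and rethrown later on the caller's thread.
                sendException = new StreamsException(
                    "Failed to send record to topic " + record.topic() + " due to timeout.",
                    exception
                );
            }
        });
    }
}
```

Recording the exception in a volatile field and rethrowing it on a subsequent send matches the `sendException` field added in the first hunk below.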
@@ -56,13 +56,18 @@ public class RecordCollectorImpl implements RecordCollector {
private final static String LOG_MESSAGE = "Error sending record to topic {} due to {}; " +
    "No more records will be sent and no more offsets will be recorded for this task. " +
    "Enable TRACE logging to view failed record key and value.";
private final static String EXCEPTION_MESSAGE = "%sAbort sending since %s with a previous record (timestamp %d) to topic %s due to %s";
private final static String PARAMETER_HINT = "\nYou can increase the producer configs `delivery.timeout.ms` and/or " +
    "`retries` to avoid this error. Note that `retries` is set to infinite by default.";
private final static String TIMEOUT_HINT_TEMPLATE = "%nTimeout exception caught when sending record to topic %s. " +
    "This might happen if the producer cannot send data to the Kafka cluster and thus, " +
    "its internal buffer fills up. " +
    "This can also happen if the broker is slow to respond, if the network connection to " +
    "the broker was interrupted, or if similar circumstances arise. " +
    "You can increase producer parameter `max.block.ms` to increase this timeout.";
private volatile KafkaException sendException;
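For illustration, the two templates compose via `String.format`; a hypothetical expansion (the values and the exact call site are made up, but the specifier order follows the constants above):

```java
// Hypothetical values, for illustration only.
final String logPrefix = "[task 0_1] ";
final String topic = "output-topic";
final long timestamp = 1573000000000L;

// EXCEPTION_MESSAGE consumes: prefix, reason, timestamp, topic, cause;
// the appended TIMEOUT_HINT_TEMPLATE consumes one more argument: topic.
final String errorMessage = String.format(
    EXCEPTION_MESSAGE + TIMEOUT_HINT_TEMPLATE,
    logPrefix, "a timeout occurred", timestamp, topic, "TimeoutException", topic
);
```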
@@ -129,9 +134,14 @@ public class RecordCollectorImpl implements RecordCollector {
) {
    String errorLogMessage = LOG_MESSAGE;
    String errorMessage = EXCEPTION_MESSAGE;
    // There is no documented API for detecting retriable errors, so we rely on `RetriableException`
    // even though it's an implementation detail (i.e. we do the best we can given what's available)
    errorLogMessage += PARAMETER_HINT;
    errorMessage += PARAMETER_HINT;
}
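The hunk shows the hints being appended, but the excerpt cuts off the guarding condition; per the comment, it keys on `RetriableException`, roughly like this (a sketch of the assumed surrounding check, not the exact code):

```java
import org.apache.kafka.common.errors.RetriableException;

final class RetryHintSketch {
    // Same PARAMETER_HINT as declared in the first hunk above.
    private static final String PARAMETER_HINT =
        "\nYou can increase the producer configs `delivery.timeout.ms` and/or " +
        "`retries` to avoid this error. Note that `retries` is set to infinite by default.";

    // Assumed shape of the check: append the hint only when the producer
    // reports an error it considers retriable.
    static String withRetryHint(final String baseMessage, final Exception exception) {
        return exception instanceof RetriableException
            ? baseMessage + PARAMETER_HINT
            : baseMessage;
    }
}
```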
@@ -220,21 +230,6 @@ public class RecordCollectorImpl implements RecordCollector {
}
}
});
} catch (final TimeoutException e) {
    log.error(
        "Timeout exception caught when sending record to topic {}. " +
            "This might happen if the producer cannot send data to the Kafka cluster and thus, " +
            "its internal buffer fills up. " +
            "This can also happen if the broker is slow to respond, if the network connection to " +
            "the broker was interrupted, or if similar circumstances arise. " +
            "You can increase producer parameter `max.block.ms` to increase this timeout.",
        topic,
        e
    );
    throw new StreamsException(
        String.format("%sFailed to send record to topic %s due to timeout.", logPrefix, topic),