@@ -16,6 +16,21 @@
 */
package org.apache.kafka.clients.producer.internals;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.ClientRequest;
import org.apache.kafka.clients.Metadata;
@@ -62,6 +77,7 @@ import org.apache.kafka.common.requests.ProduceResponse;
import org.apache.kafka.common.requests.ResponseHeader;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.test.DelayedReceive;
import org.apache.kafka.test.MockSelector;
import org.apache.kafka.test.TestUtils;
@@ -69,25 +85,10 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class SenderTest {
@@ -131,10 +132,12 @@ public class SenderTest {
sender.run(time.milliseconds()); // connect
sender.run(time.milliseconds()); // send produce request
assertEquals("We should have a single produce request in flight.", 1, client.inFlightRequestCount());
assertEquals(1, sender.inFlightBatches(tp0).size());
assertTrue(client.hasInFlightRequests());
client.respond(produceResponse(tp0, offset, Errors.NONE, 0));
sender.run(time.milliseconds());
assertEquals("All requests completed.", 0, client.inFlightRequestCount());
assertEquals(0, sender.inFlightBatches(tp0).size());
assertFalse(client.hasInFlightRequests());
sender.run(time.milliseconds());
assertTrue("Request should be completed", future.isDone());
@@ -328,33 +331,42 @@ public class SenderTest {
Node node = new Node(Integer.parseInt(id), "localhost", 0);
assertEquals(1, client.inFlightRequestCount());
assertTrue(client.hasInFlightRequests());
assertEquals(1, sender.inFlightBatches(tp0).size());
assertTrue("Client ready status should be true", client.isReady(node, 0L));
client.disconnect(id);
assertEquals(0, client.inFlightRequestCount());
assertFalse(client.hasInFlightRequests());
assertFalse("Client ready status should be false", client.isReady(node, 0L));
// the batch is in accumulator.inFlightBatches until it expires
assertEquals(1, sender.inFlightBatches(tp0).size());
sender.run(time.milliseconds()); // receive error
sender.run(time.milliseconds()); // reconnect
sender.run(time.milliseconds()); // resend
assertEquals(1, client.inFlightRequestCount());
assertTrue(client.hasInFlightRequests());
assertEquals(1, sender.inFlightBatches(tp0).size());
long offset = 0;
client.respond(produceResponse(tp0, offset, Errors.NONE, 0));
sender.run(time.milliseconds());
assertTrue("Request should have retried and completed", future.isDone());
assertEquals(offset, future.get().offset());
assertEquals(0, sender.inFlightBatches(tp0).size());
// do an unsuccessful retry
future = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
sender.run(time.milliseconds()); // send produce request
assertEquals(1, sender.inFlightBatches(tp0).size());
for (int i = 0; i < maxRetries + 1; i++) {
client.disconnect(client.requests().peek().destination());
sender.run(time.milliseconds()); // receive error
assertEquals(0, sender.inFlightBatches(tp0).size());
sender.run(time.milliseconds()); // reconnect
sender.run(time.milliseconds()); // resend
assertEquals(i > 0 ? 0 : 1, sender.inFlightBatches(tp0).size());
}
sender.run(time.milliseconds());
assertFutureFailure(future, NetworkException.class);
assertEquals(0, sender.inFlightBatches(tp0).size());
} finally {
m.close();
}
@@ -371,7 +383,7 @@ public class SenderTest {
senderMetrics, time, REQUEST_TIMEOUT, 50, null, apiVersions);
// Create a two broker cluster, with partition 0 on broker 0 and partition 1 on broker 1
Cluster cluster1 = TestUtils.clusterWith(2, "test", 2);
metadata.update(cluster1, Collections.<String>emptySet(), time.milliseconds());
metadata.update(cluster1, Collections.emptySet(), time.milliseconds());
// Send the first message.
TopicPartition tp2 = new TopicPartition("test", 1);
@@ -384,6 +396,7 @@ public class SenderTest {
assertEquals(1, client.inFlightRequestCount());
assertTrue(client.hasInFlightRequests());
assertTrue("Client ready status should be true", client.isReady(node, 0L));
assertEquals(1, sender.inFlightBatches(tp2).size());
time.sleep(900);
// Now send another message to tp2
@@ -391,11 +404,13 @@ public class SenderTest {
// Update metadata before sender receives response from broker 0. Now partition 2 moves to broker 0
Cluster cluster2 = TestUtils.singletonCluster("test", 2);
metadata.update(cluster2, Collections.<String>emptySet(), time.milliseconds());
metadata.update(cluster2, Collections.emptySet(), time.milliseconds());
// Sender should not send the second message to node 0.
sender.run(time.milliseconds());
assertEquals(1, sender.inFlightBatches(tp2).size());
sender.run(time.milliseconds()); // receive the response for the previous send, and send the new batch
assertEquals(1, client.inFlightRequestCount());
assertTrue(client.hasInFlightRequests());
assertEquals(1, sender.inFlightBatches(tp2).size());
} finally {
m.close();
}
@@ -429,14 +444,18 @@ public class SenderTest {
// Advance the clock to expire the first batch.
time.sleep(10000);
Node clusterNode = this.cluster.nodes().get(0);
Map<Integer, List<ProducerBatch>> drainedBatches =
    accumulator.drain(cluster, Collections.singleton(clusterNode), Integer.MAX_VALUE, time.milliseconds());
sender.addToInflightBatches(drainedBatches);
// Disconnect the target node for the pending produce request. This will ensure that sender will try to
// expire the batch.
Node clusterNode = this.cluster.nodes().get(0);
client.disconnect(clusterNode.idString());
client.blackout(clusterNode, 100);
sender.run(time.milliseconds()); // We should try to flush the batch, but we expire it instead without sending anything.
assertEquals("Callbacks not invoked for expiry", messagesPerBatch, expiryCallbackCount.get());
assertNull("Unexpected exception", unexpectedException.get());
// Make sure that the records were appended back to the batch.
@@ -463,6 +482,7 @@ public class SenderTest {
sender.run(time.milliseconds());
assertEquals("Request completed.", 0, client.inFlightRequestCount());
assertFalse(client.hasInFlightRequests());
assertEquals(0, sender.inFlightBatches(tp0).size());
sender.run(time.milliseconds());
assertTrue("Request should be completed", future.isDone());
@@ -479,6 +499,7 @@ public class SenderTest {
sender.run(time.milliseconds());
assertEquals("Request completed.", 0, client.inFlightRequestCount());
assertFalse(client.hasInFlightRequests());
assertEquals(0, sender.inFlightBatches(tp0).size());
sender.run(time.milliseconds());
assertTrue("Request should be completed", future.isDone());
}
@@ -520,6 +541,7 @@ public class SenderTest {
Node node = new Node(Integer.parseInt(id), "localhost", 0);
assertEquals(1, client.inFlightRequestCount());
assertTrue(client.hasInFlightRequests());
assertEquals(1, sender.inFlightBatches(tp0).size());
assertTrue("Client ready status should be true", client.isReady(node, 0L));
assertFalse(future.isDone());
@@ -583,6 +605,7 @@ public class SenderTest {
sender.run(time.milliseconds()); // receive response 1
assertEquals(1, transactionManager.lastAckedSequence(tp0));
assertFalse(client.hasInFlightRequests());
assertEquals(0, sender.inFlightBatches(tp0).size());
assertTrue(request2.isDone());
assertEquals(1, request2.get().offset());
}
@@ -654,11 +677,12 @@ public class SenderTest {
assertEquals(0, transactionManager.lastAckedSequence(tp0));
assertTrue(request1.isDone());
assertEquals(0, request1.get().offset());
assertFalse(client.hasInFlightRequests());
assertEquals(0, sender.inFlightBatches(tp0).size());
sender.run(time.milliseconds()); // send request 2;
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, sender.inFlightBatches(tp0).size());
sendIdempotentProducerResponse(1, tp0, Errors.NONE, 1L);
sender.run(time.milliseconds()); // receive response 2
@@ -667,17 +691,19 @@ public class SenderTest {
assertEquals(1, request2.get().offset());
assertFalse(client.hasInFlightRequests());
assertEquals(0, sender.inFlightBatches(tp0).size());
sender.run(time.milliseconds()); // send request 3
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, sender.inFlightBatches(tp0).size());
sendIdempotentProducerResponse(2, tp0, Errors.NONE, 2L);
sender.run(time.milliseconds()); // receive response 3, send request 4 since we are out of 'retry' mode.
assertEquals(2, transactionManager.lastAckedSequence(tp0));
assertTrue(request3.isDone());
assertEquals(2, request3.get().offset());
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, sender.inFlightBatches(tp0).size());
sendIdempotentProducerResponse(3, tp0, Errors.NONE, 3L);
sender.run(time.milliseconds()); // receive response 4
@@ -795,7 +821,6 @@ public class SenderTest {
setupWithTransactionState(transactionManager);
prepareAndReceiveInitProducerId(producerId, Errors.NONE);
assertTrue(transactionManager.hasProducerId());
assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
// Send first ProduceRequest
@@ -965,46 +990,54 @@ public class SenderTest {
public void testExpiryOfFirstBatchShouldNotCauseUnresolvedSequencesIfFutureBatchesSucceed() throws Exception {
final long producerId = 343434L;
TransactionManager transactionManager = new TransactionManager();
setupWithTransactionState(transactionManager);
setupWithTransactionState(transactionManager, false, null);
prepareAndReceiveInitProducerId(producerId, Errors.NONE);
assertTrue(transactionManager.hasProducerId());
assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
// Send first ProduceRequest
Future<RecordMetadata> request1 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
sender.run(time.milliseconds()); // send request
// We separate the two appends by 1 second so that the two batches
// don't expire at the same time.
time.sleep(1000L);
Future<RecordMetadata> request2 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
sender.run(time.milliseconds()); // send request
assertEquals(2, client.inFlightRequestCount());
assertEquals(2, sender.inFlightBatches(tp0).size());
sendIdempotentProducerResponse(0, tp0, Errors.REQUEST_TIMED_OUT, -1);
sender.run(time.milliseconds()); // receive first response
assertEquals(1, sender.inFlightBatches(tp0).size());
Node node = this.cluster.nodes().get(0);
time.sleep(10000L);
// We add 600 millis to expire the first batch but not the second.
// Note deliveryTimeoutMs is 1500.
time.sleep(600L);
client.disconnect(node.idString());
client.blackout(node, 10);
sender.run(time.milliseconds()); // now expire the first batch.
assertFutureFailure(request1, TimeoutException.class);
assertTrue(transactionManager.hasUnresolvedSequence(tp0));
assertEquals(0, sender.inFlightBatches(tp0).size());
// let's enqueue another batch, which should not be dequeued until the unresolved state is clear.
Future<RecordMetadata> request3 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
time.sleep(20);
assertFalse(request2.isDone());
sender.run(time.milliseconds()); // send second request
sendIdempotentProducerResponse(1, tp0, Errors.NONE, 1);
assertEquals(1, sender.inFlightBatches(tp0).size());
sender.run(time.milliseconds()); // receive second response, the third request shouldn't be sent since we are in an unresolved state.
assertTrue(request2.isDone());
assertEquals(1, request2.get().offset());
Deque<ProducerBatch> batches = accumulator.batches().get(tp0);
assertEquals(0, sender.inFlightBatches(tp0).size());
Deque<ProducerBatch> batches = accumulator.batches().get(tp0);
assertEquals(1, batches.size());
assertFalse(batches.peekFirst().hasSequence());
assertFalse(client.hasInFlightRequests());
@@ -1017,6 +1050,7 @@ public class SenderTest {
assertEquals(0, batches.size());
assertEquals(1, client.inFlightRequestCount());
assertFalse(request3.isDone());
assertEquals(1, sender.inFlightBatches(tp0).size());
}
@Test
@@ -1026,13 +1060,13 @@ public class SenderTest {
setupWithTransactionState(transactionManager);
prepareAndReceiveInitProducerId(producerId, Errors.NONE);
assertTrue(transactionManager.hasProducerId());
assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
// Send first ProduceRequest
Future<RecordMetadata> request1 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
sender.run(time.milliseconds()); // send request
time.sleep(1000L);
Future<RecordMetadata> request2 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
sender.run(time.milliseconds()); // send request
@@ -1042,7 +1076,7 @@ public class SenderTest {
sender.run(time.milliseconds()); // receive first response
Node node = this.cluster.nodes().get(0);
time.sleep(10000L);
time.sleep(1000L);
client.disconnect(node.idString());
client.blackout(node, 10);
@@ -1053,9 +1087,7 @@ public class SenderTest {
Future<RecordMetadata> request3 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
time.sleep(20);
assertFalse(request2.isDone());
sender.run(time.milliseconds()); // send second request
sendIdempotentProducerResponse(1, tp0, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, 1);
sender.run(time.milliseconds()); // receive second response, the third request shouldn't be sent since we are in an unresolved state.
@@ -1087,12 +1119,12 @@ public class SenderTest {
Future<RecordMetadata> request1 = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
sender.run(time.milliseconds()); // send request
sendIdempotentProducerResponse(0, tp0, Errors.NOT_LEADER_FOR_PARTITION, -1);
sender.run(time.milliseconds()); // receive response
sender.run(time.milliseconds()); // receive response
assertEquals(1L, transactionManager.sequenceNumber(tp0).longValue());
Node node = this.cluster.nodes().get(0);
time.sleep(10000L);
time.sleep(15000L);
client.disconnect(node.idString());
client.blackout(node, 10);
@@ -1520,7 +1552,6 @@ public class SenderTest {
RecordBatch firstBatch = batchIterator.next();
assertFalse(batchIterator.hasNext());
assertEquals(expectedSequence, firstBatch.baseSequence());
return true;
}
}, produceResponse(tp, responseOffset, responseError, 0, logStartOffset));
@@ -1754,11 +1785,13 @@ public class SenderTest {
sender.run(time.milliseconds()); // send.
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, sender.inFlightBatches(tp0).size());
client.respond(produceResponse(tp0, 0, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, 0));
sender.run(time.milliseconds());
assertTrue(responseFuture.isDone());
assertEquals(0, sender.inFlightBatches(tp0).size());
assertFalse("Expected transaction state to be reset upon receiving an OutOfOrderSequenceException", transactionManager.hasProducerId());
}
@@ -1794,11 +1827,15 @@ public class SenderTest {
TopicPartition tp) throws Exception {
int maxRetries = 1;
String topic = tp.topic();
long deliveryTimeoutMs = 3000L;
long totalSize = 1024 * 1024;
String metricGrpName = "producer-metrics";
// Set a good compression ratio.
CompressionRatioEstimator.setEstimation(topic, CompressionType.GZIP, 0.2f);
try (Metrics m = new Metrics()) {
accumulator = new RecordAccumulator(logContext, batchSize, 1024 * 1024, CompressionType.GZIP, 0L, 0L, m, time,
    new ApiVersions(), txnManager);
accumulator = new RecordAccumulator(logContext, batchSize, CompressionType.GZIP,
    0L, 0L, deliveryTimeoutMs, m, metricGrpName, time, new ApiVersions(), txnManager,
    new BufferPool(totalSize, batchSize, metrics, time, "producer-internal-metrics"));
SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries,
    senderMetrics, time, REQUEST_TIMEOUT, 1000L, txnManager, new ApiVersions());
@@ -1865,9 +1902,153 @@ public class SenderTest {
assertEquals ( "The last ack'd sequence number should be 1" , 1 , txnManager . lastAckedSequence ( tp ) ) ;
assertEquals ( "Offset of the first message should be 1" , 1L , f2 . get ( ) . offset ( ) ) ;
assertTrue ( "There should be no batch in the accumulator" , accumulator . batches ( ) . get ( tp ) . isEmpty ( ) ) ;
assertTrue ( "There should be a split" , m . metrics ( ) . get ( senderMetrics . batchSplitRate ) . value ( ) > 0 ) ;
}
}
@Test
public void testNoDoubleDeallocation ( ) throws Exception {
long deliverTimeoutMs = 1500L ;
long totalSize = 1024 * 1024 ;
String metricGrpName = "producer-custom-metrics" ;
MatchingBufferPool pool = new MatchingBufferPool ( totalSize , batchSize , metrics , time , metricGrpName ) ;
setupWithTransactionState ( null , false , pool ) ;
assertTrue ( "There should be a split" ,
m . metrics ( ) . get ( senderMetrics . batchSplitRate ) . value ( ) > 0 ) ;
// Send first ProduceRequest
Future<RecordMetadata> request1 =
    accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
sender.run(time.milliseconds()); // send request
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, sender.inFlightBatches(tp0).size());
time.sleep(deliverTimeoutMs);
assertFalse(pool.allMatch());
sender.run(time.milliseconds()); // expire the batch
assertTrue(request1.isDone());
assertTrue("The batch should have been de-allocated", pool.allMatch());
assertTrue(pool.allMatch());
sender.run(time.milliseconds());
assertTrue("The batch should have been de-allocated", pool.allMatch());
assertEquals(0, client.inFlightRequestCount());
assertEquals(0, sender.inFlightBatches(tp0).size());
}
@Test
public void testInflightBatchesExpireOnDeliveryTimeout() throws InterruptedException {
long deliveryTimeoutMs = 1500L;
setupWithTransactionState(null, true, null);
// Send first ProduceRequest
Future<RecordMetadata> request = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
sender.run(time.milliseconds()); // send request
assertEquals(1, client.inFlightRequestCount());
assertEquals("Expect one in-flight batch in accumulator", 1, sender.inFlightBatches(tp0).size());
Map<TopicPartition, ProduceResponse.PartitionResponse> responseMap = new HashMap<>();
responseMap.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L));
client.respond(new ProduceResponse(responseMap));
time.sleep(deliveryTimeoutMs);
sender.run(time.milliseconds()); // receive first response
assertEquals("Expect zero in-flight batch in accumulator", 0, sender.inFlightBatches(tp0).size());
try {
request.get();
fail("The expired batch should throw a TimeoutException");
} catch (ExecutionException e) {
assertTrue(e.getCause() instanceof TimeoutException);
}
}
@Test
public void testWhenFirstBatchExpireNoSendSecondBatchIfGuaranteeOrder() throws InterruptedException {
long deliveryTimeoutMs = 1500L;
setupWithTransactionState(null, true, null);
// Send first ProduceRequest
accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT);
sender.run(time.milliseconds()); // send request
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, sender.inFlightBatches(tp0).size());
time.sleep(deliveryTimeoutMs / 2);
// Send second ProduceRequest
accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT);
sender.run(time.milliseconds()); // must not send request because the partition is muted
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, sender.inFlightBatches(tp0).size());
time.sleep(deliveryTimeoutMs / 2); // expire the first batch only
client.respond(produceResponse(tp0, 0L, Errors.NONE, 0, 0L));
sender.run(time.milliseconds()); // receive response (offset=0)
assertEquals(0, client.inFlightRequestCount());
assertEquals(0, sender.inFlightBatches(tp0).size());
sender.run(time.milliseconds()); // Drain the second request only this time
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, sender.inFlightBatches(tp0).size());
}
@Test
public void testExpiredBatchDoesNotRetry() throws Exception {
long deliverTimeoutMs = 1500L;
setupWithTransactionState(null, false, null);
// Send first ProduceRequest
Future<RecordMetadata> request1 =
    accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null,
        MAX_BLOCK_TIMEOUT).future;
sender.run(time.milliseconds()); // send request
assertEquals(1, client.inFlightRequestCount());
time.sleep(deliverTimeoutMs);
Map<TopicPartition, ProduceResponse.PartitionResponse> responseMap = new HashMap<>();
responseMap.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L));
client.respond(produceResponse(tp0, -1, Errors.NOT_LEADER_FOR_PARTITION, -1)); // return a retriable error
sender.run(time.milliseconds()); // expire the batch
assertTrue(request1.isDone());
assertEquals(0, client.inFlightRequestCount());
assertEquals(0, sender.inFlightBatches(tp0).size());
sender.run(time.milliseconds()); // receive first response and do not reenqueue.
assertEquals(0, client.inFlightRequestCount());
assertEquals(0, sender.inFlightBatches(tp0).size());
sender.run(time.milliseconds()); // run again and must not send anything.
assertEquals(0, client.inFlightRequestCount());
assertEquals(0, sender.inFlightBatches(tp0).size());
}
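// Test helper used by testNoDoubleDeallocation above: a BufferPool that remembers every buffer it hands out,
// throws if a buffer is deallocated that it does not currently own, and reports via allMatch() whether all
// allocated buffers have been returned.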
private class MatchingBufferPool extends BufferPool {
IdentityHashMap<ByteBuffer, Boolean> allocatedBuffers;
MatchingBufferPool(long totalSize, int batchSize, Metrics metrics, Time time, String metricGrpName) {
super(totalSize, batchSize, metrics, time, metricGrpName);
allocatedBuffers = new IdentityHashMap<>();
}
@Override
public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException {
ByteBuffer buffer = super.allocate(size, maxTimeToBlockMs);
allocatedBuffers.put(buffer, Boolean.TRUE);
return buffer;
}
@Override
public void deallocate(ByteBuffer buffer, int size) {
if (!allocatedBuffers.containsKey(buffer)) {
throw new IllegalStateException("Deallocating a buffer that is not allocated");
}
allocatedBuffers.remove(buffer);
super.deallocate(buffer, size);
}
public boolean allMatch() {
return allocatedBuffers.isEmpty();
}
}
@@ -1931,17 +2112,29 @@ public class SenderTest {
}
private void setupWithTransactionState(TransactionManager transactionManager) {
setupWithTransactionState(transactionManager, false, null);
}
private void setupWithTransactionState(TransactionManager transactionManager, boolean guaranteeOrder, BufferPool customPool) {
long totalSize = 1024 * 1024;
String metricGrpName = "producer-metrics";
Map<String, String> metricTags = new LinkedHashMap<>();
metricTags.put("client-id", CLIENT_ID);
MetricConfig metricConfig = new MetricConfig().tags(metricTags);
this.metrics = new Metrics(metricConfig, time);
this.accumulator = new RecordAccumulator(logContext, batchSize, 1024 * 1024, CompressionType.NONE, 0L, 0L, metrics, time,
    apiVersions, transactionManager);
this.senderMetricsRegistry = new SenderMetricsRegistry(this.metrics);
BufferPool pool = (customPool == null) ? new BufferPool(totalSize, batchSize, metrics, time, metricGrpName) : customPool;
setupWithTransactionState(transactionManager, guaranteeOrder, metricTags, pool);
}
this.sender = new Sender(logContext, this.client, this.metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL,
    Integer.MAX_VALUE, this.senderMetricsRegistry, this.time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions);
this.metadata.update(this.cluster, Collections.<String>emptySet(), time.milliseconds());
private void setupWithTransactionState(TransactionManager transactionManager, boolean guaranteeOrder, Map<String, String> metricTags, BufferPool pool) {
long deliveryTimeoutMs = 1500L;
String metricGrpName = "producer-metrics";
this.accumulator = new RecordAccumulator(logContext, batchSize, CompressionType.NONE, 0L, 0L,
    deliveryTimeoutMs, metrics, metricGrpName, time, apiVersions, transactionManager, pool);
this.senderMetricsRegistry = new SenderMetricsRegistry(this.metrics);
this.sender = new Sender(logContext, this.client, this.metadata, this.accumulator, guaranteeOrder, MAX_REQUEST_SIZE, ACKS_ALL,
    Integer.MAX_VALUE, this.senderMetricsRegistry, this.time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions);
this.metadata.update(this.cluster, Collections.emptySet(), time.milliseconds());
}
private void assertSendFailure(Class<? extends RuntimeException> expectedError) throws Exception {