@@ -74,7 +74,6 @@ public final class RecordAccumulator {
     private final Set<TopicPartition> muted;
     private int drainIndex;
 
     /**
      * Create a new record accumulator
      *
@@ -104,11 +103,11 @@ public final class RecordAccumulator {
         this.compression = compression;
         this.lingerMs = lingerMs;
         this.retryBackoffMs = retryBackoffMs;
-        this.batches = new CopyOnWriteMap<TopicPartition, Deque<RecordBatch>>();
+        this.batches = new CopyOnWriteMap<>();
         String metricGrpName = "producer-metrics";
         this.free = new BufferPool(totalSize, batchSize, metrics, time, metricGrpName);
         this.incomplete = new IncompleteRecordBatches();
-        this.muted = new HashSet<TopicPartition>();
+        this.muted = new HashSet<>();
         this.time = time;
         registerMetrics(metrics, metricGrpName);
     }
@@ -171,12 +170,9 @@ public final class RecordAccumulator {
             synchronized (dq) {
                 if (closed)
                     throw new IllegalStateException("Cannot send after the producer is closed.");
-                RecordBatch last = dq.peekLast();
-                if (last != null) {
-                    FutureRecordMetadata future = last.tryAppend(timestamp, key, value, callback, time.milliseconds());
-                    if (future != null)
-                        return new RecordAppendResult(future, dq.size() > 1 || last.records.isFull(), false);
-                }
+                RecordAppendResult appendResult = tryAppend(timestamp, key, value, callback, dq);
+                if (appendResult != null)
+                    return appendResult;
             }
             // we don't have an in-progress record batch try to allocate a new batch
@@ -187,14 +183,12 @@ public final class RecordAccumulator {
                 // Need to check if producer is closed again after grabbing the dequeue lock.
                 if (closed)
                     throw new IllegalStateException("Cannot send after the producer is closed.");
-                RecordBatch last = dq.peekLast();
-                if (last != null) {
-                    FutureRecordMetadata future = last.tryAppend(timestamp, key, value, callback, time.milliseconds());
-                    if (future != null) {
-                        // Somebody else found us a batch, return the one we waited for! Hopefully this doesn't happen often...
-                        free.deallocate(buffer);
-                        return new RecordAppendResult(future, dq.size() > 1 || last.records.isFull(), false);
-                    }
-                }
+                RecordAppendResult appendResult = tryAppend(timestamp, key, value, callback, dq);
+                if (appendResult != null) {
+                    // Somebody else found us a batch, return the one we waited for! Hopefully this doesn't happen often...
+                    free.deallocate(buffer);
+                    return appendResult;
+                }
                 MemoryRecords records = MemoryRecords.emptyRecords(buffer, compression, this.batchSize);
                 RecordBatch batch = new RecordBatch(tp, records, time.milliseconds());
@@ -209,12 +203,28 @@ public final class RecordAccumulator {
         }
     }
 
+    /**
+     * If `RecordBatch.tryAppend` fails (i.e. the record batch is full), close its memory records to release temporary
+     * resources (like compression streams buffers).
+     */
+    private RecordAppendResult tryAppend(long timestamp, byte[] key, byte[] value, Callback callback, Deque<RecordBatch> deque) {
+        RecordBatch last = deque.peekLast();
+        if (last != null) {
+            FutureRecordMetadata future = last.tryAppend(timestamp, key, value, callback, time.milliseconds());
+            if (future == null)
+                last.records.close();
+            else
+                return new RecordAppendResult(future, deque.size() > 1 || last.records.isFull(), false);
+        }
+        return null;
+    }
+
     /**
      * Abort the batches that have been sitting in RecordAccumulator for more than the configured requestTimeout
      * due to metadata being unavailable
      */
     public List<RecordBatch> abortExpiredBatches(int requestTimeout, long now) {
-        List<RecordBatch> expiredBatches = new ArrayList<RecordBatch>();
+        List<RecordBatch> expiredBatches = new ArrayList<>();
         int count = 0;
         for (Map.Entry<TopicPartition, Deque<RecordBatch>> entry : this.batches.entrySet()) {
             Deque<RecordBatch> dq = entry.getValue();
@@ -245,7 +255,7 @@ public final class RecordAccumulator {
                 }
             }
         }
-        if (expiredBatches.size() > 0)
+        if (!expiredBatches.isEmpty())
             log.trace("Expired {} batches in accumulator", count);
 
         return expiredBatches;
@@ -287,7 +297,7 @@ public final class RecordAccumulator {
      * </ol>
      */
     public ReadyCheckResult ready(Cluster cluster, long nowMs) {
-        Set<Node> readyNodes = new HashSet<Node>();
+        Set<Node> readyNodes = new HashSet<>();
         long nextReadyCheckDelayMs = Long.MAX_VALUE;
         boolean unknownLeadersExist = false;
@@ -333,7 +343,7 @@ public final class RecordAccumulator {
         for (Map.Entry<TopicPartition, Deque<RecordBatch>> entry : this.batches.entrySet()) {
             Deque<RecordBatch> deque = entry.getValue();
             synchronized (deque) {
-                if (deque.size() > 0)
+                if (!deque.isEmpty())
                     return true;
             }
         }
@@ -357,11 +367,11 @@ public final class RecordAccumulator {
         if (nodes.isEmpty())
             return Collections.emptyMap();
 
-        Map<Integer, List<RecordBatch>> batches = new HashMap<Integer, List<RecordBatch>>();
+        Map<Integer, List<RecordBatch>> batches = new HashMap<>();
         for (Node node : nodes) {
             int size = 0;
             List<PartitionInfo> parts = cluster.partitionsForNode(node.id());
-            List<RecordBatch> ready = new ArrayList<RecordBatch>();
+            List<RecordBatch> ready = new ArrayList<>();
             /* to make starvation less likely this loop doesn't start at 0 */
             int start = drainIndex = drainIndex % parts.size();
             do {
@@ -436,6 +446,11 @@ public final class RecordAccumulator {
     boolean flushInProgress() {
         return flushesInProgress.get() > 0;
     }
 
+    /* Visible for testing */
+    Map<TopicPartition, Deque<RecordBatch>> batches() {
+        return Collections.unmodifiableMap(batches);
+    }
+
     /**
      * Initiate the flushing of data from the accumulator...this makes all requests immediately ready
@@ -569,7 +584,7 @@ public final class RecordAccumulator {
         public Iterable<RecordBatch> all() {
             synchronized (incomplete) {
-                return new ArrayList<RecordBatch>(this.incomplete);
+                return new ArrayList<>(this.incomplete);
             }
         }
     }
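
For orientation, the sketch below shows how the two call sites in append(...) use the new private tryAppend(...) helper after this change. It is a condensed, non-verbatim excerpt under stated assumptions: interrupt handling, metrics, and incomplete-batch tracking are elided, and identifiers not visible in the hunks above (the deque lookup helper, size, maxTimeToBlock) are placeholders.

// Condensed sketch, not the verbatim method body. Both attempts now go through
// tryAppend(...), which closes a full batch's MemoryRecords so temporary
// compression buffers are released as soon as the batch can take no more records.
Deque<RecordBatch> dq = getOrCreateDeque(tp);            // deque lookup helper; actual name may differ
synchronized (dq) {
    if (closed)
        throw new IllegalStateException("Cannot send after the producer is closed.");
    // First attempt: append to the last open batch for this partition, if any.
    RecordAppendResult appendResult = tryAppend(timestamp, key, value, callback, dq);
    if (appendResult != null)
        return appendResult;
}

// No batch had room: allocate a fresh buffer outside the lock, since allocation may block.
ByteBuffer buffer = free.allocate(size, maxTimeToBlock);
synchronized (dq) {
    // Need to check if producer is closed again after grabbing the dequeue lock.
    if (closed)
        throw new IllegalStateException("Cannot send after the producer is closed.");
    // Second attempt: another thread may have created a usable batch while we were allocating.
    RecordAppendResult appendResult = tryAppend(timestamp, key, value, callback, dq);
    if (appendResult != null) {
        free.deallocate(buffer);                         // hand the now-unneeded buffer back to the pool
        return appendResult;
    }
    // Otherwise start a new batch backed by the freshly allocated buffer.
    MemoryRecords records = MemoryRecords.emptyRecords(buffer, compression, this.batchSize);
    RecordBatch batch = new RecordBatch(tp, records, time.milliseconds());
    // ... append to the new batch, enqueue it, track it as incomplete, and return.
}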