This continues laying the groundwork for the fetch refactoring by introducing some new classes and refactoring the existing code to use them where applicable. Changes:

* Minor cleanup of the events classes to make data immutable and private, and to implement toString().
* Added IdempotentCloser, which prevents a resource from being closed more than once. It is general enough that it could be used elsewhere in the project, but it is limited to the consumer internals for now.
* Split the core Fetcher code into a class that buffers raw results (FetchBuffer) and a class that collects raw results into ConsumerRecords (FetchCollector). These can be tested and changed in isolation from the core fetcher logic.
* Added NodeStatusDetector, which abstracts methods from ConsumerNetworkClient so that both it and NetworkClientDelegate can be used in AbstractFetch via the interface instead of using ConsumerNetworkClient directly.

Reviewers: Jun Rao <junrao@gmail.com>
Kirk True committed 1 year ago via GitHub
36 changed files with 2387 additions and 692 deletions
FetchBuffer.java
@@ -0,0 +1,149 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.internals.IdempotentCloser;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

import java.io.Closeable;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Predicate;

/**
 * {@code FetchBuffer} buffers up {@link CompletedFetch the results} from the broker responses as they are received.
 * It is essentially a wrapper around a {@link java.util.Queue} of {@link CompletedFetch}. There is at most one
 * {@link CompletedFetch} per partition in the queue.
 *
 * <p/>
 *
 * <em>Note</em>: this class is not thread-safe and is intended to only be used from a single thread.
 */
public class FetchBuffer implements Closeable {

    private final Logger log;
    private final ConcurrentLinkedQueue<CompletedFetch> completedFetches;
    private final IdempotentCloser idempotentCloser = new IdempotentCloser();

    private CompletedFetch nextInLineFetch;

    public FetchBuffer(final LogContext logContext) {
        this.log = logContext.logger(FetchBuffer.class);
        this.completedFetches = new ConcurrentLinkedQueue<>();
    }

    /**
     * Returns {@code true} if there are no completed fetches pending to return to the user.
     *
     * @return {@code true} if the buffer is empty, {@code false} otherwise
     */
    boolean isEmpty() {
        return completedFetches.isEmpty();
    }

    /**
     * Returns whether there are any completed fetches, pending return to the user, that match the given
     * {@link Predicate}. This method is thread-safe. Visible for testing.
     *
     * @return {@code true} if there are completed fetches that match the {@link Predicate}, {@code false} otherwise
     */
    boolean hasCompletedFetches(Predicate<CompletedFetch> predicate) {
        return completedFetches.stream().anyMatch(predicate);
    }

    void add(CompletedFetch completedFetch) {
        completedFetches.add(completedFetch);
    }

    void addAll(Collection<CompletedFetch> completedFetches) {
        this.completedFetches.addAll(completedFetches);
    }

    CompletedFetch nextInLineFetch() {
        return nextInLineFetch;
    }

    void setNextInLineFetch(CompletedFetch completedFetch) {
        this.nextInLineFetch = completedFetch;
    }

    CompletedFetch peek() {
        return completedFetches.peek();
    }

    CompletedFetch poll() {
        return completedFetches.poll();
    }

    /**
     * Updates the buffer to retain only the fetch data that corresponds to the given partitions. Any previously
     * {@link CompletedFetch fetched data} is removed if its partition is not in the given set of partitions.
     *
     * @param partitions {@link Set} of {@link TopicPartition}s for which any buffered data should be kept
     */
    void retainAll(final Set<TopicPartition> partitions) {
        completedFetches.removeIf(cf -> maybeDrain(partitions, cf));

        if (maybeDrain(partitions, nextInLineFetch))
            nextInLineFetch = null;
    }

    private boolean maybeDrain(final Set<TopicPartition> partitions, final CompletedFetch completedFetch) {
        if (completedFetch != null && !partitions.contains(completedFetch.partition)) {
            log.debug("Removing {} from buffered fetch data as it is not in the set of partitions to retain ({})", completedFetch.partition, partitions);
            completedFetch.drain();
            return true;
        } else {
            return false;
        }
    }

    /**
     * Return the set of {@link TopicPartition partitions} for which we have data in the buffer.
     *
     * @return {@link TopicPartition Partition} set
     */
    Set<TopicPartition> bufferedPartitions() {
        final Set<TopicPartition> partitions = new HashSet<>();

        if (nextInLineFetch != null && !nextInLineFetch.isConsumed()) {
            partitions.add(nextInLineFetch.partition);
        }

        completedFetches.forEach(cf -> partitions.add(cf.partition));
        return partitions;
    }

    @Override
    public void close() {
        idempotentCloser.close(() -> {
            log.debug("Closing the fetch buffer");

            if (nextInLineFetch != null) {
                nextInLineFetch.drain();
                nextInLineFetch = null;
            }

            completedFetches.forEach(CompletedFetch::drain);
            completedFetches.clear();
        }, () -> log.warn("The fetch buffer was previously closed"));
    }
}
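As a quick orientation to the new API (not part of the change itself), here is a minimal usage sketch of the buffer lifecycle. The class name FetchBufferUsageSketch and its demo method are hypothetical; the code assumes it lives in the consumer internals package, since the mutating methods are package-private.

package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.LogContext;

import java.util.Collections;

// Hypothetical walkthrough of the FetchBuffer lifecycle.
public class FetchBufferUsageSketch {

    static void demo(CompletedFetch fetchForPartition0, TopicPartition partition0) {
        try (FetchBuffer buffer = new FetchBuffer(new LogContext())) {
            // Raw broker results are queued as they arrive.
            buffer.add(fetchForPartition0);

            // The collector promotes one CompletedFetch to "next in line" and drains it incrementally.
            buffer.setNextInLineFetch(buffer.poll());

            // On rebalance, drop buffered data for partitions we no longer own.
            buffer.retainAll(Collections.singleton(partition0));
        }
        // close() is idempotent thanks to IdempotentCloser; a second close only logs a warning.
    }
}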
FetchCollector.java
@@ -0,0 +1,372 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.requests.FetchResponse;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.slf4j.Logger;

import java.util.ArrayDeque;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Queue;

import static org.apache.kafka.clients.consumer.internals.FetchUtils.requestMetadataUpdate;

/**
 * {@code FetchCollector} operates at the {@link RecordBatch} level, as that is what is stored in the
 * {@link FetchBuffer}. Each {@link org.apache.kafka.common.record.Record} in the {@link RecordBatch} is converted
 * to a {@link ConsumerRecord} and added to the returned {@link Fetch}.
 *
 * @param <K> Record key type
 * @param <V> Record value type
 */
public class FetchCollector<K, V> {

    private final Logger log;
    private final ConsumerMetadata metadata;
    private final SubscriptionState subscriptions;
    private final FetchConfig<K, V> fetchConfig;
    private final FetchMetricsManager metricsManager;
    private final Time time;

    public FetchCollector(final LogContext logContext,
                          final ConsumerMetadata metadata,
                          final SubscriptionState subscriptions,
                          final FetchConfig<K, V> fetchConfig,
                          final FetchMetricsManager metricsManager,
                          final Time time) {
        this.log = logContext.logger(FetchCollector.class);
        this.metadata = metadata;
        this.subscriptions = subscriptions;
        this.fetchConfig = fetchConfig;
        this.metricsManager = metricsManager;
        this.time = time;
    }

    /**
     * Return the fetched {@link ConsumerRecord records}, empty the {@link FetchBuffer record buffer}, and
     * update the consumed position.
     *
     * <p/>
     *
     * NOTE: returning an {@link Fetch#empty() empty} fetch guarantees the consumed position is not updated.
     *
     * @param fetchBuffer {@link FetchBuffer} from which to retrieve the {@link ConsumerRecord records}
     *
     * @return A {@link Fetch} for the requested partitions
     * @throws OffsetOutOfRangeException If there is an OffsetOutOfRange error in the fetch response and
     *                                   the default reset policy is NONE
     * @throws TopicAuthorizationException If there is a TopicAuthorization error in the fetch response.
     */
    public Fetch<K, V> collectFetch(final FetchBuffer fetchBuffer) {
        final Fetch<K, V> fetch = Fetch.empty();
        final Queue<CompletedFetch> pausedCompletedFetches = new ArrayDeque<>();
        int recordsRemaining = fetchConfig.maxPollRecords;

        try {
            while (recordsRemaining > 0) {
                final CompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();

                if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
                    final CompletedFetch completedFetch = fetchBuffer.peek();

                    if (completedFetch == null)
                        break;

                    if (!completedFetch.isInitialized()) {
                        try {
                            fetchBuffer.setNextInLineFetch(initialize(completedFetch));
                        } catch (Exception e) {
                            // Remove the completedFetch upon a parse exception if (1) it contains no records, and
                            // (2) there are no fetched records with actual content preceding this exception.
                            // The first condition ensures that the completedFetches queue is not stuck with the same
                            // completedFetch in cases such as the TopicAuthorizationException, and the second
                            // condition ensures that there is no potential data loss due to an exception in a
                            // following record.
                            if (fetch.isEmpty() && FetchResponse.recordsOrFail(completedFetch.partitionData).sizeInBytes() == 0)
                                fetchBuffer.poll();

                            throw e;
                        }
                    } else {
                        fetchBuffer.setNextInLineFetch(completedFetch);
                    }

                    fetchBuffer.poll();
                } else if (subscriptions.isPaused(nextInLineFetch.partition)) {
                    // When the partition is paused, we add the records back to the completedFetches queue instead of
                    // draining them so that they can be returned on a subsequent poll if the partition is resumed at
                    // that time.
                    log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition);
                    pausedCompletedFetches.add(nextInLineFetch);
                    fetchBuffer.setNextInLineFetch(null);
                } else {
                    final Fetch<K, V> nextFetch = fetchRecords(nextInLineFetch);
                    recordsRemaining -= nextFetch.numRecords();
                    fetch.add(nextFetch);
                }
            }
        } catch (KafkaException e) {
            if (fetch.isEmpty())
                throw e;
        } finally {
            // Add any polled completed fetches for paused partitions back to the completed fetches queue to be
            // re-evaluated in the next poll.
            fetchBuffer.addAll(pausedCompletedFetches);
        }

        return fetch;
    }

    private Fetch<K, V> fetchRecords(final CompletedFetch nextInLineFetch) {
        final TopicPartition tp = nextInLineFetch.partition;

        if (!subscriptions.isAssigned(tp)) {
            // This can happen when a rebalance happened before fetched records are returned to the consumer's
            // poll call.
            log.debug("Not returning fetched records for partition {} since it is no longer assigned", tp);
        } else if (!subscriptions.isFetchable(tp)) {
            // This can happen when a partition is paused before fetched records are returned to the consumer's
            // poll call or if the offset is being reset.
            log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", tp);
        } else {
            SubscriptionState.FetchPosition position = subscriptions.position(tp);

            if (position == null)
                throw new IllegalStateException("Missing position for fetchable partition " + tp);

            if (nextInLineFetch.nextFetchOffset() == position.offset) {
                List<ConsumerRecord<K, V>> partRecords = nextInLineFetch.fetchRecords(fetchConfig, fetchConfig.maxPollRecords);

                log.trace("Returning {} fetched records at offset {} for assigned partition {}",
                        partRecords.size(), position, tp);

                boolean positionAdvanced = false;

                if (nextInLineFetch.nextFetchOffset() > position.offset) {
                    SubscriptionState.FetchPosition nextPosition = new SubscriptionState.FetchPosition(
                            nextInLineFetch.nextFetchOffset(),
                            nextInLineFetch.lastEpoch(),
                            position.currentLeader);
                    log.trace("Updating fetch position from {} to {} for partition {} and returning {} records from `poll()`",
                            position, nextPosition, tp, partRecords.size());
                    subscriptions.position(tp, nextPosition);
                    positionAdvanced = true;
                }

                Long partitionLag = subscriptions.partitionLag(tp, fetchConfig.isolationLevel);
                if (partitionLag != null)
                    metricsManager.recordPartitionLag(tp, partitionLag);

                Long lead = subscriptions.partitionLead(tp);
                if (lead != null) {
                    metricsManager.recordPartitionLead(tp, lead);
                }

                return Fetch.forPartition(tp, partRecords, positionAdvanced);
            } else {
                // These records aren't next in line based on the last consumed position; ignore them.
                // They must be from an obsolete request.
                log.debug("Ignoring fetched records for {} at offset {} since the current position is {}",
                        tp, nextInLineFetch.nextFetchOffset(), position);
            }
        }

        log.trace("Draining fetched records for partition {}", tp);
        nextInLineFetch.drain();

        return Fetch.empty();
    }

    /**
     * Initialize a CompletedFetch object.
     */
    protected CompletedFetch initialize(final CompletedFetch completedFetch) {
        final TopicPartition tp = completedFetch.partition;
        final Errors error = Errors.forCode(completedFetch.partitionData.errorCode());
        boolean recordMetrics = true;

        try {
            if (!subscriptions.hasValidPosition(tp)) {
                // This can happen when a rebalance happened while the fetch was still in-flight.
                log.debug("Ignoring fetched records for partition {} since it no longer has valid position", tp);
                return null;
            } else if (error == Errors.NONE) {
                final CompletedFetch ret = handleInitializeSuccess(completedFetch);
                recordMetrics = ret == null;
                return ret;
            } else {
                handleInitializeErrors(completedFetch, error);
                return null;
            }
        } finally {
            if (recordMetrics) {
                completedFetch.recordAggregatedMetrics(0, 0);
            }

            if (error != Errors.NONE)
                // We move the partition to the end if there was an error. This way, it's more likely that partitions
                // for the same topic can remain together (allowing for more efficient serialization).
                subscriptions.movePartitionToEnd(tp);
        }
    }

    private CompletedFetch handleInitializeSuccess(final CompletedFetch completedFetch) {
        final TopicPartition tp = completedFetch.partition;
        final long fetchOffset = completedFetch.nextFetchOffset();

        // We are interested in this fetch only if the beginning offset matches the
        // current consumed position.
        SubscriptionState.FetchPosition position = subscriptions.position(tp);
        if (position == null || position.offset != fetchOffset) {
            log.debug("Discarding stale fetch response for partition {} since its offset {} does not match " +
                    "the expected offset {}", tp, fetchOffset, position);
            return null;
        }

        final FetchResponseData.PartitionData partition = completedFetch.partitionData;
        log.trace("Preparing to read {} bytes of data for partition {} with offset {}",
                FetchResponse.recordsSize(partition), tp, position);
        Iterator<? extends RecordBatch> batches = FetchResponse.recordsOrFail(partition).batches().iterator();

        if (!batches.hasNext() && FetchResponse.recordsSize(partition) > 0) {
            if (completedFetch.requestVersion < 3) {
                // Implement the pre KIP-74 behavior of throwing a RecordTooLargeException.
                Map<TopicPartition, Long> recordTooLargePartitions = Collections.singletonMap(tp, fetchOffset);
                throw new RecordTooLargeException("There are some messages at [Partition=Offset]: " +
                        recordTooLargePartitions + " whose size is larger than the fetch size " + fetchConfig.fetchSize +
                        " and hence cannot be returned. Please consider upgrading your broker to 0.10.1.0 or " +
                        "newer to avoid this issue. Alternatively, increase the fetch size on the client (using " +
                        ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG + ")",
                        recordTooLargePartitions);
            } else {
                // This should not happen with brokers that support FetchRequest/Response V3 or higher (i.e. KIP-74).
                throw new KafkaException("Failed to make progress reading messages at " + tp + "=" +
                        fetchOffset + ". Received a non-empty fetch response from the server, but no " +
                        "complete records were found.");
            }
        }

        if (partition.highWatermark() >= 0) {
            log.trace("Updating high watermark for partition {} to {}", tp, partition.highWatermark());
            subscriptions.updateHighWatermark(tp, partition.highWatermark());
        }

        if (partition.logStartOffset() >= 0) {
            log.trace("Updating log start offset for partition {} to {}", tp, partition.logStartOffset());
            subscriptions.updateLogStartOffset(tp, partition.logStartOffset());
        }

        if (partition.lastStableOffset() >= 0) {
            log.trace("Updating last stable offset for partition {} to {}", tp, partition.lastStableOffset());
            subscriptions.updateLastStableOffset(tp, partition.lastStableOffset());
        }

        if (FetchResponse.isPreferredReplica(partition)) {
            subscriptions.updatePreferredReadReplica(completedFetch.partition, partition.preferredReadReplica(), () -> {
                long expireTimeMs = time.milliseconds() + metadata.metadataExpireMs();
                log.debug("Updating preferred read replica for partition {} to {}, set to expire at {}",
                        tp, partition.preferredReadReplica(), expireTimeMs);
                return expireTimeMs;
            });
        }

        completedFetch.setInitialized();
        return completedFetch;
    }

    private void handleInitializeErrors(final CompletedFetch completedFetch, final Errors error) {
        final TopicPartition tp = completedFetch.partition;
        final long fetchOffset = completedFetch.nextFetchOffset();

        if (error == Errors.NOT_LEADER_OR_FOLLOWER ||
                error == Errors.REPLICA_NOT_AVAILABLE ||
                error == Errors.KAFKA_STORAGE_ERROR ||
                error == Errors.FENCED_LEADER_EPOCH ||
                error == Errors.OFFSET_NOT_AVAILABLE) {
            log.debug("Error in fetch for partition {}: {}", tp, error.exceptionName());
            requestMetadataUpdate(metadata, subscriptions, tp);
        } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
            log.warn("Received unknown topic or partition error in fetch for partition {}", tp);
            requestMetadataUpdate(metadata, subscriptions, tp);
        } else if (error == Errors.UNKNOWN_TOPIC_ID) {
            log.warn("Received unknown topic ID error in fetch for partition {}", tp);
            requestMetadataUpdate(metadata, subscriptions, tp);
        } else if (error == Errors.INCONSISTENT_TOPIC_ID) {
            log.warn("Received inconsistent topic ID error in fetch for partition {}", tp);
            requestMetadataUpdate(metadata, subscriptions, tp);
        } else if (error == Errors.OFFSET_OUT_OF_RANGE) {
            Optional<Integer> clearedReplicaId = subscriptions.clearPreferredReadReplica(tp);

            if (!clearedReplicaId.isPresent()) {
                // If there's no preferred replica to clear, we're fetching from the leader, so handle this error normally.
                SubscriptionState.FetchPosition position = subscriptions.position(tp);

                if (position == null || fetchOffset != position.offset) {
                    log.debug("Discarding stale fetch response for partition {} since the fetched offset {} " +
                            "does not match the current offset {}", tp, fetchOffset, position);
                } else {
                    String errorMessage = "Fetch position " + position + " is out of range for partition " + tp;

                    if (subscriptions.hasDefaultOffsetResetPolicy()) {
                        log.info("{}, resetting offset", errorMessage);
                        subscriptions.requestOffsetReset(tp);
                    } else {
                        log.info("{}, raising error to the application since no reset policy is configured", errorMessage);
                        throw new OffsetOutOfRangeException(errorMessage,
                                Collections.singletonMap(tp, position.offset));
                    }
                }
            } else {
                log.debug("Unset the preferred read replica {} for partition {} since we got {} when fetching {}",
                        clearedReplicaId.get(), tp, error, fetchOffset);
            }
        } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
            // We log the actual partition and not just the topic to help with ACL propagation issues in large clusters.
            log.warn("Not authorized to read from partition {}.", tp);
            throw new TopicAuthorizationException(Collections.singleton(tp.topic()));
        } else if (error == Errors.UNKNOWN_LEADER_EPOCH) {
            log.debug("Received unknown leader epoch error in fetch for partition {}", tp);
        } else if (error == Errors.UNKNOWN_SERVER_ERROR) {
            log.warn("Unknown server error while fetching offset {} for topic-partition {}",
                    fetchOffset, tp);
        } else if (error == Errors.CORRUPT_MESSAGE) {
            throw new KafkaException("Encountered corrupt message when fetching offset "
                    + fetchOffset
                    + " for topic-partition "
                    + tp);
        } else {
            throw new IllegalStateException("Unexpected error code "
                    + error.code()
                    + " while fetching at offset "
                    + fetchOffset
                    + " from topic-partition " + tp);
        }
    }
}
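For context (not part of the change), here is a minimal sketch of how a poll loop could sit on top of FetchCollector. The class name CollectLoopSketch and its drain method are hypothetical; the collector and buffer are assumed to be wired up elsewhere with real metadata, subscriptions, fetch config, and metrics, and the class is assumed to live in the consumer internals package.

package org.apache.kafka.clients.consumer.internals;

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch of a poll loop built on FetchCollector.
public final class CollectLoopSketch {

    static <K, V> List<Fetch<K, V>> drain(FetchCollector<K, V> collector, FetchBuffer buffer) {
        List<Fetch<K, V>> fetches = new ArrayList<>();

        // Each collectFetch() call returns at most max.poll.records records; an empty Fetch means either
        // the buffer is exhausted or every buffered partition is currently paused/unfetchable, and the
        // consumed position is only advanced for non-empty fetches.
        while (true) {
            Fetch<K, V> fetch = collector.collectFetch(buffer);
            if (fetch.isEmpty())
                break;
            fetches.add(fetch);
        }
        return fetches;
    }
}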
FetchUtils.java
@@ -0,0 +1,53 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.common.TopicPartition;

/**
 * {@code FetchUtils} provides a place for disparate parts of the fetch logic to live.
 */
public class FetchUtils {

    /**
     * Performs two combined actions based on the state related to the {@link TopicPartition}:
     *
     * <ol>
     *     <li>
     *         Invokes {@link ConsumerMetadata#requestUpdate(boolean)} to signal that the metadata is incorrect and
     *         needs to be updated
     *     </li>
     *     <li>
     *         Invokes {@link SubscriptionState#clearPreferredReadReplica(TopicPartition)} to clear out any read replica
     *         information that may be present.
     *     </li>
     * </ol>
     *
     * This utility method should be invoked if the client detects (or is told by a node in the broker) that an
     * attempt was made to fetch from a node that isn't the leader or preferred replica.
     *
     * @param metadata {@link ConsumerMetadata} for which to request an update
     * @param subscriptions {@link SubscriptionState} to clear any internal read replica node
     * @param topicPartition {@link TopicPartition} for which this state change is related
     */
    static void requestMetadataUpdate(final ConsumerMetadata metadata,
                                      final SubscriptionState subscriptions,
                                      final TopicPartition topicPartition) {
        metadata.requestUpdate(false);
        subscriptions.clearPreferredReadReplica(topicPartition);
    }
}
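A hypothetical call site (not part of the change) illustrating when this helper is meant to be used; the class name FetchUtilsUsageSketch and its method are illustrative only, and the class is assumed to live in the consumer internals package so it can reach the package-private helper.

package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.common.TopicPartition;

// Hypothetical call site: after a fetch error suggesting we talked to the wrong node,
// refresh metadata and forget any preferred read replica for the partition.
public final class FetchUtilsUsageSketch {

    static void onFetchedFromWrongNode(ConsumerMetadata metadata,
                                       SubscriptionState subscriptions,
                                       TopicPartition tp) {
        FetchUtils.requestMetadataUpdate(metadata, subscriptions, tp);
    }
}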
IdempotentCloser.java
@@ -0,0 +1,174 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.internals;

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;

/**
 * {@code IdempotentCloser} encapsulates some basic logic to ensure that a given resource is only closed once.
 * The underlying mechanism for ensuring that the close only happens once <em>and</em> is thread-safe
 * is {@link AtomicBoolean#compareAndSet(boolean, boolean)}. Users can provide callbacks (via optional
 * {@link Runnable}s) for the <em>initial</em> close and/or any <em>subsequent</em> closes.
 *
 * <p/>
 *
 * Here's an example:
 *
 * <pre>
 *
 * public class MyDataFile implements Closeable {
 *
 *     private final IdempotentCloser closer = new IdempotentCloser();
 *
 *     private final File file;
 *
 *     . . .
 *
 *     public void write() {
 *         closer.assertOpen(() -> String.format("Data file %s already closed!", file));
 *         writeToFile();
 *     }
 *
 *     public boolean isClosed() {
 *         return closer.isClosed();
 *     }
 *
 *     @Override
 *     public void close() {
 *         Runnable onInitialClose = () -> {
 *             cleanUpFile(file);
 *             log.debug("Data file {} closed", file);
 *         };
 *         Runnable onSubsequentClose = () -> {
 *             log.warn("Data file {} already closed!", file);
 *         };
 *         closer.close(onInitialClose, onSubsequentClose);
 *     }
 * }
 * </pre>
 */
public class IdempotentCloser implements AutoCloseable {

    private final AtomicBoolean isClosed;

    /**
     * Creates an {@code IdempotentCloser} that is not yet closed.
     */
    public IdempotentCloser() {
        this(false);
    }

    /**
     * Creates an {@code IdempotentCloser} with the given initial state.
     *
     * @param isClosed Initial value for underlying state
     */
    public IdempotentCloser(boolean isClosed) {
        this.isClosed = new AtomicBoolean(isClosed);
    }

    /**
     * This method serves as an assert that the {@link IdempotentCloser} is still open. If it is open, this method
     * simply returns. If it is closed, a new {@link IllegalStateException} will be thrown using the supplied message.
     *
     * @param message {@link Supplier} that supplies the message for the exception
     */
    public void assertOpen(Supplier<String> message) {
        if (isClosed.get())
            throw new IllegalStateException(message.get());
    }

    /**
     * This method serves as an assert that the {@link IdempotentCloser} is still open. If it is open, this method
     * simply returns. If it is closed, a new {@link IllegalStateException} will be thrown using the given message.
     *
     * @param message Message to use for the exception
     */
    public void assertOpen(String message) {
        if (isClosed.get())
            throw new IllegalStateException(message);
    }

    public boolean isClosed() {
        return isClosed.get();
    }

    /**
     * Closes the resource in a thread-safe manner.
     *
     * <p/>
     *
     * After the execution has completed, calls to {@link #isClosed()} will return {@code true} and calls to
     * {@link #assertOpen(String)} and {@link #assertOpen(Supplier)}
     * will throw an {@link IllegalStateException}.
     */
    @Override
    public void close() {
        close(null, null);
    }

    /**
     * Closes the resource in a thread-safe manner.
     *
     * <p/>
     *
     * After the execution has completed, calls to {@link #isClosed()} will return {@code true} and calls to
     * {@link #assertOpen(String)} and {@link #assertOpen(Supplier)}
     * will throw an {@link IllegalStateException}.
     *
     * @param onInitialClose Optional {@link Runnable} to execute when the resource is closed. Note that the
     *                       object will still be considered closed even if an exception is thrown during the course
     *                       of its execution; can be {@code null}
     */
    public void close(final Runnable onInitialClose) {
        close(onInitialClose, null);
    }

    /**
     * Closes the resource in a thread-safe manner.
     *
     * <p/>
     *
     * After the execution has completed, calls to {@link #isClosed()} will return {@code true} and calls to
     * {@link #assertOpen(String)} and {@link #assertOpen(Supplier)}
     * will throw an {@link IllegalStateException}.
     *
     * @param onInitialClose    Optional {@link Runnable} to execute when the resource is closed. Note that the
     *                          object will still be considered closed even if an exception is thrown during the course
     *                          of its execution; can be {@code null}
     * @param onSubsequentClose Optional {@link Runnable} to execute if this resource was previously closed. Note that
     *                          no state will be affected if an exception is thrown during its execution; can be
     *                          {@code null}
     */
    public void close(final Runnable onInitialClose, final Runnable onSubsequentClose) {
        if (isClosed.compareAndSet(false, true)) {
            if (onInitialClose != null)
                onInitialClose.run();
        } else {
            if (onSubsequentClose != null)
                onSubsequentClose.run();
        }
    }

    @Override
    public String toString() {
        return "IdempotentCloser{" +
                "isClosed=" + isClosed +
                '}';
    }
}
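Since IdempotentCloser is public API within the common internals, a small standalone demonstration of the close-once semantics may help (not part of the change; IdempotentCloserDemo is a hypothetical class, assuming kafka-clients is on the classpath).

import org.apache.kafka.common.internals.IdempotentCloser;

// Hypothetical standalone demonstration of the close-once semantics.
public class IdempotentCloserDemo {

    public static void main(String[] args) {
        IdempotentCloser closer = new IdempotentCloser();

        closer.assertOpen("resource should still be open");  // no-op while open

        closer.close(
            () -> System.out.println("released resource"),    // runs on the first close only
            () -> System.out.println("already closed"));      // runs on every later close

        closer.close(
            () -> System.out.println("released resource"),
            () -> System.out.println("already closed"));      // prints "already closed"

        System.out.println(closer.isClosed());                // true
    }
}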
FetchBufferTest.java
@@ -0,0 +1,194 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;

import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createFetchMetricsManager;
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createMetrics;
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createSubscriptionState;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * This tests the {@link FetchBuffer} functionality in addition to what {@link FetcherTest} covers in its tests.
 * One of the main concerns of these tests is that we correctly handle both places that data is held internally:
 *
 * <ol>
 *     <li>A special "next in line" buffer</li>
 *     <li>The remainder of the buffers in a queue</li>
 * </ol>
 */
public class FetchBufferTest {

    private final Time time = new MockTime(0, 0, 0);
    private final TopicPartition topicAPartition0 = new TopicPartition("topic-a", 0);
    private final TopicPartition topicAPartition1 = new TopicPartition("topic-a", 1);
    private final TopicPartition topicAPartition2 = new TopicPartition("topic-a", 2);
    private final Set<TopicPartition> allPartitions = partitions(topicAPartition0, topicAPartition1, topicAPartition2);
    private LogContext logContext;

    private SubscriptionState subscriptions;

    private FetchMetricsManager metricsManager;

    @BeforeEach
    public void setup() {
        logContext = new LogContext();

        Properties p = new Properties();
        p.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        p.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        p.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        ConsumerConfig config = new ConsumerConfig(p);

        subscriptions = createSubscriptionState(config, logContext);

        Metrics metrics = createMetrics(config, time);
        metricsManager = createFetchMetricsManager(metrics);
    }

    /**
     * Verifies the basics: we can add buffered data to the queue, peek to view them, and poll to remove them.
     */
    @Test
    public void testBasicPeekAndPoll() {
        try (FetchBuffer fetchBuffer = new FetchBuffer(logContext)) {
            CompletedFetch completedFetch = completedFetch(topicAPartition0);
            assertTrue(fetchBuffer.isEmpty());
            fetchBuffer.add(completedFetch);
            assertTrue(fetchBuffer.hasCompletedFetches(p -> true));
            assertFalse(fetchBuffer.isEmpty());
            assertNotNull(fetchBuffer.peek());
            assertSame(completedFetch, fetchBuffer.peek());
            assertSame(completedFetch, fetchBuffer.poll());
            assertNull(fetchBuffer.peek());
        }
    }

    /**
     * Verifies that {@link FetchBuffer#close()} clears the buffered data for both the queue and the next-in-line buffer.
     */
    @Test
    public void testCloseClearsData() {
        // We don't use the try-with-resources approach because we want to have access to the FetchBuffer after
        // the try block so that we can run our asserts on the object.
        FetchBuffer fetchBuffer = null;

        try {
            fetchBuffer = new FetchBuffer(logContext);
            assertNull(fetchBuffer.nextInLineFetch());
            assertTrue(fetchBuffer.isEmpty());

            fetchBuffer.add(completedFetch(topicAPartition0));
            assertFalse(fetchBuffer.isEmpty());

            fetchBuffer.setNextInLineFetch(completedFetch(topicAPartition0));
            assertNotNull(fetchBuffer.nextInLineFetch());
        } finally {
            if (fetchBuffer != null)
                fetchBuffer.close();
        }

        assertNull(fetchBuffer.nextInLineFetch());
        assertTrue(fetchBuffer.isEmpty());
    }

    /**
     * Tests that the buffer returns partitions for both the queue and the next-in-line buffer.
     */
    @Test
    public void testBufferedPartitions() {
        try (FetchBuffer fetchBuffer = new FetchBuffer(logContext)) {
            fetchBuffer.setNextInLineFetch(completedFetch(topicAPartition0));
            fetchBuffer.add(completedFetch(topicAPartition1));
            fetchBuffer.add(completedFetch(topicAPartition2));
            assertEquals(allPartitions, fetchBuffer.bufferedPartitions());

            fetchBuffer.setNextInLineFetch(null);
            assertEquals(partitions(topicAPartition1, topicAPartition2), fetchBuffer.bufferedPartitions());

            fetchBuffer.poll();
            assertEquals(partitions(topicAPartition2), fetchBuffer.bufferedPartitions());

            fetchBuffer.poll();
            assertEquals(partitions(), fetchBuffer.bufferedPartitions());
        }
    }

    /**
     * Tests that the buffer manipulates partitions for both the queue and the next-in-line buffer.
     */
    @Test
    public void testAddAllAndRetainAll() {
        try (FetchBuffer fetchBuffer = new FetchBuffer(logContext)) {
            fetchBuffer.setNextInLineFetch(completedFetch(topicAPartition0));
            fetchBuffer.addAll(Arrays.asList(completedFetch(topicAPartition1), completedFetch(topicAPartition2)));
            assertEquals(allPartitions, fetchBuffer.bufferedPartitions());

            fetchBuffer.retainAll(partitions(topicAPartition1, topicAPartition2));
            assertEquals(partitions(topicAPartition1, topicAPartition2), fetchBuffer.bufferedPartitions());

            fetchBuffer.retainAll(partitions(topicAPartition2));
            assertEquals(partitions(topicAPartition2), fetchBuffer.bufferedPartitions());

            fetchBuffer.retainAll(partitions());
            assertEquals(partitions(), fetchBuffer.bufferedPartitions());
        }
    }

    private CompletedFetch completedFetch(TopicPartition tp) {
        FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData();
        FetchMetricsAggregator metricsAggregator = new FetchMetricsAggregator(metricsManager, allPartitions);
        return new CompletedFetch(
                logContext,
                subscriptions,
                BufferSupplier.create(),
                tp,
                partitionData,
                metricsAggregator,
                0L,
                ApiKeys.FETCH.latestVersion());
    }

    /**
     * This is a handy utility method for returning a set from a varargs array.
     */
    private static Set<TopicPartition> partitions(TopicPartition... partitions) {
        return new HashSet<>(Arrays.asList(partitions));
    }
}
@ -0,0 +1,579 @@
@@ -0,0 +1,579 @@
|
||||
/* |
||||
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||
* contributor license agreements. See the NOTICE file distributed with |
||||
* this work for additional information regarding copyright ownership. |
||||
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
* (the "License"); you may not use this file except in compliance with |
||||
* the License. You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
package org.apache.kafka.clients.consumer.internals; |
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig; |
||||
import org.apache.kafka.common.KafkaException; |
||||
import org.apache.kafka.common.TopicPartition; |
||||
import org.apache.kafka.common.errors.TopicAuthorizationException; |
||||
import org.apache.kafka.common.internals.ClusterResourceListeners; |
||||
import org.apache.kafka.common.message.FetchResponseData; |
||||
import org.apache.kafka.common.metrics.Metrics; |
||||
import org.apache.kafka.common.protocol.ApiKeys; |
||||
import org.apache.kafka.common.protocol.Errors; |
||||
import org.apache.kafka.common.record.CompressionType; |
||||
import org.apache.kafka.common.record.MemoryRecords; |
||||
import org.apache.kafka.common.record.MemoryRecordsBuilder; |
||||
import org.apache.kafka.common.record.Records; |
||||
import org.apache.kafka.common.record.TimestampType; |
||||
import org.apache.kafka.common.serialization.StringDeserializer; |
||||
import org.apache.kafka.common.serialization.StringSerializer; |
||||
import org.apache.kafka.common.utils.BufferSupplier; |
||||
import org.apache.kafka.common.utils.LogContext; |
||||
import org.apache.kafka.common.utils.MockTime; |
||||
import org.apache.kafka.common.utils.Time; |
||||
import org.junit.jupiter.api.Test; |
||||
import org.junit.jupiter.params.ParameterizedTest; |
||||
import org.junit.jupiter.params.provider.Arguments; |
||||
import org.junit.jupiter.params.provider.MethodSource; |
||||
|
||||
import java.nio.ByteBuffer; |
||||
import java.util.ArrayList; |
||||
import java.util.Arrays; |
||||
import java.util.HashSet; |
||||
import java.util.List; |
||||
import java.util.Optional; |
||||
import java.util.Properties; |
||||
import java.util.Set; |
||||
import java.util.stream.Stream; |
||||
|
||||
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createFetchConfig; |
||||
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createFetchMetricsManager; |
||||
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createMetrics; |
||||
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createSubscriptionState; |
||||
import static org.junit.jupiter.api.Assertions.assertEquals; |
||||
import static org.junit.jupiter.api.Assertions.assertFalse; |
||||
import static org.junit.jupiter.api.Assertions.assertNotNull; |
||||
import static org.junit.jupiter.api.Assertions.assertNull; |
||||
import static org.junit.jupiter.api.Assertions.assertSame; |
||||
import static org.junit.jupiter.api.Assertions.assertThrows; |
||||
import static org.junit.jupiter.api.Assertions.assertTrue; |
||||
|
||||
/** |
||||
* This tests the {@link FetchCollector} functionality in addition to what {@link FetcherTest} tests during the course |
||||
* of its tests. |
||||
*/ |
||||
public class FetchCollectorTest { |
||||
|
||||
private final static int DEFAULT_RECORD_COUNT = 10; |
||||
private final static int DEFAULT_MAX_POLL_RECORDS = ConsumerConfig.DEFAULT_MAX_POLL_RECORDS; |
||||
private final Time time = new MockTime(0, 0, 0); |
||||
private final TopicPartition topicAPartition0 = new TopicPartition("topic-a", 0); |
||||
private final TopicPartition topicAPartition1 = new TopicPartition("topic-a", 1); |
||||
private final TopicPartition topicAPartition2 = new TopicPartition("topic-a", 2); |
||||
private final Set<TopicPartition> allPartitions = partitions(topicAPartition0, topicAPartition1, topicAPartition2); |
||||
private LogContext logContext; |
||||
|
||||
private SubscriptionState subscriptions; |
||||
private FetchConfig<String, String> fetchConfig; |
||||
private FetchMetricsManager metricsManager; |
||||
private ConsumerMetadata metadata; |
||||
private FetchBuffer fetchBuffer; |
||||
private FetchCollector<String, String> fetchCollector; |
||||
private CompletedFetchBuilder completedFetchBuilder; |
||||
|
||||
@Test |
||||
public void testFetchNormal() { |
||||
int recordCount = DEFAULT_MAX_POLL_RECORDS; |
||||
buildDependencies(); |
||||
assignAndSeek(topicAPartition0); |
||||
|
||||
CompletedFetch completedFetch = completedFetchBuilder |
||||
.recordCount(recordCount) |
||||
.build(); |
||||
|
||||
// Validate that the buffer is empty until after we add the fetch data.
|
||||
assertTrue(fetchBuffer.isEmpty()); |
||||
fetchBuffer.add(completedFetch); |
||||
assertFalse(fetchBuffer.isEmpty()); |
||||
|
||||
// Validate that the completed fetch isn't initialized just because we add it to the buffer.
|
||||
assertFalse(completedFetch.isInitialized()); |
||||
|
||||
// Fetch the data and validate that we get all the records we want back.
|
||||
Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer); |
||||
assertFalse(fetch.isEmpty()); |
||||
assertEquals(recordCount, fetch.numRecords()); |
||||
|
||||
// When we collected the data from the buffer, this will cause the completed fetch to get initialized.
|
||||
assertTrue(completedFetch.isInitialized()); |
||||
|
||||
// However, even though we've collected the data, it isn't (completely) consumed yet.
|
||||
assertFalse(completedFetch.isConsumed()); |
||||
|
||||
// The buffer is now considered "empty" because our queue is empty.
|
||||
assertTrue(fetchBuffer.isEmpty()); |
||||
assertNull(fetchBuffer.peek()); |
||||
assertNull(fetchBuffer.poll()); |
||||
|
||||
// However, while the queue is "empty", the next-in-line fetch is actually still in the buffer.
|
||||
assertNotNull(fetchBuffer.nextInLineFetch()); |
||||
|
||||
// Validate that the next fetch position has been updated to point to the record after our last fetched
|
||||
// record.
|
||||
SubscriptionState.FetchPosition position = subscriptions.position(topicAPartition0); |
||||
assertEquals(recordCount, position.offset); |
||||
|
||||
// Now attempt to collect more records from the fetch buffer.
|
||||
fetch = fetchCollector.collectFetch(fetchBuffer); |
||||
|
||||
// The Fetch object is non-null, but it's empty.
|
||||
assertEquals(0, fetch.numRecords()); |
||||
assertTrue(fetch.isEmpty()); |
||||
|
||||
// However, once we read *past* the end of the records in the CompletedFetch, then we will call
|
||||
// drain on it, and it will be considered all consumed.
|
||||
assertTrue(completedFetch.isConsumed()); |
||||
} |
||||
|
||||
@Test |
||||
public void testFetchWithReadReplica() { |
||||
buildDependencies(); |
||||
assignAndSeek(topicAPartition0); |
||||
|
||||
// Set the preferred read replica and just to be safe, verify it was set.
|
||||
int preferredReadReplicaId = 67; |
||||
subscriptions.updatePreferredReadReplica(topicAPartition0, preferredReadReplicaId, time::milliseconds); |
||||
assertNotNull(subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds())); |
||||
assertEquals(Optional.of(preferredReadReplicaId), subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds())); |
||||
|
||||
CompletedFetch completedFetch = completedFetchBuilder.build(); |
||||
fetchBuffer.add(completedFetch); |
||||
Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer); |
||||
|
||||
// The Fetch and read replica settings should be empty.
|
||||
assertEquals(DEFAULT_RECORD_COUNT, fetch.numRecords()); |
||||
assertEquals(Optional.of(preferredReadReplicaId), subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds())); |
||||
} |
||||
|
||||
@Test |
||||
public void testNoResultsIfInitializing() { |
||||
buildDependencies(); |
||||
|
||||
// Intentionally call assign (vs. assignAndSeek) so that we don't set the position. The SubscriptionState
|
||||
// will consider the partition as in the SubscriptionState.FetchStates.INITIALIZED state.
|
||||
assign(topicAPartition0); |
||||
|
||||
// The position should thus be null and considered un-fetchable and invalid.
|
||||
assertNull(subscriptions.position(topicAPartition0)); |
||||
assertFalse(subscriptions.isFetchable(topicAPartition0)); |
||||
assertFalse(subscriptions.hasValidPosition(topicAPartition0)); |
||||
|
||||
// Add some valid CompletedFetch records to the FetchBuffer queue and collect them into the Fetch.
|
||||
CompletedFetch completedFetch = completedFetchBuilder.build(); |
||||
fetchBuffer.add(completedFetch); |
||||
Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer); |
||||
|
||||
// Verify that no records are fetched for the partition as it did not have a valid position set.
|
||||
assertEquals(0, fetch.numRecords()); |
||||
} |
||||
|
||||
@ParameterizedTest |
||||
@MethodSource("testErrorInInitializeSource") |
||||
public void testErrorInInitialize(int recordCount, RuntimeException expectedException) { |
||||
buildDependencies(); |
||||
assignAndSeek(topicAPartition0); |
||||
|
||||
// Create a FetchCollector that fails on CompletedFetch initialization.
|
||||
fetchCollector = new FetchCollector<String, String>(logContext, |
||||
metadata, |
||||
subscriptions, |
||||
fetchConfig, |
||||
metricsManager, |
||||
time) { |
||||
|
||||
@Override |
||||
protected CompletedFetch initialize(final CompletedFetch completedFetch) { |
||||
throw expectedException; |
||||
} |
||||
}; |
||||
|
||||
// Add the CompletedFetch to the FetchBuffer queue
|
||||
CompletedFetch completedFetch = completedFetchBuilder |
||||
.recordCount(recordCount) |
||||
.build(); |
||||
fetchBuffer.add(completedFetch); |
||||
|
||||
// At first, the queue is populated
|
||||
assertFalse(fetchBuffer.isEmpty()); |
||||
|
||||
// Now run our ill-fated collectFetch.
|
||||
assertThrows(expectedException.getClass(), () -> fetchCollector.collectFetch(fetchBuffer)); |
||||
|
||||
// If the number of records in the CompletedFetch was 0, the call to FetchCollector.collectFetch() will
|
||||
// remove it from the queue. If there are records in the CompletedFetch, FetchCollector.collectFetch will
|
||||
// leave it on the queue.
|
||||
assertEquals(recordCount == 0, fetchBuffer.isEmpty()); |
||||
} |
||||
|
||||
@Test |
||||
public void testFetchingPausedPartitionsYieldsNoRecords() { |
||||
buildDependencies(); |
||||
assignAndSeek(topicAPartition0); |
||||
|
||||
// The partition should not be 'paused' in the SubscriptionState until we explicitly tell it to.
|
||||
assertFalse(subscriptions.isPaused(topicAPartition0)); |
||||
subscriptions.pause(topicAPartition0); |
||||
assertTrue(subscriptions.isPaused(topicAPartition0)); |
||||
|
||||
CompletedFetch completedFetch = completedFetchBuilder.build(); |
||||
|
||||
// Set the CompletedFetch to the next-in-line fetch, *not* the queue.
|
||||
fetchBuffer.setNextInLineFetch(completedFetch); |
||||
|
||||
// The next-in-line CompletedFetch should reference the same object that was just created
|
||||
assertSame(fetchBuffer.nextInLineFetch(), completedFetch); |
||||
|
||||
// The FetchBuffer queue should be empty as the CompletedFetch was added to the next-in-line.
|
||||
// CompletedFetch, not the queue.
|
||||
assertTrue(fetchBuffer.isEmpty()); |
||||
|
||||
// Ensure that the partition for the next-in-line CompletedFetch is still 'paused'.
|
||||
assertTrue(subscriptions.isPaused(completedFetch.partition)); |
||||
|
||||
Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer); |
||||
|
||||
// There should be no records in the Fetch as the partition being fetched is 'paused'.
|
||||
assertEquals(0, fetch.numRecords()); |
||||
|
||||
// The FetchBuffer queue should not be empty; the CompletedFetch is added to the FetchBuffer queue by
|
||||
// the FetchCollector when it detects a 'paused' partition.
|
||||
assertFalse(fetchBuffer.isEmpty()); |
||||
|
||||
// The next-in-line CompletedFetch should be null; the CompletedFetch is added to the FetchBuffer
|
||||
// queue by the FetchCollector when it detects a 'paused' partition.
|
||||
assertNull(fetchBuffer.nextInLineFetch()); |
||||
} |
||||
|
||||
@ParameterizedTest |
||||
@MethodSource("testFetchWithMetadataRefreshErrorsSource") |
||||
public void testFetchWithMetadataRefreshErrors(final Errors error) { |
||||
buildDependencies(); |
||||
assignAndSeek(topicAPartition0); |
||||
|
||||
CompletedFetch completedFetch = completedFetchBuilder |
||||
.error(error) |
||||
.build(); |
||||
fetchBuffer.add(completedFetch); |
||||
|
||||
// Set the preferred read replica and just to be safe, verify it was set.
|
||||
int preferredReadReplicaId = 5; |
||||
subscriptions.updatePreferredReadReplica(topicAPartition0, preferredReadReplicaId, time::milliseconds); |
||||
assertNotNull(subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds())); |
||||
assertEquals(Optional.of(preferredReadReplicaId), subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds())); |
||||
|
||||
// Fetch the data and validate that we get all the records we want back.
|
||||
Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer); |
||||
assertTrue(fetch.isEmpty()); |
||||
assertTrue(metadata.updateRequested()); |
||||
assertEquals(Optional.empty(), subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds())); |
||||
} |
||||
|
||||
    @Test
    public void testFetchWithOffsetOutOfRange() {
        buildDependencies();
        assignAndSeek(topicAPartition0);

        CompletedFetch completedFetch = completedFetchBuilder.build();
        fetchBuffer.add(completedFetch);

        // Fetch the data and validate that we get our first batch of records back.
        Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer);
        assertFalse(fetch.isEmpty());
        assertEquals(DEFAULT_RECORD_COUNT, fetch.numRecords());

        // Try to fetch more data and validate that we get an empty Fetch back.
        completedFetch = completedFetchBuilder
                .fetchOffset(fetch.numRecords())
                .error(Errors.OFFSET_OUT_OF_RANGE)
                .build();
        fetchBuffer.add(completedFetch);
        fetch = fetchCollector.collectFetch(fetchBuffer);
        assertTrue(fetch.isEmpty());

        // Try to fetch more data and validate that we get an empty Fetch back.
        completedFetch = completedFetchBuilder
                .fetchOffset(fetch.numRecords())
                .error(Errors.OFFSET_OUT_OF_RANGE)
                .build();
        fetchBuffer.add(completedFetch);
        fetch = fetchCollector.collectFetch(fetchBuffer);
        assertTrue(fetch.isEmpty());
    }

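    /**
     * Tests that an {@link Errors#OFFSET_OUT_OF_RANGE} error clears the partition's preferred read replica
     * and yields an empty {@link Fetch}.
     */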
    @Test
    public void testFetchWithOffsetOutOfRangeWithPreferredReadReplica() {
        int records = 10;
        buildDependencies(records);
        assignAndSeek(topicAPartition0);

        // Set the preferred read replica and just to be safe, verify it was set.
        int preferredReadReplicaId = 67;
        subscriptions.updatePreferredReadReplica(topicAPartition0, preferredReadReplicaId, time::milliseconds);
        assertNotNull(subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds()));
        assertEquals(Optional.of(preferredReadReplicaId), subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds()));

        CompletedFetch completedFetch = completedFetchBuilder
                .error(Errors.OFFSET_OUT_OF_RANGE)
                .build();
        fetchBuffer.add(completedFetch);
        Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer);

        // The Fetch and read replica settings should be empty.
        assertTrue(fetch.isEmpty());
        assertEquals(Optional.empty(), subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds()));
    }

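    /**
     * Tests that an {@link Errors#TOPIC_AUTHORIZATION_FAILED} error causes collectFetch to throw a
     * {@link TopicAuthorizationException}.
     */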
    @Test
    public void testFetchWithTopicAuthorizationFailed() {
        buildDependencies();
        assignAndSeek(topicAPartition0);

        // Try to fetch the data and validate that a TopicAuthorizationException is thrown.
        CompletedFetch completedFetch = completedFetchBuilder
                .error(Errors.TOPIC_AUTHORIZATION_FAILED)
                .build();
        fetchBuffer.add(completedFetch);
        assertThrows(TopicAuthorizationException.class, () -> fetchCollector.collectFetch(fetchBuffer));
    }

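    /**
     * Tests that an {@link Errors#UNKNOWN_LEADER_EPOCH} error yields an empty {@link Fetch} rather than
     * an exception.
     */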
    @Test
    public void testFetchWithUnknownLeaderEpoch() {
        buildDependencies();
        assignAndSeek(topicAPartition0);

        // Try to fetch the data and validate that we get an empty Fetch back.
        CompletedFetch completedFetch = completedFetchBuilder
                .error(Errors.UNKNOWN_LEADER_EPOCH)
                .build();
        fetchBuffer.add(completedFetch);
        Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer);
        assertTrue(fetch.isEmpty());
    }

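    /**
     * Tests that an {@link Errors#UNKNOWN_SERVER_ERROR} error yields an empty {@link Fetch} rather than
     * an exception.
     */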
    @Test
    public void testFetchWithUnknownServerError() {
        buildDependencies();
        assignAndSeek(topicAPartition0);

        // Try to fetch the data and validate that we get an empty Fetch back.
        CompletedFetch completedFetch = completedFetchBuilder
                .error(Errors.UNKNOWN_SERVER_ERROR)
                .build();
        fetchBuffer.add(completedFetch);
        Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer);
        assertTrue(fetch.isEmpty());
    }

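    /**
     * Tests that an {@link Errors#CORRUPT_MESSAGE} error causes collectFetch to throw a {@link KafkaException}.
     */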
    @Test
    public void testFetchWithCorruptMessage() {
        buildDependencies();
        assignAndSeek(topicAPartition0);

        // Try to fetch the data and validate that a KafkaException is thrown.
        CompletedFetch completedFetch = completedFetchBuilder
                .error(Errors.CORRUPT_MESSAGE)
                .build();
        fetchBuffer.add(completedFetch);
        assertThrows(KafkaException.class, () -> fetchCollector.collectFetch(fetchBuffer));
    }

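    /**
     * Tests that any error not explicitly handled by the {@link FetchCollector} causes collectFetch to
     * throw an {@link IllegalStateException}.
     */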
    @ParameterizedTest
    @MethodSource("testFetchWithOtherErrorsSource")
    public void testFetchWithOtherErrors(final Errors error) {
        buildDependencies();
        assignAndSeek(topicAPartition0);

        CompletedFetch completedFetch = completedFetchBuilder
                .error(error)
                .build();
        fetchBuffer.add(completedFetch);
        assertThrows(IllegalStateException.class, () -> fetchCollector.collectFetch(fetchBuffer));
    }

    /**
     * This is a handy utility method for returning a set from a varargs array.
     */
    private static Set<TopicPartition> partitions(TopicPartition... partitions) {
        return new HashSet<>(Arrays.asList(partitions));
    }

    private void buildDependencies() {
        buildDependencies(DEFAULT_MAX_POLL_RECORDS);
    }

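    /**
     * Builds the objects under test and their dependencies: the consumer configuration (with the given
     * {@code max.poll.records}), subscription state, metadata, metrics, {@link FetchCollector},
     * {@link FetchBuffer}, and the {@link CompletedFetchBuilder} used by the tests.
     */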
    private void buildDependencies(int maxPollRecords) {
        logContext = new LogContext();

        Properties p = new Properties();
        p.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        p.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        p.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        p.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, String.valueOf(maxPollRecords));

        ConsumerConfig config = new ConsumerConfig(p);

        Deserializers<String, String> deserializers = new Deserializers<>(new StringDeserializer(), new StringDeserializer());

        subscriptions = createSubscriptionState(config, logContext);
        fetchConfig = createFetchConfig(config, deserializers);

        Metrics metrics = createMetrics(config, time);
        metricsManager = createFetchMetricsManager(metrics);
        metadata = new ConsumerMetadata(
                0,
                1000,
                10000,
                false,
                false,
                subscriptions,
                logContext,
                new ClusterResourceListeners());
        fetchCollector = new FetchCollector<>(
                logContext,
                metadata,
                subscriptions,
                fetchConfig,
                metricsManager,
                time);
        fetchBuffer = new FetchBuffer(logContext);
        completedFetchBuilder = new CompletedFetchBuilder();
    }

    private void assign(TopicPartition... partitions) {
        subscriptions.assignFromUser(partitions(partitions));
    }

    private void assignAndSeek(TopicPartition tp) {
        assign(tp);
        subscriptions.seek(tp, 0);
    }

    /**
     * Supplies the {@link Arguments} to {@link #testFetchWithMetadataRefreshErrors(Errors)}.
     */
    private static Stream<Arguments> testFetchWithMetadataRefreshErrorsSource() {
        List<Errors> errors = Arrays.asList(
                Errors.NOT_LEADER_OR_FOLLOWER,
                Errors.REPLICA_NOT_AVAILABLE,
                Errors.KAFKA_STORAGE_ERROR,
                Errors.FENCED_LEADER_EPOCH,
                Errors.OFFSET_NOT_AVAILABLE,
                Errors.UNKNOWN_TOPIC_OR_PARTITION,
                Errors.UNKNOWN_TOPIC_ID,
                Errors.INCONSISTENT_TOPIC_ID
        );

        return errors.stream().map(Arguments::of);
    }

    /**
     * Supplies the {@link Arguments} to {@link #testFetchWithOtherErrors(Errors)}.
     */
    private static Stream<Arguments> testFetchWithOtherErrorsSource() {
        List<Errors> errors = new ArrayList<>(Arrays.asList(Errors.values()));
        errors.removeAll(Arrays.asList(
                Errors.NONE,
                Errors.NOT_LEADER_OR_FOLLOWER,
                Errors.REPLICA_NOT_AVAILABLE,
                Errors.KAFKA_STORAGE_ERROR,
                Errors.FENCED_LEADER_EPOCH,
                Errors.OFFSET_NOT_AVAILABLE,
                Errors.UNKNOWN_TOPIC_OR_PARTITION,
                Errors.UNKNOWN_TOPIC_ID,
                Errors.INCONSISTENT_TOPIC_ID,
                Errors.OFFSET_OUT_OF_RANGE,
                Errors.TOPIC_AUTHORIZATION_FAILED,
                Errors.UNKNOWN_LEADER_EPOCH,
                Errors.UNKNOWN_SERVER_ERROR,
                Errors.CORRUPT_MESSAGE
        ));

        return errors.stream().map(Arguments::of);
    }

    /**
     * Supplies the {@link Arguments} to {@link #testErrorInInitialize(int, RuntimeException)}.
     */
    private static Stream<Arguments> testErrorInInitializeSource() {
        return Stream.of(
                Arguments.of(10, new RuntimeException()),
                Arguments.of(0, new RuntimeException()),
                Arguments.of(10, new KafkaException()),
                Arguments.of(0, new KafkaException())
        );
    }

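    /**
     * Builder for the {@link CompletedFetch} objects used by these tests. It creates a batch of simple
     * records for {@code topicAPartition0} with a configurable fetch offset, record count, and (optional)
     * partition-level error.
     */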
    private class CompletedFetchBuilder {

        private long fetchOffset = 0;

        private int recordCount = DEFAULT_RECORD_COUNT;

        private Errors error = null;

        private CompletedFetchBuilder fetchOffset(long fetchOffset) {
            this.fetchOffset = fetchOffset;
            return this;
        }

        private CompletedFetchBuilder recordCount(int recordCount) {
            this.recordCount = recordCount;
            return this;
        }

        private CompletedFetchBuilder error(Errors error) {
            this.error = error;
            return this;
        }

        private CompletedFetch build() {
            Records records;
            ByteBuffer allocate = ByteBuffer.allocate(1024);

            try (MemoryRecordsBuilder builder = MemoryRecords.builder(allocate,
                    CompressionType.NONE,
                    TimestampType.CREATE_TIME,
                    0)) {
                for (int i = 0; i < recordCount; i++)
                    builder.append(0L, "key".getBytes(), ("value-" + i).getBytes());

                records = builder.build();
            }

            FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
                    .setPartitionIndex(topicAPartition0.partition())
                    .setHighWatermark(1000)
                    .setRecords(records);

            if (error != null)
                partitionData.setErrorCode(error.code());

            FetchMetricsAggregator metricsAggregator = new FetchMetricsAggregator(metricsManager, allPartitions);
            return new CompletedFetch(
                    logContext,
                    subscriptions,
                    BufferSupplier.create(),
                    topicAPartition0,
                    partitionData,
                    metricsAggregator,
                    fetchOffset,
                    ApiKeys.FETCH.latestVersion());
        }
    }
}
@@ -0,0 +1,183 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.internals;

import org.junit.jupiter.api.Test;

import java.util.concurrent.atomic.AtomicInteger;

import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

public class IdempotentCloserTest {

    private static final Runnable CALLBACK_NO_OP = () -> { };

    private static final Runnable CALLBACK_WITH_RUNTIME_EXCEPTION = () -> {
        throw new RuntimeException("Simulated error during callback");
    };

    /**
     * Tests basic functionality, i.e. that close <em>means</em> closed.
     */
    @Test
    public void testBasicClose() {
        IdempotentCloser ic = new IdempotentCloser();
        assertFalse(ic.isClosed());
        ic.close();
        assertTrue(ic.isClosed());
    }

    /**
     * Tests that the onClose callback is only invoked once.
     */
    @Test
    public void testCountCloses() {
        AtomicInteger onCloseCounter = new AtomicInteger();
        IdempotentCloser ic = new IdempotentCloser();

        // Verify initial invariants.
        assertFalse(ic.isClosed());
        assertEquals(0, onCloseCounter.get());

        // Close with our onClose callback to increment our counter.
        ic.close(onCloseCounter::getAndIncrement);
        assertTrue(ic.isClosed());
        assertEquals(1, onCloseCounter.get());

        // Close with our onClose callback again, but verify it wasn't invoked as it was previously closed.
        ic.close(onCloseCounter::getAndIncrement);
        assertTrue(ic.isClosed());
        assertEquals(1, onCloseCounter.get());
    }

    /**
     * Tests that the onClose callback is only invoked once, while the onPreviousClose callback can be invoked
     * a variable number of times.
     */
    @Test
    public void testEnsureIdempotentClose() {
        AtomicInteger onCloseCounter = new AtomicInteger();
        AtomicInteger onPreviousCloseCounter = new AtomicInteger();

        IdempotentCloser ic = new IdempotentCloser();

        // Verify initial invariants.
        assertFalse(ic.isClosed());
        assertEquals(0, onCloseCounter.get());
        assertEquals(0, onPreviousCloseCounter.get());

        // Our first close passes in both callbacks. As a result, our onClose callback should be run but our
        // onPreviousClose callback should not be invoked.
        ic.close(onCloseCounter::getAndIncrement, onPreviousCloseCounter::getAndIncrement);
        assertTrue(ic.isClosed());
        assertEquals(1, onCloseCounter.get());
        assertEquals(0, onPreviousCloseCounter.get());

        // Our second close again passes in both callbacks. As this is the second close, our onClose callback
        // should not be run but our onPreviousClose callback should be executed.
        ic.close(onCloseCounter::getAndIncrement, onPreviousCloseCounter::getAndIncrement);
        assertTrue(ic.isClosed());
        assertEquals(1, onCloseCounter.get());
        assertEquals(1, onPreviousCloseCounter.get());

        // Our third close yet again passes in both callbacks. As before, our onClose callback should not be run
        // but our onPreviousClose callback should be run again.
        ic.close(onCloseCounter::getAndIncrement, onPreviousCloseCounter::getAndIncrement);
        assertTrue(ic.isClosed());
        assertEquals(1, onCloseCounter.get());
        assertEquals(2, onPreviousCloseCounter.get());
    }

    /**
     * Tests that the {@link IdempotentCloser#assertOpen(String)} method will not throw an
     * exception if the closer is in the "open" state, but if invoked after it's in the "closed" state, it will
     * throw the exception.
     */
    @Test
    public void testCloseBeforeThrows() {
        IdempotentCloser ic = new IdempotentCloser();

        // Verify initial invariants.
        assertFalse(ic.isClosed());

        // assertOpen doesn't throw anything since the closer is still in its "open" state.
        assertDoesNotThrow(() -> ic.assertOpen(() -> "test"));

        // Post-close, our call to assertOpen will, in fact, throw an IllegalStateException.
        ic.close();
        assertTrue(ic.isClosed());
        assertThrows(IllegalStateException.class, () -> ic.assertOpen(() -> "test"));
    }

    /**
     * Tests that if the invoked onClose callback throws an exception:
     *
     * <ol>
     *     <li>The exception does not prevent the {@link IdempotentCloser} from being updated to the closed state</li>
     *     <li>The exception is bubbled up to the user</li>
     * </ol>
     */
    @Test
    public void testErrorsInOnCloseCallbacksAreNotSwallowed() {
        IdempotentCloser ic = new IdempotentCloser();

        // Verify initial invariants.
        assertFalse(ic.isClosed());

        // Upon close, our onClose callback will throw an error. First ensure that it is thrown at the user.
        assertThrows(RuntimeException.class, () -> ic.close(CALLBACK_WITH_RUNTIME_EXCEPTION));

        // Make sure the IdempotentCloser is still closed, though.
        assertTrue(ic.isClosed());
    }

    /**
     * Tests that if the invoked onSubsequentClose callback throws an exception, it is thrown from
     * {@link IdempotentCloser#close(Runnable, Runnable)} so the user can handle it.
     */
    @Test
    public void testErrorsInOnPreviousCloseCallbacksAreNotSwallowed() {
        IdempotentCloser ic = new IdempotentCloser();

        // Verify initial invariants.
        assertFalse(ic.isClosed());

        // Perform the initial close. No errors here.
        ic.close(CALLBACK_NO_OP);
        assertTrue(ic.isClosed());

        // Perform the subsequent close and verify that the exception is bubbled up to the user.
        assertThrows(RuntimeException.class, () -> ic.close(CALLBACK_NO_OP, CALLBACK_WITH_RUNTIME_EXCEPTION));
        assertTrue(ic.isClosed());
    }

    /**
     * Tests that if the {@link IdempotentCloser} is created with its initial state as closed, the various APIs
     * will behave as expected.
     */
    @Test
    public void testCreatedClosed() {
        IdempotentCloser ic = new IdempotentCloser(true);
        assertTrue(ic.isClosed());
        assertThrows(IllegalStateException.class, () -> ic.assertOpen(() -> "test"));
        assertDoesNotThrow(() -> ic.close(CALLBACK_WITH_RUNTIME_EXCEPTION));
    }
}