Browse Source
This PR expands the scope of ApiVersionManager a bit to include returning the current MetadataVersion and features that are in effect. This is useful in general because that information needs to be returned in an ApiVersionsResponse. It also allows us to fix the ApiVersionManager interface so that all subclasses implement all methods of the interface. Having subclasses that don't implement some methods is dangerous because they could cause exceptions at runtime in unexpected scenarios. On the KRaft controller, we were previously performing a read operation in the QuorumController thread to get the current metadata version and features. With this PR, we now read a volatile variable maintained by a separate MetadataVersionContextPublisher object. This will improve performance and simplify the code. It should not change the guarantees we are providing; in both the old and new scenarios, we need to be robust against version skew scenarios during updates. Add a Features class which just has a 3-tuple of metadata version, features, and feature epoch. Remove MetadataCache.FinalizedFeaturesAndEpoch, since it just duplicates the Features class. (There are some additional feature-related classes that can be consolidated in a follow-on PR.) Create a java class, EndpointReadyFutures, for managing the futures associated with individual authorizer endpoints. This avoids code duplication between ControllerServer and BrokerServer and makes this code unit-testable. Reviewers: David Arthur <mumrah@gmail.com>, dengziming <dengziming1993@gmail.com>, Luke Chen <showuon@gmail.com> (pull/13877/head)
Colin P. McCabe
1 year ago
25 changed files with 857 additions and 152 deletions
@ -0,0 +1,54 @@
@@ -0,0 +1,54 @@
|
||||
/* |
||||
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||
* contributor license agreements. See the NOTICE file distributed with |
||||
* this work for additional information regarding copyright ownership. |
||||
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
* (the "License"); you may not use this file except in compliance with |
||||
* the License. You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package org.apache.kafka.metadata.publisher; |
||||
|
||||
import org.apache.kafka.image.MetadataDelta; |
||||
import org.apache.kafka.image.MetadataImage; |
||||
import org.apache.kafka.image.loader.LoaderManifest; |
||||
import org.apache.kafka.image.publisher.MetadataPublisher; |
||||
import org.apache.kafka.server.common.Features; |
||||
|
||||
import static org.apache.kafka.server.common.MetadataVersion.MINIMUM_KRAFT_VERSION; |
||||
|
||||
|
||||
public class FeaturesPublisher implements MetadataPublisher { |
||||
private volatile Features features = Features.fromKRaftVersion(MINIMUM_KRAFT_VERSION); |
||||
|
||||
public Features features() { |
||||
return features; |
||||
} |
||||
|
||||
@Override |
||||
public String name() { |
||||
return "FeaturesPublisher"; |
||||
} |
||||
|
||||
@Override |
||||
public void onMetadataUpdate( |
||||
MetadataDelta delta, |
||||
MetadataImage newImage, |
||||
LoaderManifest manifest |
||||
) { |
||||
if (delta.featuresDelta() != null) { |
||||
features = new Features(newImage.features().metadataVersion(), |
||||
newImage.features().finalizedVersions(), |
||||
newImage.provenance().lastContainedOffset(), |
||||
true); |
||||
} |
||||
} |
||||
} |
@ -0,0 +1,87 @@
@@ -0,0 +1,87 @@
|
||||
/* |
||||
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||
* contributor license agreements. See the NOTICE file distributed with |
||||
* this work for additional information regarding copyright ownership. |
||||
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
* (the "License"); you may not use this file except in compliance with |
||||
* the License. You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
package org.apache.kafka.server.common; |
||||
|
||||
import java.util.Collections; |
||||
import java.util.HashMap; |
||||
import java.util.Map; |
||||
import java.util.Objects; |
||||
|
||||
import static org.apache.kafka.server.common.MetadataVersion.FEATURE_NAME; |
||||
|
||||
public final class Features { |
||||
private final MetadataVersion version; |
||||
private final Map<String, Short> finalizedFeatures; |
||||
private final long finalizedFeaturesEpoch; |
||||
|
||||
public static Features fromKRaftVersion(MetadataVersion version) { |
||||
return new Features(version, Collections.emptyMap(), -1, true); |
||||
} |
||||
|
||||
public Features( |
||||
MetadataVersion version, |
||||
Map<String, Short> finalizedFeatures, |
||||
long finalizedFeaturesEpoch, |
||||
boolean kraftMode |
||||
) { |
||||
this.version = version; |
||||
this.finalizedFeatures = new HashMap<>(finalizedFeatures); |
||||
this.finalizedFeaturesEpoch = finalizedFeaturesEpoch; |
||||
// In KRaft mode, we always include the metadata version in the features map.
|
||||
// In ZK mode, we never include it.
|
||||
if (kraftMode) { |
||||
this.finalizedFeatures.put(FEATURE_NAME, version.featureLevel()); |
||||
} else { |
||||
this.finalizedFeatures.remove(FEATURE_NAME); |
||||
} |
||||
} |
||||
|
||||
public MetadataVersion metadataVersion() { |
||||
return version; |
||||
} |
||||
|
||||
public Map<String, Short> finalizedFeatures() { |
||||
return finalizedFeatures; |
||||
} |
||||
|
||||
public long finalizedFeaturesEpoch() { |
||||
return finalizedFeaturesEpoch; |
||||
} |
||||
|
||||
@Override |
||||
public boolean equals(Object o) { |
||||
if (o == null || !(o.getClass().equals(Features.class))) return false; |
||||
Features other = (Features) o; |
||||
return version == other.version && |
||||
finalizedFeatures.equals(other.finalizedFeatures) && |
||||
finalizedFeaturesEpoch == other.finalizedFeaturesEpoch; |
||||
} |
||||
|
||||
@Override |
||||
public int hashCode() { |
||||
return Objects.hash(version, finalizedFeatures, finalizedFeaturesEpoch); |
||||
} |
||||
|
||||
@Override |
||||
public String toString() { |
||||
return "Features" + |
||||
"(version=" + version + |
||||
", finalizedFeatures=" + finalizedFeatures + |
||||
", finalizedFeaturesEpoch=" + finalizedFeaturesEpoch + |
||||
")"; |
||||
} |
||||
} |
@ -0,0 +1,219 @@
@@ -0,0 +1,219 @@
|
||||
/* |
||||
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||
* contributor license agreements. See the NOTICE file distributed with |
||||
* this work for additional information regarding copyright ownership. |
||||
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
* (the "License"); you may not use this file except in compliance with |
||||
* the License. You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package org.apache.kafka.server.network; |
||||
|
||||
import org.apache.kafka.common.Endpoint; |
||||
import org.apache.kafka.common.utils.LogContext; |
||||
import org.apache.kafka.server.authorizer.Authorizer; |
||||
import org.apache.kafka.server.authorizer.AuthorizerServerInfo; |
||||
import org.slf4j.Logger; |
||||
|
||||
import java.util.ArrayList; |
||||
import java.util.Collection; |
||||
import java.util.Collections; |
||||
import java.util.HashMap; |
||||
import java.util.List; |
||||
import java.util.Map; |
||||
import java.util.Optional; |
||||
import java.util.TreeSet; |
||||
import java.util.concurrent.CompletableFuture; |
||||
import java.util.concurrent.CompletionStage; |
||||
|
||||
/** |
||||
* Manages a set of per-endpoint futures. |
||||
*/ |
||||
public class EndpointReadyFutures {
    /**
     * Builds an {@link EndpointReadyFutures} object. Readiness stages may be registered
     * either for all endpoints at once ({@link #addReadinessFuture}) or per-endpoint
     * ({@link #addReadinessFutures}); an endpoint becomes ready only when every stage
     * registered for it completes.
     */
    public static class Builder {
        // Lazily initialized in build() if the caller never supplied one.
        private LogContext logContext = null;
        // Per-endpoint readiness stages, accumulated by addReadinessFutures().
        private final Map<Endpoint, List<EndpointCompletionStage>> endpointStages = new HashMap<>();
        // Stages that apply to every endpoint; expanded per-endpoint in build().
        private final List<EndpointCompletionStage> stages = new ArrayList<>();

        /**
         * Add a readiness future that will block all endpoints.
         *
         * @param name      The future name.
         * @param future    The future object.
         *
         * @return          This builder object.
         */
        public Builder addReadinessFuture(
            String name,
            CompletableFuture<?> future
        ) {
            stages.add(new EndpointCompletionStage(name, future));
            return this;
        }

        /**
         * Add readiness futures for individual endpoints.
         *
         * @param name          The future name.
         * @param newFutures    A map from endpoints to futures.
         *
         * @return              This builder object.
         */
        public Builder addReadinessFutures(
            String name,
            Map<Endpoint, ? extends CompletionStage<?>> newFutures
        ) {
            newFutures.forEach((endpoint, future) -> {
                endpointStages.computeIfAbsent(endpoint, __ -> new ArrayList<>()).
                        add(new EndpointCompletionStage(name, future));
            });
            return this;
        }

        /**
         * Build the EndpointReadyFutures object.
         *
         * @param authorizer    The authorizer to use, if any. Will be started.
         * @param info          Server information to be passed to the authorizer.
         *
         * @return              The new futures object.
         */
        public EndpointReadyFutures build(
            Optional<Authorizer> authorizer,
            AuthorizerServerInfo info
        ) {
            // Starting the authorizer yields one start future per endpoint; with no
            // authorizer configured there are no such futures.
            if (authorizer.isPresent()) {
                return build(authorizer.get().start(info), info);
            } else {
                return build(Collections.emptyMap(), info);
            }
        }

        // Package-private variant taking the authorizer start futures directly; this
        // is also the unit-test entry point.
        EndpointReadyFutures build(
            Map<Endpoint, ? extends CompletionStage<?>> authorizerStartFutures,
            AuthorizerServerInfo info
        ) {
            if (logContext == null) logContext = new LogContext();
            Map<Endpoint, CompletionStage<?>> effectiveStartFutures =
                    new HashMap<>(authorizerStartFutures);
            // Endpoints the authorizer did not mention are treated as already started.
            for (Endpoint endpoint : info.endpoints()) {
                if (!effectiveStartFutures.containsKey(endpoint)) {
                    CompletableFuture<Void> completedFuture = CompletableFuture.completedFuture(null);
                    effectiveStartFutures.put(endpoint, completedFuture);
                }
            }
            // After the fill-in above, a size mismatch can only mean the authorizer
            // returned futures for endpoints that are not in AuthorizerServerInfo.
            if (info.endpoints().size() != effectiveStartFutures.size()) {
                List<String> notInInfo = new ArrayList<>();
                for (Endpoint endpoint : effectiveStartFutures.keySet()) {
                    if (!info.endpoints().contains(endpoint)) {
                        notInInfo.add(endpoint.listenerName().orElse("[none]"));
                    }
                }
                throw new RuntimeException("Found authorizer futures that weren't included " +
                        "in AuthorizerServerInfo: " + notInInfo);
            }
            addReadinessFutures("authorizerStart", effectiveStartFutures);
            // Expand each all-endpoint stage into one per-endpoint entry, sharing the
            // same underlying future.
            stages.forEach(stage -> {
                Map<Endpoint, CompletionStage<?>> newReadinessFutures = new HashMap<>();
                info.endpoints().forEach(endpoint -> {
                    newReadinessFutures.put(endpoint, stage.future);
                });
                addReadinessFutures(stage.name, newReadinessFutures);
            });
            return new EndpointReadyFutures(logContext,
                    endpointStages);
        }
    }

    /**
     * A named completion stage: pairs a human-readable stage name with the future that
     * signals its completion.
     */
    static class EndpointCompletionStage {
        final String name;
        final CompletionStage<?> future;

        EndpointCompletionStage(String name, CompletionStage<?> future) {
            this.name = name;
            this.future = future;
        }
    }

    /**
     * Tracks readiness of a single endpoint. The endpoint's future completes when all
     * of its stages have completed, and fails as soon as any stage fails. Access to
     * {@code incomplete} is guarded by this object's monitor.
     */
    class EndpointReadyFuture {
        final String endpointName;
        // Names of stages that have not yet completed, kept sorted for readable logs.
        final TreeSet<String> incomplete;
        final CompletableFuture<Void> future;

        EndpointReadyFuture(Endpoint endpoint, Collection<String> stageNames) {
            this.endpointName = endpoint.listenerName().orElse("UNNAMED");
            this.incomplete = new TreeSet<>(stageNames);
            this.future = new CompletableFuture<>();
        }

        // Mark one stage complete; completes the endpoint future outside the lock
        // once the last stage finishes, to avoid running callbacks under the monitor.
        void completeStage(String stageName) {
            boolean done = false;
            synchronized (EndpointReadyFuture.this) {
                if (incomplete.remove(stageName)) {
                    if (incomplete.isEmpty()) {
                        done = true;
                    } else {
                        log.info("{} completed for endpoint {}. Still waiting for {}.",
                                stageName, endpointName, incomplete);
                    }
                }
            }
            if (done) {
                // complete() returns false if the future was already failed, in which
                // case we skip the READY log line.
                if (future.complete(null)) {
                    log.info("{} completed for endpoint {}. Endpoint is now READY.",
                            stageName, endpointName);
                }
            }
        }

        // Fail the endpoint future; only the first failure wins and is logged.
        void failStage(String what, Throwable exception) {
            if (future.completeExceptionally(exception)) {
                synchronized (EndpointReadyFuture.this) {
                    incomplete.clear();
                }
                log.warn("Endpoint {} will never become ready because we encountered an {} exception",
                        endpointName, what, exception);
            }
        }
    }

    private final Logger log;

    // Unmodifiable map from endpoint to its readiness future.
    private final Map<Endpoint, CompletableFuture<Void>> futures;

    private EndpointReadyFutures(
        LogContext logContext,
        Map<Endpoint, List<EndpointCompletionStage>> endpointStages
    ) {
        this.log = logContext.logger(EndpointReadyFutures.class);
        Map<Endpoint, CompletableFuture<Void>> newFutures = new HashMap<>();
        // For each endpoint, create a ready-future over its stage names and hook each
        // stage's completion (or failure) into it.
        endpointStages.forEach((endpoint, stages) -> {
            List<String> stageNames = new ArrayList<>();
            stages.forEach(stage -> stageNames.add(stage.name));
            EndpointReadyFuture readyFuture = new EndpointReadyFuture(endpoint, stageNames);
            newFutures.put(endpoint, readyFuture.future);
            stages.forEach(stage -> {
                stage.future.whenComplete((__, exception) -> {
                    if (exception != null) {
                        readyFuture.failStage(stage.name, exception);
                    } else {
                        readyFuture.completeStage(stage.name);
                    }
                });
            });
        });
        this.futures = Collections.unmodifiableMap(newFutures);
    }

    /**
     * @return an unmodifiable map from endpoint to the future that completes when that
     *         endpoint is ready, or fails if any of its readiness stages failed.
     */
    public Map<Endpoint, CompletableFuture<Void>> futures() {
        return futures;
    }
}
@ -0,0 +1,108 @@
@@ -0,0 +1,108 @@
|
||||
/* |
||||
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||
* contributor license agreements. See the NOTICE file distributed with |
||||
* this work for additional information regarding copyright ownership. |
||||
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
* (the "License"); you may not use this file except in compliance with |
||||
* the License. You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package org.apache.kafka.server.network; |
||||
|
||||
import org.apache.kafka.common.ClusterResource; |
||||
import org.apache.kafka.common.Endpoint; |
||||
import org.apache.kafka.server.authorizer.AuthorizerServerInfo; |
||||
|
||||
import java.util.ArrayList; |
||||
import java.util.Collection; |
||||
import java.util.Collections; |
||||
import java.util.Objects; |
||||
|
||||
|
||||
/** |
||||
* Runtime broker configuration metadata provided to authorizers during start up. |
||||
*/ |
||||
public final class KafkaAuthorizerServerInfo implements AuthorizerServerInfo { |
||||
private final ClusterResource clusterResource; |
||||
private final int brokerId; |
||||
private final Collection<Endpoint> endpoints; |
||||
private final Endpoint interbrokerEndpoint; |
||||
private final Collection<String> earlyStartListeners; |
||||
|
||||
public KafkaAuthorizerServerInfo( |
||||
ClusterResource clusterResource, |
||||
int brokerId, |
||||
Collection<Endpoint> endpoints, |
||||
Endpoint interbrokerEndpoint, |
||||
Collection<String> earlyStartListeners |
||||
) { |
||||
this.clusterResource = clusterResource; |
||||
this.brokerId = brokerId; |
||||
this.endpoints = Collections.unmodifiableCollection(new ArrayList<>(endpoints)); |
||||
this.interbrokerEndpoint = interbrokerEndpoint; |
||||
this.earlyStartListeners = Collections.unmodifiableCollection(new ArrayList<>(earlyStartListeners)); |
||||
} |
||||
|
||||
@Override |
||||
public ClusterResource clusterResource() { |
||||
return clusterResource; |
||||
} |
||||
|
||||
@Override |
||||
public int brokerId() { |
||||
return brokerId; |
||||
} |
||||
|
||||
@Override |
||||
public Collection<Endpoint> endpoints() { |
||||
return endpoints; |
||||
} |
||||
|
||||
@Override |
||||
public Endpoint interBrokerEndpoint() { |
||||
return interbrokerEndpoint; |
||||
} |
||||
|
||||
@Override |
||||
public Collection<String> earlyStartListeners() { |
||||
return earlyStartListeners; |
||||
} |
||||
|
||||
@Override |
||||
public boolean equals(Object o) { |
||||
if (o == null || (!(o.getClass().equals(KafkaAuthorizerServerInfo.class)))) return false; |
||||
KafkaAuthorizerServerInfo other = (KafkaAuthorizerServerInfo) o; |
||||
return clusterResource.equals(other.clusterResource) && |
||||
brokerId == other.brokerId && |
||||
endpoints.equals(other.endpoints) && |
||||
interbrokerEndpoint.equals(other.interbrokerEndpoint) && |
||||
earlyStartListeners.equals(other.earlyStartListeners); |
||||
} |
||||
|
||||
@Override |
||||
public int hashCode() { |
||||
return Objects.hash(clusterResource, |
||||
brokerId, |
||||
endpoints, |
||||
interbrokerEndpoint, |
||||
earlyStartListeners); |
||||
} |
||||
|
||||
@Override |
||||
public String toString() { |
||||
return "KafkaAuthorizerServerInfo(" + |
||||
"clusterResource=" + clusterResource + |
||||
", brokerId=" + brokerId + |
||||
", endpoints=" + endpoints + |
||||
", earlyStartListeners=" + earlyStartListeners + |
||||
")"; |
||||
} |
||||
} |
@ -0,0 +1,50 @@
@@ -0,0 +1,50 @@
|
||||
/* |
||||
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||
* contributor license agreements. See the NOTICE file distributed with |
||||
* this work for additional information regarding copyright ownership. |
||||
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
* (the "License"); you may not use this file except in compliance with |
||||
* the License. You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package org.apache.kafka.server.common; |
||||
|
||||
import org.junit.jupiter.api.Test; |
||||
|
||||
import java.util.Collections; |
||||
|
||||
import static org.apache.kafka.server.common.MetadataVersion.FEATURE_NAME; |
||||
import static org.apache.kafka.server.common.MetadataVersion.MINIMUM_KRAFT_VERSION; |
||||
import static org.junit.jupiter.api.Assertions.assertEquals; |
||||
import static org.junit.jupiter.api.Assertions.assertNull; |
||||
|
||||
class FeaturesTest { |
||||
@Test |
||||
public void testKRaftModeFeatures() { |
||||
Features features = new Features(MINIMUM_KRAFT_VERSION, |
||||
Collections.singletonMap("foo", (short) 2), 123, true); |
||||
assertEquals(MINIMUM_KRAFT_VERSION.featureLevel(), |
||||
features.finalizedFeatures().get(FEATURE_NAME)); |
||||
assertEquals((short) 2, |
||||
features.finalizedFeatures().get("foo")); |
||||
assertEquals(2, features.finalizedFeatures().size()); |
||||
} |
||||
|
||||
@Test |
||||
public void testZkModeFeatures() { |
||||
Features features = new Features(MINIMUM_KRAFT_VERSION, |
||||
Collections.singletonMap("foo", (short) 2), 123, false); |
||||
assertNull(features.finalizedFeatures().get(FEATURE_NAME)); |
||||
assertEquals((short) 2, |
||||
features.finalizedFeatures().get("foo")); |
||||
assertEquals(1, features.finalizedFeatures().size()); |
||||
} |
||||
} |
@ -0,0 +1,169 @@
@@ -0,0 +1,169 @@
|
||||
/* |
||||
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||
* contributor license agreements. See the NOTICE file distributed with |
||||
* this work for additional information regarding copyright ownership. |
||||
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
* (the "License"); you may not use this file except in compliance with |
||||
* the License. You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package org.apache.kafka.server.network; |
||||
|
||||
import java.util.Arrays; |
||||
import java.util.HashMap; |
||||
import java.util.HashSet; |
||||
import java.util.Map; |
||||
import java.util.Optional; |
||||
import java.util.concurrent.CompletableFuture; |
||||
import java.util.concurrent.CompletionException; |
||||
|
||||
import org.apache.kafka.common.ClusterResource; |
||||
import org.apache.kafka.common.Endpoint; |
||||
import org.apache.kafka.common.security.auth.SecurityProtocol; |
||||
import org.junit.jupiter.api.Test; |
||||
import static org.junit.jupiter.api.Assertions.assertEquals; |
||||
import static org.junit.jupiter.api.Assertions.assertFalse; |
||||
import static org.junit.jupiter.api.Assertions.assertNotNull; |
||||
import static org.junit.jupiter.api.Assertions.assertThrows; |
||||
import static org.junit.jupiter.api.Assertions.assertTrue; |
||||
|
||||
final public class EndpointReadyFuturesTest {
    private static final Endpoint EXTERNAL =
        new Endpoint("EXTERNAL", SecurityProtocol.SSL, "127.0.0.1", 9092);

    private static final Endpoint INTERNAL =
        new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "127.0.0.1", 9093);

    // Server info with two listeners; INTERNAL doubles as the inter-broker endpoint
    // and the sole early-start listener.
    private static final KafkaAuthorizerServerInfo INFO = new KafkaAuthorizerServerInfo(
        new ClusterResource("S6-01LPiQOCBhhFIunQUcQ"),
        1,
        Arrays.asList(EXTERNAL, INTERNAL),
        INTERNAL,
        Arrays.asList("INTERNAL"));

    /**
     * Assert that each given endpoint's readiness future exists and completed normally.
     */
    static void assertComplete(
        EndpointReadyFutures readyFutures,
        Endpoint... endpoints
    ) {
        for (Endpoint endpoint : endpoints) {
            String name = endpoint.listenerName().get();
            CompletableFuture<Void> future = readyFutures.futures().get(endpoint);
            assertNotNull(future, "Unable to find future for " + name);
            assertTrue(future.isDone(), "Future for " + name + " is not done.");
            assertFalse(future.isCompletedExceptionally(),
                "Future for " + name + " is completed exceptionally.");
        }
    }

    /**
     * Assert that each given endpoint's readiness future exists and is still pending.
     */
    static void assertIncomplete(
        EndpointReadyFutures readyFutures,
        Endpoint... endpoints
    ) {
        for (Endpoint endpoint : endpoints) {
            CompletableFuture<Void> future = readyFutures.futures().get(endpoint);
            assertNotNull(future, "Unable to find future for " + endpoint);
            assertFalse(future.isDone(), "Future for " + endpoint + " is done.");
        }
    }

    /**
     * Assert that each given endpoint's readiness future failed with an exception of
     * the same class and message as {@code throwable}.
     */
    static void assertException(
        EndpointReadyFutures readyFutures,
        Throwable throwable,
        Endpoint... endpoints
    ) {
        for (Endpoint endpoint : endpoints) {
            CompletableFuture<Void> future = readyFutures.futures().get(endpoint);
            assertNotNull(future, "Unable to find future for " + endpoint);
            assertTrue(future.isCompletedExceptionally(),
                "Future for " + endpoint + " is not completed exceptionally.");
            // getNow on a failed future throws CompletionException wrapping the cause.
            Throwable cause = assertThrows(CompletionException.class,
                () -> future.getNow(null)).getCause();
            assertNotNull(cause, "Unable to find CompletionException cause for " + endpoint);
            assertEquals(throwable.getClass(), cause.getClass());
            assertEquals(throwable.getMessage(), cause.getMessage());
        }
    }

    // With no authorizer and no extra stages, all endpoints are ready immediately.
    @Test
    public void testImmediateCompletion() {
        EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder().
            build(Optional.empty(), INFO);
        assertEquals(new HashSet<>(Arrays.asList(EXTERNAL, INTERNAL)),
            readyFutures.futures().keySet());
        assertComplete(readyFutures, EXTERNAL, INTERNAL);
    }

    // A single all-endpoint readiness future blocks every endpoint until it completes.
    @Test
    public void testAddReadinessFuture() {
        CompletableFuture<Void> foo = new CompletableFuture<>();
        EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder().
            addReadinessFuture("foo", foo).
            build(Optional.empty(), INFO);
        assertEquals(new HashSet<>(Arrays.asList(EXTERNAL, INTERNAL)),
            readyFutures.futures().keySet());
        assertIncomplete(readyFutures, EXTERNAL, INTERNAL);
        foo.complete(null);
        assertComplete(readyFutures, EXTERNAL, INTERNAL);
    }

    // All registered all-endpoint futures must complete before any endpoint is ready.
    @Test
    public void testAddMultipleReadinessFutures() {
        CompletableFuture<Void> foo = new CompletableFuture<>();
        CompletableFuture<Void> bar = new CompletableFuture<>();
        EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder().
            addReadinessFuture("foo", foo).
            addReadinessFuture("bar", bar).
            build(Optional.empty(), INFO);
        assertEquals(new HashSet<>(Arrays.asList(EXTERNAL, INTERNAL)),
            readyFutures.futures().keySet());
        assertIncomplete(readyFutures, EXTERNAL, INTERNAL);
        foo.complete(null);
        assertIncomplete(readyFutures, EXTERNAL, INTERNAL);
        bar.complete(null);
        assertComplete(readyFutures, EXTERNAL, INTERNAL);
    }

    // Per-endpoint futures gate each endpoint independently.
    @Test
    public void testAddReadinessFutures() {
        Map<Endpoint, CompletableFuture<Void>> bazFutures = new HashMap<>();
        bazFutures.put(EXTERNAL, new CompletableFuture<>());
        bazFutures.put(INTERNAL, new CompletableFuture<>());
        EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder().
            addReadinessFutures("baz", bazFutures).
            build(Optional.empty(), INFO);
        assertEquals(new HashSet<>(Arrays.asList(EXTERNAL, INTERNAL)),
            readyFutures.futures().keySet());
        assertIncomplete(readyFutures, EXTERNAL, INTERNAL);
        bazFutures.get(EXTERNAL).complete(null);
        assertComplete(readyFutures, EXTERNAL);
        assertIncomplete(readyFutures, INTERNAL);
        bazFutures.get(INTERNAL).complete(null);
        assertComplete(readyFutures, EXTERNAL, INTERNAL);
    }

    // A failed stage propagates its exception to every endpoint's readiness future,
    // even when other stages completed normally.
    @Test
    public void testFailedReadinessFuture() {
        CompletableFuture<Void> foo = new CompletableFuture<>();
        CompletableFuture<Void> bar = new CompletableFuture<>();
        EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder().
            addReadinessFuture("foo", foo).
            addReadinessFuture("bar", bar).
            build(Optional.empty(), INFO);
        assertEquals(new HashSet<>(Arrays.asList(EXTERNAL, INTERNAL)),
            readyFutures.futures().keySet());
        assertIncomplete(readyFutures, EXTERNAL, INTERNAL);
        foo.complete(null);
        assertIncomplete(readyFutures, EXTERNAL, INTERNAL);
        bar.completeExceptionally(new RuntimeException("Failed."));
        assertException(readyFutures, new RuntimeException("Failed."),
            EXTERNAL, INTERNAL);
    }
}
Loading…
Reference in new issue