From 86062e9a78dccad74e012f11755025512ad5cf63 Mon Sep 17 00:00:00 2001
From: Adem Efe Gencer
Date: Sun, 5 Nov 2017 18:00:43 -0800
Subject: [PATCH] KAFKA-6157; Fix repeated words in JavaDoc and comments.
Author: Adem Efe Gencer
Reviewers: Jiangjie Qin
Closes #4170 from efeg/bug/typoFix
---
.../org/apache/kafka/clients/InFlightRequests.java | 4 ++--
.../java/org/apache/kafka/clients/NetworkClient.java | 2 +-
.../apache/kafka/clients/consumer/KafkaConsumer.java | 8 ++++----
.../apache/kafka/clients/producer/KafkaProducer.java | 2 +-
.../kafka/common/record/MemoryRecordsBuilder.java | 2 +-
.../authenticator/SaslClientAuthenticator.java | 2 +-
.../runtime/distributed/DistributedHerder.java | 2 +-
.../org/apache/kafka/connect/util/KafkaBasedLog.java | 2 +-
.../connect/storage/KafkaConfigBackingStoreTest.java | 4 ++--
core/src/main/scala/kafka/cluster/Partition.scala | 2 +-
.../kafka/coordinator/group/GroupMetadata.scala | 3 ++-
.../transaction/TransactionStateManager.scala | 2 +-
.../scala/kafka/server/DelayedDeleteRecords.scala | 2 +-
core/src/main/scala/kafka/tools/JmxTool.scala | 2 +-
.../integration/kafka/api/ProducerBounceTest.scala | 2 +-
.../unit/kafka/admin/DeleteConsumerGroupTest.scala | 2 +-
.../test/scala/unit/kafka/log/LogManagerTest.scala | 2 +-
.../scala/unit/kafka/network/SocketServerTest.scala | 2 +-
core/src/test/scala/unit/kafka/utils/TestUtils.scala | 2 +-
docs/connect.html | 2 +-
docs/documentation/streams/architecture.html | 2 +-
docs/documentation/streams/core-concepts.html | 2 +-
docs/documentation/streams/developer-guide.html | 2 +-
docs/documentation/streams/index.html | 2 +-
docs/documentation/streams/quickstart.html | 2 +-
docs/documentation/streams/tutorial.html | 2 +-
docs/documentation/streams/upgrade-guide.html | 2 +-
docs/implementation.html | 2 +-
.../org/apache/kafka/streams/StreamsBuilder.java | 10 +++++-----
.../apache/kafka/streams/kstream/KStreamBuilder.java | 12 ++++++------
.../streams/kstream/internals/KStreamAggregate.java | 2 +-
.../streams/kstream/internals/KStreamReduce.java | 2 +-
.../kstream/internals/KStreamWindowAggregate.java | 2 +-
.../kstream/internals/KStreamWindowReduce.java | 2 +-
.../streams/kstream/internals/KTableAggregate.java | 2 +-
.../streams/kstream/internals/KTableReduce.java | 2 +-
.../streams/processor/internals/StreamTask.java | 2 +-
tests/setup.cfg | 2 +-
tests/unit/setup.cfg | 2 +-
.../org/apache/kafka/tools/ProducerPerformance.java | 2 +-
40 files changed, 55 insertions(+), 54 deletions(-)
diff --git a/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java b/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java
index f9773297dbb..3689a09a117 100644
--- a/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java
+++ b/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java
@@ -60,7 +60,7 @@ final class InFlightRequests {
}
/**
- * Get the oldest request (the one that that will be completed next) for the given node
+ * Get the oldest request (the one that will be completed next) for the given node
*/
public NetworkClient.InFlightRequest completeNext(String node) {
return requestQueue(node).pollLast();
@@ -167,5 +167,5 @@ final class InFlightRequests {
}
return nodeIds;
}
-
+
}
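
The corrected JavaDoc describes the invariant behind completeNext: requests for a node live in a per-node deque, new requests are added at the head, and the oldest one (the one that will be completed next) is taken from the tail, matching the requestQueue(node).pollLast() call above. A minimal, hypothetical Java sketch of that shape, not the actual NetworkClient internals:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical sketch: one FIFO deque per node, mirroring the
    // requestQueue(node).pollLast() pattern in the patched code.
    final class PerNodeQueues<R> {
        private final Map<String, Deque<R>> queues = new HashMap<>();

        void add(String node, R request) {
            // newest requests go to the head...
            queues.computeIfAbsent(node, n -> new ArrayDeque<>()).addFirst(request);
        }

        R completeNext(String node) {
            // ...so the oldest request (completed next) comes off the tail
            return queues.get(node).pollLast();
        }
    }
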
diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
index ee7258ad2f9..0654a91c8b2 100644
--- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
+++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
@@ -369,7 +369,7 @@ public class NetworkClient implements KafkaClient {
if (!isInternalRequest) {
// If this request came from outside the NetworkClient, validate
// that we can send data. If the request is internal, we trust
- // that that internal code has done this validation. Validation
+ // that internal code has done this validation. Validation
// will be slightly different for some internal requests (for
// example, ApiVersionsRequests can be sent prior to being in
// READY state.)
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
index 05bca2234c6..e9499cbd465 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
@@ -423,7 +423,7 @@ import java.util.regex.Pattern;
*
* Transactions were introduced in Kafka 0.11.0 wherein applications can write to multiple topics and partitions atomically.
* In order for this to work, consumers reading from these partitions should be configured to only read committed data.
- * This can be achieved by by setting the {@code isolation.level=read_committed} in the consumer's configuration.
+ * This can be achieved by setting the {@code isolation.level=read_committed} in the consumer's configuration.
*
*
 * In {@code read_committed} mode, the consumer will read only those transactional messages which have been
@@ -704,9 +704,9 @@ public class KafkaConsumer<K, V> implements Consumer<K, V> {
IsolationLevel isolationLevel = IsolationLevel.valueOf(
config.getString(ConsumerConfig.ISOLATION_LEVEL_CONFIG).toUpperCase(Locale.ROOT));
Sensor throttleTimeSensor = Fetcher.throttleTimeSensor(metrics, metricsRegistry.fetcherMetrics);
-
- int heartbeatIntervalMs = config.getInt(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG);
-
+
+ int heartbeatIntervalMs = config.getInt(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG);
+
NetworkClient netClient = new NetworkClient(
new Selector(config.getLong(ConsumerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder, logContext),
this.metadata,
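
For readers landing on the corrected sentence: isolation.level=read_committed is ordinary consumer configuration. A minimal sketch (bootstrap servers, group id, and topic are placeholders):

    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
              "org.apache.kafka.common.serialization.StringDeserializer");
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
              "org.apache.kafka.common.serialization.StringDeserializer");
    // Only read committed transactional data, as the fixed JavaDoc describes.
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Collections.singletonList("example-topic"));       // placeholder
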
diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
index 8004180b647..b3cff19114e 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
@@ -951,7 +951,7 @@ public class KafkaProducer<K, V> implements Producer<K, V> {
*
*
* Applications don't need to call this method for transactional producers, since the {@link #commitTransaction()} will
- * flush all buffered records before performing the commit. This ensures that all the the {@link #send(ProducerRecord)}
+ * flush all buffered records before performing the commit. This ensures that all the {@link #send(ProducerRecord)}
* calls made since the previous {@link #beginTransaction()} are completed before the commit.
*
*
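
The corrected sentence is about the transactional send path: commitTransaction() flushes buffered records itself, so no explicit flush() is needed. A minimal sketch using the public producer API (servers, transactional id, and topic are placeholders):

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;

    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
    props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "example-txn-id");  // placeholder
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
              "org.apache.kafka.common.serialization.StringSerializer");
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
              "org.apache.kafka.common.serialization.StringSerializer");

    KafkaProducer<String, String> producer = new KafkaProducer<>(props);
    producer.initTransactions();
    producer.beginTransaction();
    producer.send(new ProducerRecord<>("example-topic", "key", "value"));
    // No explicit flush() needed: commitTransaction() flushes all buffered
    // records before performing the commit, per the fixed JavaDoc.
    producer.commitTransaction();
    producer.close();
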
diff --git a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
index ad0bab74d39..a9b57ac22df 100644
--- a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
+++ b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
@@ -384,7 +384,7 @@ public class MemoryRecordsBuilder {
}
/**
- * Append a record and return its checksum for message format v0 and v1, or null for for v2 and above.
+ * Append a record and return its checksum for message format v0 and v1, or null for v2 and above.
*/
private Long appendWithOffset(long offset, boolean isControlRecord, long timestamp, ByteBuffer key,
ByteBuffer value, Header[] headers) {
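
A hedged illustration of the documented contract only; MemoryRecordsBuilder is an internal class and its append(...) overloads have changed across versions, so treat the signature below as an assumption:

    import org.apache.kafka.common.record.MemoryRecordsBuilder;

    // Assumed overload: Long append(long timestamp, byte[] key, byte[] value).
    static boolean hasPerRecordChecksum(MemoryRecordsBuilder builder,
                                        long timestamp, byte[] key, byte[] value) {
        Long checksum = builder.append(timestamp, key, value);
        // v0/v1 return the record checksum; v2 and above return null, since
        // integrity is tracked by a batch-level CRC instead of per record.
        return checksum != null;
    }
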
diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java
index b01ae4cd32c..8b0116563d8 100644
--- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java
+++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java
@@ -186,7 +186,7 @@ public class SaslClientAuthenticator implements Authenticator {
if (authenticateVersion != null)
saslAuthenticateVersion((short) Math.min(authenticateVersion.maxVersion, ApiKeys.SASL_AUTHENTICATE.latestVersion()));
setSaslState(SaslState.SEND_HANDSHAKE_REQUEST);
- // Fall through to send send handshake request with the latest supported version
+ // Fall through to send handshake request with the latest supported version
}
case SEND_HANDSHAKE_REQUEST:
SaslHandshakeRequest handshakeRequest = createSaslHandshakeRequest(saslHandshakeVersion);
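
For orientation, the handshake above is driven by ordinary client security settings. A minimal sketch of a PLAIN-over-SASL_PLAINTEXT client configuration (credentials are placeholders):

    import java.util.Properties;

    Properties props = new Properties();
    props.put("security.protocol", "SASL_PLAINTEXT");
    props.put("sasl.mechanism", "PLAIN");
    props.put("sasl.jaas.config",
        "org.apache.kafka.common.security.plain.PlainLoginModule required "
            + "username=\"alice\" password=\"alice-secret\";"); // placeholders
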
diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
index 4d3d07b1023..79d32da65c6 100644
--- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
@@ -1213,7 +1213,7 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
public void onRevoked(String leader, Collection<String> connectors, Collection<ConnectorTaskId> tasks) {
log.info("Rebalance started");
- // Note that since we don't reset the assignment, we we don't revoke leadership here. During a rebalance,
+ // Note that since we don't reset the assignment, we don't revoke leadership here. During a rebalance,
// it is still important to have a leader that can write configs, offsets, etc.
if (rebalanceResolved) {
diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java
index 0e190bc3767..de1ceb3be10 100644
--- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java
@@ -316,7 +316,7 @@ public class KafkaBasedLog<K, V> {
synchronized (KafkaBasedLog.this) {
// Only invoke exactly the number of callbacks we found before triggering the read to log end
- // since it is possible for another write + readToEnd to sneak in in the meantime
+ // since it is possible for another write + readToEnd to sneak in the meantime
for (int i = 0; i < numCallbacks; i++) {
Callback cb = readLogEndOffsetCallbacks.poll();
cb.onCompletion(null, null);
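
The corrected comment encodes a concurrency invariant: snapshot the number of pending callbacks before invoking any, so that a concurrent write plus readToEnd cannot be completed prematurely. A self-contained sketch of that pattern (hypothetical names, not the KafkaBasedLog internals):

    import java.util.ArrayDeque;
    import java.util.Queue;

    final class ReadToEndCallbacks {
        private final Queue<Runnable> callbacks = new ArrayDeque<>();

        synchronized void add(Runnable cb) {
            callbacks.add(cb);
        }

        synchronized void completeCurrent() {
            // snapshot first, then invoke exactly that many, so callbacks
            // registered by a concurrent write + readToEnd are left pending
            int numCallbacks = callbacks.size();
            for (int i = 0; i < numCallbacks; i++) {
                callbacks.poll().run();
            }
        }
    }
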
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
index e9dd18e1377..aac1b78c918 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
@@ -282,7 +282,7 @@ public class KafkaConfigBackingStoreTest {
assertNull(configState.taskConfig(TASK_IDS.get(0)));
assertNull(configState.taskConfig(TASK_IDS.get(1)));
- // Writing task task configs should block until all the writes have been performed and the root record update
+ // Writing task configs should block until all the writes have been performed and the root record update
// has completed
        List
diff --git a/docs/implementation.html b/docs/implementation.html
--- a/docs/implementation.html
+++ b/docs/implementation.html
-We use the the same varint encoding as Protobuf. More information on the latter can be found here. The count of headers in a record
+We use the same varint encoding as Protobuf. More information on the latter can be found here. The count of headers in a record
 is also encoded as a varint.
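
Since the corrected sentence references Protobuf-style varints, here is a minimal sketch of that encoding: seven payload bits per byte with the high bit as a continuation flag, plus the zigzag mapping used for signed values. Kafka's actual implementation lives in its internal ByteUtils class:

    import java.io.ByteArrayOutputStream;

    // Each byte carries 7 payload bits; the high bit marks continuation.
    static byte[] writeUnsignedVarint(int value) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        while ((value & 0xFFFFFF80) != 0) {
            out.write((value & 0x7F) | 0x80); // 7 bits + continuation flag
            value >>>= 7;
        }
        out.write(value); // final byte, high bit clear
        return out.toByteArray();
    }

    // Zigzag maps signed ints to small unsigned ints (0, -1, 1, -2 -> 0, 1, 2, 3)
    // so that negative numbers also encode compactly.
    static int zigZagEncode(int value) {
        return (value << 1) ^ (value >> 31);
    }
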
diff --git a/streams/src/main/java/org/apache/kafka/streams/StreamsBuilder.java b/streams/src/main/java/org/apache/kafka/streams/StreamsBuilder.java
index b5cc6d79c6c..0aac45a20bb 100644
--- a/streams/src/main/java/org/apache/kafka/streams/StreamsBuilder.java
+++ b/streams/src/main/java/org/apache/kafka/streams/StreamsBuilder.java
@@ -59,7 +59,7 @@ public class StreamsBuilder {
final InternalTopologyBuilder internalTopologyBuilder = topology.internalTopologyBuilder;
private final InternalStreamsBuilder internalStreamsBuilder = new InternalStreamsBuilder(internalTopologyBuilder);
-
+
/**
* Create a {@link KStream} from the specified topics.
* The default {@code "auto.offset.reset"} strategy, default {@link TimestampExtractor}, and default key and value
@@ -237,7 +237,7 @@ public class StreamsBuilder {
* If this is not the case the returned {@link KTable} will be corrupted.
*
* The resulting {@link KTable} will be materialized in a local {@link KeyValueStore} with an internal
- * store name. Note that that store name may not be queriable through Interactive Queries.
+ * store name. Note that store name may not be queriable through Interactive Queries.
* No internal changelog topic is created since the original input topic can be used for recovery (cf.
* methods of {@link KGroupedStream} and {@link KGroupedTable} that return a {@link KTable}).
*
@@ -258,7 +258,7 @@ public class StreamsBuilder {
* If this is not the case the returned {@link KTable} will be corrupted.
*
* The resulting {@link KTable} will be materialized in a local {@link KeyValueStore} with an internal
- * store name. Note that that store name may not be queriable through Interactive Queries.
+ * store name. Note that store name may not be queriable through Interactive Queries.
* No internal changelog topic is created since the original input topic can be used for recovery (cf.
* methods of {@link KGroupedStream} and {@link KGroupedTable} that return a {@link KTable}).
*
@@ -312,7 +312,7 @@ public class StreamsBuilder {
* Input {@link KeyValue records} with {@code null} key will be dropped.
*
* The resulting {@link GlobalKTable} will be materialized in a local {@link KeyValueStore} with an internal
- * store name. Note that that store name may not be queriable through Interactive Queries.
+ * store name. Note that store name may not be queriable through Interactive Queries.
* No internal changelog topic is created since the original input topic can be used for recovery (cf.
* methods of {@link KGroupedStream} and {@link KGroupedTable} that return a {@link KTable}).
*
@@ -343,7 +343,7 @@ public class StreamsBuilder {
* Input {@link KeyValue records} with {@code null} key will be dropped.
*
* The resulting {@link GlobalKTable} will be materialized in a local {@link KeyValueStore} with an internal
- * store name. Note that that store name may not be queriable through Interactive Queries.
+ * store name. Note that store name may not be queriable through Interactive Queries.
* No internal changelog topic is created since the original input topic can be used for recovery (cf.
* methods of {@link KGroupedStream} and {@link KGroupedTable} that return a {@link KTable}).
*
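
The repeated fix above concerns stores with internal names, which may not be queriable. Naming the store explicitly via Materialized makes it reachable through Interactive Queries; a minimal sketch (topic and store names are placeholders):

    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    StreamsBuilder builder = new StreamsBuilder();

    // Internal store name; per the fixed JavaDoc it may not be queriable:
    KTable<String, String> internal = builder.table("topic-a");   // placeholder

    // An explicitly named store is queriable through Interactive Queries:
    KTable<String, String> queriable = builder.table(
        "topic-b",                                                // placeholder
        Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("example-store"));
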
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/KStreamBuilder.java b/streams/src/main/java/org/apache/kafka/streams/kstream/KStreamBuilder.java
index 77745d3113c..d747ce8d049 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/KStreamBuilder.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/KStreamBuilder.java
@@ -444,7 +444,7 @@ public class KStreamBuilder extends org.apache.kafka.streams.processor.TopologyBuilder {
* If this is not the case the returned {@link KTable} will be corrupted.
*
* The resulting {@link KTable} will be materialized in a local {@link KeyValueStore} with an internal
- * store name. Note that that store name may not be queriable through Interactive Queries.
+ * store name. Note that store name may not be queriable through Interactive Queries.
* No internal changelog topic is created since the original input topic can be used for recovery (cf.
* methods of {@link KGroupedStream} and {@link KGroupedTable} that return a {@link KTable}).
* @param topic the topic name; cannot be {@code null}
@@ -537,7 +537,7 @@ public class KStreamBuilder extends org.apache.kafka.streams.processor.TopologyBuilder {
* If this is not the case the returned {@link KTable} will be corrupted.
*
* The resulting {@link KTable} will be materialized in a local {@link KeyValueStore} with an internal
- * store name. Note that that store name may not be queriable through Interactive Queries.
+ * store name. Note that store name may not be queriable through Interactive Queries.
* No internal changelog topic is created since the original input topic can be used for recovery (cf.
* methods of {@link KGroupedStream} and {@link KGroupedTable} that return a {@link KTable}).
*
@@ -714,7 +714,7 @@ public class KStreamBuilder extends org.apache.kafka.streams.processor.TopologyBuilder {
* If this is not the case the returned {@link KTable} will be corrupted.
*
* The resulting {@link KTable} will be materialized in a local {@link KeyValueStore} with an internal
- * store name. Note that that store name may not be queriable through Interactive Queries.
+ * store name. Note that store name may not be queriable through Interactive Queries.
* No internal changelog topic is created since the original input topic can be used for recovery (cf.
* methods of {@link KGroupedStream} and {@link KGroupedTable} that return a {@link KTable}).
*
@@ -908,7 +908,7 @@ public class KStreamBuilder extends org.apache.kafka.streams.processor.TopologyBuilder {
* If this is not the case the returned {@link KTable} will be corrupted.
*
* The resulting {@link KTable} will be materialized in a local {@link KeyValueStore} with an internal
- * store name. Note that that store name may not be queriable through Interactive Queries.
+ * store name. Note that store name may not be queriable through Interactive Queries.
* No internal changelog topic is created since the original input topic can be used for recovery (cf.
* methods of {@link KGroupedStream} and {@link KGroupedTable} that return a {@link KTable}).
*
@@ -1007,7 +1007,7 @@ public class KStreamBuilder extends org.apache.kafka.streams.processor.TopologyBuilder {
* Input {@link KeyValue records} with {@code null} key will be dropped.
*
* The resulting {@link GlobalKTable} will be materialized in a local {@link KeyValueStore} with an internal
- * store name. Note that that store name may not be queriable through Interactive Queries.
+ * store name. Note that store name may not be queriable through Interactive Queries.
* No internal changelog topic is created since the original input topic can be used for recovery (cf.
* methods of {@link KGroupedStream} and {@link KGroupedTable} that return a {@link KTable}).
*
@@ -1196,7 +1196,7 @@ public class KStreamBuilder extends org.apache.kafka.streams.processor.TopologyBuilder {
* Input {@link KeyValue records} with {@code null} key will be dropped.
*
* The resulting {@link GlobalKTable} will be materialized in a local {@link KeyValueStore} with an internal
- * store name. Note that that store name may not be queriable through Interactive Queries.
+ * store name. Note that store name may not be queriable through Interactive Queries.
* No internal changelog topic is created since the original input topic can be used for recovery (cf.
* methods of {@link KGroupedStream} and {@link KGroupedTable} that return a {@link KTable}).
*
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamAggregate.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamAggregate.java
index 67b65a38701..b1abdc29de0 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamAggregate.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamAggregate.java
@@ -74,7 +74,7 @@ public class KStreamAggregate<K, V, T> implements KStreamAggProcessorSupplier<K, K, V, T> {
if (oldAgg == null)
oldAgg = initializer.apply();
- // try to add the new new value (there will never be old value)
+ // try to add the new value (there will never be old value)
T newAgg = aggregator.apply(key, value, oldAgg);
// update the store with the new value
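
At the DSL level, the initializer/aggregator pair in the fixed comment corresponds to KGroupedStream#aggregate: the initializer covers the oldAgg == null case and the aggregator folds in the new value. A usage-level sketch (names are placeholders; this is not the internal processor code):

    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    static KTable<String, Long> countValues(KStream<String, String> lines) {
        return lines
            .groupByKey()
            .aggregate(
                () -> 0L,                                   // initializer: the oldAgg == null case
                (key, value, aggregate) -> aggregate + 1L,  // aggregator adds the new value
                Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"));
    }
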
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowReduce.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowReduce.java
index c20601af0de..7d02f118f89 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowReduce.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowReduce.java
@@ -98,7 +98,7 @@ public class KStreamWindowReduce<K, V, W extends Window> implements KStreamAggProcessorSupplier<K, Windowed<K>, V, V> {
V oldAgg = entry.value;
V newAgg = oldAgg;
- // try to add the new new value (there will never be old value)
+ // try to add the new value (there will never be old value)
if (newAgg == null) {
newAgg = value;
} else {
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableAggregate.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableAggregate.java
index 973de0f7379..0fe3e1f42b1 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableAggregate.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableAggregate.java
@@ -84,7 +84,7 @@ public class KTableAggregate<K, V, T> implements KTableProcessorSupplier<K, V, T> {
V oldAgg = store.get(key);
V newAgg = oldAgg;
- // first try to add the new new value
+ // first try to add the new value
if (value.newValue != null) {
if (newAgg == null) {
newAgg = value.newValue;
diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java
index 8180b2cf7b1..06f45ed24f4 100644
--- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java
+++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java
@@ -469,7 +469,7 @@ public class StreamTask extends AbstractTask implements ProcessorNodePunctuator
transactionInFlight = false;
} catch (final ProducerFencedException ignore) {
/* TODO
- * this should actually never happen atm as we we guard the call to #abortTransaction
+ * this should actually never happen atm as we guard the call to #abortTransaction
* -> the reason for the guard is a "bug" in the Producer -- it throws IllegalStateException
* instead of ProducerFencedException atm. We can remove the isZombie flag after KAFKA-5604 got
* fixed and fall-back to this catch-and-swallow code
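
A hedged sketch of the catch-and-swallow pattern the TODO describes: a fenced (zombie) producer should not crash the closing task, so the exception is deliberately ignored:

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.common.errors.ProducerFencedException;

    static void abortQuietly(KafkaProducer<?, ?> producer) {
        try {
            producer.abortTransaction();
        } catch (final ProducerFencedException ignore) {
            // another producer with the same transactional.id took over;
            // there is nothing left for this zombie instance to abort
        }
    }
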
diff --git a/tests/setup.cfg b/tests/setup.cfg
index c70f1e498bd..974d5bb9a97 100644
--- a/tests/setup.cfg
+++ b/tests/setup.cfg
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# pytest configuration (can also be defined in in tox.ini or pytest.ini file)
+# pytest configuration (can also be defined in tox.ini or pytest.ini file)
#
# This file defines naming convention and root search directory for autodiscovery of
# pytest unit tests for the system test service classes.
diff --git a/tests/unit/setup.cfg b/tests/unit/setup.cfg
index e757a99fb3b..3470da12185 100644
--- a/tests/unit/setup.cfg
+++ b/tests/unit/setup.cfg
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# pytest configuration (can also be defined in in tox.ini or pytest.ini file)
+# pytest configuration (can also be defined in tox.ini or pytest.ini file)
#
# To ease possible confusion, prefix muckrake *unit* tests with 'check' instead of 'test', since
# many muckrake files, classes, and methods have 'test' somewhere in the name
diff --git a/tools/src/main/java/org/apache/kafka/tools/ProducerPerformance.java b/tools/src/main/java/org/apache/kafka/tools/ProducerPerformance.java
index 0436d67080d..d7572b0f33d 100644
--- a/tools/src/main/java/org/apache/kafka/tools/ProducerPerformance.java
+++ b/tools/src/main/java/org/apache/kafka/tools/ProducerPerformance.java
@@ -289,7 +289,7 @@ public class ProducerPerformance {
.metavar("TRANSACTION-DURATION")
.dest("transactionDurationMs")
.setDefault(0L)
- .help("The max age of each transaction. The commitTransaction will be called after this this time has elapsed. Transactions are only enabled if this value is positive.");
+ .help("The max age of each transaction. The commitTransaction will be called after this time has elapsed. Transactions are only enabled if this value is positive.");
return parser;
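
The option above is defined with argparse4j. A minimal sketch of the same builder chain, with the parser name treated as a placeholder:

    import net.sourceforge.argparse4j.ArgumentParsers;
    import net.sourceforge.argparse4j.inf.ArgumentParser;

    ArgumentParser parser = ArgumentParsers.newArgumentParser("producer-performance"); // placeholder
    parser.addArgument("--transaction-duration-ms")
            .type(Long.class)
            .metavar("TRANSACTION-DURATION")
            .dest("transactionDurationMs")
            .setDefault(0L)
            .help("The max age of each transaction. The commitTransaction will be called after this time has elapsed. Transactions are only enabled if this value is positive.");
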