Browse Source

MINOR: Fix typos in code comments

This patch fixes all occurrences of two consecutive 'the's in the code comments.

Author: Ishita Mandhan (imandha@us.ibm.com)

Author: Ishita Mandhan <imandha@us.ibm.com>

Reviewers: Guozhang Wang <wangguoz@gmail.com>

Closes #1240 from imandhan/typofixes
pull/1240/merge
Ishita Mandhan 9 years ago committed by Guozhang Wang
parent
commit
0bf61039c8
  1. 2
      clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
  2. 2
      clients/src/main/java/org/apache/kafka/common/network/LoginType.java
  3. 2
      clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java
  4. 2
      connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java
  5. 2
      core/src/main/scala/kafka/admin/TopicCommand.scala
  6. 2
      core/src/main/scala/kafka/utils/ZkUtils.scala
  7. 2
      core/src/test/scala/unit/kafka/log/LogTest.scala
  8. 2
      core/src/test/scala/unit/kafka/server/ClientQuotaManagerTest.scala
  9. 2
      docs/streams.html

2
clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java

@@ -467,7 +467,7 @@ public final class RecordAccumulator {
abortBatches();
} while (appendsInProgress());
// After this point, no thread will append any messages because they will see the close
// flag set. We need to do the last abort after no thread was appending in case the there was a new
// flag set. We need to do the last abort after no thread was appending in case there was a new
// batch appended by the last appending thread.
abortBatches();
this.batches.clear();

2
clients/src/main/java/org/apache/kafka/common/network/LoginType.java

@@ -20,7 +20,7 @@ import org.apache.kafka.common.security.JaasUtils;
/**
* The type of the login context, it should be SERVER for the broker and CLIENT for the clients (i.e. consumer and
* producer). It provides the the login context name which defines the section of the JAAS configuration file to be used
* producer). It provides the login context name which defines the section of the JAAS configuration file to be used
* for login.
*/
public enum LoginType {

2
clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java

@@ -478,7 +478,7 @@ public class Protocol {
"The current state of the group (one of: Dead, Stable, AwaitingSync, or PreparingRebalance, or empty if there is no active group)"),
new Field("protocol_type",
STRING,
"The current group protocol type (will be empty if the there is no active group)"),
"The current group protocol type (will be empty if there is no active group)"),
new Field("protocol",
STRING,
"The current group protocol (only provided if the group is Stable)"),

2
connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java

@@ -299,7 +299,7 @@ public class ConnectSchema implements Schema {
/**
* Get the {@link Schema.Type} associated with the the given class.
* Get the {@link Schema.Type} associated with the given class.
*
* @param klass the Class to
* @return the corresponding type, nor null if there is no matching type

2
core/src/main/scala/kafka/admin/TopicCommand.scala

@@ -379,7 +379,7 @@ object TopicCommand extends Logging {
def shortMessageSizeWarning(maxMessageBytes: Int): String = {
"\n\n" +
"*****************************************************************************************************\n" +
"*** WARNING: you are creating a topic where the the max.message.bytes is greater than the consumer ***\n" +
"*** WARNING: you are creating a topic where the max.message.bytes is greater than the consumer ***\n" +
"*** default. This operation is potentially dangerous. Consumers will get failures if their ***\n" +
"*** fetch.message.max.bytes < the value you are using. ***\n" +
"*****************************************************************************************************\n" +

2
core/src/main/scala/kafka/utils/ZkUtils.scala

@@ -668,7 +668,7 @@ class ZkUtils(val zkClient: ZkClient,
}
}
// Parses without deduplicating keys so the the data can be checked before allowing reassignment to proceed
// Parses without deduplicating keys so the data can be checked before allowing reassignment to proceed
def parsePartitionReassignmentDataWithoutDedup(jsonData: String): Seq[(TopicAndPartition, Seq[Int])] = {
Json.parseFull(jsonData) match {
case Some(m) =>

2
core/src/test/scala/unit/kafka/log/LogTest.scala

@@ -228,7 +228,7 @@ class LogTest extends JUnitSuite {
/**
* Test reading at the boundary of the log, specifically
* - reading from the logEndOffset should give an empty message set
* - reading from the the maxOffset should give an empty message set
* - reading from the maxOffset should give an empty message set
* - reading beyond the log end offset should throw an OffsetOutOfRangeException
*/
@Test

2
core/src/test/scala/unit/kafka/server/ClientQuotaManagerTest.scala

@@ -104,7 +104,7 @@ class ClientQuotaManagerTest {
assertEquals(10, numCallbacks)
time.sleep(sleepTime)
// Callback can only be triggered after the the delay time passes
// Callback can only be triggered after the delay time passes
clientMetrics.throttledRequestReaper.doWork()
assertEquals(0, queueSizeMetric.value().toInt)
assertEquals(11, numCallbacks)

2
docs/streams.html

@@ -64,7 +64,7 @@ developers define and connect custom processors as well as to interact with <a h
<h5><a id="streams_time" href="#streams_time">Time</a></h5>
<p>
A critical aspect in stream processing is the the notion of <b>time</b>, and how it is modeled and integrated.
A critical aspect in stream processing is the notion of <b>time</b>, and how it is modeled and integrated.
For example, some operations such as <b>windowing</b> are defined based on time boundaries.
</p>
<p>

Loading…
Cancel
Save