
MINOR: A few cleanups and compiler warning fixes (#6986)

Reviewers: Jason Gustafson <jason@confluent.io>
Branch: pull/7034/head
Lee Dongjin authored 5 years ago; committed by Jason Gustafson
Commit: 05cba28ca7
Changed files:

  1. clients/src/main/java/org/apache/kafka/common/utils/Utils.java (10 lines)
  2. clients/src/test/java/org/apache/kafka/common/record/ByteBufferLogInputStreamTest.java (7 lines)
  3. core/src/test/scala/unit/kafka/log/LogValidatorTest.scala (12 lines)
  4. core/src/test/scala/unit/kafka/log/TransactionIndexTest.scala (2 lines)
  5. core/src/test/scala/unit/kafka/security/auth/AclTest.scala (2 lines)

clients/src/main/java/org/apache/kafka/common/utils/Utils.java (10 changed lines)

@@ -602,15 +602,6 @@ public final class Utils {
         return sw.toString();
     }
 
-    /**
-     * Print an error message and shutdown the JVM
-     * @param message The error message
-     */
-    public static void croak(String message) {
-        System.err.println(message);
-        Exit.exit(1);
-    }
-
     /**
      * Read a buffer into a Byte array for the given offset and length
      */
@@ -869,7 +860,6 @@ public final class Utils {
         }
     }
 
-
     /**
      * A cheap way to deterministically convert a number to a positive value. When the input is
      * positive, the original value is returned. When the input number is negative, the returned
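
Utils.croak is removed outright, presumably because nothing calls it anymore. Anyone who relied on the pattern can inline the two statements at the call site; a minimal Scala sketch (the object and method names are hypothetical, Exit is the real org.apache.kafka.common.utils.Exit):

    import org.apache.kafka.common.utils.Exit

    object Startup {
      // Inlined equivalent of the removed Utils.croak(String):
      // print the message to stderr, then terminate the JVM with a non-zero status.
      def dieWith(message: String): Unit = {
        System.err.println(message)
        Exit.exit(1) // delegates to the registered exit procedure (System.exit by default)
      }
    }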

clients/src/test/java/org/apache/kafka/common/record/ByteBufferLogInputStreamTest.java (7 changed lines)

@@ -19,7 +19,6 @@ package org.apache.kafka.common.record;
 
 import org.apache.kafka.common.errors.CorruptRecordException;
 import org.junit.Test;
 
-import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Iterator;
@@ -56,7 +55,7 @@ public class ByteBufferLogInputStreamTest {
     }
 
     @Test(expected = CorruptRecordException.class)
-    public void iteratorRaisesOnTooSmallRecords() throws IOException {
+    public void iteratorRaisesOnTooSmallRecords() {
         ByteBuffer buffer = ByteBuffer.allocate(1024);
         MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
         builder.append(15L, "a".getBytes(), "1".getBytes());
@@ -79,7 +78,7 @@ public class ByteBufferLogInputStreamTest {
     }
 
     @Test(expected = CorruptRecordException.class)
-    public void iteratorRaisesOnInvalidMagic() throws IOException {
+    public void iteratorRaisesOnInvalidMagic() {
         ByteBuffer buffer = ByteBuffer.allocate(1024);
         MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
         builder.append(15L, "a".getBytes(), "1".getBytes());
@@ -102,7 +101,7 @@ public class ByteBufferLogInputStreamTest {
     }
 
     @Test(expected = CorruptRecordException.class)
-    public void iteratorRaisesOnTooLargeRecords() throws IOException {
+    public void iteratorRaisesOnTooLargeRecords() {
         ByteBuffer buffer = ByteBuffer.allocate(1024);
         MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
         builder.append(15L, "a".getBytes(), "1".getBytes());
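
Each of these tests declares its expected failure on the @Test annotation, and none of the bodies can actually throw an IOException, so the throws clauses were dead weight. A minimal Scala sketch of the same JUnit 4 pattern (class, method, and message are illustrative):

    import org.apache.kafka.common.errors.CorruptRecordException
    import org.junit.Test

    class CorruptionTest {
      // The annotation, not a throws clause, declares the expected failure:
      // the test passes only if the body throws CorruptRecordException.
      @Test(expected = classOf[CorruptRecordException])
      def raisesOnCorruptInput(): Unit = {
        // Stand-in for iterating over a deliberately truncated record batch.
        throw new CorruptRecordException("record size is smaller than the minimum record overhead")
      }
    }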

core/src/test/scala/unit/kafka/log/LogValidatorTest.scala (12 changed lines)

@@ -1178,7 +1178,7 @@ class LogValidatorTest {
   }
 
   private def createTwoBatchedRecords(magicValue: Byte,
-                                      timestamp: Long = RecordBatch.NO_TIMESTAMP,
+                                      timestamp: Long,
                                       codec: CompressionType): MemoryRecords = {
     val buf = ByteBuffer.allocate(2048)
     var builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, 0L)
@@ -1193,16 +1193,6 @@ class LogValidatorTest {
     MemoryRecords.readableRecords(buf.slice())
   }
 
-  private def createDiscontinuousOffsetRecords(magicValue: Byte,
-                                               codec: CompressionType): MemoryRecords = {
-    val buf = ByteBuffer.allocate(512)
-    val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, 0L)
-    builder.appendWithOffset(0, RecordBatch.NO_TIMESTAMP, null, "hello".getBytes)
-    builder.appendWithOffset(2, RecordBatch.NO_TIMESTAMP, null, "there".getBytes)
-    builder.appendWithOffset(3, RecordBatch.NO_TIMESTAMP, null, "beautiful".getBytes)
-    builder.build()
-  }
-
   /* check that offsets are assigned consecutively from the given base offset */
   def checkOffsets(records: MemoryRecords, baseOffset: Long) {
     assertTrue("Message set should not be empty", records.records.asScala.nonEmpty)
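
createDiscontinuousOffsetRecords had no remaining callers, and the timestamp default on createTwoBatchedRecords was presumably never relied on, so both were trimmed. With the unused-member lints scalac offers, dead private members surface as compiler warnings; a toy reproduction (object and members hypothetical, and the flag spelling varies by Scala version):

    // Compile with scalac -Ywarn-unused:privates (Scala 2.12)
    // or -Wunused:privates (Scala 2.13) to surface the warning.
    object LintDemo {
      private def used(): Int = 1
      private def neverCalled(): Int = 2 // warning: private method neverCalled in object LintDemo is never used
      def run(): Int = used()
    }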

core/src/test/scala/unit/kafka/log/TransactionIndexTest.scala (2 changed lines)

@@ -22,7 +22,7 @@ import kafka.utils.TestUtils
 import org.apache.kafka.common.requests.FetchResponse.AbortedTransaction
 import org.junit.Assert._
 import org.junit.{After, Before, Test}
-import org.scalatest.junit.JUnitSuite
+import org.scalatestplus.junit.JUnitSuite
 
 class TransactionIndexTest extends JUnitSuite {
   var file: File = _

core/src/test/scala/unit/kafka/security/auth/AclTest.scala (2 changed lines)

@@ -21,7 +21,7 @@ import java.nio.charset.StandardCharsets.UTF_8
 import kafka.utils.Json
 import org.apache.kafka.common.security.auth.KafkaPrincipal
 import org.junit.{Assert, Test}
-import org.scalatest.junit.JUnitSuite
+import org.scalatestplus.junit.JUnitSuite
 
 import scala.collection.JavaConverters._
 
 class AclTest extends JUnitSuite {
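
The last two files get the same one-line fix: org.scalatest.junit.JUnitSuite was deprecated when ScalaTest moved its JUnit integration into the org.scalatestplus.junit package (around ScalaTest 3.0.8), so the old import emits exactly the kind of deprecation warning this commit cleans up. Only the package changes; the class behaves the same. A minimal suite against the new import (the test class and assertion are illustrative):

    import org.junit.Assert.assertEquals
    import org.junit.Test
    import org.scalatestplus.junit.JUnitSuite

    // Extending JUnitSuite lets JUnit 4 discover and run this Scala class.
    class SanityCheckTest extends JUnitSuite {
      @Test
      def additionWorks(): Unit = {
        assertEquals(4, 2 + 2)
      }
    }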
