Reviewers: Mickael Maison <mickael.maison@gmail.com>, Federico Valeri <fedevaleri@gmail.com>
Branch: pull/14069/head
Nikolay committed 1 year ago via GitHub
21 changed files with 408 additions and 168 deletions
@@ -1,137 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka.admin

import java.io.PrintStream
import java.util.Properties
import kafka.common.AdminCommandFailedException
import kafka.utils.json.JsonValue
import kafka.utils.{CoreUtils, Json}
import org.apache.kafka.clients.admin.{Admin, RecordsToDelete}
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.server.util.{CommandDefaultOptions, CommandLineUtils}

import scala.jdk.CollectionConverters._
import scala.collection.Seq

/**
 * A command for delete records of the given partitions down to the specified offset.
 */
object DeleteRecordsCommand {

  private[admin] val EarliestVersion = 1

  def main(args: Array[String]): Unit = {
    execute(args, System.out)
  }

  def parseOffsetJsonStringWithoutDedup(jsonData: String): Seq[(TopicPartition, Long)] = {
    Json.parseFull(jsonData) match {
      case Some(js) =>
        val version = js.asJsonObject.get("version") match {
          case Some(jsonValue) => jsonValue.to[Int]
          case None => EarliestVersion
        }
        parseJsonData(version, js)
      case None => throw new AdminOperationException("The input string is not a valid JSON")
    }
  }

  def parseJsonData(version: Int, js: JsonValue): Seq[(TopicPartition, Long)] = {
    version match {
      case 1 =>
        js.asJsonObject.get("partitions") match {
          case Some(partitions) =>
            partitions.asJsonArray.iterator.map(_.asJsonObject).map { partitionJs =>
              val topic = partitionJs("topic").to[String]
              val partition = partitionJs("partition").to[Int]
              val offset = partitionJs("offset").to[Long]
              new TopicPartition(topic, partition) -> offset
            }.toBuffer
          case _ => throw new AdminOperationException("Missing partitions field");
        }
      case _ => throw new AdminOperationException(s"Not supported version field value $version")
    }
  }

  def execute(args: Array[String], out: PrintStream): Unit = {
    val opts = new DeleteRecordsCommandOptions(args)
    val adminClient = createAdminClient(opts)
    val offsetJsonFile = opts.options.valueOf(opts.offsetJsonFileOpt)
    val offsetJsonString = Utils.readFileAsString(offsetJsonFile)
    val offsetSeq = parseOffsetJsonStringWithoutDedup(offsetJsonString)

    val duplicatePartitions = CoreUtils.duplicates(offsetSeq.map { case (tp, _) => tp })
    if (duplicatePartitions.nonEmpty)
      throw new AdminCommandFailedException("Offset json file contains duplicate topic partitions: %s".format(duplicatePartitions.mkString(",")))

    val recordsToDelete = offsetSeq.map { case (topicPartition, offset) =>
      (topicPartition, RecordsToDelete.beforeOffset(offset))
    }.toMap.asJava

    out.println("Executing records delete operation")
    val deleteRecordsResult = adminClient.deleteRecords(recordsToDelete)
    out.println("Records delete operation completed:")

    deleteRecordsResult.lowWatermarks.forEach { (tp, partitionResult) =>
      try out.println(s"partition: $tp\tlow_watermark: ${partitionResult.get.lowWatermark}")
      catch {
        case e: Exception => out.println(s"partition: $tp\terror: ${e.getMessage}")
      }
    }

    adminClient.close()
  }

  private def createAdminClient(opts: DeleteRecordsCommandOptions): Admin = {
    val props = if (opts.options.has(opts.commandConfigOpt))
      Utils.loadProps(opts.options.valueOf(opts.commandConfigOpt))
    else
      new Properties()
    props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, opts.options.valueOf(opts.bootstrapServerOpt))
    Admin.create(props)
  }

  class DeleteRecordsCommandOptions(args: Array[String]) extends CommandDefaultOptions(args) {
    val BootstrapServerDoc = "REQUIRED: The server to connect to."
    val offsetJsonFileDoc = "REQUIRED: The JSON file with offset per partition. The format to use is:\n" +
      "{\"partitions\":\n [{\"topic\": \"foo\", \"partition\": 1, \"offset\": 1}],\n \"version\":1\n}"
    val CommandConfigDoc = "A property file containing configs to be passed to Admin Client."

    val bootstrapServerOpt = parser.accepts("bootstrap-server", BootstrapServerDoc)
      .withRequiredArg
      .describedAs("server(s) to use for bootstrapping")
      .ofType(classOf[String])
    val offsetJsonFileOpt = parser.accepts("offset-json-file", offsetJsonFileDoc)
      .withRequiredArg
      .describedAs("Offset json file path")
      .ofType(classOf[String])
    val commandConfigOpt = parser.accepts("command-config", CommandConfigDoc)
      .withRequiredArg
      .describedAs("command config property file path")
      .ofType(classOf[String])

    options = parser.parse(args : _*)

    CommandLineUtils.maybePrintHelpOrVersion(this, "This tool helps to delete records of the given partitions down to the specified offset.")

    CommandLineUtils.checkRequiredArgs(parser, options, bootstrapServerOpt, offsetJsonFileOpt)
  }
}
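Both the removed Scala tool above and the Java rewrite below delegate to the same Admin client call: Admin#deleteRecords with RecordsToDelete.beforeOffset, followed by reading each partition's resulting low watermark. A minimal standalone sketch of that underlying call, with the bootstrap address, topic, partition and offset as illustrative placeholders:

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DeleteRecordsResult;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

public class DeleteRecordsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Illustrative bootstrap address; point this at a real broker.
        props.put("bootstrap.servers", "localhost:9092");

        try (Admin admin = Admin.create(props)) {
            // Delete all records before offset 1 in partition 0 of topic "foo" (illustrative values).
            Map<TopicPartition, RecordsToDelete> request = Collections.singletonMap(
                new TopicPartition("foo", 0), RecordsToDelete.beforeOffset(1L));

            DeleteRecordsResult result = admin.deleteRecords(request);

            // Each partition's future resolves to the new low watermark once the delete is applied.
            result.lowWatermarks().forEach((tp, future) -> {
                try {
                    System.out.println(tp + "\tlow_watermark: " + future.get().lowWatermark());
                } catch (Exception e) {
                    System.out.println(tp + "\terror: " + e.getMessage());
                }
            });
        }
    }
}
```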
@@ -0,0 +1,183 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.tools;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonMappingException;
import joptsimple.OptionSpec;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DeleteRecordsResult;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.server.common.AdminCommandFailedException;
import org.apache.kafka.server.common.AdminOperationException;
import org.apache.kafka.server.util.CommandDefaultOptions;
import org.apache.kafka.server.util.CommandLineUtils;
import org.apache.kafka.server.util.Json;
import org.apache.kafka.server.util.json.DecodeJson;
import org.apache.kafka.server.util.json.JsonObject;
import org.apache.kafka.server.util.json.JsonValue;

import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.StringJoiner;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;

/**
 * A command for delete records of the given partitions down to the specified offset.
 */
public class DeleteRecordsCommand {
    private static final int EARLIEST_VERSION = 1;

    private static final DecodeJson.DecodeInteger INT = new DecodeJson.DecodeInteger();

    private static final DecodeJson.DecodeLong LONG = new DecodeJson.DecodeLong();

    private static final DecodeJson.DecodeString STRING = new DecodeJson.DecodeString();

    public static void main(String[] args) throws Exception {
        execute(args, System.out);
    }

    static Map<TopicPartition, List<Long>> parseOffsetJsonStringWithoutDedup(String jsonData) throws JsonProcessingException {
        JsonValue js = Json.parseFull(jsonData)
            .orElseThrow(() -> new AdminOperationException("The input string is not a valid JSON"));

        Optional<JsonValue> version = js.asJsonObject().get("version");

        return parseJsonData(version.isPresent() ? version.get().to(INT) : EARLIEST_VERSION, js);
    }

    private static Map<TopicPartition, List<Long>> parseJsonData(int version, JsonValue js) throws JsonMappingException {
        if (version == 1) {
            JsonValue partitions = js.asJsonObject().get("partitions")
                .orElseThrow(() -> new AdminOperationException("Missing partitions field"));

            Map<TopicPartition, List<Long>> res = new HashMap<>();

            Iterator<JsonValue> iterator = partitions.asJsonArray().iterator();

            while (iterator.hasNext()) {
                JsonObject partitionJs = iterator.next().asJsonObject();

                String topic = partitionJs.apply("topic").to(STRING);
                int partition = partitionJs.apply("partition").to(INT);
                long offset = partitionJs.apply("offset").to(LONG);

                res.computeIfAbsent(new TopicPartition(topic, partition), k -> new ArrayList<>()).add(offset);
            }

            return res;
        }

        throw new AdminOperationException("Not supported version field value " + version);
    }

    public static void execute(String[] args, PrintStream out) throws IOException {
        DeleteRecordsCommandOptions opts = new DeleteRecordsCommandOptions(args);

        try (Admin adminClient = createAdminClient(opts)) {
            execute(adminClient, Utils.readFileAsString(opts.options.valueOf(opts.offsetJsonFileOpt)), out);
        }
    }

    static void execute(Admin adminClient, String offsetJsonString, PrintStream out) throws JsonProcessingException {
        Map<TopicPartition, List<Long>> offsetSeq = parseOffsetJsonStringWithoutDedup(offsetJsonString);

        Set<TopicPartition> duplicatePartitions = offsetSeq.entrySet().stream()
            .filter(e -> e.getValue().size() > 1)
            .map(Map.Entry::getKey)
            .collect(Collectors.toSet());

        if (!duplicatePartitions.isEmpty()) {
            StringJoiner duplicates = new StringJoiner(",");
            duplicatePartitions.forEach(tp -> duplicates.add(tp.toString()));
            throw new AdminCommandFailedException(
                String.format("Offset json file contains duplicate topic partitions: %s", duplicates)
            );
        }

        Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();

        for (Map.Entry<TopicPartition, List<Long>> e : offsetSeq.entrySet())
            recordsToDelete.put(e.getKey(), RecordsToDelete.beforeOffset(e.getValue().get(0)));

        out.println("Executing records delete operation");
        DeleteRecordsResult deleteRecordsResult = adminClient.deleteRecords(recordsToDelete);
        out.println("Records delete operation completed:");

        deleteRecordsResult.lowWatermarks().forEach((tp, partitionResult) -> {
            try {
                out.printf("partition: %s\tlow_watermark: %s%n", tp, partitionResult.get().lowWatermark());
            } catch (InterruptedException | ExecutionException e) {
                out.printf("partition: %s\terror: %s%n", tp, e.getMessage());
            }
        });
    }

    private static Admin createAdminClient(DeleteRecordsCommandOptions opts) throws IOException {
        Properties props = opts.options.has(opts.commandConfigOpt)
            ? Utils.loadProps(opts.options.valueOf(opts.commandConfigOpt))
            : new Properties();
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, opts.options.valueOf(opts.bootstrapServerOpt));
        return Admin.create(props);
    }

    private static class DeleteRecordsCommandOptions extends CommandDefaultOptions {
        private final OptionSpec<String> bootstrapServerOpt;
        private final OptionSpec<String> offsetJsonFileOpt;
        private final OptionSpec<String> commandConfigOpt;

        public DeleteRecordsCommandOptions(String[] args) {
            super(args);

            bootstrapServerOpt = parser.accepts("bootstrap-server", "REQUIRED: The server to connect to.")
                .withRequiredArg()
                .describedAs("server(s) to use for bootstrapping")
                .ofType(String.class);

            offsetJsonFileOpt = parser.accepts("offset-json-file", "REQUIRED: The JSON file with offset per partition. " +
                    "The format to use is:\n" +
                    "{\"partitions\":\n [{\"topic\": \"foo\", \"partition\": 1, \"offset\": 1}],\n \"version\":1\n}")
                .withRequiredArg()
                .describedAs("Offset json file path")
                .ofType(String.class);

            commandConfigOpt = parser.accepts("command-config", "A property file containing configs to be passed to Admin Client.")
                .withRequiredArg()
                .describedAs("command config property file path")
                .ofType(String.class);

            options = parser.parse(args);

            CommandLineUtils.maybePrintHelpOrVersion(this, "This tool helps to delete records of the given partitions down to the specified offset.");

            CommandLineUtils.checkRequiredArgs(parser, options, bootstrapServerOpt, offsetJsonFileOpt);
        }
    }
}
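The rewritten tool keeps the same command-line contract as the Scala version: --bootstrap-server and --offset-json-file are required, --command-config is optional, and the JSON file lists a topic, partition and offset per entry. A minimal sketch of driving it through its public entry point; the broker address and file path are placeholders:

```java
import org.apache.kafka.tools.DeleteRecordsCommand;

public class DeleteRecordsCommandUsage {
    public static void main(String[] args) throws Exception {
        // The referenced file is a placeholder; per the option docs it should contain
        // something like: {"partitions": [{"topic": "foo", "partition": 1, "offset": 1}], "version": 1}
        DeleteRecordsCommand.main(new String[]{
            "--bootstrap-server", "localhost:9092",
            "--offset-json-file", "/tmp/delete-records.json"
        });
    }
}
```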
@@ -0,0 +1,182 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.tools;

import com.fasterxml.jackson.core.JsonProcessingException;
import kafka.test.ClusterInstance;
import kafka.test.annotation.ClusterTest;
import kafka.test.annotation.ClusterTestDefaults;
import kafka.test.annotation.Type;
import kafka.test.junit.ClusterTestExtensions;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.server.common.AdminCommandFailedException;
import org.apache.kafka.server.common.AdminOperationException;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;

import java.io.IOException;
import java.nio.file.NoSuchFileException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

@ExtendWith(value = ClusterTestExtensions.class)
@ClusterTestDefaults(clusterType = Type.ALL)
@Tag("integration")
public class DeleteRecordsCommandTest {

    private final ClusterInstance cluster;

    public DeleteRecordsCommandTest(ClusterInstance cluster) {
        this.cluster = cluster;
    }

    @ClusterTest
    public void testCommand() throws Exception {
        Properties adminProps = new Properties();

        adminProps.put(AdminClientConfig.RETRIES_CONFIG, 1);

        try (Admin admin = cluster.createAdminClient(adminProps)) {
            assertThrows(
                AdminCommandFailedException.class,
                () -> DeleteRecordsCommand.execute(admin, "{\"partitions\":[" +
                    "{\"topic\":\"t\", \"partition\":0, \"offset\":1}," +
                    "{\"topic\":\"t\", \"partition\":0, \"offset\":1}]" +
                    "}", System.out),
                "Offset json file contains duplicate topic partitions: t-0"
            );

            admin.createTopics(Collections.singleton(new NewTopic("t", 1, (short) 1))).all().get();

            Properties props = new Properties();

            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers());
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

            try (KafkaProducer<?, String> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("t", "1")).get();
                producer.send(new ProducerRecord<>("t", "2")).get();
                producer.send(new ProducerRecord<>("t", "3")).get();
            }

            executeAndAssertOutput(
                "{\"partitions\":[{\"topic\":\"t\", \"partition\":0, \"offset\":1}]}",
                "partition: t-0\tlow_watermark: 1",
                admin
            );

            executeAndAssertOutput(
                "{\"partitions\":[{\"topic\":\"t\", \"partition\":42, \"offset\":42}]}",
                "partition: t-42\terror",
                admin
            );
        }
    }

    private static void executeAndAssertOutput(String json, String expOut, Admin admin) {
        String output = ToolsTestUtils.captureStandardOut(() -> {
            try {
                DeleteRecordsCommand.execute(admin, json, System.out);
            } catch (JsonProcessingException e) {
                throw new RuntimeException(e);
            }
        });
        assertTrue(output.contains(expOut));
    }
}

/**
 * Unit test of {@link DeleteRecordsCommand} tool.
 */
class DeleteRecordsCommandUnitTest {
    @Test
    public void testOffsetFileNotExists() {
        assertThrows(IOException.class, () -> DeleteRecordsCommand.main(new String[]{
            "--bootstrap-server", "localhost:9092",
            "--offset-json-file", "/not/existing/file"
        }));
    }

    @Test
    public void testCommandConfigNotExists() {
        assertThrows(NoSuchFileException.class, () -> DeleteRecordsCommand.main(new String[] {
            "--bootstrap-server", "localhost:9092",
            "--offset-json-file", "/not/existing/file",
            "--command-config", "/another/not/existing/file"
        }));
    }

    @Test
    public void testWrongVersion() {
        assertCommandThrows(JsonProcessingException.class, "{\"version\":\"string\"}");
        assertCommandThrows(AdminOperationException.class, "{\"version\":2}");
    }

    @Test
    public void testWrongPartitions() {
        assertCommandThrows(AdminOperationException.class, "{\"version\":1}");
        assertCommandThrows(JsonProcessingException.class, "{\"partitions\":2}");
        assertCommandThrows(JsonProcessingException.class, "{\"partitions\":{}}");
        assertCommandThrows(JsonProcessingException.class, "{\"partitions\":[{}]}");
        assertCommandThrows(JsonProcessingException.class, "{\"partitions\":[{\"topic\":\"t\"}]}");
        assertCommandThrows(JsonProcessingException.class, "{\"partitions\":[{\"topic\":\"t\", \"partition\": \"\"}]}");
        assertCommandThrows(JsonProcessingException.class, "{\"partitions\":[{\"topic\":\"t\", \"partition\": 0}]}");
        assertCommandThrows(JsonProcessingException.class, "{\"partitions\":[{\"topic\":\"t\", \"offset\":0}]}");
    }

    @Test
    public void testParse() throws Exception {
        Map<TopicPartition, List<Long>> res = DeleteRecordsCommand.parseOffsetJsonStringWithoutDedup(
            "{\"partitions\":[" +
                "{\"topic\":\"t\", \"partition\":0, \"offset\":0}," +
                "{\"topic\":\"t\", \"partition\":1, \"offset\":1, \"ignored\":\"field\"}," +
                "{\"topic\":\"t\", \"partition\":0, \"offset\":2}," +
                "{\"topic\":\"t\", \"partition\":0, \"offset\":0}" +
            "]}"
        );

        assertEquals(2, res.size());
        assertEquals(Arrays.asList(0L, 2L, 0L), res.get(new TopicPartition("t", 0)));
        assertEquals(Collections.singletonList(1L), res.get(new TopicPartition("t", 1)));
    }

    /**
     * Asserts that {@link DeleteRecordsCommand#parseOffsetJsonStringWithoutDedup(String)} throws {@link AdminOperationException}.
     * @param jsonData Data to check.
     */
    private static void assertCommandThrows(Class<? extends Exception> expectedException, String jsonData) {
        assertThrows(
            expectedException,
            () -> DeleteRecordsCommand.parseOffsetJsonStringWithoutDedup(jsonData)
        );
    }
}