
KAFKA-3522: Add internal RecordConverter interface (#6150)

Reviewers: Bill Bejeck <bill@confluent.io>, Guozhang Wang <guozhang@confluent.io>
Matthias J. Sax, 6 years ago (committed by GitHub)
parent commit ed7b67dd11
Changed files:
  1. streams/src/main/java/org/apache/kafka/streams/processor/internals/DefaultRecordConverter.java (29 lines changed)
  2. streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImpl.java (19 lines changed)
  3. streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorStateManager.java (24 lines changed)
  4. streams/src/main/java/org/apache/kafka/streams/processor/internals/StateRestorer.java (13 lines changed)
  5. streams/src/main/java/org/apache/kafka/streams/state/RecordConverter.java (35 lines changed)
  6. streams/src/main/java/org/apache/kafka/streams/state/internals/WrappedStateStore.java (10 lines changed)
  7. streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java (216 lines changed)
  8. streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java (42 lines changed)
  9. streams/src/test/java/org/apache/kafka/streams/processor/internals/StateRestorerTest.java (6 lines changed)
  10. streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java (100 lines changed)

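The change follows the same pattern at every restore site: resolve a converter for each registered store, then run every fetched changelog record through it before the batch reaches the restore callback. The snippet below is a minimal, self-contained sketch of that resolution step; the class name RecordConverterSketch, the resolveConverter helper, and the nested interface copy are illustrative only and not part of the commit (the real interface is org.apache.kafka.streams.state.RecordConverter, added below). The production code additionally unwraps a WrappedStateStore to its inner store before the instanceof check.

import org.apache.kafka.clients.consumer.ConsumerRecord;

// Illustrative sketch only; RecordConverterSketch and resolveConverter are hypothetical names.
public final class RecordConverterSketch {

    // Local stand-in for the new internal interface: map a raw changelog record to the
    // byte format the state store expects before it reaches the restore callback.
    interface RecordConverter {
        ConsumerRecord<byte[], byte[]> convert(ConsumerRecord<byte[], byte[]> record);
    }

    // Identity conversion, the role DefaultRecordConverter plays in this commit.
    static final RecordConverter IDENTITY = record -> record;

    // Resolution used by GlobalStateManagerImpl and ProcessorStateManager below: a store
    // that implements RecordConverter converts its own records; anything else gets identity.
    static RecordConverter resolveConverter(final Object store) {
        return store instanceof RecordConverter ? (RecordConverter) store : IDENTITY;
    }

    private RecordConverterSketch() {
    }
}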
streams/src/main/java/org/apache/kafka/streams/processor/internals/DefaultRecordConverter.java (29 lines changed)

@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.processor.internals;
+
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.streams.state.RecordConverter;
+
+public class DefaultRecordConverter implements RecordConverter {
+
+    @Override
+    public ConsumerRecord<byte[], byte[]> convert(final ConsumerRecord<byte[], byte[]> record) {
+        return record;
+    }
+}

streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImpl.java (19 lines changed)

@@ -32,6 +32,8 @@ import org.apache.kafka.streams.errors.StreamsException;
 import org.apache.kafka.streams.processor.StateRestoreCallback;
 import org.apache.kafka.streams.processor.StateRestoreListener;
 import org.apache.kafka.streams.processor.StateStore;
+import org.apache.kafka.streams.state.RecordConverter;
+import org.apache.kafka.streams.state.internals.WrappedStateStore;
 import org.slf4j.Logger;
 import java.io.File;
@@ -195,7 +197,17 @@ public class GlobalStateManagerImpl extends AbstractStateManager implements Glob
            }
        }
        try {
-            restoreState(stateRestoreCallback, topicPartitions, highWatermarks, store.name());
+            final StateStore stateStore =
+                store instanceof WrappedStateStore ? ((WrappedStateStore) store).inner() : store;
+            final RecordConverter recordConverter =
+                stateStore instanceof RecordConverter ? (RecordConverter) stateStore : new DefaultRecordConverter();
+            restoreState(
+                stateRestoreCallback,
+                topicPartitions,
+                highWatermarks,
+                store.name(),
+                recordConverter);
            globalStores.put(store.name(), store);
        } finally {
            globalConsumer.unsubscribe();
@@ -249,7 +261,8 @@ public class GlobalStateManagerImpl extends AbstractStateManager implements Glob
    private void restoreState(final StateRestoreCallback stateRestoreCallback,
                              final List<TopicPartition> topicPartitions,
                              final Map<TopicPartition, Long> highWatermarks,
-                              final String storeName) {
+                              final String storeName,
+                              final RecordConverter recordConverter) {
        for (final TopicPartition topicPartition : topicPartitions) {
            globalConsumer.assign(Collections.singletonList(topicPartition));
            final Long checkpoint = checkpointableOffsets.get(topicPartition);
@@ -273,7 +286,7 @@ public class GlobalStateManagerImpl extends AbstractStateManager implements Glob
                final List<ConsumerRecord<byte[], byte[]>> restoreRecords = new ArrayList<>();
                for (final ConsumerRecord<byte[], byte[]> record : records.records(topicPartition)) {
                    if (record.key() != null) {
-                        restoreRecords.add(record);
+                        restoreRecords.add(recordConverter.convert(record));
                    }
                }
                offset = globalConsumer.position(topicPartition);

streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorStateManager.java (24 lines changed)

@@ -24,6 +24,8 @@ import org.apache.kafka.streams.processor.StateRestoreCallback;
 import org.apache.kafka.streams.processor.StateStore;
 import org.apache.kafka.streams.processor.TaskId;
 import org.apache.kafka.streams.state.internals.OffsetCheckpoint;
+import org.apache.kafka.streams.state.RecordConverter;
+import org.apache.kafka.streams.state.internals.WrappedStateStore;
 import org.slf4j.Logger;
 import java.io.File;
@@ -48,6 +50,7 @@ public class ProcessorStateManager extends AbstractStateManager {
    private final Map<TopicPartition, Long> offsetLimits;
    private final Map<TopicPartition, Long> standbyRestoredOffsets;
    private final Map<String, StateRestoreCallback> restoreCallbacks; // used for standby tasks, keyed by state topic name
+    private final Map<String, RecordConverter> recordConverters; // used for standby tasks, keyed by state topic name
    private final Map<String, String> storeToChangelogTopic;
    private final List<TopicPartition> changelogPartitions = new ArrayList<>();
@@ -82,6 +85,7 @@ public class ProcessorStateManager extends AbstractStateManager {
        standbyRestoredOffsets = new HashMap<>();
        this.isStandby = isStandby;
        restoreCallbacks = isStandby ? new HashMap<>() : null;
+        recordConverters = isStandby ? new HashMap<>() : null;
        this.storeToChangelogTopic = storeToChangelogTopic;
        // load the checkpoint information
@@ -129,18 +133,28 @@ public class ProcessorStateManager extends AbstractStateManager {
        final TopicPartition storePartition = new TopicPartition(topic, getPartition(topic));
+        final StateStore stateStore =
+            store instanceof WrappedStateStore ? ((WrappedStateStore) store).inner() : store;
+        final RecordConverter recordConverter =
+            stateStore instanceof RecordConverter ? (RecordConverter) stateStore : new DefaultRecordConverter();
        if (isStandby) {
            log.trace("Preparing standby replica of persistent state store {} with changelog topic {}", storeName, topic);
            restoreCallbacks.put(topic, stateRestoreCallback);
+            recordConverters.put(topic, recordConverter);
        } else {
            log.trace("Restoring state store {} from changelog topic {}", storeName, topic);
            final StateRestorer restorer = new StateRestorer(
                storePartition,
                new CompositeRestoreListener(stateRestoreCallback),
                checkpointableOffsets.get(storePartition),
                offsetLimit(storePartition),
                store.persistent(),
-                storeName);
+                storeName,
+                recordConverter
+            );
            changelogReader.register(restorer);
        }
@@ -181,8 +195,14 @@ public class ProcessorStateManager extends AbstractStateManager {
        final RecordBatchingStateRestoreCallback restoreCallback = adapt(restoreCallbacks.get(storePartition.topic()));
        if (!restoreRecords.isEmpty()) {
+            final RecordConverter converter = recordConverters.get(storePartition.topic());
+            final List<ConsumerRecord<byte[], byte[]>> convertedRecords = new ArrayList<>(restoreRecords.size());
+            for (final ConsumerRecord<byte[], byte[]> record : restoreRecords) {
+                convertedRecords.add(converter.convert(record));
+            }
            try {
-                restoreCallback.restoreBatch(restoreRecords);
+                restoreCallback.restoreBatch(convertedRecords);
            } catch (final Exception e) {
                throw new ProcessorStateException(String.format("%sException caught while trying to restore state from %s", logPrefix, storePartition), e);
            }

streams/src/main/java/org/apache/kafka/streams/processor/internals/StateRestorer.java (13 lines changed)

@@ -19,7 +19,9 @@ package org.apache.kafka.streams.processor.internals;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.streams.processor.StateRestoreListener;
+import org.apache.kafka.streams.state.RecordConverter;
+import java.util.ArrayList;
 import java.util.Collection;
 public class StateRestorer {
@@ -31,6 +33,7 @@ public class StateRestorer {
    private final String storeName;
    private final TopicPartition partition;
    private final CompositeRestoreListener compositeRestoreListener;
+    private final RecordConverter recordConverter;
    private long checkpointOffset;
    private long restoredOffset;
@@ -42,13 +45,15 @@ public class StateRestorer {
                  final Long checkpoint,
                  final long offsetLimit,
                  final boolean persistent,
-                  final String storeName) {
+                  final String storeName,
+                  final RecordConverter recordConverter) {
        this.partition = partition;
        this.compositeRestoreListener = compositeRestoreListener;
        this.checkpointOffset = checkpoint == null ? NO_CHECKPOINT : checkpoint;
        this.offsetLimit = offsetLimit;
        this.persistent = persistent;
        this.storeName = storeName;
+        this.recordConverter = recordConverter;
    }
    public TopicPartition partition() {
@@ -80,7 +85,11 @@
    }
    void restore(final Collection<ConsumerRecord<byte[], byte[]>> records) {
-        compositeRestoreListener.restoreBatch(records);
+        final Collection<ConsumerRecord<byte[], byte[]>> convertedRecords = new ArrayList<>(records.size());
+        for (final ConsumerRecord<byte[], byte[]> record : records) {
+            convertedRecords.add(recordConverter.convert(record));
+        }
+        compositeRestoreListener.restoreBatch(convertedRecords);
    }
    boolean isPersistent() {

streams/src/main/java/org/apache/kafka/streams/state/RecordConverter.java (35 lines changed)

@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state;
+
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.streams.KeyValue;
+
+/**
+ * {@code RecordConverter} translates a {@link ConsumerRecord} into a {@link KeyValue} pair.
+ */
+public interface RecordConverter {
+
+    /**
+     * Convert a given record into a key-value pair.
+     *
+     * @param record the consumer record
+     * @return the record as key-value pair
+     */
+    ConsumerRecord<byte[], byte[]> convert(final ConsumerRecord<byte[], byte[]> record);
+}
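To make the contract above concrete, here is a hypothetical implementation (not part of this commit): a converter that prepends the changelog record's 8-byte timestamp to the value, the kind of translation a store whose local format is "timestamp + value" would need while restoring from a plain changelog. The class name TimestampPrependingConverter and the byte layout are assumptions for illustration only.

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.state.RecordConverter;

import java.nio.ByteBuffer;

// Hypothetical example, not part of this commit: prepend the record timestamp (8 bytes) to the
// value so a store expecting a "timestamp + value" layout can restore from a plain changelog.
public class TimestampPrependingConverter implements RecordConverter {

    @Override
    public ConsumerRecord<byte[], byte[]> convert(final ConsumerRecord<byte[], byte[]> record) {
        final byte[] value = record.value();
        final byte[] converted = value == null
            ? null
            : ByteBuffer.allocate(8 + value.length).putLong(record.timestamp()).put(value).array();
        // Same ConsumerRecord constructor the tests in this commit use: topic, partition, offset, key, value.
        return new ConsumerRecord<>(record.topic(), record.partition(), record.offset(), record.key(), converted);
    }
}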

streams/src/main/java/org/apache/kafka/streams/state/internals/WrappedStateStore.java (10 lines changed)

@@ -16,9 +16,11 @@
  */
 package org.apache.kafka.streams.state.internals;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.streams.errors.InvalidStateStoreException;
 import org.apache.kafka.streams.processor.ProcessorContext;
 import org.apache.kafka.streams.processor.StateStore;
+import org.apache.kafka.streams.state.RecordConverter;
 /**
  * A storage engine wrapper for utilities like logging, caching, and metering.
@@ -38,7 +40,7 @@ public interface WrappedStateStore extends StateStore {
     */
    StateStore wrappedStore();
-    abstract class AbstractStateStore implements WrappedStateStore {
+    abstract class AbstractStateStore implements WrappedStateStore, RecordConverter {
        final StateStore innerState;
        protected AbstractStateStore(final StateStore inner) {
@@ -94,5 +96,11 @@ public interface WrappedStateStore extends StateStore {
        public StateStore wrappedStore() {
            return innerState;
        }
+
+        @Override
+        public ConsumerRecord<byte[], byte[]> convert(final ConsumerRecord<byte[], byte[]> record) {
+            return ((RecordConverter) innerState).convert(record);
+        }
    }
 }

streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java (216 lines changed)

@@ -31,8 +31,12 @@ import org.apache.kafka.streams.StreamsConfig;
 import org.apache.kafka.streams.errors.LockException;
 import org.apache.kafka.streams.errors.ProcessorStateException;
 import org.apache.kafka.streams.errors.StreamsException;
+import org.apache.kafka.streams.processor.ProcessorContext;
 import org.apache.kafka.streams.processor.StateRestoreCallback;
+import org.apache.kafka.streams.processor.StateStore;
 import org.apache.kafka.streams.state.internals.OffsetCheckpoint;
+import org.apache.kafka.streams.state.RecordConverter;
+import org.apache.kafka.streams.state.internals.WrappedStateStore;
 import org.apache.kafka.test.InternalMockProcessorContext;
 import org.apache.kafka.test.MockStateRestoreListener;
 import org.apache.kafka.test.NoOpReadOnlyStore;
@@ -90,7 +94,7 @@ public class GlobalStateManagerImplTest {
    private InternalMockProcessorContext processorContext;
    @Before
-    public void before() throws IOException {
+    public void before() {
        final Map<String, String> storeToTopic = new HashMap<>();
        storeToTopic.put(storeName1, t1.topic());
@@ -99,7 +103,7 @@ public class GlobalStateManagerImplTest {
        storeToTopic.put(storeName4, t4.topic());
        store1 = new NoOpReadOnlyStore<>(storeName1, true);
-        store2 = new NoOpReadOnlyStore<>(storeName2, true);
+        store2 = new ConverterStore<>(storeName2, true);
        store3 = new NoOpReadOnlyStore<>(storeName3);
        store4 = new NoOpReadOnlyStore<>(storeName4);
@@ -198,7 +202,7 @@ public class GlobalStateManagerImplTest {
    @Test
    public void shouldThrowIllegalArgumentExceptionIfAttemptingToRegisterStoreTwice() {
        stateManager.initialize();
-        initializeConsumer(2, 1, t1);
+        initializeConsumer(2, 0, t1);
        stateManager.register(store1, stateRestoreCallback);
        try {
            stateManager.register(store1, stateRestoreCallback);
@@ -219,9 +223,138 @@
        }
    }
+    @Test
+    public void shouldUseDefaultRecordConverterIfStoreDoesNotImplementRecordConverter() {
+        initializeConsumer(1, 0, t1);
+        stateManager.initialize();
+        stateManager.register(store1, stateRestoreCallback);
+        final KeyValue<byte[], byte[]> restoredRecord = stateRestoreCallback.restored.get(0);
+        assertEquals(3, restoredRecord.key.length);
+        assertEquals(5, restoredRecord.value.length);
+    }
+    @Test
+    public void shouldUseDefaultRecordConverterIfInnerStoreDoesNotImplementRecordConverter() {
+        initializeConsumer(1, 0, t1);
+        stateManager.initialize();
+        stateManager.register(new WrappedStateStore() {
+            @Override
+            public StateStore inner() {
+                return store1;
+            }
+            @Override
+            public StateStore wrappedStore() {
+                return store1;
+            }
+            @Override
+            public String name() {
+                return store1.name();
+            }
+            @Override
+            public void init(final ProcessorContext context, final StateStore root) {
+                store1.init(context, root);
+            }
+            @Override
+            public void flush() {
+                store1.flush();
+            }
+            @Override
+            public void close() {
+                store1.close();
+            }
+            @Override
+            public boolean persistent() {
+                return store1.persistent();
+            }
+            @Override
+            public boolean isOpen() {
+                return store1.isOpen();
+            }
+        }, stateRestoreCallback);
+        final KeyValue<byte[], byte[]> restoredRecord = stateRestoreCallback.restored.get(0);
+        assertEquals(3, restoredRecord.key.length);
+        assertEquals(5, restoredRecord.value.length);
+    }
+    @Test
+    public void shouldUseStoreAsRecordConverterIfStoreImplementsRecordConverter() {
+        initializeConsumer(1, 0, t2);
+        stateManager.initialize();
+        stateManager.register(store2, stateRestoreCallback);
+        final KeyValue<byte[], byte[]> restoredRecord = stateRestoreCallback.restored.get(0);
+        assertEquals(0, restoredRecord.key.length);
+        assertEquals(0, restoredRecord.value.length);
+    }
+    @Test
+    public void shouldUseStoreAsRecordConverterIfInnerStoreImplementsRecordConverter() {
+        initializeConsumer(1, 0, t2);
+        stateManager.initialize();
+        stateManager.register(new WrappedStateStore() {
+            @Override
+            public StateStore inner() {
+                return store2;
+            }
+            @Override
+            public StateStore wrappedStore() {
+                return store2;
+            }
+            @Override
+            public String name() {
+                return store2.name();
+            }
+            @Override
+            public void init(final ProcessorContext context, final StateStore root) {
+                store2.init(context, root);
+            }
+            @Override
+            public void flush() {
+                store2.flush();
+            }
+            @Override
+            public void close() {
+                store2.close();
+            }
+            @Override
+            public boolean persistent() {
+                return store2.persistent();
+            }
+            @Override
+            public boolean isOpen() {
+                return store2.isOpen();
+            }
+        }, stateRestoreCallback);
+        final KeyValue<byte[], byte[]> restoredRecord = stateRestoreCallback.restored.get(0);
+        assertEquals(0, restoredRecord.key.length);
+        assertEquals(0, restoredRecord.value.length);
+    }
    @Test
    public void shouldRestoreRecordsUpToHighwatermark() {
-        initializeConsumer(2, 1, t1);
+        initializeConsumer(2, 0, t1);
        stateManager.initialize();
@@ -231,7 +364,7 @@
    @Test
    public void shouldRecoverFromInvalidOffsetExceptionAndRestoreRecords() {
-        initializeConsumer(2, 1, t1);
+        initializeConsumer(2, 0, t1);
        consumer.setException(new InvalidOffsetException("Try Again!") {
            public Set<TopicPartition> partitions() {
                return Collections.singleton(t1);
@@ -252,7 +385,7 @@
        stateManager.register(store1, stateRestoreCallback);
        assertThat(stateRestoreListener.restoreStartOffset, equalTo(1L));
-        assertThat(stateRestoreListener.restoreEndOffset, equalTo(5L));
+        assertThat(stateRestoreListener.restoreEndOffset, equalTo(6L));
        assertThat(stateRestoreListener.totalNumRestored, equalTo(5L));
@@ -263,11 +396,11 @@
    @Test
    public void shouldRestoreRecordsFromCheckpointToHighwatermark() throws IOException {
-        initializeConsumer(5, 6, t1);
+        initializeConsumer(5, 5, t1);
        final OffsetCheckpoint offsetCheckpoint = new OffsetCheckpoint(new File(stateManager.baseDir(),
            ProcessorStateManager.CHECKPOINT_FILE_NAME));
-        offsetCheckpoint.write(Collections.singletonMap(t1, 6L));
+        offsetCheckpoint.write(Collections.singletonMap(t1, 5L));
        stateManager.initialize();
        stateManager.register(store1, stateRestoreCallback);
@@ -279,9 +412,9 @@
    public void shouldFlushStateStores() {
        stateManager.initialize();
        // register the stores
-        initializeConsumer(1, 1, t1);
+        initializeConsumer(1, 0, t1);
        stateManager.register(store1, stateRestoreCallback);
-        initializeConsumer(1, 1, t2);
+        initializeConsumer(1, 0, t2);
        stateManager.register(store2, stateRestoreCallback);
        stateManager.flush();
@@ -293,7 +426,7 @@
    public void shouldThrowProcessorStateStoreExceptionIfStoreFlushFailed() {
        stateManager.initialize();
        // register the stores
-        initializeConsumer(1, 1, t1);
+        initializeConsumer(1, 0, t1);
        stateManager.register(new NoOpReadOnlyStore(store1.name()) {
            @Override
            public void flush() {
@@ -308,12 +441,12 @@
    public void shouldCloseStateStores() throws IOException {
        stateManager.initialize();
        // register the stores
-        initializeConsumer(1, 1, t1);
+        initializeConsumer(1, 0, t1);
        stateManager.register(store1, stateRestoreCallback);
-        initializeConsumer(1, 1, t2);
+        initializeConsumer(1, 0, t2);
        stateManager.register(store2, stateRestoreCallback);
-        stateManager.close(Collections.<TopicPartition, Long>emptyMap());
+        stateManager.close(Collections.emptyMap());
        assertFalse(store1.isOpen());
        assertFalse(store2.isOpen());
    }
@@ -321,7 +454,7 @@
    @Test
    public void shouldWriteCheckpointsOnClose() throws IOException {
        stateManager.initialize();
-        initializeConsumer(1, 1, t1);
+        initializeConsumer(1, 0, t1);
        stateManager.register(store1, stateRestoreCallback);
        final Map<TopicPartition, Long> expected = Collections.singletonMap(t1, 25L);
        stateManager.close(expected);
@@ -332,7 +465,7 @@
    @Test(expected = ProcessorStateException.class)
    public void shouldThrowProcessorStateStoreExceptionIfStoreCloseFailed() throws IOException {
        stateManager.initialize();
-        initializeConsumer(1, 1, t1);
+        initializeConsumer(1, 0, t1);
        stateManager.register(new NoOpReadOnlyStore(store1.name()) {
            @Override
            public void close() {
@@ -340,7 +473,7 @@
            }
        }, stateRestoreCallback);
-        stateManager.close(Collections.<TopicPartition, Long>emptyMap());
+        stateManager.close(Collections.emptyMap());
    }
    @Test
@@ -370,7 +503,7 @@
    @Test
    public void shouldNotCloseStoresIfCloseAlreadyCalled() throws IOException {
        stateManager.initialize();
-        initializeConsumer(1, 1, t1);
+        initializeConsumer(1, 0, t1);
        stateManager.register(new NoOpReadOnlyStore("t1-store") {
            @Override
            public void close() {
@@ -380,17 +513,15 @@
                super.close();
            }
        }, stateRestoreCallback);
-        stateManager.close(Collections.<TopicPartition, Long>emptyMap());
-        stateManager.close(Collections.<TopicPartition, Long>emptyMap());
+        stateManager.close(Collections.emptyMap());
+        stateManager.close(Collections.emptyMap());
    }
    @Test
    public void shouldAttemptToCloseAllStoresEvenWhenSomeException() throws IOException {
        stateManager.initialize();
-        initializeConsumer(1, 1, t1);
-        initializeConsumer(1, 1, t2);
+        initializeConsumer(1, 0, t1);
        final NoOpReadOnlyStore store = new NoOpReadOnlyStore("t1-store") {
            @Override
            public void close() {
@@ -400,10 +531,11 @@
        };
        stateManager.register(store, stateRestoreCallback);
+        initializeConsumer(1, 0, t2);
        stateManager.register(store2, stateRestoreCallback);
        try {
-            stateManager.close(Collections.<TopicPartition, Long>emptyMap());
+            stateManager.close(Collections.emptyMap());
        } catch (final ProcessorStateException e) {
            // expected
        }
@@ -443,9 +575,9 @@
    @Test
    public void shouldNotRemoveOffsetsOfUnUpdatedTablesDuringCheckpoint() {
        stateManager.initialize();
-        initializeConsumer(10, 1, t1);
+        initializeConsumer(10, 0, t1);
        stateManager.register(store1, stateRestoreCallback);
-        initializeConsumer(20, 1, t2);
+        initializeConsumer(20, 0, t2);
        stateManager.register(store2, stateRestoreCallback);
        final Map<TopicPartition, Long> initialCheckpoint = stateManager.checkpointed();
@@ -461,12 +593,12 @@
        final HashMap<TopicPartition, Long> startOffsets = new HashMap<>();
        startOffsets.put(t1, 1L);
        final HashMap<TopicPartition, Long> endOffsets = new HashMap<>();
-        endOffsets.put(t1, 2L);
+        endOffsets.put(t1, 3L);
        consumer.updatePartitions(t1.topic(), Collections.singletonList(new PartitionInfo(t1.topic(), t1.partition(), null, null, null)));
        consumer.assign(Collections.singletonList(t1));
        consumer.updateEndOffsets(endOffsets);
        consumer.updateBeginningOffsets(startOffsets);
-        consumer.addRecord(new ConsumerRecord<>(t1.topic(), t1.partition(), 1, (byte[]) null, "null".getBytes()));
+        consumer.addRecord(new ConsumerRecord<>(t1.topic(), t1.partition(), 1, null, "null".getBytes()));
        final byte[] expectedKey = "key".getBytes();
        final byte[] expectedValue = "value".getBytes();
        consumer.addRecord(new ConsumerRecord<>(t1.topic(), t1.partition(), 2, expectedKey, expectedValue));
@@ -480,19 +612,19 @@
    @Test
    public void shouldCheckpointRestoredOffsetsToFile() throws IOException {
        stateManager.initialize();
-        initializeConsumer(10, 1, t1);
+        initializeConsumer(10, 0, t1);
        stateManager.register(store1, stateRestoreCallback);
-        stateManager.close(Collections.<TopicPartition, Long>emptyMap());
+        stateManager.close(Collections.emptyMap());
        final Map<TopicPartition, Long> checkpointMap = stateManager.checkpointed();
-        assertThat(checkpointMap, equalTo(Collections.singletonMap(t1, 11L)));
+        assertThat(checkpointMap, equalTo(Collections.singletonMap(t1, 10L)));
        assertThat(readOffsetsCheckpoint(), equalTo(checkpointMap));
    }
    @Test
    public void shouldSkipGlobalInMemoryStoreOffsetsToFile() throws IOException {
        stateManager.initialize();
-        initializeConsumer(10, 1, t3);
+        initializeConsumer(10, 0, t3);
        stateManager.register(store3, stateRestoreCallback);
        stateManager.close(Collections.emptyMap());
@@ -666,9 +798,9 @@
    private void initializeConsumer(final long numRecords, final long startOffset, final TopicPartition topicPartition) {
        final HashMap<TopicPartition, Long> startOffsets = new HashMap<>();
-        startOffsets.put(topicPartition, 1L);
+        startOffsets.put(topicPartition, startOffset);
        final HashMap<TopicPartition, Long> endOffsets = new HashMap<>();
-        endOffsets.put(topicPartition, startOffset + numRecords - 1);
+        endOffsets.put(topicPartition, startOffset + numRecords);
        consumer.updatePartitions(topicPartition.topic(), Collections.singletonList(new PartitionInfo(topicPartition.topic(), topicPartition.partition(), null, null, null)));
        consumer.assign(Collections.singletonList(topicPartition));
        consumer.updateEndOffsets(endOffsets);
@@ -694,4 +826,20 @@
            restored.add(KeyValue.pair(key, value));
        }
    }
+    private class ConverterStore<K, V> extends NoOpReadOnlyStore<K, V> implements RecordConverter {
+        ConverterStore(final String name,
+                       final boolean rocksdbStore) {
+            super(name, rocksdbStore);
+        }
+        @Override
+        public ConsumerRecord<byte[], byte[]> convert(final ConsumerRecord<byte[], byte[]> record) {
+            return new ConsumerRecord<>("", 0, 0L, "".getBytes(), "".getBytes());
+        }
+    }
 }

streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java (42 lines changed)

@@ -30,6 +30,7 @@ import org.apache.kafka.streams.processor.StateStore;
 import org.apache.kafka.streams.processor.TaskId;
 import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender;
 import org.apache.kafka.streams.state.internals.OffsetCheckpoint;
+import org.apache.kafka.streams.state.RecordConverter;
 import org.apache.kafka.test.MockBatchingStateRestoreListener;
 import org.apache.kafka.test.MockKeyValueStore;
 import org.apache.kafka.test.NoOpProcessorContext;
@@ -157,6 +158,28 @@
        }
    }
+    @Test
+    public void shouldConvertDataOnRestoreIfStoreImplementsRecordConverter() throws Exception {
+        final TaskId taskId = new TaskId(0, 2);
+        final Integer intKey = 2;
+        final MockKeyValueStore persistentStore = getConverterStore();
+        final ProcessorStateManager stateMgr = getStandByStateManager(taskId);
+        try {
+            stateMgr.register(persistentStore, persistentStore.stateRestoreCallback);
+            stateMgr.updateStandbyStates(
+                persistentStorePartition,
+                singletonList(consumerRecord),
+                consumerRecord.offset()
+            );
+            assertThat(persistentStore.keys.size(), is(1));
+            assertTrue(persistentStore.keys.contains(intKey));
+        } finally {
+            stateMgr.close(Collections.emptyMap());
+        }
+    }
    @Test
    public void testRegisterPersistentStore() throws IOException {
        final TaskId taskId = new TaskId(0, 2);
@@ -770,4 +793,23 @@
        return new MockKeyValueStore("persistentStore", true);
    }
+    private MockKeyValueStore getConverterStore() {
+        return new ConverterStore("persistentStore", true);
+    }
+    private class ConverterStore extends MockKeyValueStore implements RecordConverter {
+        ConverterStore(final String name,
+                       final boolean persistent) {
+            super(name, persistent);
+        }
+        @Override
+        public ConsumerRecord<byte[], byte[]> convert(final ConsumerRecord<byte[], byte[]> record) {
+            return new ConsumerRecord<>("", 0, 0L, new byte[]{0x0, 0x0, 0x0, 0x2}, "".getBytes());
+        }
+    }
 }

streams/src/test/java/org/apache/kafka/streams/processor/internals/StateRestorerTest.java (6 lines changed)

@@ -41,7 +41,8 @@
        null,
        OFFSET_LIMIT,
        true,
-        "storeName");
+        "storeName",
+        new DefaultRecordConverter());
    @Before
    public void setUp() {
@@ -77,7 +78,8 @@
            null,
            0,
            true,
-            "storeName");
+            "storeName",
+            new DefaultRecordConverter());
        assertTrue(restorer.hasCompleted(0, 10));
    }

streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java (100 lines changed)

@@ -106,7 +106,8 @@
            null,
            Long.MAX_VALUE,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        changelogReader.restore(active);
        assertTrue(functionCalled.get());
    }
@@ -143,7 +144,8 @@
            null,
            Long.MAX_VALUE,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andStubReturn(task);
        replay(active, task);
        changelogReader.restore(active);
@@ -167,7 +169,8 @@
            null,
            Long.MAX_VALUE,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        EasyMock.expect(active.restoringTaskFor(topicPartition)).andStubReturn(task);
        EasyMock.replay(active, task);
@@ -181,7 +184,8 @@
            null,
            Long.MAX_VALUE,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        // retry restore should succeed
        assertEquals(1, changelogReader.restore(active).size());
        assertThat(callback.restored.size(), equalTo(messages));
@@ -200,12 +204,13 @@
        consumer.assign(Collections.emptyList());
        final StateRestorer stateRestorer = new StateRestorer(
            topicPartition,
            restoreListener,
            expiredCheckpoint,
            Long.MAX_VALUE,
            true,
-            "storeName");
+            "storeName",
+            new DefaultRecordConverter());
        changelogReader.register(stateRestorer);
        EasyMock.expect(active.restoringTaskFor(topicPartition)).andStubReturn(task);
@@ -234,7 +239,8 @@
            5L,
            Long.MAX_VALUE,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        changelogReader.restore(active);
        assertThat(callback.restored.size(), equalTo(5));
@@ -250,7 +256,8 @@
            null,
            Long.MAX_VALUE,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andStubReturn(task);
        replay(active, task);
        changelogReader.restore(active);
@@ -266,7 +273,8 @@
            null,
            3,
            true,
-            "storeName");
+            "storeName",
+            new DefaultRecordConverter());
        changelogReader.register(restorer);
        expect(active.restoringTaskFor(topicPartition)).andStubReturn(task);
        replay(active, task);
@@ -293,21 +301,24 @@
            null,
            Long.MAX_VALUE,
            true,
-            "storeName1"));
+            "storeName1",
+            new DefaultRecordConverter()));
        changelogReader.register(new StateRestorer(
            one,
            restoreListener1,
            null,
            Long.MAX_VALUE,
            true,
-            "storeName2"));
+            "storeName2",
+            new DefaultRecordConverter()));
        changelogReader.register(new StateRestorer(
            two,
            restoreListener2,
            null,
            Long.MAX_VALUE,
            true,
-            "storeName3"));
+            "storeName3",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(one)).andStubReturn(task);
        expect(active.restoringTaskFor(two)).andStubReturn(task);
@@ -338,21 +349,24 @@
            0L,
            Long.MAX_VALUE,
            true,
-            "storeName1"));
+            "storeName1",
+            new DefaultRecordConverter()));
        changelogReader.register(new StateRestorer(
            one,
            restoreListener1,
            0L,
            Long.MAX_VALUE,
            true,
-            "storeName2"));
+            "storeName2",
+            new DefaultRecordConverter()));
        changelogReader.register(new StateRestorer(
            two,
            restoreListener2,
            0L,
            Long.MAX_VALUE,
            true,
-            "storeName3"));
+            "storeName3",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(one)).andReturn(task);
        expect(active.restoringTaskFor(two)).andReturn(task);
@@ -386,7 +400,8 @@
            0L,
            5,
            true,
-            "storeName1"));
+            "storeName1",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andStubReturn(task);
        replay(active, task);
        changelogReader.restore(active);
@@ -421,7 +436,8 @@
            null,
            Long.MAX_VALUE,
            true,
-            "storeName");
+            "storeName",
+            new DefaultRecordConverter());
        setupConsumer(0, topicPartition);
        changelogReader.register(restorer);
@@ -440,7 +456,8 @@
            endOffset,
            Long.MAX_VALUE,
            true,
-            "storeName");
+            "storeName",
+            new DefaultRecordConverter());
        changelogReader.register(restorer);
@@ -458,7 +475,8 @@
            null,
            Long.MAX_VALUE,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andStubReturn(task);
        replay(active, task);
        changelogReader.restore(active);
@@ -476,7 +494,8 @@
            null,
            Long.MAX_VALUE,
            false,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andStubReturn(task);
        replay(active, task);
        changelogReader.restore(active);
@@ -498,7 +517,8 @@
            null,
            Long.MAX_VALUE,
            false,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andStubReturn(task);
        replay(active, task);
        changelogReader.restore(active);
@@ -516,7 +536,8 @@
            null,
            Long.MAX_VALUE,
            true,
-            "store"));
+            "store",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andStubReturn(task);
        replay(active, task);
@@ -537,7 +558,8 @@
            null,
            Long.MAX_VALUE,
            false,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        final TopicPartition postInitialization = new TopicPartition("other", 0);
        expect(active.restoringTaskFor(topicPartition)).andStubReturn(task);
@@ -558,7 +580,8 @@
            null,
            Long.MAX_VALUE,
            false,
-            "otherStore"));
+            "otherStore",
+            new DefaultRecordConverter()));
        final Collection<TopicPartition> expected = Utils.mkSet(topicPartition, postInitialization);
        consumer.assign(expected);
@@ -581,7 +604,8 @@
            null,
            9L,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andReturn(task);
        replay(active);
@@ -602,7 +626,8 @@
            null,
            Long.MAX_VALUE,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andReturn(task);
        replay(active);
@@ -622,7 +647,8 @@
            null,
            Long.MAX_VALUE,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andReturn(task);
        replay(active);
@@ -641,7 +667,8 @@
            null,
            5,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andReturn(task);
        replay(active);
@@ -661,7 +688,8 @@
            null,
            10,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andReturn(task);
        replay(active);
@@ -688,7 +716,8 @@
            null,
            6,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andReturn(task);
        replay(active);
@@ -710,7 +739,8 @@
            null,
            11,
            true,
-            "storeName"));
+            "storeName",
+            new DefaultRecordConverter()));
        expect(active.restoringTaskFor(topicPartition)).andReturn(task);
        replay(active);
