index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/writer/IcebergWriterStageTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import io.mantisrx.connector.iceberg.sink.StageOverrideParameters;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.connector.iceberg.sink.writer.factory.IcebergWriterFactory;
import io.mantisrx.connector.iceberg.sink.writer.metrics.WriterMetrics;
import io.mantisrx.connector.iceberg.sink.writer.partitioner.Partitioner;
import io.mantisrx.connector.iceberg.sink.writer.partitioner.PartitionerFactory;
import io.mantisrx.connector.iceberg.sink.writer.pool.FixedIcebergWriterPool;
import io.mantisrx.connector.iceberg.sink.writer.pool.IcebergWriterPool;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.TestWorkerInfo;
import io.mantisrx.runtime.lifecycle.ServiceLocator;
import io.mantisrx.runtime.parameter.Parameters;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.types.Types;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import rx.Observable;
import rx.observers.TestSubscriber;
import rx.schedulers.TestScheduler;
/**
 * Unit tests for {@link IcebergWriterStage}'s record-to-DataFile transformer.
 *
 * <p>All tests are driven by an rx {@link TestScheduler} so the transformer's size- and
 * time-based flush thresholds can be exercised deterministically by advancing virtual time.
 * The writer pool is a spied {@link FixedIcebergWriterPool} backed by {@link FakeIcebergWriter},
 * so no real files are ever written.
 *
 * <p>NOTE(review): the virtual-time offsets below imply the override parameters configure a
 * 100 ms row-group check and a 500 ms time-based flush — confirm against
 * {@code StageOverrideParameters}.
 */
class IcebergWriterStageTest {
    // Minimal single-column schema shared by every test.
    private static final Schema SCHEMA =
            new Schema(Types.NestedField.required(1, "id", Types.IntegerType.get()));
    private TestScheduler scheduler;
    private TestSubscriber<MantisDataFile> subscriber;
    private IcebergWriterStage.Transformer transformer;
    private Catalog catalog;
    private Context context;
    private IcebergWriterPool writerPool;
    private Partitioner partitioner;
    private Observable<MantisDataFile> flow;
    private MantisRecord record;

    /**
     * Wires up a spied writer pool, an identity partitioner mock, a mocked Mantis
     * {@link Context}/{@link Catalog}, and a default flow that emits one record
     * per virtual millisecond.
     */
    @BeforeEach
    void setUp() {
        Record icebergRecord = GenericRecord.create(SCHEMA);
        icebergRecord.setField("id", 1);
        record = new MantisRecord(icebergRecord, null);
        this.scheduler = new TestScheduler();
        this.subscriber = new TestSubscriber<>();
        // Writer
        Parameters parameters = StageOverrideParameters.newParameters();
        WriterConfig config = new WriterConfig(parameters, mock(Configuration.class));
        WriterMetrics metrics = new WriterMetrics();
        IcebergWriterFactory factory = FakeIcebergWriter::new;
        this.writerPool = spy(new FixedIcebergWriterPool(
                factory,
                config.getWriterFlushFrequencyBytes(),
                config.getWriterMaximumPoolSize()));
        // By default the test record's writer is reported flushable (over the size
        // threshold); individual tests stub an empty set to simulate being under it.
        doReturn(Collections.singleton(record.getRecord())).when(writerPool).getFlushableWriters();
        this.partitioner = mock(Partitioner.class);
        // Identity partitioning: each record maps to itself as its partition key.
        when(partitioner.partition(icebergRecord)).thenReturn(icebergRecord);
        this.transformer = new IcebergWriterStage.Transformer(
                config,
                metrics,
                this.writerPool,
                this.partitioner,
                this.scheduler,
                this.scheduler);
        // Catalog
        ServiceLocator serviceLocator = mock(ServiceLocator.class);
        when(serviceLocator.service(Configuration.class)).thenReturn(mock(Configuration.class));
        this.catalog = mock(Catalog.class);
        Table table = mock(Table.class);
        PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).identity("id").build();
        when(table.spec()).thenReturn(spec);
        when(this.catalog.loadTable(any())).thenReturn(table);
        when(serviceLocator.service(Catalog.class)).thenReturn(this.catalog);
        when(serviceLocator.service(PartitionerFactory.class)).thenReturn(mock(PartitionerFactory.class));
        // Mantis Context
        this.context = mock(Context.class);
        when(this.context.getParameters()).thenReturn(parameters);
        when(this.context.getServiceLocator()).thenReturn(serviceLocator);
        when(this.context.getWorkerInfo()).thenReturn(
                new TestWorkerInfo("testJobName", "jobId", 1, 1, 1, MantisJobDurationType.Perpetual,
                        "host"));
        // Flow
        Observable<MantisRecord> source = Observable.interval(1, TimeUnit.MILLISECONDS, this.scheduler)
                .map(i -> record);
        this.flow = source.compose(this.transformer);
    }

    /**
     * A record with an unseen partition key should cause a second writer to be opened,
     * while records with a known key are routed to their existing writer.
     */
    @Test
    void shouldAddWriterOnNewPartition() throws IOException {
        Record recordWithNewPartition = GenericRecord.create(SCHEMA);
        recordWithNewPartition.setField("id", 2);
        // Identity partitioning.
        when(partitioner.partition(recordWithNewPartition)).thenReturn(recordWithNewPartition);
        Observable<MantisRecord> source = Observable.just(record, record, new MantisRecord(recordWithNewPartition, null), record)
                .concatMap(r -> Observable.just(r).delay(1, TimeUnit.MILLISECONDS, scheduler));
        flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        // Same partition; no other thresholds (size, time) met.
        scheduler.advanceTimeBy(2, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        // New partition detected; no thresholds met yet.
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        // Existing partition detected; no thresholds met yet.
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        verify(writerPool, times(1)).open(record.getRecord());
        verify(writerPool, times(1)).open(recordWithNewPartition);
        verify(writerPool, times(3)).write(eq(record.getRecord()), any());
        verify(writerPool, times(1)).write(eq(recordWithNewPartition), any());
        verify(writerPool, times(0)).close(any());
    }

    /**
     * When the pool reports a flushable writer at the row-group check,
     * that writer is closed and exactly one DataFile is emitted downstream.
     */
    @Test
    void shouldCloseOnSizeThreshold() throws IOException {
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(100, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(1);
        verify(writerPool, times(100)).write(any(), any());
        verify(writerPool, times(1)).close(record.getRecord());
    }

    /** With no flushable writers, the row-group check must not close anything. */
    @Test
    void shouldNotCloseWhenUnderSizeThreshold() throws IOException {
        doReturn(new HashSet<>()).when(writerPool).getFlushableWriters();
        flow.subscribeOn(scheduler).subscribe(subscriber);
        // Size is checked at row-group-size config, but under size-threshold, so no-op.
        scheduler.advanceTimeBy(100, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        subscriber.assertNoTerminalEvent();
        verify(writerPool, times(100)).write(eq(record.getRecord()), any());
        verify(writerPool, times(0)).close(any());
    }

    /**
     * With two open writers but only one reported flushable, only the flushable
     * writer is closed on the size check; the other stays open.
     */
    @Test
    void shouldCloseOnlyFlushableWritersOnSizeThreshold() throws IOException {
        Record recordWithNewPartition = GenericRecord.create(SCHEMA);
        when(partitioner.partition(recordWithNewPartition)).thenReturn(recordWithNewPartition);
        Observable<MantisRecord> source = Observable.just(record, new MantisRecord(recordWithNewPartition, null))
                .concatMap(r -> Observable.just(r).delay(1, TimeUnit.MILLISECONDS, scheduler))
                .repeat();
        flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(100, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(1);
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(1);
        subscriber.assertNoTerminalEvent();
        verify(writerPool, times(101)).write(any(), any());
        verify(writerPool, times(1)).close(record.getRecord());
        verify(writerPool, times(0)).close(recordWithNewPartition);
    }

    /**
     * On a low-volume stream the time threshold, not the size threshold, should
     * flush every open writer (two partitions -> two DataFiles).
     */
    @Test
    void shouldCloseAllWritersOnTimeThresholdWhenLowVolume() throws IOException {
        Record recordWithNewPartition = GenericRecord.create(SCHEMA);
        when(partitioner.partition(recordWithNewPartition)).thenReturn(recordWithNewPartition);
        doReturn(new HashSet<>()).when(writerPool).getFlushableWriters();
        // Low volume stream.
        Observable<MantisRecord> source = Observable.just(record, new MantisRecord(recordWithNewPartition, null))
                .concatMap(r -> Observable.just(r).delay(50, TimeUnit.MILLISECONDS, scheduler))
                .repeat();
        flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        // Over the size threshold, but not yet checked at row-group-size config.
        scheduler.advanceTimeBy(50, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        // Hits time threshold and there's data to write; proceed to close.
        scheduler.advanceTimeBy(450, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(2);
        subscriber.assertNoTerminalEvent();
        verify(writerPool, times(10)).write(any(), any());
        verify(writerPool, times(2)).close(any());
    }

    /**
     * On a high-volume stream that never crosses the size threshold, the time
     * threshold should still flush all open writers.
     */
    @Test
    void shouldCloseAllWritersOnTimeThresholdWhenHighVolume() throws IOException {
        Record recordWithNewPartition = GenericRecord.create(SCHEMA);
        when(partitioner.partition(recordWithNewPartition)).thenReturn(recordWithNewPartition);
        doReturn(new HashSet<>()).when(writerPool).getFlushableWriters();
        Observable<MantisRecord> source = Observable.just(record, new MantisRecord(recordWithNewPartition, null))
                .concatMap(r -> Observable.just(r).delay(1, TimeUnit.MILLISECONDS, scheduler))
                .repeat();
        flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        // Size is checked at row-group-size config, but under size threshold, so no-op.
        scheduler.advanceTimeBy(99, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        // Hits time threshold; proceed to close.
        scheduler.advanceTimeBy(400, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(2);
        subscriber.assertNoTerminalEvent();
        verify(writerPool, times(500)).write(any(), any());
        verify(writerPool, times(2)).close(any());
    }

    /**
     * A time-threshold tick with nothing written since the last flush should be a
     * no-op: the already-closed writer must not produce a second DataFile.
     */
    @Test
    void shouldNoOpOnTimeThresholdWhenNoData() throws IOException {
        doReturn(new HashSet<>()).when(writerPool).getFlushableWriters();
        // Low volume stream.
        Observable<MantisRecord> source = Observable.interval(900, TimeUnit.MILLISECONDS, scheduler)
                .map(i -> record);
        flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        // No event yet.
        scheduler.advanceTimeBy(500, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        // 1 event, timer threshold met, size threshold not met: flush.
        scheduler.advanceTimeBy(500, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(1);
        // No event yet again, writer exists but is closed from previous flush, timer threshold met: noop.
        scheduler.advanceTimeBy(500, TimeUnit.MILLISECONDS);
        // Count should not increase.
        subscriber.assertValueCount(1);
        subscriber.assertNoErrors();
        subscriber.assertNoTerminalEvent();
        verify(writerPool, times(1)).open(any());
        verify(writerPool, times(1)).write(any(), any());
        // 2nd close is a noop.
        verify(writerPool, times(1)).close(any());
    }

    /** A failure to open a writer is fatal: the flow terminates with an error. */
    @Test
    void shouldNoOpWhenFailedToOpen() throws IOException {
        doThrow(new IOException()).when(writerPool).open(any());
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertError(RuntimeException.class);
        subscriber.assertTerminalEvent();
        verify(writerPool).open(any());
        subscriber.assertNoValues();
    }

    /** Write failures are non-fatal: the flow keeps consuming subsequent records. */
    @Test
    void shouldContinueOnWriteFailure() {
        doThrow(new RuntimeException()).when(writerPool).write(any(), any());
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertNoTerminalEvent();
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertNoTerminalEvent();
        verify(writerPool, times(2)).write(any(), any());
    }

    /**
     * Documents the expected flush-on-terminate behavior; disabled because the
     * transformer's timer keeps ticking after the source completes.
     */
    @Test
    @Disabled("Will never terminate: Source terminates, but timer will continue to tick")
    void shouldCloseOnTerminate() throws IOException {
        Observable<MantisRecord> source = Observable.just(record);
        Observable<MantisDataFile> flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.triggerActions();
        subscriber.assertNoErrors();
        verify(writerPool).open(record.getRecord());
        verify(writerPool).write(any(), any());
        verify(writerPool, times(2)).isClosed(record.getRecord());
        verify(writerPool, times(1)).close(record.getRecord());
    }

    /** Stage init succeeds when the catalog can load the configured table. */
    @Test
    void shouldInitializeWithExistingTable() {
        IcebergWriterStage stage = new IcebergWriterStage();
        assertDoesNotThrow(() -> stage.init(context));
    }

    /** Stage init propagates the catalog's failure when the table is missing. */
    @Test
    void shouldFailToInitializeWithMissingTable() {
        when(catalog.loadTable(any())).thenThrow(new RuntimeException());
        IcebergWriterStage stage = new IcebergWriterStage();
        assertThrows(RuntimeException.class, () -> stage.init(context));
    }

    /**
     * In-memory {@link IcebergWriter} stand-in. "Open" is modeled by a non-null
     * fileAppender sentinel; close() returns a canned DataFile only if the writer
     * was open, and is otherwise a no-op returning null.
     */
    private static class FakeIcebergWriter implements IcebergWriter {
        // Canned, unpartitioned DataFile returned by every successful close().
        private static final DataFile DATA_FILE = new DataFiles.Builder(PartitionSpec.unpartitioned())
                .withPath("/datafile.parquet")
                .withFileSizeInBytes(1L)
                .withRecordCount(1L)
                .build();
        private final Object object;
        // Non-null while the writer is "open"; doubles as the open/closed flag.
        private Object fileAppender;
        private StructLike partitionKey;

        public FakeIcebergWriter() {
            this.object = new Object();
            this.fileAppender = null;
        }

        @Override
        public void open() throws IOException {
            open(null);
        }

        @Override
        public void open(StructLike newPartitionKey) throws IOException {
            fileAppender = object;
            partitionKey = newPartitionKey;
        }

        @Override
        public void write(MantisRecord record) {
        }

        @Override
        public MantisDataFile close() throws IOException {
            if (fileAppender != null) {
                fileAppender = null;
                return new MantisDataFile(DATA_FILE, null);
            }
            return null;
        }

        @Override
        public boolean isClosed() {
            return fileAppender == null;
        }

        @Override
        public long length() {
            // Length is irrelevant to these tests; flushability is stubbed on the pool.
            return 0;
        }

        @Override
        public StructLike getPartitionKey() {
            return partitionKey;
        }
    }
}
| 8,500 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/writer/FixedIcebergWriterPoolTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import io.mantisrx.connector.iceberg.sink.StageOverrideParameters;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.connector.iceberg.sink.writer.factory.IcebergWriterFactory;
import io.mantisrx.connector.iceberg.sink.writer.pool.FixedIcebergWriterPool;
import io.mantisrx.connector.iceberg.sink.writer.pool.IcebergWriterPool;
import io.mantisrx.runtime.parameter.Parameters;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.types.Types;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
class FixedIcebergWriterPoolTest {
private static final Schema SCHEMA =
new Schema(Types.NestedField.required(1, "id", Types.IntegerType.get()));
private IcebergWriter writer;
private IcebergWriterPool writerPool;
private MantisRecord record;
private StructLike partition;
@BeforeEach
void setUp() {
Parameters parameters = StageOverrideParameters.newParameters();
WriterConfig config = new WriterConfig(parameters, mock(Configuration.class));
IcebergWriterFactory factory = mock(IcebergWriterFactory.class);
this.writer = mock(IcebergWriter.class);
when(this.writer.length()).thenReturn(Long.MAX_VALUE);
when(factory.newIcebergWriter()).thenReturn(this.writer);
this.writerPool = spy(new FixedIcebergWriterPool(
factory,
config.getWriterFlushFrequencyBytes(),
config.getWriterMaximumPoolSize()));
Record icebergRecord = GenericRecord.create(SCHEMA);
icebergRecord.setField("id", 1);
// Identity partitioning (without explicitly using a Partitioner).
this.partition = icebergRecord.copy();
record = new MantisRecord(icebergRecord, null);
}
@Test
void shouldOpenNewWriter() {
assertDoesNotThrow(() -> writerPool.open(record.getRecord()));
}
@Test
void shouldFailToOpenNewWriterWhenMaximumPoolSizeExceeded() {
writerPool = spy(new FixedIcebergWriterPool(mock(IcebergWriterFactory.class), 0, 0));
assertThrows(IOException.class, () -> writerPool.open(any()));
}
@Test
void shouldOpenWhenWriterExists() {
assertDoesNotThrow(() -> writerPool.open(record.getRecord()));
assertDoesNotThrow(() -> writerPool.open(record.getRecord()));
}
@Test
void shouldFailToWriteWhenNoWriterExists() {
assertThrows(RuntimeException.class, () -> writerPool.write(partition, record));
}
@Test
void shouldWriteWhenWriterExists() throws IOException {
writerPool.open(partition);
assertDoesNotThrow(() -> writerPool.write(partition, record));
}
@Test
void shouldFailToCloseWhenNoWriterExists() {
assertThrows(RuntimeException.class, () -> writerPool.close(record.getRecord()));
}
@Test
void shouldCloseWhenWriterExists() throws IOException {
writerPool.open(partition);
assertDoesNotThrow(() -> writerPool.close(partition));
}
@Test
void shouldGetFlushableWriters() throws IOException {
writerPool.open(partition);
assertFalse(writerPool.getFlushableWriters().isEmpty());
when(writer.length()).thenReturn(Long.MIN_VALUE);
assertTrue(writerPool.getFlushableWriters().isEmpty());
}
}
| 8,501 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/committer/IcebergCommitterStageTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import io.mantisrx.connector.iceberg.sink.StageOverrideParameters;
import io.mantisrx.connector.iceberg.sink.committer.config.CommitterConfig;
import io.mantisrx.connector.iceberg.sink.committer.metrics.CommitterMetrics;
import io.mantisrx.connector.iceberg.sink.writer.MantisDataFile;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.lifecycle.ServiceLocator;
import io.mantisrx.runtime.parameter.Parameters;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import rx.Observable;
import rx.observers.TestSubscriber;
import rx.schedulers.TestScheduler;
/**
 * Unit tests for {@link IcebergCommitterStage}'s transformer, which batches incoming
 * DataFiles and commits them on a periodic timer driven here by a {@link TestScheduler}.
 */
class IcebergCommitterStageTest {
    private TestScheduler scheduler;
    private TestSubscriber<Map<String, Object>> subscriber;
    private Catalog catalog;
    private Context context;
    private IcebergCommitter committer;
    private IcebergCommitterStage.Transformer transformer;

    @BeforeEach
    void setUp() {
        scheduler = new TestScheduler();
        subscriber = new TestSubscriber<>();
        Parameters parameters = StageOverrideParameters.newParameters();
        committer = mock(IcebergCommitter.class);
        transformer = new IcebergCommitterStage.Transformer(
                new CommitterConfig(parameters),
                new CommitterMetrics(),
                committer,
                scheduler);
        // Mantis service locator exposing a mocked Hadoop config and catalog.
        ServiceLocator serviceLocator = mock(ServiceLocator.class);
        when(serviceLocator.service(Configuration.class)).thenReturn(mock(Configuration.class));
        catalog = mock(Catalog.class);
        Table table = mock(Table.class);
        when(table.spec()).thenReturn(PartitionSpec.unpartitioned());
        when(catalog.loadTable(any())).thenReturn(table);
        when(serviceLocator.service(Catalog.class)).thenReturn(catalog);
        context = mock(Context.class);
        when(context.getParameters()).thenReturn(parameters);
        when(context.getServiceLocator()).thenReturn(serviceLocator);
    }

    /** One mocked DataFile per virtual minute, piped through the transformer. */
    private Observable<Map<String, Object>> commitFlow() {
        return Observable.interval(1, TimeUnit.MINUTES, scheduler)
                .map(tick -> new MantisDataFile(mock(DataFile.class), Long.MIN_VALUE))
                .compose(transformer);
    }

    /** A commit summary should be emitted once per commit window, not per DataFile. */
    @Test
    void shouldCommitPeriodically() {
        Map<String, Object> summary = new HashMap<>();
        summary.put("test", "test");
        when(committer.commit(any())).thenReturn(summary);
        commitFlow().subscribeOn(scheduler).subscribe(subscriber);
        // First minute: data arrives but no commit window has elapsed yet.
        scheduler.advanceTimeBy(1, TimeUnit.MINUTES);
        subscriber.assertNoValues();
        subscriber.assertNotCompleted();
        // First window elapses: one summary.
        scheduler.advanceTimeBy(4, TimeUnit.MINUTES);
        subscriber.assertValueCount(1);
        // Second window: a second summary.
        scheduler.advanceTimeBy(5, TimeUnit.MINUTES);
        subscriber.assertValueCount(2);
        // Partial window: no additional emission.
        scheduler.advanceTimeBy(1, TimeUnit.MINUTES);
        subscriber.assertValueCount(2);
        subscriber.assertNoErrors();
        verify(committer, times(2)).commit(any());
    }

    /** A failing commit must not terminate the flow; it simply emits nothing. */
    @Test
    void shouldContinueOnCommitFailure() {
        doThrow(new RuntimeException()).when(committer).commit(any());
        commitFlow().subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(5, TimeUnit.MINUTES);
        subscriber.assertNoErrors();
        subscriber.assertNotCompleted();
        subscriber.assertValueCount(0);
        verify(committer).commit(any());
    }

    /** Stage init succeeds when the catalog can load the configured table. */
    @Test
    void shouldInitializeWithExistingTable() {
        IcebergCommitterStage stage = new IcebergCommitterStage();
        assertDoesNotThrow(() -> stage.init(context));
    }

    /** Stage init propagates the catalog's failure when the table is missing. */
    @Test
    void shouldFailToInitializeWithMissingTable() {
        when(catalog.loadTable(any())).thenThrow(new RuntimeException());
        IcebergCommitterStage stage = new IcebergCommitterStage();
        assertThrows(RuntimeException.class, () -> stage.init(context));
    }
}
| 8,502 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/codecs/IcebergCodecs.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.codecs;
import io.mantisrx.common.codec.Codec;
import io.mantisrx.connector.iceberg.sink.writer.MantisDataFile;
import io.mantisrx.connector.iceberg.sink.writer.MantisRecord;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import javax.annotation.Nullable;
import lombok.Value;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Schema;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.data.avro.IcebergDecoder;
import org.apache.iceberg.data.avro.IcebergEncoder;
import org.apache.iceberg.exceptions.RuntimeIOException;
/**
* Encoders and decoders for working with Iceberg objects
* such as {@link Record}s and {@link DataFile}s.
*/
/**
 * Encoders and decoders for working with Iceberg objects
 * such as {@link Record}s and {@link DataFile}s.
 */
public class IcebergCodecs {

    /**
     * @return a codec for encoding/decoding Iceberg Records.
     */
    public static Codec<Record> record(Schema schema) {
        return new RecordCodec<>(schema);
    }

    /**
     * @return a codec for encoding/decoding {@link MantisRecord}s (an Iceberg Record
     *         plus an optional timestamp).
     */
    public static Codec<MantisRecord> mantisRecord(Schema schema) {
        return new MantisRecordCodec(schema);
    }

    /**
     * @return a codec for encoding/decoding DataFiles.
     */
    public static Codec<DataFile> dataFile() {
        return new ObjectCodec<>(DataFile.class);
    }

    /**
     * @return a codec for encoding/decoding {@link MantisDataFile}s.
     */
    public static Codec<MantisDataFile> mantisDataFile() {
        return new ObjectCodec<>(MantisDataFile.class);
    }

    /**
     * Codec for {@link MantisRecord}: the Iceberg Record is Avro-encoded, then the
     * (avro bytes, timestamp) pair is Java-serialized via {@link SerializableMantisRecord}.
     */
    private static class MantisRecordCodec implements Codec<MantisRecord> {
        private final IcebergEncoder<Record> encoder;
        private final IcebergDecoder<Record> decoder;
        private final ObjectCodec<SerializableMantisRecord> objectCodec;

        private MantisRecordCodec(Schema schema) {
            this.encoder = new IcebergEncoder<>(schema);
            this.decoder = new IcebergDecoder<>(schema);
            this.objectCodec = new ObjectCodec<>(SerializableMantisRecord.class);
        }

        @Override
        public MantisRecord decode(byte[] bytes) {
            try {
                // Reverse of encode(): Java-deserialize the envelope, then Avro-decode the record.
                SerializableMantisRecord serializableMantisRecord = objectCodec.decode(bytes);
                return new MantisRecord(
                        decoder.decode(serializableMantisRecord.getRecord()),
                        serializableMantisRecord.getTimestamp());
            } catch (IOException e) {
                throw new RuntimeIOException("problem decoding Iceberg record", e);
            }
        }

        @Override
        public byte[] encode(MantisRecord value) {
            try {
                SerializableMantisRecord r =
                        new SerializableMantisRecord(
                                encoder.encode(value.getRecord()).array(),
                                value.getTimestamp());
                return objectCodec.encode(r);
            } catch (IOException e) {
                // Fix: message previously read "problem encoding encoding Iceberg record".
                throw new RuntimeIOException("problem encoding Iceberg record", e);
            }
        }
    }

    /** Serializable envelope pairing Avro-encoded record bytes with an optional timestamp. */
    @Value
    private static class SerializableMantisRecord implements Serializable {
        byte[] record;
        @Nullable
        Long timestamp;
    }

    /** Avro-based codec for plain Iceberg {@link Record}s. */
    private static class RecordCodec<T> implements Codec<T> {
        private final IcebergEncoder<T> encoder;
        private final IcebergDecoder<T> decoder;

        private RecordCodec(Schema schema) {
            this.encoder = new IcebergEncoder<>(schema);
            this.decoder = new IcebergDecoder<>(schema);
        }

        @Override
        public T decode(byte[] bytes) {
            try {
                return decoder.decode(bytes);
            } catch (IOException e) {
                throw new RuntimeIOException("problem decoding Iceberg record", e);
            }
        }

        @Override
        public byte[] encode(T value) {
            try {
                return encoder.encode(value).array();
            } catch (IOException e) {
                // Fix: message previously read "problem encoding encoding Iceberg record".
                throw new RuntimeIOException("problem encoding Iceberg record", e);
            }
        }
    }

    /**
     * Generic Java-serialization codec. The target class is kept so decode can cast
     * safely and error messages can name the actual type (the originals hardcoded
     * "DataFile" even though this codec also serializes other types).
     */
    private static class ObjectCodec<T> implements Codec<T> {
        private final Class<T> tClass;

        private ObjectCodec(Class<T> tClass) {
            this.tClass = tClass;
        }

        @Override
        public T decode(byte[] bytes) {
            try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
                return tClass.cast(in.readObject());
            } catch (IOException | ClassNotFoundException e) {
                throw new RuntimeException("Failed to convert bytes to " + tClass.getName(), e);
            }
        }

        @Override
        public byte[] encode(T value) {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
                out.writeObject(value);
            } catch (IOException e) {
                throw new RuntimeException(
                        "Failed to write bytes for " + tClass.getName() + ": " + value, e);
            }
            return bytes.toByteArray();
        }
    }
}
| 8,503 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/config/SinkConfig.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.config;
import static io.mantisrx.connector.iceberg.sink.config.SinkProperties.SINK_CATALOG;
import static io.mantisrx.connector.iceberg.sink.config.SinkProperties.SINK_DATABASE;
import static io.mantisrx.connector.iceberg.sink.config.SinkProperties.SINK_TABLE;
import io.mantisrx.connector.iceberg.sink.committer.config.CommitterConfig;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.runtime.parameter.Parameters;
import lombok.RequiredArgsConstructor;
/**
* Convenient base config used by {@link WriterConfig} and {@link CommitterConfig}.
*/
@RequiredArgsConstructor
public class SinkConfig {
private final String catalog;
private final String database;
private final String table;
/**
* Creates an instance from {@link Parameters} derived from the current Mantis Stage's {@code Context}.
*/
public SinkConfig(Parameters parameters) {
this.catalog = (String) parameters.get(SINK_CATALOG);
this.database = (String) parameters.get(SINK_DATABASE);
this.table = (String) parameters.get(SINK_TABLE);
}
/**
* Returns a String for Iceberg Catalog name.
*/
public String getCatalog() {
return catalog;
}
/**
* Returns a String for the database name in a catalog.
*/
public String getDatabase() {
return database;
}
/**
* Returns a String for the table within a database.
*/
public String getTable() {
return table;
}
}
| 8,504 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/config/SinkProperties.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.config;
/**
 * Property key names and default values for the base Iceberg Sink config.
 */
public class SinkProperties {

    /** Parameter key for the Iceberg Catalog name. */
    public static final String SINK_CATALOG = "sinkCatalog";
    /** Human-readable description of {@link #SINK_CATALOG}. */
    public static final String SINK_CATALOG_DESCRIPTION = "Name of Iceberg Catalog";

    /** Parameter key for the database name within the Iceberg Catalog. */
    public static final String SINK_DATABASE = "sinkDatabase";
    /** Human-readable description of {@link #SINK_DATABASE}. */
    public static final String SINK_DATABASE_DESCRIPTION = "Name of database within Iceberg Catalog";

    /** Parameter key for the table name within the database. */
    public static final String SINK_TABLE = "sinkTable";
    /** Human-readable description of {@link #SINK_TABLE}. */
    public static final String SINK_TABLE_DESCRIPTION = "Name of table within database";

    /** Constants holder; not instantiable. */
    private SinkProperties() {
    }
}
| 8,505 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/IcebergWriterStage.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import io.mantisrx.connector.iceberg.sink.codecs.IcebergCodecs;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties;
import io.mantisrx.connector.iceberg.sink.writer.factory.DefaultIcebergWriterFactory;
import io.mantisrx.connector.iceberg.sink.writer.factory.IcebergWriterFactory;
import io.mantisrx.connector.iceberg.sink.writer.metrics.WriterMetrics;
import io.mantisrx.connector.iceberg.sink.writer.partitioner.Partitioner;
import io.mantisrx.connector.iceberg.sink.writer.partitioner.PartitionerFactory;
import io.mantisrx.connector.iceberg.sink.writer.pool.FixedIcebergWriterPool;
import io.mantisrx.connector.iceberg.sink.writer.pool.IcebergWriterPool;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.WorkerInfo;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.scheduler.MantisRxSingleThreadScheduler;
import io.mantisrx.shaded.com.google.common.annotations.VisibleForTesting;
import io.mantisrx.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.io.IOException;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.io.LocationProvider;
import org.apache.iceberg.types.Types;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Scheduler;
import rx.exceptions.Exceptions;
import rx.schedulers.Schedulers;
/**
 * Processing stage which writes records to Iceberg through a backing file store.
 */
public class IcebergWriterStage implements ScalarComputation<MantisRecord, MantisDataFile> {

    private static final Logger logger = LoggerFactory.getLogger(IcebergWriterStage.class);

    // Assigned in init(Context); call(...) assumes init(...) has run first.
    private Transformer transformer;

    /**
     * Returns a config for this stage which has encoding/decoding semantics and parameter definitions.
     */
    public static ScalarToScalar.Config<MantisRecord, MantisDataFile> config() {
        return new ScalarToScalar.Config<MantisRecord, MantisDataFile>()
                .description("")
                .codec(IcebergCodecs.mantisDataFile())
                .serialInput()
                .withParameters(parameters());
    }

    /**
     * Returns a list of parameter definitions for this stage.
     * All validators pass unconditionally; values are interpreted by {@link WriterConfig}.
     */
    public static List<ParameterDefinition<?>> parameters() {
        return Arrays.asList(
                new IntParameter().name(WriterProperties.WRITER_ROW_GROUP_SIZE)
                        .description(WriterProperties.WRITER_ROW_GROUP_SIZE_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .defaultValue(WriterProperties.WRITER_ROW_GROUP_SIZE_DEFAULT)
                        .build(),
                new StringParameter().name(WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES)
                        .description(WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .defaultValue(WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT)
                        .build(),
                new StringParameter().name(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC)
                        .description(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .defaultValue(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC_DEFAULT)
                        .build(),
                new StringParameter().name(WriterProperties.WRITER_FILE_FORMAT)
                        .description(WriterProperties.WRITER_FILE_FORMAT_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .defaultValue(WriterProperties.WRITER_FILE_FORMAT_DEFAULT)
                        .build(),
                new IntParameter().name(WriterProperties.WRITER_MAXIMUM_POOL_SIZE)
                        .description(WriterProperties.WRITER_MAXIMUM_POOL_SIZE_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .defaultValue(WriterProperties.WRITER_MAXIMUM_POOL_SIZE_DEFAULT)
                        .build()
        );
    }

    /**
     * Use this to instantiate a new transformer from a given {@link Context}.
     * Resolves the Hadoop Configuration, Iceberg Catalog, LocationProvider, and
     * PartitionerFactory from the Context's service locator; fails if the target
     * table does not already exist.
     */
    public static Transformer newTransformer(Context context) {
        Configuration hadoopConfig = context.getServiceLocator().service(Configuration.class);
        WriterConfig config = new WriterConfig(context.getParameters(), hadoopConfig);
        Catalog catalog = context.getServiceLocator().service(Catalog.class);
        TableIdentifier id = TableIdentifier.of(config.getCatalog(), config.getDatabase(), config.getTable());
        // Throws if the table was not created out-of-band beforehand (see class-level docs on init).
        Table table = catalog.loadTable(id);
        WorkerInfo workerInfo = context.getWorkerInfo();
        LocationProvider locationProvider = context.getServiceLocator().service(LocationProvider.class);
        IcebergWriterFactory factory = new DefaultIcebergWriterFactory(config, workerInfo, table, locationProvider);
        IcebergWriterPool writerPool = new FixedIcebergWriterPool(factory, config);
        WriterMetrics metrics = new WriterMetrics();
        PartitionerFactory partitionerFactory = context.getServiceLocator().service(PartitionerFactory.class);
        Partitioner partitioner = partitionerFactory.getPartitioner(table);
        return newTransformer(config, metrics, writerPool, partitioner, context.getWorkerInfo(), context.getClassLoader());
    }

    /**
     * Wires a {@link Transformer} with a dedicated single-threaded scheduler for writing.
     * The thread name embeds the 1-based worker index for log correlation.
     *
     * @param loader optional job classloader; when non-null it becomes the context
     *               classloader of the writer thread so user classes resolve correctly.
     */
    @VisibleForTesting
    static Transformer newTransformer(
            WriterConfig writerConfig,
            WriterMetrics writerMetrics,
            IcebergWriterPool writerPool,
            Partitioner partitioner,
            WorkerInfo workerInfo,
            @Nullable ClassLoader loader) {
        int workerIdx = workerInfo.getWorkerIndex();
        String nameFormat = "IcebergWriter (" + (workerIdx + 1) + ")-%d";
        ThreadFactoryBuilder tfBuilder = new ThreadFactoryBuilder().setNameFormat(nameFormat);
        if(loader != null) {
            // if the job classloader is enabled, let's use the job classloader
            // as the thread context classloader of these threads
            ThreadFactory backingTf = r -> {
                Thread thread = new Thread(r);
                thread.setContextClassLoader(loader);
                return thread;
            };
            tfBuilder.setThreadFactory(backingTf);
        }
        Scheduler executingService = new MantisRxSingleThreadScheduler(tfBuilder.build());
        return new Transformer(writerConfig, writerMetrics, writerPool, partitioner,
                Schedulers.computation(), executingService);
    }

    public IcebergWriterStage() {
    }

    /**
     * Uses the provided Mantis Context to inject configuration and opens an underlying file appender.
     * <p>
     * This method depends on a Hadoop Configuration and Iceberg Catalog, both injected
     * from the Context's service locator.
     * <p>
     * Note that this method expects an Iceberg Table to have been previously created out-of-band,
     * otherwise initialization will fail. Users should prefer to create tables
     * out-of-band so they can be versioned alongside their schemas.
     */
    @Override
    public void init(Context context) {
        transformer = newTransformer(context);
    }

    @Override
    public Observable<MantisDataFile> call(Context context, Observable<MantisRecord> recordObservable) {
        return recordObservable.compose(transformer);
    }

    /**
     * Reactive Transformer for writing records to Iceberg.
     * <p>
     * Users may use this class independently of this Stage, for example, if they want to
     * {@link Observable#compose(Observable.Transformer)} this transformer with a flow into
     * an existing Stage. One benefit of this co-location is to avoid extra network
     * cost from worker-to-worker communication, trading off debuggability.
     */
    public static class Transformer implements Observable.Transformer<MantisRecord, MantisDataFile> {

        // Sentinel schema/record injected by the flush timer; detected in the scan
        // by structural comparison of the record's fields against this schema.
        private static final Schema TIMEOUT_SCHEMA = new Schema(
                Types.NestedField.required(1, "ts_utc_msec", Types.LongType.get()));

        private static final MantisRecord TIMEOUT_RECORD =
                new MantisRecord(GenericRecord.create(TIMEOUT_SCHEMA), null);

        private final WriterConfig config;
        private final WriterMetrics metrics;
        private final Partitioner partitioner;
        private final IcebergWriterPool writerPool;
        private final Scheduler timerScheduler;
        private final Scheduler transformerScheduler;

        public Transformer(
                WriterConfig config,
                WriterMetrics metrics,
                IcebergWriterPool writerPool,
                Partitioner partitioner,
                Scheduler timerScheduler,
                Scheduler transformerScheduler) {
            this.config = config;
            this.metrics = metrics;
            this.writerPool = writerPool;
            this.partitioner = partitioner;
            this.timerScheduler = timerScheduler;
            this.transformerScheduler = transformerScheduler;
        }

        /**
         * Opens an IcebergWriter FileAppender, writes records to a file. The appender flushes on size or time
         * threshold.
         * <p>
         * Size Threshold:
         * <p>
         * The appender will periodically check the current file size as configured by
         * {@link WriterConfig#getWriterRowGroupSize()}. If it's time to check, then the appender will flush on
         * {@link WriterConfig#getWriterFlushFrequencyBytes()}.
         * <p>
         * Time Threshold:
         * <p>
         * The appender will periodically attempt to flush as configured by
         * {@link WriterConfig#getWriterFlushFrequencyMsec()}. If this threshold is met, the appender will flush
         * only if the appender has an open file. This avoids flushing unnecessarily if there are no events.
         * Otherwise, a flush will happen, even if there are few events in the file. This effectively limits the
         * upper-bound for allowed lateness.
         * <p>
         * Pair this writer with a progressive multipart file uploader backend for better latencies.
         */
        @Override
        public Observable<MantisDataFile> call(Observable<MantisRecord> source) {
            // Periodic sentinel that forces the scan to consider flushing open writers.
            Observable<MantisRecord> timer = Observable.interval(
                    config.getWriterFlushFrequencyMsec(), TimeUnit.MILLISECONDS, timerScheduler)
                    .map(i -> TIMEOUT_RECORD);

            return source.mergeWith(timer)
                    .observeOn(transformerScheduler)
                    // Single mutable Trigger threaded through scan; it accumulates the
                    // set of partitions whose writers are due for a flush.
                    .scan(new Trigger(config.getWriterRowGroupSize()), (trigger, record) -> {
                        if (record.getRecord().struct().fields().equals(TIMEOUT_SCHEMA.columns())) {
                            // Timeout; track all writers even if they're not yet ready to be flushed.
                            trigger.trackAll(writerPool.getWriters());
                        } else {
                            StructLike partition = partitioner.partition(record.getRecord());
                            // Lazily open a writer for a partition on first record (or after a flush).
                            if (writerPool.isClosed(partition)) {
                                try {
                                    logger.info("opening file for partition {}", partition);
                                    writerPool.open(partition);
                                    metrics.increment(WriterMetrics.OPEN_SUCCESS_COUNT);
                                } catch (IOException e) {
                                    metrics.increment(WriterMetrics.OPEN_FAILURE_COUNT);
                                    throw Exceptions.propagate(e);
                                }
                            }

                            try {
                                writerPool.write(partition, record);
                                trigger.increment();
                                // Check all writers to see if any are flushable and track them if so.
                                // We should check _all_ writers because a writer that should be flushable
                                // may not be flushed if an event for its partition doesn't show up for
                                // a period of time. This could cause a longer delay for that partition,
                                // especially since we only check at the trigger's count threshold.
                                if (trigger.isOverCountThreshold()) {
                                    trigger.trackAll(writerPool.getFlushableWriters());
                                }
                                metrics.increment(WriterMetrics.WRITE_SUCCESS_COUNT);
                            } catch (RuntimeException e) {
                                // NOTE(review): a failed write drops the record with only a debug-level
                                // log and a counter bump — confirm this best-effort policy is intended.
                                metrics.increment(WriterMetrics.WRITE_FAILURE_COUNT);
                                logger.debug("error writing record {}", record);
                            }
                        }
                        return trigger;
                    })
                    .filter(Trigger::shouldFlush)
                    .map(trigger -> {
                        List<MantisDataFile> dataFiles = new ArrayList<>();
                        // Timer can still tick while no writers are open (i.e., no events), which means
                        // there won't be any tracked writers.
                        for (StructLike partition : trigger.getTrackedWriters()) {
                            try {
                                MantisDataFile dataFile = writerPool.close(partition);
                                dataFiles.add(dataFile);
                            } catch (IOException | RuntimeException e) {
                                metrics.increment(WriterMetrics.BATCH_FAILURE_COUNT);
                                logger.error("error writing DataFile", e);
                            }
                        }
                        trigger.reset();
                        return dataFiles;
                    })
                    .filter(dataFiles -> !dataFiles.isEmpty())
                    .flatMapIterable(t -> t)
                    .doOnNext(dataFile -> {
                        metrics.increment(WriterMetrics.BATCH_SUCCESS_COUNT);
                        logger.info("writing DataFile: {}", dataFile);
                        metrics.setGauge(WriterMetrics.BATCH_SIZE, dataFile.getDataFile().recordCount());
                        metrics.setGauge(WriterMetrics.BATCH_SIZE_BYTES, dataFile.getDataFile().fileSizeInBytes());
                    })
                    .doOnTerminate(() -> {
                        // Best-effort close of all writers on completion or error so no
                        // partially-written files are left open by this worker.
                        try {
                            logger.info("closing writer on rx terminate signal");
                            writerPool.closeAll();
                        } catch (IOException e) {
                            throw Exceptions.propagate(e);
                        }
                    })
                    .share();
        }

        /**
         * Mutable accumulator used inside the scan: counts writes since the last flush
         * and tracks which partitions' writers should be flushed next.
         */
        private static class Trigger {

            private final int countThreshold;
            private final Set<StructLike> writers;
            private int counter;

            Trigger(int countThreshold) {
                this.countThreshold = countThreshold;
                writers = new HashSet<>();
            }

            void increment() {
                counter++;
            }

            // Clears both the write counter and the tracked-partition set after a flush.
            void reset() {
                counter = 0;
                writers.clear();
            }

            void trackAll(Set<StructLike> partitions) {
                writers.addAll(partitions);
            }

            Set<StructLike> getTrackedWriters() {
                return writers;
            }

            boolean isOverCountThreshold() {
                return counter >= countThreshold;
            }

            boolean shouldFlush() {
                return !writers.isEmpty();
            }

            @Override
            public String toString() {
                return "Trigger{"
                        + " countThreshold=" + countThreshold
                        + ", writers=" + writers
                        + ", counter=" + counter
                        + '}';
            }
        }
    }
}
| 8,506 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/MantisRecord.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import javax.annotation.Nullable;
import lombok.Value;
import org.apache.iceberg.data.Record;
/**
 * An Iceberg {@link Record} paired with an optional event timestamp used for
 * low-watermark tracking by the writer (assumed to be epoch milliseconds —
 * TODO confirm with producers).
 */
@Value
public class MantisRecord {
    // The Iceberg record payload to be appended to a data file.
    Record record;
    // Nullable: records without a timestamp do not advance the low watermark.
    @Nullable
    Long timestamp;
}
| 8,507 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/IcebergWriter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import java.io.IOException;
import java.io.UncheckedIOException;
import org.apache.iceberg.StructLike;
/**
 * Contract for a writer that appends {@link MantisRecord}s to a backing file
 * and produces a {@link MantisDataFile} describing what was written on close.
 */
public interface IcebergWriter {
    /** Opens an underlying file appender with no partition key (unpartitioned writes). */
    void open() throws IOException;

    /** Opens an underlying file appender for the given partition key. */
    void open(StructLike newPartitionKey) throws IOException;

    /** Appends a record to the currently open appender; callers must open first. */
    void write(MantisRecord record);

    /** Closes the current appender and returns metadata about the written file. */
    MantisDataFile close() throws IOException, UncheckedIOException;

    /** Returns true when no file appender is currently open. */
    boolean isClosed();

    /** Returns the current size (in bytes) of the open file; may be expensive to compute. */
    long length();

    /** Returns the partition key of the current file; null for unpartitioned tables. */
    StructLike getPartitionKey();
}
| 8,508 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/MantisDataFile.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import java.io.Serializable;
import javax.annotation.Nullable;
import lombok.Value;
import org.apache.iceberg.DataFile;
/**
 * Serializable wrapper pairing an Iceberg {@link DataFile} with the lowest
 * record timestamp observed while that file was written.
 */
@Value
public class MantisDataFile implements Serializable {
    // Iceberg metadata describing the written file (path, size, metrics, partition).
    DataFile dataFile;
    // Minimum MantisRecord timestamp seen during the write; null when no record carried one.
    @Nullable
    Long lowWatermark;
}
| 8,509 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/DefaultIcebergWriter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.runtime.WorkerInfo;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.function.BiFunction;
import javax.annotation.Nullable;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.data.parquet.GenericParquetWriter;
import org.apache.iceberg.io.FileAppender;
import org.apache.iceberg.io.LocationProvider;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.parquet.Parquet;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Class for writing {@link Record}s to Iceberg via a HDFS-compatible backend.
 * For example, this class may be used with an S3 compatible filesystem library
 * which progressively uploads (multipart) to S3 on each write operation for
 * optimizing latencies.
 * <p>
 * Users have the flexibility to choose the semantics of opening, writing, and closing
 * this Writer, for example, closing the underlying appender after some number
 * of Bytes written and opening a new appender.
 */
public class DefaultIcebergWriter implements IcebergWriter {

    private static final Logger logger = LoggerFactory.getLogger(DefaultIcebergWriter.class);

    // Snapshot of the table's properties, defaulted with ZSTD parquet compression when unset.
    private final Map<String, String> tableProperties = new HashMap<>();
    private final WriterConfig config;
    private final WorkerInfo workerInfo;
    private final Table table;
    private final PartitionSpec spec;
    private final FileFormat format;
    private final LocationProvider locationProvider;

    // State for the currently open file; appender/file are null when closed.
    private FileAppender<Record> appender;
    private OutputFile file;
    private StructLike partitionKey;

    // Minimum record timestamp seen since the current file was opened; null if none.
    @Nullable
    private Long lowWatermark;

    /**
     * Creates a writer bound to a specific Iceberg {@link Table}.
     * The file format is taken from {@link WriterConfig#getWriterFileFormat()}.
     */
    public DefaultIcebergWriter(
            WriterConfig config,
            WorkerInfo workerInfo,
            Table table,
            LocationProvider locationProvider) {
        this.config = config;
        this.workerInfo = workerInfo;
        this.table = table;
        this.spec = table.spec();
        this.format = FileFormat.valueOf(config.getWriterFileFormat());
        this.locationProvider = locationProvider;
        this.tableProperties.putAll(table.properties());
        if (!this.tableProperties.containsKey(PARQUET_COMPRESSION)) {
            // ZSTD is the recommended default compression
            tableProperties.put(PARQUET_COMPRESSION, CompressionCodecName.ZSTD.name());
        }
    }

    /**
     * Opens a {@link FileAppender} for a specific {@link FileFormat}.
     * <p>
     * A filename is automatically generated for this appender.
     * <p>
     * Supports Parquet. Avro, Orc, and others unsupported.
     */
    @Override
    public void open() throws IOException {
        open(null);
    }

    /**
     * Opens a {@link FileAppender} using a {@link StructLike} partition key
     * for a specific {@link FileFormat}.
     * <p>
     * A filename is automatically generated for this appender.
     * <p>
     * Supports Parquet. Avro, Orc, and others unsupported.
     *
     * @throws UnsupportedOperationException for any format other than Parquet.
     */
    @Override
    public void open(StructLike newPartitionKey) throws IOException {
        partitionKey = newPartitionKey;
        Path path = new Path(table.location(), generateFilename());
        // NOTE(review): 'location' from the LocationProvider is only logged; the
        // output file is created from the raw path below — confirm this is intended.
        String location = locationProvider.newDataLocation(path.toString());
        logger.info("opening new {} file appender {}", format, location);
        file = table.io().newOutputFile(path.toString());

        switch (format) {
            case PARQUET:
                appender = Parquet.write(file)
                        .schema(table.schema())
                        .createWriterFunc(GenericParquetWriter::buildWriter)
                        .setAll(tableProperties)
                        .overwrite()
                        .build();
                // A fresh file has not seen any record timestamps yet.
                lowWatermark = null;
                break;
            case AVRO:
            default:
                throw new UnsupportedOperationException("Cannot write using an unsupported file format " + format);
        }
    }

    /**
     * Appends a record and folds its (nullable) timestamp into the running low watermark.
     */
    @Override
    public void write(MantisRecord record) {
        appender.add(record.getRecord());
        lowWatermark = minNullSafe(lowWatermark, record.getTimestamp());
    }

    /**
     * Closes the currently opened file appender and builds a DataFile.
     * <p>
     * Users are expected to {@link IcebergWriter#open()} a new file appender for this writer
     * if they want to continue writing. Users can check for status of the file appender
     * using {@link IcebergWriter#isClosed()}.
     *
     * @return a DataFile representing metadata about the records written,
     *         or {@code null} if the writer was already closed.
     */
    @Override
    public MantisDataFile close() throws IOException, UncheckedIOException {
        if (isClosed()) {
            return null;
        }

        // Calls to FileAppender#close can fail if the backing file system fails to close.
        // For example, this can happen for an S3-backed file system where it might fail
        // to GET the status of the file. The file would have already been closed.
        // Callers should open a new appender.
        try {
            appender.close();
            final DataFile dataFile = DataFiles.builder(spec)
                    .withPath(file.location())
                    .withInputFile(file.toInputFile())
                    .withFileSizeInBytes(appender.length())
                    // Consistent with generatePartitionPath: unpartitioned specs carry no key.
                    .withPartition(spec.isUnpartitioned() ? null : partitionKey)
                    .withMetrics(appender.metrics())
                    .withSplitOffsets(appender.splitOffsets())
                    .build();
            return new MantisDataFile(dataFile, lowWatermark);
        } finally {
            // Always mark the writer closed, even if close/build above threw.
            appender = null;
            file = null;
        }
    }

    /** Returns true when no file appender is currently open. */
    public boolean isClosed() {
        return appender == null;
    }

    /**
     * Returns the current file size (in Bytes) written using this writer's appender.
     * <p>
     * Users should be careful calling this method in a tight loop because it can
     * be expensive depending on the file format, for example in Parquet.
     *
     * @return current file size (in Bytes); 0 when closed.
     */
    public long length() throws UncheckedIOException {
        return appender == null ? 0 : appender.length();
    }

    /**
     * Returns the partition key for which this record is partitioned in an Iceberg table.
     *
     * @return StructLike for partitioned tables; null for unpartitioned tables
     */
    public StructLike getPartitionKey() {
        return partitionKey;
    }

    /**
     * Generate a Parquet filename with attributes which make it more friendly to determine
     * the source of the file. For example, if the caller exits unexpectedly and leaves
     * files in the system, it's possible to identify them through a recursive listing.
     */
    private String generateFilename() {
        return generateDataPath(
                generatePartitionPath(
                        format.addExtension(String.format("%s_%s_%s_%s_%s",
                                workerInfo.getJobId(),
                                workerInfo.getStageNumber(),
                                workerInfo.getWorkerIndex(),
                                workerInfo.getWorkerNumber(),
                                UUID.randomUUID()))));
    }

    // Prefixes the partition-relative path with the table's data directory.
    private String generateDataPath(String partitionPath) {
        return String.format("data/%s", partitionPath);
    }

    // For partitioned specs, nests the file under its partition path; otherwise bare filename.
    private String generatePartitionPath(String filename) {
        if (spec.isUnpartitioned()) {
            return filename;
        }
        return String.format("/%s/%s", spec.partitionToPath(partitionKey), filename);
    }

    /** Null-safe minimum: returns the non-null argument when only one is present. */
    public static Long minNullSafe(@Nullable Long v1, @Nullable Long v2) {
        return compareNullSafe(v1, v2, Math::min);
    }

    /** Null-safe maximum: returns the non-null argument when only one is present. */
    public static Long maxNullSafe(@Nullable Long v1, @Nullable Long v2) {
        return compareNullSafe(v1, v2, Math::max);
    }

    // Applies the comparator only when both values are present; otherwise
    // returns whichever is non-null (or null when both are null).
    private static Long compareNullSafe(
            @Nullable Long v1, @Nullable Long v2, BiFunction<Long, Long, Long> comparator) {
        if (v1 != null && v2 != null) {
            return comparator.apply(v1, v2);
        } else if (v1 != null) {
            return v1;
        } else {
            return v2;
        }
    }
}
| 8,510 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/metrics/WriterMetrics.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.metrics;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
/**
 * Metrics facade for the Iceberg writer stage: registers counters and gauges
 * with the global {@link MetricsRegistry} and dispatches updates by metric name.
 * Unknown metric names are silently ignored by all update methods.
 */
public class WriterMetrics {

    public static final String OPEN_SUCCESS_COUNT = "openSuccessCount";
    public static final String OPEN_FAILURE_COUNT = "openFailureCount";
    public static final String WRITE_SUCCESS_COUNT = "writeSuccessCount";
    public static final String WRITE_FAILURE_COUNT = "writeFailureCount";
    public static final String BATCH_SUCCESS_COUNT = "batchSuccessCount";
    public static final String BATCH_FAILURE_COUNT = "batchFailureCount";
    public static final String BATCH_SIZE = "batchSize";
    public static final String BATCH_SIZE_BYTES = "batchSizeBytes";

    private final Counter openSuccessCount;
    private final Counter openFailureCount;
    private final Counter writeSuccessCount;
    private final Counter writeFailureCount;
    private final Counter batchSuccessCount;
    private final Counter batchFailureCount;
    private final Gauge batchSize;
    private final Gauge batchSizeBytes;

    public WriterMetrics() {
        Metrics metrics = new Metrics.Builder()
                .name(WriterMetrics.class.getCanonicalName())
                .addCounter(OPEN_SUCCESS_COUNT)
                .addCounter(OPEN_FAILURE_COUNT)
                .addCounter(WRITE_SUCCESS_COUNT)
                .addCounter(WRITE_FAILURE_COUNT)
                .addCounter(BATCH_SUCCESS_COUNT)
                .addCounter(BATCH_FAILURE_COUNT)
                .addGauge(BATCH_SIZE)
                .addGauge(BATCH_SIZE_BYTES)
                .build();
        // registerAndGet returns the canonical instance if this name was registered before.
        metrics = MetricsRegistry.getInstance().registerAndGet(metrics);
        openSuccessCount = metrics.getCounter(OPEN_SUCCESS_COUNT);
        openFailureCount = metrics.getCounter(OPEN_FAILURE_COUNT);
        writeSuccessCount = metrics.getCounter(WRITE_SUCCESS_COUNT);
        writeFailureCount = metrics.getCounter(WRITE_FAILURE_COUNT);
        batchSuccessCount = metrics.getCounter(BATCH_SUCCESS_COUNT);
        batchFailureCount = metrics.getCounter(BATCH_FAILURE_COUNT);
        batchSize = metrics.getGauge(BATCH_SIZE);
        batchSizeBytes = metrics.getGauge(BATCH_SIZE_BYTES);
    }

    /**
     * Sets the named gauge to the given value; unknown names are ignored.
     */
    public void setGauge(final String metric, final long value) {
        switch (metric) {
            case BATCH_SIZE:
                batchSize.set(value);
                break;
            case BATCH_SIZE_BYTES:
                batchSizeBytes.set(value);
                break;
            default:
                break;
        }
    }

    /**
     * Increments the named counter by one; unknown names are ignored.
     */
    public void increment(final String metric) {
        // Delegate so the name→counter dispatch lives in exactly one place.
        increment(metric, 1L);
    }

    /**
     * Increments the named counter by {@code value}; unknown names are ignored.
     */
    public void increment(final String metric, final long value) {
        switch (metric) {
            case OPEN_SUCCESS_COUNT:
                openSuccessCount.increment(value);
                break;
            case OPEN_FAILURE_COUNT:
                openFailureCount.increment(value);
                break;
            case WRITE_SUCCESS_COUNT:
                writeSuccessCount.increment(value);
                break;
            case WRITE_FAILURE_COUNT:
                writeFailureCount.increment(value);
                break;
            case BATCH_SUCCESS_COUNT:
                batchSuccessCount.increment(value);
                break;
            case BATCH_FAILURE_COUNT:
                batchFailureCount.increment(value);
                break;
            default:
                break;
        }
    }
}
| 8,511 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/config/WriterProperties.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.config;
import org.apache.iceberg.FileFormat;
/**
 * Property key names and default values for an Iceberg Writer.
 */
public class WriterProperties {

    /** Maximum number of rows that should exist in a file. */
    public static final String WRITER_ROW_GROUP_SIZE = "writerRowGroupSize";
    public static final int WRITER_ROW_GROUP_SIZE_DEFAULT = 100;
    public static final String WRITER_ROW_GROUP_SIZE_DESCRIPTION =
            String.format("Number of rows to chunk before checking for file size (default: %s)",
                    WRITER_ROW_GROUP_SIZE_DEFAULT);

    /** Flush frequency by size (in Bytes). */
    public static final String WRITER_FLUSH_FREQUENCY_BYTES = "writerFlushFrequencyBytes";
    // TODO: Change to long.
    public static final String WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT = "134217728"; // 128 MiB
    public static final String WRITER_FLUSH_FREQUENCY_BYTES_DESCRIPTION =
            String.format("Flush frequency by size in Bytes (default: %s)",
                    WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT);

    /** Flush frequency by time (in milliseconds). */
    public static final String WRITER_FLUSH_FREQUENCY_MSEC = "writerFlushFrequencyMsec";
    // TODO: Change to long.
    public static final String WRITER_FLUSH_FREQUENCY_MSEC_DEFAULT = "60000"; // 1 min
    public static final String WRITER_FLUSH_FREQUENCY_MSEC_DESCRIPTION =
            String.format("Flush frequency by time in milliseconds (default: %s)",
                    WRITER_FLUSH_FREQUENCY_MSEC_DEFAULT);

    /** File format for writing data files to backing Iceberg store. */
    public static final String WRITER_FILE_FORMAT = "writerFileFormat";
    public static final String WRITER_FILE_FORMAT_DEFAULT = FileFormat.PARQUET.name();
    public static final String WRITER_FILE_FORMAT_DESCRIPTION =
            String.format("File format for writing data files to backing Iceberg store (default: %s)",
                    WRITER_FILE_FORMAT_DEFAULT);

    /** Maximum number of writers that should exist per worker. */
    public static final String WRITER_MAXIMUM_POOL_SIZE = "writerMaximumPoolSize";
    public static final int WRITER_MAXIMUM_POOL_SIZE_DEFAULT = 5;
    public static final String WRITER_MAXIMUM_POOL_SIZE_DESCRIPTION =
            String.format("Maximum number of writers that should exist per worker (default: %s)",
                    WRITER_MAXIMUM_POOL_SIZE_DEFAULT);

    /** Toggle for low-watermark tracking; no default or description is defined here. */
    public static final String WATERMARK_ENABLED = "watermarkEnabled";

    /** Constants holder; not instantiable. */
    private WriterProperties() {
    }
}
| 8,512 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/config/WriterConfig.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.config;
import static io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties.*;
import io.mantisrx.connector.iceberg.sink.config.SinkConfig;
import io.mantisrx.runtime.parameter.Parameters;
import org.apache.hadoop.conf.Configuration;
/**
 * Config for controlling Iceberg Writer semantics.
 *
 * <p>Values are either resolved from Mantis stage {@link Parameters} or supplied
 * explicitly; in both cases the instance is immutable once constructed.
 */
public class WriterConfig extends SinkConfig {

    private final int rowGroupSize;
    private final long flushFrequencyBytes;
    private final long flushFrequencyMsec;
    private final String fileFormat;
    private final int maximumPoolSize;
    private final Configuration hadoopConfiguration;

    /**
     * Creates an instance from {@link Parameters} derived from the current Mantis Stage's {@code Context}.
     */
    public WriterConfig(Parameters parameters, Configuration hadoopConfig) {
        super(parameters);
        // Flush frequencies are declared as String stage parameters, so parse them here.
        this.rowGroupSize = (int) parameters.get(WRITER_ROW_GROUP_SIZE);
        this.flushFrequencyBytes = Long.parseLong((String) parameters.get(WRITER_FLUSH_FREQUENCY_BYTES));
        this.flushFrequencyMsec = Long.parseLong((String) parameters.get(WRITER_FLUSH_FREQUENCY_MSEC));
        this.fileFormat = (String) parameters.get(WRITER_FILE_FORMAT);
        this.maximumPoolSize = (int) parameters.get(WRITER_MAXIMUM_POOL_SIZE);
        this.hadoopConfiguration = hadoopConfig;
    }

    /**
     * Creates an instance from explicit values, bypassing Mantis parameter resolution.
     */
    public WriterConfig(String catalog, String database, String table, int writerRowGroupSize,
                        long writerFlushFrequencyBytes, long writerFlushFrequencyMsec,
                        String writerFileFormat, int writerMaximumPoolSize,
                        Configuration hadoopConfig) {
        super(catalog, database, table);
        this.rowGroupSize = writerRowGroupSize;
        this.flushFrequencyBytes = writerFlushFrequencyBytes;
        this.flushFrequencyMsec = writerFlushFrequencyMsec;
        this.fileFormat = writerFileFormat;
        this.maximumPoolSize = writerMaximumPoolSize;
        this.hadoopConfiguration = hadoopConfig;
    }

    /**
     * Returns the maximum number of rows that should exist in a file.
     */
    public int getWriterRowGroupSize() {
        return rowGroupSize;
    }

    /**
     * Returns the flush frequency by size, in bytes.
     */
    public long getWriterFlushFrequencyBytes() {
        return flushFrequencyBytes;
    }

    /**
     * Returns the flush frequency by time, in milliseconds.
     */
    public long getWriterFlushFrequencyMsec() {
        return flushFrequencyMsec;
    }

    /**
     * Returns the file format for Iceberg writers.
     */
    public String getWriterFileFormat() {
        return fileFormat;
    }

    /**
     * Returns the maximum number of writers that should exist per worker.
     */
    public int getWriterMaximumPoolSize() {
        return maximumPoolSize;
    }

    /**
     * Returns a Hadoop configuration which has metadata for how and where to write files.
     */
    public Configuration getHadoopConfig() {
        return hadoopConfiguration;
    }
}
| 8,513 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/partitioner/NoOpPartitioner.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.partitioner;
import org.apache.iceberg.StructLike;
/**
 * Partitioner to use for unpartitioned Iceberg tables.
 */
public class NoOpPartitioner implements Partitioner {

    /**
     * Always returns {@code null}: an unpartitioned table has no partition key,
     * regardless of the incoming record.
     */
    @Override
    public StructLike partition(StructLike record) {
        return null;
    }
}
| 8,514 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/partitioner/Partitioner.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.partitioner;
import org.apache.iceberg.StructLike;
/**
 * Derives the partition key for a record destined for an Iceberg table.
 */
public interface Partitioner {

    /**
     * Returns the partition key for {@code record}; implementations for
     * unpartitioned tables may return {@code null}.
     */
    StructLike partition(StructLike record);
}
| 8,515 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/partitioner/PartitionerFactory.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.partitioner;
import org.apache.iceberg.Table;
/**
 * Factory for creating {@link Partitioner}s for a given Iceberg table.
 */
public interface PartitionerFactory {

    /**
     * Returns a {@link Partitioner} appropriate for the given Iceberg {@code table}.
     */
    Partitioner getPartitioner(Table table);
}
| 8,516 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/pool/FixedIcebergWriterPool.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.pool;
import io.mantisrx.connector.iceberg.sink.writer.IcebergWriter;
import io.mantisrx.connector.iceberg.sink.writer.MantisDataFile;
import io.mantisrx.connector.iceberg.sink.writer.MantisRecord;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.connector.iceberg.sink.writer.factory.IcebergWriterFactory;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.StructLike;
/**
 * A service that delegates operations to {@link IcebergWriter}s.
 *
 * <p>Writers are keyed by partition. Writers can be added to the pool up to a maximum
 * size, after which attempts to open writers for new partitions are rejected.
 */
public class FixedIcebergWriterPool implements IcebergWriterPool {

    private final IcebergWriterFactory factory;
    private final Map<StructLike, IcebergWriter> pool;
    private final long flushFrequencyBytes;
    private final int maximumPoolSize;

    public FixedIcebergWriterPool(IcebergWriterFactory factory, WriterConfig writerConfig) {
        this(factory, writerConfig.getWriterFlushFrequencyBytes(), writerConfig.getWriterMaximumPoolSize());
    }

    public FixedIcebergWriterPool(IcebergWriterFactory factory, long flushFrequencyBytes, int maximumPoolSize) {
        this.factory = factory;
        this.flushFrequencyBytes = flushFrequencyBytes;
        this.maximumPoolSize = maximumPoolSize;
        this.pool = new HashMap<>(this.maximumPoolSize);
    }

    /**
     * Opens a writer for the given partition; no-op if one is already open.
     *
     * @throws IOException if the pool is at capacity and no writer exists for this
     *                     partition, or if the underlying writer fails to open.
     */
    @Override
    public void open(StructLike partition) throws IOException {
        // Check for an existing writer before the capacity check (bug fix: the capacity
        // check used to run first, so re-opening an already-open partition failed
        // spuriously whenever the pool was full).
        if (!isClosed(partition)) {
            return;
        }
        if (pool.size() >= maximumPoolSize) {
            throw new IOException("problem opening writer; maximum writer pool size (" + maximumPoolSize + ") exceeded");
        }
        IcebergWriter writer = factory.newIcebergWriter();
        writer.open(partition);
        pool.put(partition, writer);
    }

    /**
     * Writes the record using the writer associated with the given partition.
     *
     * @throws RuntimeException if no writer was opened for the partition.
     */
    @Override
    public void write(StructLike partition, MantisRecord record) {
        IcebergWriter writer = pool.get(partition);
        if (writer == null) {
            throw new RuntimeException("writer does not exist in writer pool");
        }
        writer.write(record);
    }

    /**
     * Closes the partition's writer and removes it from the pool, returning the
     * resulting data file (may be {@code null} if the writer was already closed).
     *
     * @throws RuntimeException if no writer was opened for the partition.
     */
    @Override
    public MantisDataFile close(StructLike partition) throws IOException, UncheckedIOException {
        IcebergWriter writer = pool.get(partition);
        if (writer == null) {
            throw new RuntimeException("writer does not exist in writer pool");
        }
        try {
            return writer.close();
        } finally {
            // Remove even if close() throws so a broken writer is never reused.
            pool.remove(partition);
        }
    }

    /**
     * Attempts to close all writers and produce {@link DataFile}s. If a writer is already closed, then it will
     * produce a {@code null} which will be excluded from the resulting list.
     */
    @Override
    public List<MantisDataFile> closeAll() throws IOException, UncheckedIOException {
        List<MantisDataFile> dataFiles = new ArrayList<>();
        // Iterate over a snapshot of the keys: close(partition) removes entries from
        // the pool, and removing while iterating the live keySet() view would throw
        // ConcurrentModificationException (bug fix).
        for (StructLike partition : new ArrayList<>(pool.keySet())) {
            MantisDataFile dataFile = close(partition);
            if (dataFile != null) {
                dataFiles.add(dataFile);
            }
        }
        return dataFiles;
    }

    /**
     * Returns a set of all writers in the pool.
     *
     * <p>Note: this is a live view of the pool's keys, not a defensive copy.
     */
    @Override
    public Set<StructLike> getWriters() {
        return pool.keySet();
    }

    /**
     * Returns a set of writers whose lengths are greater than {@link WriterConfig#getWriterFlushFrequencyBytes()}.
     */
    @Override
    public Set<StructLike> getFlushableWriters() {
        return pool.entrySet().stream()
                .filter(entry -> entry.getValue().length() >= flushFrequencyBytes)
                .map(Map.Entry::getKey)
                .collect(Collectors.toSet());
    }

    /**
     * Returns {@code true} when no open writer exists for the given partition.
     */
    @Override
    public boolean isClosed(StructLike partition) {
        return !pool.containsKey(partition);
    }
}
| 8,517 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/pool/IcebergWriterPool.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.pool;
import io.mantisrx.connector.iceberg.sink.writer.MantisDataFile;
import io.mantisrx.connector.iceberg.sink.writer.MantisRecord;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.List;
import java.util.Set;
import org.apache.iceberg.StructLike;
/**
 * Pool of partition-keyed {@link io.mantisrx.connector.iceberg.sink.writer.IcebergWriter}s.
 */
public interface IcebergWriterPool {

    /** Opens a writer for the given partition. */
    void open(StructLike partition) throws IOException;

    /** Writes a record using the writer associated with the given partition. */
    void write(StructLike partition, MantisRecord record);

    /** Closes the partition's writer and returns the resulting data file. */
    MantisDataFile close(StructLike partition) throws IOException, UncheckedIOException;

    /** Closes all writers in the pool, returning the produced data files. */
    List<MantisDataFile> closeAll() throws IOException, UncheckedIOException;

    /** Returns the partitions that currently have writers in the pool. */
    Set<StructLike> getWriters();

    /** Returns the partitions whose writers are ready to be flushed. */
    Set<StructLike> getFlushableWriters();

    /** Returns {@code true} when no open writer exists for the given partition. */
    boolean isClosed(StructLike partition);
}
| 8,518 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/factory/IcebergWriterFactory.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.factory;
import io.mantisrx.connector.iceberg.sink.writer.IcebergWriter;
/**
 * Factory for creating {@link IcebergWriter} instances.
 */
public interface IcebergWriterFactory {

    /** Returns a new {@link IcebergWriter}. */
    IcebergWriter newIcebergWriter();
}
| 8,519 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/factory/DefaultIcebergWriterFactory.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.factory;
import io.mantisrx.connector.iceberg.sink.writer.DefaultIcebergWriter;
import io.mantisrx.connector.iceberg.sink.writer.IcebergWriter;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.runtime.WorkerInfo;
import org.apache.iceberg.Table;
import org.apache.iceberg.io.LocationProvider;
/**
 * {@link IcebergWriterFactory} that produces {@link DefaultIcebergWriter}s bound to a
 * single table, worker, and location provider.
 */
public class DefaultIcebergWriterFactory implements IcebergWriterFactory {

    private final WriterConfig writerConfig;
    private final WorkerInfo worker;
    private final Table icebergTable;
    private final LocationProvider locations;

    public DefaultIcebergWriterFactory(
            WriterConfig config,
            WorkerInfo workerInfo,
            Table table,
            LocationProvider locationProvider) {
        this.writerConfig = config;
        this.worker = workerInfo;
        this.icebergTable = table;
        this.locations = locationProvider;
    }

    /**
     * Returns a fresh {@link DefaultIcebergWriter} configured with this factory's state.
     */
    @Override
    public IcebergWriter newIcebergWriter() {
        return new DefaultIcebergWriter(writerConfig, worker, icebergTable, locations);
    }
}
| 8,520 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/IcebergCommitter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer;
import static io.mantisrx.connector.iceberg.sink.writer.DefaultIcebergWriter.maxNullSafe;
import static io.mantisrx.connector.iceberg.sink.writer.DefaultIcebergWriter.minNullSafe;
import io.mantisrx.connector.iceberg.sink.committer.config.CommitterConfig;
import io.mantisrx.connector.iceberg.sink.committer.watermarks.WatermarkExtractor;
import io.mantisrx.connector.iceberg.sink.writer.MantisDataFile;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Table;
import org.apache.iceberg.Transaction;
/**
 * Commits {@link DataFile}s for Iceberg tables.
 *
 * <p>This class uses Iceberg's Table API and only supports Table#append operations.
 */
@Slf4j
public class IcebergCommitter {

    private final Table table;
    private final CommitterConfig config;
    private final WatermarkExtractor watermarkExtractor;

    public IcebergCommitter(
            Table table,
            CommitterConfig committerConfig,
            WatermarkExtractor watermarkExtractor) {
        this.table = table;
        this.config = committerConfig;
        this.watermarkExtractor = watermarkExtractor;
    }

    /**
     * Uses Iceberg's Table API to append DataFiles and commit metadata to Iceberg.
     *
     * @return the summary of the table's current snapshot after the commit, or an
     *         empty map if the table has no current snapshot.
     */
    public Map<String, Object> commit(List<MantisDataFile> dataFiles) {
        Transaction transaction = table.newTransaction();
        AppendFiles appender = transaction.newAppend();
        for (MantisDataFile dataFile : dataFiles) {
            appender.appendFile(dataFile.getDataFile());
        }
        appender.commit();
        log.info(
                "Iceberg committer {}.{} appended {} data files to transaction",
                config.getDatabase(),
                config.getTable(),
                dataFiles.size());

        // Advance the stored watermark to the max of the table's current watermark and
        // the minimum low-watermark across the appended files (null-safe on both sides).
        Long lowWatermark = null;
        for (MantisDataFile dataFile : dataFiles) {
            lowWatermark = minNullSafe(lowWatermark, dataFile.getLowWatermark());
        }
        Long finalWatermark = maxNullSafe(watermarkExtractor.getWatermark(transaction), lowWatermark);
        if (finalWatermark != null) {
            watermarkExtractor.setWatermark(transaction, finalWatermark);
        }
        transaction.commitTransaction();

        return table.currentSnapshot() == null
                ? new HashMap<>()
                : new HashMap<>(table.currentSnapshot().summary());
    }
}
| 8,521 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/IcebergCommitterStage.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer;
import io.mantisrx.connector.iceberg.sink.committer.config.CommitterConfig;
import io.mantisrx.connector.iceberg.sink.committer.config.CommitterProperties;
import io.mantisrx.connector.iceberg.sink.committer.metrics.CommitterMetrics;
import io.mantisrx.connector.iceberg.sink.committer.watermarks.WatermarkExtractor;
import io.mantisrx.connector.iceberg.sink.config.SinkProperties;
import io.mantisrx.connector.iceberg.sink.writer.MantisDataFile;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.codec.JacksonCodecs;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Scheduler;
import rx.schedulers.Schedulers;
/**
 * Processing stage which commits table metadata to Iceberg on a time interval.
 */
public class IcebergCommitterStage implements ScalarComputation<MantisDataFile, Map<String, Object>> {

    private static final Logger logger = LoggerFactory.getLogger(IcebergCommitterStage.class);

    // Created in init(Context); null until the stage has been initialized.
    private Transformer transformer;

    /**
     * Returns a config for this stage which has encoding/decoding semantics and parameter definitions.
     */
    public static ScalarToScalar.Config<MantisDataFile, Map<String, Object>> config() {
        return new ScalarToScalar.Config<MantisDataFile, Map<String, Object>>()
                .description("") // NOTE(review): empty description — consider a meaningful one.
                .codec(JacksonCodecs.mapStringObject())
                .withParameters(parameters());
    }

    /**
     * Returns a list of parameter definitions for this stage.
     *
     * <p>Catalog, database, and table are required. Commit frequency falls back to
     * {@link CommitterProperties#COMMIT_FREQUENCY_MS_DEFAULT}; the watermark property
     * key is optional and has no default here.
     */
    public static List<ParameterDefinition<?>> parameters() {
        return Arrays.asList(
                new StringParameter().name(SinkProperties.SINK_CATALOG)
                        .description(SinkProperties.SINK_CATALOG_DESCRIPTION)
                        .validator(Validators.notNullOrEmpty())
                        .required()
                        .build(),
                new StringParameter().name(SinkProperties.SINK_DATABASE)
                        .description(SinkProperties.SINK_DATABASE_DESCRIPTION)
                        .validator(Validators.notNullOrEmpty())
                        .required()
                        .build(),
                new StringParameter().name(SinkProperties.SINK_TABLE)
                        .description(SinkProperties.SINK_TABLE_DESCRIPTION)
                        .validator(Validators.notNullOrEmpty())
                        .required()
                        .build(),
                new StringParameter().name(CommitterProperties.COMMIT_FREQUENCY_MS)
                        .description(CommitterProperties.COMMIT_FREQUENCY_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .defaultValue(CommitterProperties.COMMIT_FREQUENCY_MS_DEFAULT)
                        .build(),
                new StringParameter().name(CommitterProperties.WATERMARK_PROPERTY_KEY)
                        .description(CommitterProperties.WATERMARK_PROPERTY_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .build()
        );
    }

    /**
     * Use this to instantiate a new transformer from a given {@link Context}.
     *
     * <p>Resolves the Iceberg {@link Catalog} and {@link WatermarkExtractor} from the
     * context's service locator and loads the target table, which must already exist.
     */
    public static Transformer newTransformer(Context context) {
        CommitterConfig config = new CommitterConfig(context.getParameters());
        CommitterMetrics metrics = new CommitterMetrics();
        Catalog catalog = context.getServiceLocator().service(Catalog.class);
        TableIdentifier id = TableIdentifier.of(config.getCatalog(), config.getDatabase(), config.getTable());
        // Throws if the table does not exist; tables are expected to be created out-of-band.
        Table table = catalog.loadTable(id);
        WatermarkExtractor watermarkExtractor = context.getServiceLocator().service(WatermarkExtractor.class);
        IcebergCommitter committer = new IcebergCommitter(table, config, watermarkExtractor);
        return new Transformer(config, metrics, committer, Schedulers.computation());
    }

    public IcebergCommitterStage() {
    }

    /**
     * Uses the provided Mantis Context to inject configuration and creates an underlying table appender.
     *
     * This method depends on a Hadoop Configuration and Iceberg Catalog, both injected
     * from the Context's service locator.
     *
     * Note that this method expects an Iceberg Table to have been previously created out-of-band,
     * otherwise initialization will fail. Users should prefer to create tables
     * out-of-band so they can be versioned alongside their schemas.
     */
    @Override
    public void init(Context context) {
        transformer = newTransformer(context);
    }

    /**
     * Applies the committer transformer to the incoming stream of data files.
     */
    @Override
    public Observable<Map<String, Object>> call(Context context, Observable<MantisDataFile> dataFileObservable) {
        return dataFileObservable.compose(transformer);
    }

    /**
     * Reactive Transformer for committing metadata to Iceberg.
     *
     * Users may use this class independently of this Stage, for example, if they want to
     * {@link Observable#compose(Observable.Transformer)} this transformer with a flow into
     * an existing Stage. One benefit of this co-location is to avoid extra network
     * cost from worker-to-worker communication, trading off debuggability.
     */
    public static class Transformer implements Observable.Transformer<MantisDataFile, Map<String, Object>> {

        private final CommitterConfig config;
        private final CommitterMetrics metrics;
        private final IcebergCommitter committer;
        private final Scheduler scheduler;

        public Transformer(CommitterConfig config,
                           CommitterMetrics metrics,
                           IcebergCommitter committer,
                           Scheduler scheduler) {
            this.config = config;
            this.metrics = metrics;
            this.committer = committer;
            this.scheduler = scheduler;
            logger.info("Initialized IcebergCommitterStage with config: {}", config);
        }

        /**
         * Periodically commits DataFiles to Iceberg as a batch.
         */
        @Override
        public Observable<Map<String, Object>> call(Observable<MantisDataFile> source) {
            return source
                    // Collect files on a timed window; an empty window still emits a list.
                    .buffer(config.getCommitFrequencyMs(), TimeUnit.MILLISECONDS, scheduler)
                    .doOnNext(dataFiles -> metrics.increment(CommitterMetrics.INVOCATION_COUNT))
                    // Skip empty windows so no empty commit is attempted.
                    .filter(dataFiles -> !dataFiles.isEmpty())
                    .map(dataFiles -> {
                        try {
                            long start = scheduler.now();
                            Map<String, Object> summary = committer.commit(dataFiles);
                            long now = scheduler.now();
                            metrics.record(CommitterMetrics.COMMIT_LATENCY_MSEC, now - start, TimeUnit.MILLISECONDS);
                            metrics.setGauge(CommitterMetrics.COMMIT_BATCH_SIZE, dataFiles.size());
                            return summary;
                        } catch (RuntimeException e) {
                            // A failed commit is logged and mapped to an empty summary,
                            // which the filter below drops — the stream keeps running.
                            metrics.increment(CommitterMetrics.COMMIT_FAILURE_COUNT);
                            logger.error("error committing to Iceberg table {}.{}.{}",
                                    config.getCatalog(),
                                    config.getDatabase(),
                                    config.getTable(),
                                    e);
                            return new HashMap<String, Object>();
                        }
                    })
                    // Only successful commits (non-empty summaries) flow downstream.
                    .filter(summary -> !summary.isEmpty())
                    .doOnNext(summary -> {
                        metrics.increment(CommitterMetrics.COMMIT_SUCCESS_COUNT);
                        logger.info("committed to table {}.{}.{} with summary: {}",
                                config.getCatalog(),
                                config.getDatabase(),
                                config.getTable(),
                                summary);
                    });
        }
    }
}
| 8,522 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/metrics/CommitterMetrics.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer.metrics;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.Timer;
import java.util.concurrent.TimeUnit;
/**
 * Metrics published by the Iceberg committer stage: invocation and commit outcome
 * counters, commit latency, and the size of the last committed batch.
 */
public class CommitterMetrics {

    public static final String INVOCATION_COUNT = "invocationCount";
    public static final String COMMIT_SUCCESS_COUNT = "commitSuccessCount";
    public static final String COMMIT_FAILURE_COUNT = "commitFailureCount";
    public static final String COMMIT_LATENCY_MSEC = "commitLatencyMsec";
    public static final String COMMIT_BATCH_SIZE = "commitBatchSize";

    private final Counter invocationCount;
    private final Counter commitSuccessCount;
    private final Counter commitFailureCount;
    private final Timer commitLatencyMsec;
    private final Gauge commitBatchSize;

    public CommitterMetrics() {
        // Register the metric group, then keep handles to the registered instances.
        Metrics registered = MetricsRegistry.getInstance().registerAndGet(
                new Metrics.Builder()
                        .name(CommitterMetrics.class.getCanonicalName())
                        .addCounter(INVOCATION_COUNT)
                        .addCounter(COMMIT_SUCCESS_COUNT)
                        .addCounter(COMMIT_FAILURE_COUNT)
                        .addTimer(COMMIT_LATENCY_MSEC)
                        .addGauge(COMMIT_BATCH_SIZE)
                        .build());
        invocationCount = registered.getCounter(INVOCATION_COUNT);
        commitSuccessCount = registered.getCounter(COMMIT_SUCCESS_COUNT);
        commitFailureCount = registered.getCounter(COMMIT_FAILURE_COUNT);
        commitLatencyMsec = registered.getTimer(COMMIT_LATENCY_MSEC);
        commitBatchSize = registered.getGauge(COMMIT_BATCH_SIZE);
    }

    /**
     * Sets a gauge metric to {@code value}; unrecognized metric names are ignored.
     */
    public void setGauge(final String metric, final long value) {
        if (metric.equals(COMMIT_BATCH_SIZE)) {
            commitBatchSize.set(value);
        }
    }

    /**
     * Records a timer observation; unrecognized metric names are ignored.
     */
    public void record(final String metric, final long amount, TimeUnit unit) {
        if (metric.equals(COMMIT_LATENCY_MSEC)) {
            commitLatencyMsec.record(amount, unit);
        }
    }

    /**
     * Increments a counter by one; unrecognized metric names are ignored.
     */
    public void increment(final String metric) {
        if (metric.equals(INVOCATION_COUNT)) {
            invocationCount.increment();
        } else if (metric.equals(COMMIT_SUCCESS_COUNT)) {
            commitSuccessCount.increment();
        } else if (metric.equals(COMMIT_FAILURE_COUNT)) {
            commitFailureCount.increment();
        }
    }

    /**
     * Increments a counter or gauge by {@code value}; unrecognized metric names are ignored.
     */
    public void increment(final String metric, final long value) {
        if (metric.equals(INVOCATION_COUNT)) {
            invocationCount.increment(value);
        } else if (metric.equals(COMMIT_SUCCESS_COUNT)) {
            commitSuccessCount.increment(value);
        } else if (metric.equals(COMMIT_FAILURE_COUNT)) {
            commitFailureCount.increment(value);
        } else if (metric.equals(COMMIT_BATCH_SIZE)) {
            commitBatchSize.increment(value);
        }
    }
}
| 8,523 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/config/CommitterProperties.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer.config;
/**
* Property key names and default values for an Iceberg Committer.
*/
public class CommitterProperties {

    // Constants-only holder; never instantiated.
    private CommitterProperties() {
    }

    /**
     * Iceberg committer frequency by time in milliseconds.
     */
    public static final String COMMIT_FREQUENCY_MS = "commitFrequencyMs";

    // Parameter naming the table property under which the committer stores its watermark.
    public static final String WATERMARK_PROPERTY_KEY = "watermarkPropertyKey";

    // TODO: Change to long.
    // Default commit frequency; kept as a String because the parameter is declared as a String.
    public static final String COMMIT_FREQUENCY_MS_DEFAULT = "300000"; // 5 min

    public static final String COMMIT_FREQUENCY_DESCRIPTION =
            String.format("Iceberg Committer frequency by time in milliseconds (default: %s)",
                    COMMIT_FREQUENCY_MS_DEFAULT);

    public static final String WATERMARK_PROPERTY_DESCRIPTION =
            "Property key name for watermark value (default: null)";
}
| 8,524 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/config/CommitterConfig.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer.config;
import static io.mantisrx.connector.iceberg.sink.committer.config.CommitterProperties.COMMIT_FREQUENCY_MS;
import static io.mantisrx.connector.iceberg.sink.committer.config.CommitterProperties.WATERMARK_PROPERTY_KEY;
import io.mantisrx.connector.iceberg.sink.config.SinkConfig;
import io.mantisrx.runtime.parameter.Parameters;
import javax.annotation.Nullable;
/**
* Config for controlling Iceberg Committer semantics.
*/
public class CommitterConfig extends SinkConfig {

    // How often the committer commits accumulated files, in milliseconds.
    private final long commitFrequencyMs;

    // Table property key under which the watermark is stored; null disables watermarking.
    @Nullable
    private final String watermarkPropertyKey;

    /**
     * Creates an instance from {@link Parameters} derived from the current Mantis Stage's {@code Context}.
     */
    public CommitterConfig(Parameters parameters) {
        super(parameters);
        // Parameter is declared as a String (see CommitterProperties); parsed here.
        // NOTE(review): a missing/non-numeric value throws at construction — presumably the
        // parameter default guarantees presence; confirm against the stage's parameter wiring.
        this.commitFrequencyMs = Long.parseLong((String) parameters.get(COMMIT_FREQUENCY_MS));
        this.watermarkPropertyKey = (String) parameters.get(WATERMARK_PROPERTY_KEY, null);
    }

    /**
     * Returns a long representing Iceberg committer frequency by time (milliseconds).
     */
    public long getCommitFrequencyMs() {
        return commitFrequencyMs;
    }

    /**
     * Returns the table property key for the watermark, or {@code null} if none was configured.
     */
    @Nullable
    public String getWatermarkPropertyKey() {
        return watermarkPropertyKey;
    }
}
| 8,525 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/watermarks/WatermarkExtractor.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer.watermarks;
import javax.annotation.Nullable;
import org.apache.iceberg.Table;
import org.apache.iceberg.Transaction;
/**
 * Reads and writes a watermark value stored on an Iceberg table.
 */
public interface WatermarkExtractor {

    /**
     * Returns the watermark stored on the table, or {@code null} when absent or unreadable.
     */
    @Nullable
    Long getWatermark(Table table);

    /**
     * Returns the watermark visible to the given transaction's table view.
     */
    @Nullable
    default Long getWatermark(Transaction transaction) {
        return getWatermark(transaction.table());
    }

    /**
     * Stages the watermark on an open transaction. The caller is responsible for
     * committing the transaction itself.
     */
    void setWatermark(Transaction transaction, Long watermark);

    /**
     * Sets the watermark directly on a table in a self-contained transaction.
     */
    default void setWatermark(Table table, Long watermark) {
        Transaction transaction = table.newTransaction();
        setWatermark(transaction, watermark);
        // Bug fix: without commitTransaction() the staged property update was
        // discarded — Iceberg transactions only apply pending changes on commit.
        transaction.commitTransaction();
    }

    /**
     * Returns an extractor that reads nothing and writes nothing.
     */
    static WatermarkExtractor noop() {
        return new WatermarkExtractor() {
            @Nullable
            @Override
            public Long getWatermark(Table table) {
                return null;
            }

            @Override
            public void setWatermark(Transaction transaction, Long watermark) {
            }
        };
    }

    /**
     * Returns an extractor that stores the watermark under the given table property key.
     */
    static WatermarkExtractor propertiesAware(final String propertyKey) {
        return new PropertyAwareWatermarkExtractor(propertyKey);
    }
}
| 8,526 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/watermarks/PropertyAwareWatermarkExtractor.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer.watermarks;
import javax.annotation.Nullable;
import lombok.AccessLevel;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.iceberg.Table;
import org.apache.iceberg.Transaction;
import org.apache.iceberg.UpdateProperties;
/**
 * {@link WatermarkExtractor} that persists the watermark as a numeric table
 * property under a configurable key.
 */
@Slf4j
@RequiredArgsConstructor(access = AccessLevel.PACKAGE)
class PropertyAwareWatermarkExtractor implements WatermarkExtractor {

    // Table property key under which the watermark is stored.
    private final String propertyKey;

    @Nullable
    @Override
    public Long getWatermark(Table table) {
        String raw = table.properties().get(propertyKey);
        if (raw == null) {
            // A missing property is expected (e.g. a fresh table); previously this
            // path threw an NPE inside parseLong and logged a spurious error.
            return null;
        }
        try {
            return Long.parseLong(raw);
        } catch (Exception e) {
            log.error("Failed to extract watermark from the table", e);
            return null;
        }
    }

    @Override
    public void setWatermark(Transaction transaction, Long watermark) {
        // Stages the property change on the transaction; the transaction owner commits it.
        UpdateProperties updateProperties = transaction.updateProperties();
        updateProperties.set(propertyKey, Long.toString(watermark));
        updateProperties.commit();
        log.info("Iceberg committer for table={} set VTTS watermark to {}", transaction.table(),
                watermark);
    }
}
| 8,527 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/MantisSourceJobConnectorFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
public class MantisSourceJobConnectorFactory {

    /**
     * Creates a new {@link MantisSourceJobConnector} configured from the process
     * environment (see the connector's no-arg constructor).
     */
    public static MantisSourceJobConnector getConnector() {
        return new MantisSourceJobConnector();
    }
}
| 8,528 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/AbstractSourceJobSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.client.SinkConnectionsStatus;
import io.mantisrx.runtime.parameter.SinkParameters;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
/**
 * Base class for sources that consume events from a Mantis Source Job using an
 * MQL criterion. All overloads funnel into
 * {@link #getSourceJob(String, String, String, int, boolean, boolean, Observer, Optional)}.
 */
public abstract class AbstractSourceJobSource extends AbstractJobSource {

    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractSourceJobSource.class);

    /**
     * @deprecated use {@link #getSourceJob(String, String, String, int, Optional)}, forPartition & toPartition params are not used and will be removed in next release
     */
    @Deprecated
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId, int forPartition, int totalPartitions, int samplePerSec) {
        LOGGER.info("Connecting to source job {}", sourceJobName);
        return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, Optional.empty());
    }

    /**
     * Connects to the named source job with a no-op sink connection status observer.
     */
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId, int samplePerSec, Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to source job {}", sourceJobName);
        return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, new MantisSourceJobConnector.NoOpSinkConnectionsStatusObserver(), sinkParamsO);
    }

    /**
     * @deprecated use {@link #getSourceJob(String, String, String, int, Observer, Optional)}
     */
    @Deprecated
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId, int forPartition, int totalPartitions, int samplePerSec, Observer<SinkConnectionsStatus> sinkConnObs) {
        LOGGER.info("Connecting to source job {} obs {}", sourceJobName, sinkConnObs);
        boolean enableMetaMessages = false;
        boolean enableCompressedBinaryInput = false;
        return connectToQueryBasedJob(MantisSourceJobConnectorFactory.getConnector(), criterion, sourceJobName, clientId, samplePerSec, enableMetaMessages, enableCompressedBinaryInput, sinkConnObs, Optional.empty());
    }

    /**
     * Connects with meta messages and compressed binary input disabled.
     */
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId,
            int samplePerSec, Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to source job {} obs {}", sourceJobName, sinkConnObs);
        boolean enableMetaMessages = false;
        return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, enableMetaMessages, sinkConnObs, sinkParamsO);
    }

    /**
     * Connects with compressed binary input disabled.
     */
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId,
            int samplePerSec, boolean enableMetaMessages, Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to source job {} obs {}", sourceJobName, sinkConnObs);
        boolean enableCompressedBinary = false;
        return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, enableMetaMessages, enableCompressedBinary, sinkConnObs, sinkParamsO);
    }

    /**
     * Connects to the named source job.
     *
     * @param sourceJobName name of the source job to connect to
     * @param criterion MQL query criterion; must be non-empty
     * @param clientId client id passed to the source job sink
     * @param samplePerSec sampling rate; values below 1 disable sampling
     * @param enableMetaMessages whether meta messages are requested from the sink
     * @param enableCompressedBinaryInput whether compressed binary transport is requested
     * @param sinkConnObs observer notified of sink connection status changes
     * @param sinkParamsO overrides the default sink parameters when present
     * @throws IllegalArgumentException if the criterion is null or empty
     */
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId,
            int samplePerSec, boolean enableMetaMessages, boolean enableCompressedBinaryInput, Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to source job {} obs {}", sourceJobName, sinkConnObs);
        return connectToQueryBasedJob(MantisSourceJobConnectorFactory.getConnector(), criterion, sourceJobName, clientId, samplePerSec, enableMetaMessages, enableCompressedBinaryInput, sinkConnObs, sinkParamsO);
    }

    // Builds default sink parameters (subscription id derived from the criterion's hash) and connects.
    private MantisSSEJob connectToQueryBasedJob(MantisSourceJobConnector connector, String criterion,
            String jobName, String clientId, int samplePerSec, boolean enableMetaMessages, boolean enableCompressedBinaryInput,
            Observer<SinkConnectionsStatus> sinkConnObs,
            Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to {}", jobName);
        if (criterion == null || criterion.isEmpty()) {
            // IllegalArgumentException (a RuntimeException subtype) is more precise
            // than the bare RuntimeException previously thrown here.
            throw new IllegalArgumentException("Criterion cannot be empty");
        }
        String subId = Integer.toString(criterion.hashCode());
        SinkParameters defaultParams = getDefaultSinkParams(clientId, samplePerSec,
                Optional.of(criterion), Optional.of(subId), enableMetaMessages, enableCompressedBinaryInput, 500);
        return connector.connectToJob(jobName, sinkParamsO.orElse(defaultParams), sinkConnObs);
    }
}
| 8,529 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/MantisSourceJobConnector.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import com.sampullara.cli.Args;
import com.sampullara.cli.Argument;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.client.SinkConnectionsStatus;
import io.mantisrx.client.examples.SubmitEphemeralJob;
import io.mantisrx.runtime.parameter.SinkParameters;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
import rx.Subscription;
/**
* Used to locate and connect to Mantis Source Jobs.
*/
/**
 * Used to locate and connect to Mantis Source Jobs.
 *
 * <p>Discovery goes through Zookeeper; the no-arg constructor reads the
 * {@code mantis.zookeeper.*} settings from the process environment, falling back
 * to local-development defaults.
 */
public class MantisSourceJobConnector {

    @Argument(alias = "p", description = "Specify a configuration file")
    private static String propFile = "";

    private static final Logger LOGGER = LoggerFactory.getLogger(MantisSourceJobConnector.class);

    // Connection properties handed to MantisSSEJob.Builder.
    private final Properties props;

    // Well-known sink parameter names understood by source jobs.
    public static final String MANTIS_SOURCEJOB_CLIENT_ID_PARAM = "clientId";
    public static final String MANTIS_SOURCEJOB_SUBSCRIPTION_ID = "subscriptionId";
    // NOTE(review): same value as MANTIS_SOURCEJOB_CLIENT_ID_PARAM; kept for compatibility.
    public static final String MANTIS_SOURCEJOB_CLIENT_ID = "clientId";
    public static final String MANTIS_SOURCEJOB_CRITERION = "criterion";
    public static final String MANTIS_SOURCEJOB_NAME_PARAM = "sourceJobName";
    public static final String MANTIS_SOURCEJOB_TARGET_KEY = "target";
    public static final String MANTIS_SOURCEJOB_IS_BROADCAST_MODE = "isBroadcastMode";
    public static final String MANTIS_SOURCEJOB_SAMPLE_PER_SEC_KEY = "sample";
    public static final String MANTIS_ENABLE_PINGS = "enablePings";
    public static final String MANTIS_ENABLE_META_MESSAGES = "enableMetaMessages";
    public static final String MANTIS_META_MESSAGE_INTERVAL_SEC = "metaMessagesSec";
    public static final String MANTIS_MQL_THREADING_PARAM = "mantis.mql.threading.enabled";

    // Environment variable / property names for Zookeeper discovery.
    private static final String ZK_CONNECT_STRING = "mantis.zookeeper.connectString";
    private static final String ZK_ROOT = "mantis.zookeeper.root";
    private static final String ZK_LEADER_PATH = "mantis.zookeeper.leader.announcement.path";

    /**
     * Creates a connector with explicitly supplied connection properties.
     */
    public MantisSourceJobConnector(Properties props) {
        this.props = props;
    }

    /**
     * Creates a connector whose Zookeeper settings come from the environment,
     * falling back to local defaults (127.0.0.1:2181, /mantis/master, /leader).
     *
     * @throws RuntimeException if any resolved Zookeeper setting is empty
     */
    public MantisSourceJobConnector() {
        props = new Properties();
        final String defaultZkConnect = "127.0.0.1:2181";
        final String defaultZkRoot = "/mantis/master";
        final String defaultZkLeaderPath = "/leader";
        String connectString;
        String zookeeperRoot;
        String zookeeperLeaderAnnouncementPath;
        Map<String, String> env = System.getenv();
        if (env == null || env.isEmpty()) {
            connectString = defaultZkConnect;
            zookeeperRoot = defaultZkRoot;
            zookeeperLeaderAnnouncementPath = defaultZkLeaderPath;
        } else {
            connectString = env.getOrDefault(ZK_CONNECT_STRING, defaultZkConnect);
            zookeeperRoot = env.getOrDefault(ZK_ROOT, defaultZkRoot);
            zookeeperLeaderAnnouncementPath = env.getOrDefault(ZK_LEADER_PATH, defaultZkLeaderPath);
            LOGGER.info("Mantis Zk settings read from ENV: connectString {} root {} path {}", env.get(ZK_CONNECT_STRING), env.get(ZK_ROOT), env.get(ZK_LEADER_PATH));
        }
        // A setting can still be empty if the env var exists but is set to "".
        if (connectString != null && !connectString.isEmpty()
                && zookeeperRoot != null && !zookeeperRoot.isEmpty()
                && zookeeperLeaderAnnouncementPath != null && !zookeeperLeaderAnnouncementPath.isEmpty()) {
            props.put(ZK_CONNECT_STRING, connectString);
            props.put(ZK_ROOT, zookeeperRoot);
            props.put(ZK_LEADER_PATH, zookeeperLeaderAnnouncementPath);
            props.put("mantis.zookeeper.connectionTimeMs", "2000");
            props.put("mantis.zookeeper.connection.retrySleepMs", "500");
            props.put("mantis.zookeeper.connection.retryCount", "5");
        } else {
            throw new RuntimeException("Zookeeper properties not available!");
        }
        LOGGER.info("Mantis Zk settings used for Source Job connector: connectString {} root {} path {}", connectString, zookeeperRoot, zookeeperLeaderAnnouncementPath);
    }

    /**
     * @deprecated misspelled; use {@link #connectToJob(String, SinkParameters)}
     */
    @Deprecated
    public MantisSSEJob connecToJob(String jobName) {
        return connectToJob(jobName, new SinkParameters.Builder().build(), new NoOpSinkConnectionsStatusObserver());
    }

    /**
     * Connects to the named job with a no-op sink connection status observer.
     */
    public MantisSSEJob connectToJob(String jobName, SinkParameters params) {
        return connectToJob(jobName, params, new NoOpSinkConnectionsStatusObserver());
    }

    /**
     * @deprecated forPartition and totalPartitions is not used internally, this API will be removed in next release
     */
    @Deprecated
    public MantisSSEJob connectToJob(String jobName, SinkParameters params, int forPartition, int totalPartitions) {
        return connectToJob(jobName, params, new NoOpSinkConnectionsStatusObserver());
    }

    /**
     * @deprecated forPartition and totalPartitions is not used internally, this API will be removed in next release
     */
    @Deprecated
    public MantisSSEJob connectToJob(String jobName, SinkParameters params, int forPartition, int totalPartitions, Observer<SinkConnectionsStatus> sinkObserver) {
        return connectToJob(jobName, params, sinkObserver);
    }

    /**
     * Builds an SSE connector for the named job with the given sink parameters
     * and sink connection status observer.
     */
    public MantisSSEJob connectToJob(
            String jobName,
            SinkParameters params,
            Observer<SinkConnectionsStatus> sinkObserver) {
        return new MantisSSEJob.Builder(props)
                .name(jobName)
                .sinkConnectionsStatusObserver(sinkObserver)
                .onConnectionReset(throwable -> LOGGER.error("Reconnecting due to error: " + throwable.getMessage()))
                .sinkParams(params)
                .buildJobConnector();
    }

    /** Sink connection status observer that only logs status updates. */
    static class NoOpSinkConnectionsStatusObserver implements Observer<SinkConnectionsStatus> {

        @Override
        public void onCompleted() {
            LOGGER.warn("Got Completed on SinkConnectionStatus ");
        }

        @Override
        public void onError(Throwable e) {
            LOGGER.error("Got Error on SinkConnectionStatus ", e);
        }

        @Override
        public void onNext(SinkConnectionsStatus t) {
            LOGGER.info("Got Sink Connection Status update " + t);
        }
    }

    /** Manual smoke test: connects to a local "TestSourceJob" and waits for 20 events. */
    public static void main(String[] args) {
        try {
            SinkParameters params = new SinkParameters.Builder().withParameter("subscriptionId", "id1").
                    withParameter("criterion", "select * where true").build();
            Args.parse(MantisSourceJobConnector.class, args);
            final CountDownLatch latch = new CountDownLatch(20);
            MantisSourceJobConnector sourceJobConnector = new MantisSourceJobConnector();
            MantisSSEJob job = sourceJobConnector.connectToJob("TestSourceJob", params);
            Subscription subscription = job.connectAndGetObservable()
                    .doOnNext(o -> {
                        LOGGER.info("Got event: data: " + o.getEventAsString());
                        latch.countDown();
                    })
                    .subscribe();
            Subscription s2 = job.connectAndGetObservable()
                    .doOnNext(event -> {
                        LOGGER.info(" 2nd: Got event: data: " + event.getEventAsString());
                        latch.countDown();
                    })
                    .subscribe();
            try {
                boolean await = latch.await(300, TimeUnit.SECONDS);
                if (await)
                    System.out.println("PASSED");
                else
                    System.err.println("FAILED!");
            } catch (InterruptedException e) {
                // Restore the interrupt flag for anything further up the stack.
                Thread.currentThread().interrupt();
                e.printStackTrace();
            }
            subscription.unsubscribe();
            System.out.println("Unsubscribed");
        } catch (IllegalArgumentException e) {
            // Bug fix: previously printed usage for the unrelated SubmitEphemeralJob class.
            Args.usage(MantisSourceJobConnector.class);
            System.exit(1);
        } catch (Exception e) {
            e.printStackTrace();
            System.exit(1);
        }
    }
}
| 8,530 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/SinkConnectionStatusObserver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import io.mantisrx.client.SinkConnectionsStatus;
import rx.Observer;
/**
 * Observer of sink connection status that also exposes aggregate connection counts.
 */
public interface SinkConnectionStatusObserver extends Observer<SinkConnectionsStatus> {

    // Interface methods are implicitly public abstract; redundant modifiers removed.

    /** Number of sink servers currently connected. */
    long getConnectedServerCount();

    /** Total number of sink servers known. */
    long getTotalServerCount();

    /** Number of sink servers currently delivering data. */
    long getReceivingDataCount();

    /** True when connected to, and receiving data from, all sink servers. */
    boolean isConnectedToAllSinks();
}
| 8,531 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/AbstractJobSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import com.mantisrx.common.utils.MantisSSEConstants;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.client.SinkConnectionsStatus;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.mantisrx.runtime.source.Source;
import java.io.UnsupportedEncodingException;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
/**
 * Base class for sources that connect directly to a Mantis job's sink.
 */
public abstract class AbstractJobSource implements Source<MantisServerSentEvent> {

    private static final int DEFAULT_META_MSG_INTERVAL_MSEC = 500;
    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractJobSource.class);

    /**
     * Builds the default {@link SinkParameters} for a sink connection.
     *
     * @param clientId client id reported to the sink; pings are always enabled
     * @param samplePerSec sampling rate; values below 1 omit the sample parameter
     * @param criterion optional MQL criterion
     * @param subscriptionId optional subscription id
     * @param enableMetaMessages when true, meta messages are requested at {@code metaMessageInterval}
     * @param enableCompressedBinaryInput when true, compressed binary transport is requested
     * @param metaMessageInterval meta message interval (only used when meta messages are enabled)
     */
    public SinkParameters getDefaultSinkParams(final String clientId,
            final int samplePerSec,
            final Optional<String> criterion,
            final Optional<String> subscriptionId,
            final boolean enableMetaMessages,
            boolean enableCompressedBinaryInput, final long metaMessageInterval) {
        SinkParameters.Builder defaultParamBuilder = new SinkParameters.Builder();
        try {
            defaultParamBuilder = defaultParamBuilder
                    .withParameter(MantisSourceJobConnector.MANTIS_SOURCEJOB_CLIENT_ID_PARAM, clientId)
                    .withParameter(MantisSSEConstants.ENABLE_PINGS, "true");
            if (samplePerSec >= 1) {
                defaultParamBuilder = defaultParamBuilder.withParameter("sample", Integer.toString(samplePerSec));
            }
            if (criterion.isPresent()) {
                defaultParamBuilder =
                        defaultParamBuilder.withParameter(MantisSourceJobConnector.MANTIS_SOURCEJOB_CRITERION, criterion.get());
            }
            if (subscriptionId.isPresent()) {
                defaultParamBuilder = defaultParamBuilder.withParameter(MantisSourceJobConnector.MANTIS_SOURCEJOB_SUBSCRIPTION_ID, subscriptionId.get());
            }
            if (enableMetaMessages) {
                defaultParamBuilder = defaultParamBuilder.withParameter(MantisSSEConstants.ENABLE_META_MESSAGES, Boolean.toString(true));
                defaultParamBuilder = defaultParamBuilder.withParameter(MantisSSEConstants.META_MESSAGES_SEC, Long.toString(metaMessageInterval));
            }
            if (enableCompressedBinaryInput) {
                defaultParamBuilder = defaultParamBuilder.withParameter(MantisSSEConstants.MANTIS_ENABLE_COMPRESSION, Boolean.toString(true));
            }
        } catch (UnsupportedEncodingException e) {
            // Bug fix: preserve the cause instead of discarding the stack trace.
            throw new RuntimeException(e.getMessage(), e);
        }
        return defaultParamBuilder.build();
    }

    /**
     * Connects to the named job with meta messages and compressed binary input disabled.
     */
    public MantisSSEJob getJob(String jobName, String clientId, int samplePerSec,
            Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to job {} obs {}", jobName, sinkConnObs);
        boolean enableMetaMessages = false;
        boolean enableCompressedBinaryInput = false;
        MantisSourceJobConnector connector = MantisSourceJobConnectorFactory.getConnector();
        SinkParameters defaultParams = getDefaultSinkParams(clientId,
                samplePerSec, Optional.empty(), Optional.empty(), enableMetaMessages, enableCompressedBinaryInput, DEFAULT_META_MSG_INTERVAL_MSEC);
        return connector.connectToJob(jobName, sinkParamsO.orElse(defaultParams), sinkConnObs);
    }
}
| 8,532 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/MultiSinkConnectionStatusObserver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import io.mantisrx.client.SinkConnectionsStatus;
import java.util.Iterator;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Aggregates sink connection status across multiple jobs by delegating to a
 * named set of per-job {@link SinkConnectionStatusObserver}s.
 */
public class MultiSinkConnectionStatusObserver implements SinkConnectionStatusObserver {

    private static final Logger LOGGER = LoggerFactory.getLogger(MultiSinkConnectionStatusObserver.class);

    public static final MultiSinkConnectionStatusObserver INSTANCE = new MultiSinkConnectionStatusObserver();

    // Per-job observers keyed by a caller-chosen name.
    private final ConcurrentHashMap<String, SinkConnectionStatusObserver> sinkObserverMap = new ConcurrentHashMap<>();

    public void addSinkConnectionObserver(String name, SinkConnectionStatusObserver obs) {
        sinkObserverMap.put(name, obs);
    }

    public void removeSinkConnectionObserver(String name) {
        sinkObserverMap.remove(name);
    }

    public SinkConnectionStatusObserver getSinkConnectionObserver(String name) {
        return sinkObserverMap.get(name);
    }

    // for testing
    void removeAllSinkConnectionObservers() {
        sinkObserverMap.clear();
    }

    /**
     * Iterate through all member connectionObservers and sum up the connectedServer counts.
     */
    @Override
    public long getConnectedServerCount() {
        if (sinkObserverMap.isEmpty()) {
            LOGGER.warn("No connection observers registered!");
        }
        Iterator<SinkConnectionStatusObserver> it = sinkObserverMap.values().iterator();
        int count = 0;
        while (it.hasNext()) {
            SinkConnectionStatusObserver ob = it.next();
            count += ob.getConnectedServerCount();
        }
        LOGGER.info("Total connected server count" + count);
        return count;
    }

    /**
     * Iterate through all member connectionObservers and sum up the totalServer counts.
     */
    @Override
    public long getTotalServerCount() {
        if (sinkObserverMap.isEmpty()) {
            LOGGER.warn("No connection observers registered!");
        }
        Iterator<SinkConnectionStatusObserver> it = sinkObserverMap.values().iterator();
        int count = 0;
        while (it.hasNext()) {
            SinkConnectionStatusObserver ob = it.next();
            count += ob.getTotalServerCount();
        }
        LOGGER.info("Total server count" + count);
        return count;
    }

    /**
     * Iterate through all member connectionObservers and sum up the receiving data counts.
     */
    @Override
    public long getReceivingDataCount() {
        if (sinkObserverMap.isEmpty()) {
            LOGGER.warn("No connection observers registered!");
        }
        Iterator<SinkConnectionStatusObserver> it = sinkObserverMap.values().iterator();
        int count = 0;
        while (it.hasNext()) {
            SinkConnectionStatusObserver ob = it.next();
            // Bug fix: previously summed getConnectedServerCount() here (copy-paste),
            // so this method never reported the receiving-data count.
            count += ob.getReceivingDataCount();
        }
        LOGGER.info("Total receiving server count" + count);
        return count;
    }

    /**
     * Returns true only when every registered observer reports being connected to
     * all of its sinks; returns false when no observers are registered.
     */
    @Override
    public boolean isConnectedToAllSinks() {
        if (sinkObserverMap.isEmpty()) {
            LOGGER.warn("No connection observers registered!");
        }
        Iterator<SinkConnectionStatusObserver> it = sinkObserverMap.values().iterator();
        boolean connectedToAll = false;
        while (it.hasNext()) {
            SinkConnectionStatusObserver ob = it.next();
            connectedToAll = ob.isConnectedToAllSinks();
            if (!connectedToAll) {
                LOGGER.warn("Not connected to sinks of all jobs");
                break;
            }
        }
        return connectedToAll;
    }

    @Override
    public void onCompleted() {
        // NO OP
    }

    @Override
    public void onError(Throwable e) {
        // NO OP
    }

    @Override
    public void onNext(SinkConnectionsStatus t) {
        // NO OP
    }
}
| 8,533 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/DefaultSinkConnectionStatusObserver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import io.mantisrx.client.SinkConnectionsStatus;
import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tracks sink connection counts for a single job, updated from
 * {@link SinkConnectionsStatus} events.
 */
public class DefaultSinkConnectionStatusObserver implements SinkConnectionStatusObserver {

    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultSinkConnectionStatusObserver.class);

    private static final SinkConnectionStatusObserver INSTANCE = new DefaultSinkConnectionStatusObserver();

    // Latest counts reported by onNext.
    private final AtomicLong numConnected = new AtomicLong();
    private final AtomicLong total = new AtomicLong();
    private final AtomicLong receivingData = new AtomicLong();

    /**
     * Returns the shared singleton when {@code singleton} is true, otherwise a new instance.
     */
    public static synchronized SinkConnectionStatusObserver getInstance(boolean singleton) {
        return singleton ? INSTANCE : new DefaultSinkConnectionStatusObserver();
    }

    /** Returns the shared singleton instance. */
    public static SinkConnectionStatusObserver getInstance() {
        return getInstance(true);
    }

    @Override
    public void onCompleted() {
        LOGGER.error("SinkConnectionStatusObserver completed!");
    }

    @Override
    public void onError(Throwable e) {
        LOGGER.error("Got Error", e);
    }

    @Override
    public void onNext(SinkConnectionsStatus t) {
        LOGGER.info("Got SinkConnectionStatus update " + t);
        numConnected.set(t.getNumConnected());
        total.set(t.getTotal());
        receivingData.set(t.getRecevingDataFrom());
    }

    @Override
    public long getConnectedServerCount() {
        return numConnected.get();
    }

    @Override
    public long getTotalServerCount() {
        return total.get();
    }

    @Override
    public long getReceivingDataCount() {
        return receivingData.get();
    }

    @Override
    public boolean isConnectedToAllSinks() {
        final long connected = numConnected.get();
        final long totalCount = total.get();
        final long receiving = receivingData.get();
        // All three counts must agree and be positive; equality with a positive
        // receiving count implies the individual positivity checks.
        if (receiving > 0 && connected == totalCount && totalCount == receiving) {
            return true;
        }
        LOGGER.warn("NOT connected to all sinks "
                + " connected : " + connected
                + " total " + totalCount
                + " receiving Data " + receiving);
        return false;
    }
}
| 8,534 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/source/JobSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.source;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.mantisrx.common.utils.Closeables;
import com.mantisrx.common.utils.MantisSSEConstants;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.connector.job.core.AbstractSourceJobSource;
import io.mantisrx.connector.job.core.DefaultSinkConnectionStatusObserver;
import io.mantisrx.connector.job.core.MantisSourceJobConnector;
import io.mantisrx.connector.job.core.MultiSinkConnectionStatusObserver;
import io.mantisrx.connector.job.core.SinkConnectionStatusObserver;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import lombok.extern.slf4j.Slf4j;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
/**
 * A {@link Source} that connects to one or more upstream Mantis source jobs
 * over their SSE sinks and merges the resulting event streams into a single
 * {@code Observable<Observable<MantisServerSentEvent>>}.
 *
 * <p>Targets may be supplied directly, as a JSON string, or resolved lazily
 * from the {@code target} job parameter in {@link #call(Context, Index)}.
 */
@Slf4j
public class JobSource extends AbstractSourceJobSource implements Source<MantisServerSentEvent> {

    private static final Logger LOGGER = LoggerFactory.getLogger(JobSource.class);
    // Parses the JSON "target" job parameter into TargetInfo entries.
    private static JsonParser parser = new JsonParser();

    // Source jobs to connect to; populated from job parameters when empty.
    protected List<TargetInfo> targets;
    // Connections opened in call(); retained so close() can release them.
    private final List<MantisSSEJob> jobs = new ArrayList<>();

    public JobSource(List<TargetInfo> targets) {
        this.targets = targets;
    }

    // For backwards compatibility.
    public JobSource() {
        this(new ArrayList<>());
    }

    public JobSource(String targetInfoStr) {
        this.targets = parseTargetInfo(targetInfoStr);
    }

    /**
     * Declares the "target" job parameter: a JSON document describing which
     * source jobs to connect to. Defaults to an empty object.
     */
    @Override
    public List<ParameterDefinition<?>> getParameters() {
        List<ParameterDefinition<?>> params = Lists.newArrayList();
        params.add(new StringParameter()
                .name(MantisSourceJobConnector.MANTIS_SOURCEJOB_TARGET_KEY)
                .validator(Validators.notNullOrEmpty())
                .defaultValue("{}")
                .build());
        return params;
    }

    /**
     * Connects to every configured target source job and merges the resulting
     * SSE streams. A connection-status observer is registered per target with
     * {@link MultiSinkConnectionStatusObserver}.
     *
     * @param context job runtime context (parameters, worker info, job id)
     * @param index   source index (not used by this implementation)
     * @return the merged stream of streams; may be {@code null} if no target
     *         produced a connection
     */
    @Override
    public Observable<Observable<MantisServerSentEvent>> call(Context context, Index index) {
        if (targets.isEmpty()) {
            targets = parseInputParameters(context);
        }
        Observable<Observable<MantisServerSentEvent>> sourceObs = null;
        int workerNo = context.getWorkerInfo().getWorkerNumber();
        // De-duplicate (sourceJobName, clientId) pairs across targets.
        targets = enforceClientIdConsistency(targets, context.getJobId());
        for (TargetInfo targetInfo : targets) {
            MantisSSEJob job;
            String sourceJobName = targetInfo.sourceJobName;
            String criterion = targetInfo.criterion;
            int samplePerSec = targetInfo.samplePerSec;
            boolean enableMetaMessages = targetInfo.enableMetaMessages;
            LOGGER.info("Processing job " + sourceJobName);
            boolean singleton = false;
            SinkConnectionStatusObserver obs = DefaultSinkConnectionStatusObserver.getInstance(singleton);
            MultiSinkConnectionStatusObserver.INSTANCE.addSinkConnectionObserver(sourceJobName, obs);
            String clientId = targetInfo.clientId;
            if (targetInfo.isBroadcastMode) {
                // Broadcast mode: every worker consumes the full stream, so
                // each worker needs a unique clientId.
                clientId = clientId + "_" + workerNo;
            }
            boolean enableCompressedBinary = targetInfo.enableCompressedBinary;
            job = getSourceJob(sourceJobName, criterion, clientId, samplePerSec, enableMetaMessages, enableCompressedBinary, obs, Optional.<SinkParameters>empty());
            jobs.add(job);
            if (sourceObs == null) {
                sourceObs = job.connectAndGet();
            } else {
                if (job != null) {
                    Observable<Observable<MantisServerSentEvent>> clientObs = job.connectAndGet();
                    if (clientObs != null) {
                        sourceObs = sourceObs.mergeWith(clientObs);
                    } else {
                        LOGGER.error("Could not connect to job " + sourceJobName);
                    }
                } else {
                    LOGGER.error("Could not connect to job " + sourceJobName);
                }
            }
        }
        return sourceObs;
    }

    /** Closes all open source-job connections, then clears the tracking list. */
    @Override
    public void close() throws IOException {
        try {
            Closeables.combine(jobs).close();
        } finally {
            jobs.clear();
        }
    }

    /**
     * Use {@link io.mantisrx.runtime.parameter.SourceJobParameters.TargetInfo} instead.
     */
    @Deprecated
    public static class TargetInfo {

        public String sourceJobName;
        public String criterion;
        public int samplePerSec;  // -1 when unset (see parseTargetInfo)
        public boolean isBroadcastMode;
        public boolean enableMetaMessages;
        public boolean enableCompressedBinary;
        public String clientId;

        public TargetInfo(String jobName,
                          String criterion,
                          String clientId,
                          int samplePerSec,
                          boolean isBroadcastMode,
                          boolean enableMetaMessages,
                          boolean enableCompressedBinary) {
            this.sourceJobName = jobName;
            this.criterion = criterion;
            this.clientId = clientId;
            this.samplePerSec = samplePerSec;
            this.isBroadcastMode = isBroadcastMode;
            this.enableMetaMessages = enableMetaMessages;
            this.enableCompressedBinary = enableCompressedBinary;
        }
    }

    /** Reads the "target" job parameter and parses it into a target list. */
    protected static List<TargetInfo> parseInputParameters(Context ctx) {
        String targetListStr = (String) ctx.getParameters()
                .get(MantisSourceJobConnector.MANTIS_SOURCEJOB_TARGET_KEY, "{}");
        return parseTargetInfo(targetListStr);
    }

    /**
     * Use {@link io.mantisrx.runtime.parameter.SourceJobParameters#parseTargetInfo(String)} instead.
     *
     * <p>Parses a JSON document of the form {@code {"targets": [...]}} where
     * each entry names a source job, a criterion, and optional settings.
     */
    @Deprecated
    protected static List<TargetInfo> parseTargetInfo(String targetListStr) {
        List<TargetInfo> targetList = new ArrayList<TargetInfo>();
        JsonObject requestObj = (JsonObject) parser.parse(targetListStr);
        JsonArray arr = requestObj.get("targets").getAsJsonArray();
        for (int i = 0; i < arr.size(); i++) {
            int sample = -1;
            boolean isBroadCastMode = false;
            JsonObject srcObj = arr.get(i).getAsJsonObject();
            String sName = srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_NAME_PARAM).getAsString();
            String criterion = srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_CRITERION).getAsString();
            String clientId = null;
            if (srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_CLIENT_ID) != null) {
                clientId = srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_CLIENT_ID).getAsString();
            }
            if (srcObj.get(MantisSSEConstants.SAMPLE) != null) {
                sample = srcObj.get(MantisSSEConstants.SAMPLE).getAsInt();
            }
            if (srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_IS_BROADCAST_MODE) != null) {
                isBroadCastMode =
                        srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_IS_BROADCAST_MODE).getAsBoolean();
            }
            boolean enableMetaMessages = false;
            if (srcObj.get(MantisSSEConstants.ENABLE_META_MESSAGES) != null) {
                enableMetaMessages = srcObj.get(MantisSSEConstants.ENABLE_META_MESSAGES).getAsBoolean();
            }
            boolean enableCompressedBinary = false;
            // NOTE(review): only the key's presence is checked here — the
            // boolean value of MANTIS_ENABLE_COMPRESSION is ignored, unlike
            // the other flags above. Confirm this is intentional.
            if (srcObj.get(MantisSSEConstants.MANTIS_ENABLE_COMPRESSION) != null) {
                enableCompressedBinary = true;
            }
            TargetInfo ti = new TargetInfo(
                    sName,
                    criterion,
                    clientId,
                    sample,
                    isBroadCastMode,
                    enableMetaMessages,
                    enableCompressedBinary);
            targetList.add(ti);
            LOGGER.info("sname: " + sName + " criterion: " + criterion + " isBroadcastMode " + isBroadCastMode);
        }
        return targetList;
    }

    /**
     * Use {@link io.mantisrx.runtime.parameter.SourceJobParameters.TargetInfoBuilder} instead.
     */
    @Deprecated
    public static class TargetInfoBuilder {

        private String sourceJobName;
        private String criterion;
        private String clientId;
        private int samplePerSec = -1;
        private boolean isBroadcastMode = false;
        private boolean enableMetaMessages = false;
        private boolean enableCompressedBinary = false;

        public TargetInfoBuilder() {
        }

        public TargetInfoBuilder withSourceJobName(String srcJobName) {
            this.sourceJobName = srcJobName;
            return this;
        }

        public TargetInfoBuilder withQuery(String query) {
            this.criterion = query;
            return this;
        }

        public TargetInfoBuilder withSamplePerSec(int samplePerSec) {
            this.samplePerSec = samplePerSec;
            return this;
        }

        public TargetInfoBuilder withBroadCastMode() {
            this.isBroadcastMode = true;
            return this;
        }

        public TargetInfoBuilder withMetaMessagesEnabled() {
            this.enableMetaMessages = true;
            return this;
        }

        public TargetInfoBuilder withBinaryCompressionEnabled() {
            this.enableCompressedBinary = true;
            return this;
        }

        public TargetInfoBuilder withClientId(String clientId) {
            this.clientId = clientId;
            return this;
        }

        public TargetInfo build() {
            return new TargetInfo(
                    sourceJobName,
                    criterion,
                    clientId,
                    samplePerSec,
                    isBroadcastMode,
                    enableMetaMessages,
                    enableCompressedBinary);
        }
    }

    /**
     * Use {@link io.mantisrx.runtime.parameter.SourceJobParameters#enforceClientIdConsistency(List, String)} instead.
     *
     * Ensures that a list of TargetInfo contains a sane set of sourceJobName, ClientId pairs.
     * TODO: Currently mutates the list, which isn't problematic here, but it would be prudent to clean this up.
     *
     * @param targets A List of TargetInfo for which to validate and correct clientId inconsistencies.
     *
     * @return The original List modified to have consistent clientIds.
     */
    @Deprecated
    public static List<TargetInfo> enforceClientIdConsistency(List<TargetInfo> targets, String defaultClientId) {
        targets.sort(Comparator.comparing(t -> t.criterion));
        HashSet<Tuple2<String, String>> connectionPairs = new HashSet<>(targets.size());
        for (TargetInfo target : targets) {
            if (target.clientId == null) {
                target.clientId = defaultClientId;
            }
            // On a collision, append an incrementing suffix until the
            // (sourceJobName, clientId) pair is unique.
            Tuple2<String, String> connectionPair = Tuple.of(target.sourceJobName, target.clientId);
            int attempts = 0;
            while (connectionPairs.contains(connectionPair)) {
                connectionPair = Tuple.of(target.sourceJobName, target.clientId + "_" + ++attempts);
            }
            target.clientId = connectionPair._2;
            connectionPairs.add(connectionPair);
        }
        return targets;
    }
}
| 8,535 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/ParameterTestUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
import io.mantisrx.runtime.parameter.Parameters;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Test helper for building Mantis {@link Parameters} from alternating
 * key/value varargs.
 */
public class ParameterTestUtils {

    /**
     * Creates a {@link Parameters} instance from alternating key/value pairs,
     * e.g. {@code createParameters("topic", "t1", "sample", 10)}.
     *
     * <p>Every key is registered both as a required and as a defined parameter
     * name. A trailing key with no following value is silently ignored,
     * matching the original behavior.
     *
     * @param params alternating {@code String} keys and arbitrary values
     * @return the assembled {@link Parameters}
     * @throws IllegalArgumentException if a key position holds a non-String
     */
    public static Parameters createParameters(Object... params) {
        // Fixed: was a raw "new HashMap()"; also dropped the redundant
        // intermediate List local.
        Map<String, Object> paramsMap = new HashMap<>();
        Set<String> requiredParams = new HashSet<>();
        Iterator<Object> iterator = Arrays.asList(params).iterator();
        while (iterator.hasNext()) {
            Object token = iterator.next();
            if (!(token instanceof String)) {
                throw new IllegalArgumentException(
                        "parameter key must be of type String, parameter key not supported with type " + token.getClass());
            }
            String paramKey = (String) token;
            if (iterator.hasNext()) {
                paramsMap.put(paramKey, iterator.next());
                requiredParams.add(paramKey);
            }
        }
        return new Parameters(paramsMap, requiredParams, requiredParams);
    }
}
| 8,536 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/MantisKafkaConsumerConfigTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import java.util.Arrays;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.RangeAssignor;
import org.apache.kafka.common.metrics.JmxReporter;
import org.junit.Test;
import org.mockito.stubbing.Answer;
/**
 * Unit tests for {@code MantisKafkaConsumerConfig}: verifies the default Kafka
 * consumer settings and that job parameters override them.
 */
public class MantisKafkaConsumerConfigTest {

    /** Builds a mocked job {@link Context} backed by the given parameters. */
    private static Context contextWith(Parameters parameters) {
        Context mockContext = mock(Context.class);
        when(mockContext.getParameters()).then((Answer<Parameters>) invocation -> parameters);
        return mockContext;
    }

    @Test
    public void testDefaultConsumerConfig() {
        Context ctx = contextWith(ParameterTestUtils.createParameters());

        Map<String, Object> props = new MantisKafkaConsumerConfig(ctx).getConsumerProperties();

        // Every Kafka client setting should fall back to the Mantis default.
        assertEquals(Boolean.valueOf(MantisKafkaConsumerConfig.DEFAULT_AUTO_COMMIT_ENABLED), props.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_AUTO_COMMIT_INTERVAL_MS, props.get(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_AUTO_OFFSET_RESET, props.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_FETCH_MAX_WAIT_MS, props.get(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_FETCH_MIN_BYTES, props.get(ConsumerConfig.FETCH_MIN_BYTES_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_HEARTBEAT_INTERVAL_MS, props.get(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_SESSION_TIMEOUT_MS, props.get(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_KEY_DESERIALIZER, props.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_VALUE_DESERIALIZER, props.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_MAX_PARTITION_FETCH_BYTES, props.get(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_RECEIVE_BUFFER_BYTES, props.get(ConsumerConfig.RECEIVE_BUFFER_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_SEND_BUFFER_BYTES, props.get(ConsumerConfig.SEND_BUFFER_CONFIG));
        assertEquals(Arrays.asList(MantisKafkaConsumerConfig.DEFAULT_BOOTSTRAP_SERVERS_CONFIG), props.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
        assertEquals(Arrays.asList(JmxReporter.class.getName()), props.get(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG));
        assertEquals(Arrays.asList(RangeAssignor.class.getName()), props.get(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.getGroupId(), props.get(ConsumerConfig.GROUP_ID_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_MAX_POLL_INTERVAL_MS, props.get(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_MAX_POLL_RECORDS, props.get(ConsumerConfig.MAX_POLL_RECORDS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_REQUEST_TIMEOUT_MS, props.get(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG));
    }

    @Test
    public void testJobParamOverrides() {
        String topicName = "topic123";
        String groupId = "testKafkaConsumer-1";
        Parameters parameters = ParameterTestUtils.createParameters(
                KafkaSourceParameters.TOPIC, topicName,
                KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
                KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, groupId,
                KafkaSourceParameters.PREFIX + ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, 500);
        Context ctx = contextWith(parameters);

        Map<String, Object> props = new MantisKafkaConsumerConfig(ctx).getConsumerProperties();

        // MantisKafkaConsumerConfig only affects Kafka's ConsumerConfig defined properties
        assertFalse(ConsumerConfig.configNames().contains(KafkaSourceParameters.TOPIC));
        assertFalse(props.containsKey(KafkaSourceParameters.TOPIC));
        assertEquals("earliest", props.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG));
        assertEquals(groupId, props.get(ConsumerConfig.GROUP_ID_CONFIG));
        assertEquals(500, props.get(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG));
    }
}
| 8,537 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/MantisKafkaSourceConfigTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.connector.kafka.source.serde.ParserType;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import org.junit.Test;
import org.mockito.stubbing.Answer;
/**
 * Unit tests for {@code MantisKafkaSourceConfig}: verifies source-level
 * defaults and job-parameter overrides.
 */
public class MantisKafkaSourceConfigTest {

    /** Builds a mocked job {@link Context} backed by the given parameters. */
    private static Context contextWith(Parameters parameters) {
        Context mockContext = mock(Context.class);
        when(mockContext.getParameters()).then((Answer<Parameters>) invocation -> parameters);
        return mockContext;
    }

    @Test
    public void testDefaultConsumerConfig() {
        Context ctx = contextWith(ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, "testTopic"));

        MantisKafkaSourceConfig config = new MantisKafkaSourceConfig(ctx);

        // Apart from the mandatory topic, everything should use the defaults.
        assertEquals(MantisKafkaSourceConfig.DEFAULT_CONSUMER_POLL_TIMEOUT_MS, config.getConsumerPollTimeoutMs());
        assertEquals(CheckpointStrategyOptions.NONE, config.getCheckpointStrategy());
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_CHECKPOINT_INTERVAL_MS, config.getCheckpointIntervalMs());
        assertEquals(MantisKafkaSourceConfig.DEFAULT_MAX_BYTES_IN_PROCESSING, config.getMaxBytesInProcessing());
        assertEquals(ParserType.SIMPLE_JSON.getPropName(), config.getMessageParserType());
        assertEquals(MantisKafkaSourceConfig.DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER, config.getNumConsumerInstances());
        assertEquals(MantisKafkaSourceConfig.DEFAULT_PARSE_MSG_IN_SOURCE, config.getParseMessageInSource());
        assertEquals(MantisKafkaSourceConfig.DEFAULT_RETRY_CHECKPOINT_CHECK_DELAY_MS, config.getRetryCheckpointCheckDelayMs());
        assertEquals(MantisKafkaSourceConfig.DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN, config.getStaticPartitionAssignmentEnabled());
        assertEquals(Optional.empty(), config.getTopicPartitionCounts());
        assertEquals(Arrays.asList("testTopic"), config.getTopics());
    }

    @Test
    public void testJobParamOverrides() {
        String topicName = "topic123";
        int checkpointIntervalOverride = 100;
        boolean staticPartitionAssignEnableOverride = true;
        Parameters parameters = ParameterTestUtils.createParameters(
                KafkaSourceParameters.TOPIC, topicName,
                KafkaSourceParameters.CHECKPOINT_INTERVAL_MS, checkpointIntervalOverride,
                KafkaSourceParameters.ENABLE_STATIC_PARTITION_ASSIGN, staticPartitionAssignEnableOverride,
                KafkaSourceParameters.TOPIC_PARTITION_COUNTS, topicName + ":1024");
        Context ctx = contextWith(parameters);

        MantisKafkaSourceConfig config = new MantisKafkaSourceConfig(ctx);

        assertEquals(checkpointIntervalOverride, config.getCheckpointIntervalMs());
        assertEquals(staticPartitionAssignEnableOverride, config.getStaticPartitionAssignmentEnabled());
        Map<String, Integer> expectedCounts = new HashMap<>();
        expectedCounts.put(topicName, 1024);
        assertEquals(Optional.ofNullable(expectedCounts), config.getTopicPartitionCounts());
    }
}
| 8,538 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/assignor/StaticPartitionAssignorTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.assignor;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Unit tests for {@code StaticPartitionAssignorImpl}: distribution of topic
 * partitions across consumers and argument validation.
 *
 * <p>Cleanup: removed {@code assignmentMap} locals that were declared but
 * never used in the two invalid-argument tests, and documented the
 * intentionally empty catch blocks.
 */
public class StaticPartitionAssignorTest {

    private static final Logger LOGGER = LoggerFactory.getLogger(StaticPartitionAssignorTest.class);

    @Test
    public void testStaticAssign1() {
        Map<String, Integer> topicPartitionCounts = generateTopicPartitionCounts(15, 2);
        LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
        int totalNumConsumers = 20;
        StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
        for (int i = 0; i < totalNumConsumers; i++) {
            List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, topicPartitionCounts, totalNumConsumers);
            // With ~23 partitions over 20 consumers, each gets 1 or 2.
            assertTrue(assignedPartitions.size() >= 1 && assignedPartitions.size() <= 2);
            LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
        }
    }

    @Test
    public void testStaticAssignMoreConsumersThanPartitions() {
        Map<String, Integer> topicPartitionCounts = generateTopicPartitionCounts(15, 2);
        LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
        int totalNumConsumers = 40;
        StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
        // Sanity-only test: verifies assignment completes without error when
        // there are more consumers than partitions (some get nothing).
        for (int i = 0; i < totalNumConsumers; i++) {
            List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, topicPartitionCounts, totalNumConsumers);
            LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
        }
    }

    /**
     * Builds a map of {@code numTopics} topics whose partition counts cycle
     * through 1..partitionRange.
     */
    private Map<String, Integer> generateTopicPartitionCounts(int numTopics, int partitionRange) {
        Map<String, Integer> topicPartitionMap = new HashMap<>();
        int partitionCnt = 1;
        for (int i = 0; i < numTopics; i++) {
            topicPartitionMap.put("topic_" + i, partitionCnt++);
            if (partitionCnt == partitionRange + 1) {
                partitionCnt = 1;
            }
        }
        return topicPartitionMap;
    }

    @Test
    public void testStaticAssign2() {
        Map<String, Integer> topicPartitionCounts = new HashMap<>();
        topicPartitionCounts.put("topic_0", 1400);
        LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
        int totalNumConsumers = 20;
        Map<String, List<TopicPartition>> assignmentMap = new HashMap<>();
        StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
        for (int i = 0; i < totalNumConsumers; i++) {
            List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, topicPartitionCounts, totalNumConsumers);
            // 1400 partitions / 20 consumers = exactly 70 each.
            assertEquals(70, assignedPartitions.size());
            assignmentMap.put("" + i, assignedPartitions);
            LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
        }
    }

    @Test
    public void testStaticAssign3() {
        String tpList = "testtopic:1,testtopic2:7,testtopic3:1,testtopic4:46";
        Map<String, Integer> tpMap = new HashMap<>();
        String[] topicPartitionTuples = tpList.split(",");
        for (int i = 0; i < topicPartitionTuples.length; i++) {
            String[] topPart = topicPartitionTuples[i].split(":");
            tpMap.put(topPart[0], Integer.valueOf(topPart[1]));
        }
        int totalNumConsumers = 12;
        Map<String, List<TopicPartition>> assignmentMap = new HashMap<>();
        StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
        for (int i = 0; i < totalNumConsumers; i++) {
            List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, tpMap, totalNumConsumers);
            assignmentMap.put("" + i, assignedPartitions);
            LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
        }
    }

    @Test
    public void invalidConsumerIndexTest() {
        Map<String, Integer> topicPartitionCounts = new HashMap<>();
        topicPartitionCounts.put("topic_0", 1400);
        LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
        int totalNumConsumers = 20;
        StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
        try {
            partitionAssigner.assignPartitionsToConsumer(-1, topicPartitionCounts, totalNumConsumers);
            fail();
        } catch (IllegalArgumentException e) {
            // expected: negative consumer index is rejected
        }
        try {
            partitionAssigner.assignPartitionsToConsumer(100, topicPartitionCounts, totalNumConsumers);
            fail();
        } catch (IllegalArgumentException e) {
            // expected: consumer index >= totalNumConsumers is rejected
        }
    }

    @Test
    public void invalidTotalConsumersTest() {
        Map<String, Integer> topicPartitionCounts = new HashMap<>();
        topicPartitionCounts.put("topic_0", 1400);
        LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
        StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
        try {
            int totalNumConsumers = -1;
            partitionAssigner.assignPartitionsToConsumer(1, topicPartitionCounts, totalNumConsumers);
            fail();
        } catch (IllegalArgumentException e) {
            // expected: non-positive consumer count is rejected
        }
    }

    @Test
    public void invalidTopicPartitionMapTest() {
        Map<String, Integer> topicPartitionCounts = null;
        StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
        try {
            partitionAssigner.assignPartitionsToConsumer(1, topicPartitionCounts, 20);
            fail();
        } catch (NullPointerException e) {
            // expected: null topic-partition map is rejected
        }
    }
}
| 8,539 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/sink/MantisKafkaProducerConfigTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import java.util.Arrays;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.Test;
import org.mockito.stubbing.Answer;
/**
 * Unit tests for {@code MantisKafkaProducerConfig}: verifies producer defaults
 * and job-parameter overrides.
 */
public class MantisKafkaProducerConfigTest {

    /** Builds a mocked job {@link Context} backed by the given parameters. */
    private static Context contextWith(Parameters parameters) {
        Context mockContext = mock(Context.class);
        when(mockContext.getParameters()).then((Answer<Parameters>) invocation -> parameters);
        return mockContext;
    }

    @Test
    public void testDefaultKafkaProducerConfig() {
        Context ctx = contextWith(ParameterTestUtils.createParameters());

        Map<String, Object> props = new MantisKafkaProducerConfig(ctx).getProducerProperties();

        // Without overrides, both serializers default to byte arrays and the
        // bootstrap servers / metric reporters come from the Mantis defaults.
        assertEquals(ByteArraySerializer.class, props.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG));
        assertEquals(ByteArraySerializer.class, props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG));
        assertEquals(Arrays.asList(MantisKafkaProducerConfig.DEFAULT_BOOTSTRAP_SERVERS_CONFIG), props.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));
        assertEquals(Arrays.asList(JmxReporter.class.getName()), props.get(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG));
    }

    @Test
    public void testJobParamOverrides() {
        Parameters parameters = ParameterTestUtils.createParameters(
                KafkaSinkJobParameters.PREFIX + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        Context ctx = contextWith(parameters);

        Map<String, Object> props = new MantisKafkaProducerConfig(ctx).getProducerProperties();

        // Only the key serializer was overridden; the value serializer keeps
        // its default.
        assertEquals(StringSerializer.class, props.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG));
        assertEquals(ByteArraySerializer.class, props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG));
    }
}
| 8,540 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaDataNotification.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
/**
 * Immutable notification describing the outcome of handling a
 * {@link KafkaData} payload: success (ACK), rejection (NACK), or failure with
 * a cause (ERR). Instances are created via the static factory methods.
 */
public class KafkaDataNotification {

    /** Outcome kinds for a Kafka data operation. */
    public enum Kind {
        ACK,
        NACK,
        ERR
    }

    /** Creates a success notification. */
    public static KafkaDataNotification ack(KafkaData event, long elapsedMillis) {
        return new KafkaDataNotification(event, Kind.ACK, null, elapsedMillis);
    }

    /** Creates a rejection notification. */
    public static KafkaDataNotification nack(KafkaData event, long elapsedMillis) {
        return new KafkaDataNotification(event, Kind.NACK, null, elapsedMillis);
    }

    /** Creates a failure notification carrying the cause. */
    public static KafkaDataNotification error(KafkaData request, Throwable t, long elapsedMillis) {
        return new KafkaDataNotification(request, Kind.ERR, t, elapsedMillis);
    }

    private final KafkaData value;
    private final Kind kind;
    private final Throwable error;
    // Made final: only assigned in the constructor and exposed via getElapsed().
    private final long elapsedMillis;

    protected KafkaDataNotification(KafkaData value, Kind kind, Throwable error, long elapsedMillis) {
        this.value = value;
        this.kind = kind;
        this.error = error;
        this.elapsedMillis = elapsedMillis;
    }

    /** @return the failure cause, or {@code null} for ACK/NACK notifications */
    public Throwable getError() {
        return error;
    }

    public boolean hasError() {
        return error != null;
    }

    /** @return the outcome kind of this notification */
    public Kind getKind() {
        return kind;
    }

    /** @return the payload this notification refers to (may be {@code null}) */
    public KafkaData getValue() {
        return value;
    }

    public boolean hasValue() {
        return value != null;
    }

    public boolean isError() {
        // Enum constants are singletons; identity comparison is the idiomatic
        // (and null-safe) form of the original kind.equals(Kind.ERR).
        return kind == Kind.ERR;
    }

    public boolean isSuccess() {
        return kind == Kind.ACK;
    }

    /**
     * Time it took to execute the operation for which this notification is generated
     *
     * @return elapsed time in milliseconds
     */
    public long getElapsed() {
        return elapsedMillis;
    }
}
| 8,541 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaAckable.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
import java.util.concurrent.TimeUnit;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
/**
* Ackable used to wrap the data read from kafka to allow providing feedback to the source when the payload is consumed.
*/
public class KafkaAckable {
private final Subject<KafkaDataNotification, KafkaDataNotification> subject;
private final KafkaData kafkaData;
private final long createTimeNano = System.nanoTime();
public KafkaAckable(KafkaData data, SerializedSubject<KafkaDataNotification, KafkaDataNotification> ackSubject) {
this.kafkaData = data;
this.subject = ackSubject;
}
public KafkaAckable(KafkaData data, Subject<KafkaDataNotification, KafkaDataNotification> ackSubject) {
this.kafkaData = data;
this.subject = ackSubject;
}
public void ack() {
KafkaDataNotification n = KafkaDataNotification.ack(getKafkaData(),
TimeUnit.MILLISECONDS.convert(System.nanoTime() - createTimeNano, TimeUnit.NANOSECONDS));
subject.onNext(n);
}
/**
* NACK indicating that the message was not processed and should be
* returned to the source.
*
*/
public void nack() {
KafkaDataNotification n = KafkaDataNotification.nack(getKafkaData(),
TimeUnit.MILLISECONDS.convert(System.nanoTime() - createTimeNano, TimeUnit.NANOSECONDS));
subject.onNext(n);
}
/**
* There was an error processing the message. Depending on the implementation
* of the source the message may either be,
* 1. Dropped
* 2. Replayed
* 3. Posted to a poison queue
* @param t
*/
public void error(Throwable t) {
KafkaDataNotification n = KafkaDataNotification.error(getKafkaData(), t,
TimeUnit.MILLISECONDS.convert(System.nanoTime() - createTimeNano, TimeUnit.NANOSECONDS));
subject.onNext(n);
}
/**
* @return Get the internal message being Ackable'd
*/
public KafkaData getKafkaData() {
return kafkaData;
}
}
| 8,542 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaData.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
import java.util.Arrays;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
 * Envelope for a single Kafka message consumed by the source, carrying the raw payload
 * along with its topic/partition/offset coordinates, the optional parsed form of the
 * payload and the id of the consumer instance that read it.
 */
public class KafkaData {

    private final String topic;
    private final int partition;
    private final long offset;
    private final byte[] rawBytes;
    /* parsedEvent is present if the raw bytes were already decoded */
    private volatile Optional<Map<String, Object>> parsedEvent;
    private final Optional<String> key;
    // cached "<topic>-<partition>" identifier, computed once at construction
    private final String streamId;
    // final: set once in the constructor and never reassigned
    private final int mantisKafkaConsumerId;

    public KafkaData(String topic,
                     int partition,
                     long offset,
                     byte[] rawBytes,
                     Optional<Map<String, Object>> parsedEvent,
                     Optional<String> key,
                     int mantisKafkaConsumerId) {
        this.topic = topic;
        this.partition = partition;
        this.offset = offset;
        this.rawBytes = rawBytes;
        this.parsedEvent = parsedEvent;
        this.key = key;
        this.mantisKafkaConsumerId = mantisKafkaConsumerId;
        this.streamId = topic + '-' + partition;
    }

    /** Convenience constructor extracting coordinates and payload from a ConsumerRecord. */
    public KafkaData(ConsumerRecord<String, byte[]> m,
                     Optional<Map<String, Object>> parsedEvent,
                     Optional<String> key,
                     int mantisKafkaConsumerId) {
        this(m.topic(), m.partition(), m.offset(), m.value(), parsedEvent, key, mantisKafkaConsumerId);
    }

    public String getTopic() {
        return topic;
    }

    public int getPartition() {
        return partition;
    }

    public long getOffset() {
        return offset;
    }

    public byte[] getRawBytes() {
        return rawBytes;
    }

    public int getMantisKafkaConsumerId() {
        return mantisKafkaConsumerId;
    }

    /** @return stable per-partition stream identifier of the form {@code <topic>-<partition>} */
    public String getStreamId() {
        return this.streamId;
    }

    // NOTE(review): equals/hashCode include the mutable parsedEvent field, so an instance's
    // hash can change after setParsedEvent() is called — do not use KafkaData as a hash key
    // across parsing.
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        KafkaData kafkaData = (KafkaData) o;
        return partition == kafkaData.partition &&
            offset == kafkaData.offset &&
            mantisKafkaConsumerId == kafkaData.mantisKafkaConsumerId &&
            topic.equals(kafkaData.topic) &&
            Arrays.equals(rawBytes, kafkaData.rawBytes) &&
            parsedEvent.equals(kafkaData.parsedEvent) &&
            key.equals(kafkaData.key);
    }

    @Override
    public int hashCode() {
        int result = Objects.hash(topic, partition, offset, parsedEvent, key, mantisKafkaConsumerId);
        result = 31 * result + Arrays.hashCode(rawBytes);
        return result;
    }

    public Optional<Map<String, Object>> getParsedEvent() {
        return parsedEvent;
    }

    /** Attach the decoded form of the raw bytes; a null argument stores Optional.empty(). */
    public void setParsedEvent(final Map<String, Object> parsedEvent) {
        this.parsedEvent = Optional.ofNullable(parsedEvent);
    }

    public Optional<String> getKey() {
        return key;
    }

    @Override
    public String toString() {
        return "KafkaData{" +
            "topic='" + topic + '\'' +
            ", partition=" + partition +
            ", offset=" + offset +
            ", rawBytes=" + Arrays.toString(rawBytes) +
            ", parsedEvent=" + parsedEvent +
            ", key=" + key +
            '}';
    }
}
| 8,543 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaSourceParameters.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
/**
 * Job parameter names used to configure the Mantis Kafka source consumer.
 */
public class KafkaSourceParameters {

    /** Prefix for consumer properties passed through as job parameters. */
    public static final String PREFIX = "kafka.source.consumer.";
    public static final String CHECKPOINT_STRATEGY = "checkpointStrategy";
    public static final String CONSUMER_POLL_TIMEOUT_MS = "consumerPollTimeoutMs";
    public static final String NUM_KAFKA_CONSUMER_PER_WORKER = "numKafkaConsumerPerWorker";
    public static final String TOPIC = PREFIX + "topic";
    public static final String MAX_BYTES_IN_PROCESSING = "maxBytesInProcessing";
    public static final String PARSER_TYPE = "messageParserType";
    public static final String PARSE_MSG_IN_SOURCE = "parseMessageInKafkaConsumerThread";
    public static final String RETRY_CHECKPOINT_CHECK_DELAY_MS = "retryCheckpointCheckDelayMs";
    public static final String CHECKPOINT_INTERVAL_MS = "checkpointIntervalMs";
    // Enable static partition assignment, this disables Kafka's default consumer group management
    public static final String ENABLE_STATIC_PARTITION_ASSIGN = "enableStaticPartitionAssign";
    // Number of partitions per topic, used only when Static Partition assignment is enabled
    public static final String TOPIC_PARTITION_COUNTS = "numPartitionsPerTopic";

    // Constants holder; not meant to be instantiated.
    private KafkaSourceParameters() {
    }
}
| 8,544 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/KafkaSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.CONSUMER_RECORD_OVERHEAD_BYTES;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_MAX_BYTES_IN_PROCESSING;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_PARSE_MSG_IN_SOURCE;
import com.netflix.spectator.api.Registry;
import io.mantisrx.connector.kafka.KafkaAckable;
import io.mantisrx.connector.kafka.KafkaData;
import io.mantisrx.connector.kafka.KafkaDataNotification;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategy;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.connector.kafka.source.checkpoint.trigger.CheckpointTrigger;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import io.mantisrx.connector.kafka.source.serde.ParseException;
import io.mantisrx.connector.kafka.source.serde.Parser;
import io.mantisrx.connector.kafka.source.serde.ParserType;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.BooleanParameter;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.record.InvalidRecordException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Subscription;
import rx.observables.SyncOnSubscribe;
import rx.schedulers.Schedulers;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
/**
* Mantis Kafka Source wraps a kafka v2.2.+ consumer with back pressure semantics, the consumer polls data from kafka
* only as fast as the data is processed & ack'ed by the processing stage of the Mantis Job.
* <p>
* The {@value KafkaSourceParameters#NUM_KAFKA_CONSUMER_PER_WORKER} Job param decides the number of Kafka consumer instances spawned on each Mantis worker,
* Each kafka consumer instance runs in their own thread and poll data from kafka as part of the same consumer group
*/
public class KafkaSource implements Source<KafkaAckable> {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaSource.class);

    // Flipped to true on unsubscribe/close; polled by the consumer loop to stop fetching and
    // close the underlying Kafka consumers.
    private final AtomicBoolean done = new AtomicBoolean();

    // consumer id -> consumer instance, used to route ack notifications back to the consumer
    // that read the message.
    private final Map<Integer, MantisKafkaConsumer<?>> idToConsumerMap = new HashMap<>();

    private final Registry registry;

    // Serialized so ack/nack/error callbacks arriving from multiple processing threads are
    // delivered to the subject safely.
    private final SerializedSubject<KafkaDataNotification, KafkaDataNotification> ackSubject =
        new SerializedSubject<>(PublishSubject.create());

    private Subscription ackSubjectSubscription;

    public KafkaSource(final Registry registry) {
        this.registry = registry;
    }

    /**
     * Creates the configured number of Kafka consumer instances for this worker and registers
     * each one in {@link #idToConsumerMap} so acknowledgements can be routed back to it.
     *
     * @param context           job context for this worker
     * @param kafkaSourceConfig Mantis Kafka source configuration
     * @param totalNumWorkers   total number of workers in this job stage
     * @return an Observable emitting the created consumers
     */
    private Observable<MantisKafkaConsumer<?>> createConsumers(final Context context,
                                                               final MantisKafkaSourceConfig kafkaSourceConfig,
                                                               final int totalNumWorkers) {
        final List<MantisKafkaConsumer<?>> consumers = new ArrayList<>();
        for (int i = 0; i < kafkaSourceConfig.getNumConsumerInstances(); i++) {
            // Spread consumer indexes so they are unique across all workers of the job.
            final int consumerIndex = context.getWorkerInfo().getWorkerIndex() + (totalNumWorkers * i);
            MantisKafkaConsumer<?> mantisKafkaConsumer = new MantisKafkaConsumer.Builder()
                .withKafkaSourceConfig(kafkaSourceConfig)
                .withTotalNumConsumersForJob(totalNumWorkers * kafkaSourceConfig.getNumConsumerInstances())
                .withContext(context)
                .withConsumerIndex(consumerIndex)
                .withRegistry(registry)
                .build();
            idToConsumerMap.put(mantisKafkaConsumer.getConsumerId(), mantisKafkaConsumer);
            LOGGER.info("created consumer {}", mantisKafkaConsumer);
            consumers.add(mantisKafkaConsumer);
        }
        return Observable.from(consumers);
    }

    // Approximate size of a record for checkpoint-trigger accounting: payload bytes plus a
    // fixed per-record overhead constant.
    private int getPayloadSize(ConsumerRecord<String, byte[]> record) {
        return record.value().length + CONSUMER_RECORD_OVERHEAD_BYTES;
    }

    /**
     * Create an observable with back pressure semantics from the consumer records fetched using consumer.
     * The SyncOnSubscribe below polls Kafka only when the downstream requests more data, which is
     * what provides the back pressure.
     *
     * @param mantisKafkaConsumer non thread-safe KafkaConsumer
     * @param kafkaSourceConfig configuration for the Mantis Kafka Source
     */
    private Observable<KafkaAckable> createBackPressuredConsumerObs(final MantisKafkaConsumer<?> mantisKafkaConsumer,
                                                                    final MantisKafkaSourceConfig kafkaSourceConfig) {
        CheckpointStrategy checkpointStrategy = mantisKafkaConsumer.getStrategy();
        final CheckpointTrigger trigger = mantisKafkaConsumer.getTrigger();
        final ConsumerMetrics consumerMetrics = mantisKafkaConsumer.getConsumerMetrics();
        final TopicPartitionStateManager partitionStateManager = mantisKafkaConsumer.getPartitionStateManager();
        int mantisKafkaConsumerId = mantisKafkaConsumer.getConsumerId();

        SyncOnSubscribe<Iterator<ConsumerRecord<String, byte[]>>, KafkaAckable> syncOnSubscribe = SyncOnSubscribe.createStateful(
            // State generator: do an initial poll and hand the record iterator to the next() calls.
            () -> {
                final ConsumerRecords<String, byte[]> records = mantisKafkaConsumer.poll(kafkaSourceConfig.getConsumerPollTimeoutMs());
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("topic listing for consumer {}", mantisKafkaConsumer.listTopics());
                }
                LOGGER.info("consumer subscribed to topic-partitions {}", mantisKafkaConsumer.assignment());
                return records.iterator();
            },
            // next(): emit at most one KafkaAckable per invocation, re-polling when the current
            // iterator is exhausted. Also persists a checkpoint when the trigger fires.
            (consumerRecordIterator, observer) -> {
                Iterator<ConsumerRecord<String, byte[]>> it = consumerRecordIterator;
                final Set<TopicPartition> partitions = mantisKafkaConsumer.assignment();

                if (trigger.shouldCheckpoint()) {
                    long startTime = System.currentTimeMillis();
                    final Map<TopicPartition, OffsetAndMetadata> checkpoint =
                        partitionStateManager.createCheckpoint(partitions);
                    checkpointStrategy.persistCheckpoint(checkpoint);
                    long now = System.currentTimeMillis();
                    consumerMetrics.recordCheckpointDelay(now - startTime);
                    consumerMetrics.incrementCommitCount();
                    trigger.reset();
                }

                if (!done.get()) {
                    try {
                        if (!consumerRecordIterator.hasNext()) {
                            final ConsumerRecords<String, byte[]> consumerRecords =
                                mantisKafkaConsumer.poll(kafkaSourceConfig.getConsumerPollTimeoutMs());
                            if (LOGGER.isDebugEnabled()) {
                                LOGGER.debug("poll returned {} records", consumerRecords.count());
                            }
                            it = consumerRecords.iterator();
                        }

                        if (it.hasNext()) {
                            final ConsumerRecord<String, byte[]> m = it.next();
                            final TopicPartition topicPartition = new TopicPartition(m.topic(), m.partition());
                            consumerMetrics.incrementInCount();
                            if (LOGGER.isDebugEnabled()) {
                                LOGGER.debug("updating read offset to " + m.offset() + " read " + m.value());
                            }
                            if (m.value() != null) {
                                try {
                                    trigger.update(getPayloadSize(m));
                                    if (kafkaSourceConfig.getParseMessageInSource()) {
                                        final Parser parser = ParserType.parser(kafkaSourceConfig.getMessageParserType()).getParser();
                                        if (parser.canParse(m.value())) {
                                            final Map<String, Object> parsedKafkaValue = parser.parseMessage(m.value());
                                            final KafkaData kafkaData = new KafkaData(m, Optional.ofNullable(parsedKafkaValue), Optional.ofNullable(m.key()), mantisKafkaConsumerId);
                                            final KafkaAckable ackable = new KafkaAckable(kafkaData, ackSubject);
                                            // record offset consumed in TopicPartitionStateManager before onNext to avoid race condition with Ack being processed before the consume is recorded
                                            partitionStateManager.recordMessageRead(topicPartition, m.offset());
                                            consumerMetrics.recordReadOffset(topicPartition, m.offset());
                                            observer.onNext(ackable);
                                        } else {
                                            consumerMetrics.incrementParseFailureCount();
                                        }
                                    } else {
                                        // parsing deferred to a downstream stage; emit the raw bytes
                                        final KafkaData kafkaData = new KafkaData(m, Optional.empty(), Optional.ofNullable(m.key()), mantisKafkaConsumerId);
                                        final KafkaAckable ackable = new KafkaAckable(kafkaData, ackSubject);
                                        // record offset consumed in TopicPartitionStateManager before onNext to avoid race condition with Ack being processed before the consume is recorded
                                        partitionStateManager.recordMessageRead(topicPartition, m.offset());
                                        consumerMetrics.recordReadOffset(topicPartition, m.offset());
                                        observer.onNext(ackable);
                                    }
                                } catch (ParseException pe) {
                                    consumerMetrics.incrementErrorCount();
                                    LOGGER.warn("failed to parse {}:{} message {}", m.topic(), m.partition(), m.value(), pe);
                                }
                            } else {
                                consumerMetrics.incrementKafkaMessageValueNullCount();
                            }
                        } else {
                            // nothing available; back off briefly before the next poll
                            consumerMetrics.incrementWaitForDataCount();
                            if (LOGGER.isDebugEnabled()) {
                                LOGGER.debug("Reached head of partition, waiting for more data");
                            }
                            TimeUnit.MILLISECONDS.sleep(200);
                        }
                    } catch (TimeoutException toe) {
                        consumerMetrics.incrementWaitForDataCount();
                        if (LOGGER.isDebugEnabled()) {
                            LOGGER.debug("Reached head of partition waiting for more data");
                        }
                    } catch (OffsetOutOfRangeException oore) {
                        // offsets no longer exist on the broker; restart from the beginning of
                        // the affected partitions
                        LOGGER.warn("offsets out of range " + oore.partitions() + " will seek to beginning", oore);
                        final Set<TopicPartition> topicPartitionSet = oore.partitions();
                        for (TopicPartition tp : topicPartitionSet) {
                            LOGGER.info("partition {} consumer position {}", tp, mantisKafkaConsumer.position(tp));
                        }
                        mantisKafkaConsumer.seekToBeginning(oore.partitions().toArray(new TopicPartition[oore.partitions().size()]));
                    } catch (InvalidRecordException ire) {
                        consumerMetrics.incrementErrorCount();
                        LOGGER.warn("iterator error with invalid message. message will be dropped " + ire.getMessage());
                    } catch (KafkaException e) {
                        consumerMetrics.incrementErrorCount();
                        LOGGER.warn("Other Kafka exception, message will be dropped. " + e.getMessage());
                    } catch (InterruptedException ie) {
                        LOGGER.error("consumer interrupted", ie);
                        Thread.currentThread().interrupt();
                    } catch (Exception e) {
                        consumerMetrics.incrementErrorCount();
                        LOGGER.warn("caught exception", e);
                    }
                } else {
                    // source was closed/unsubscribed; release the Kafka consumer
                    mantisKafkaConsumer.close();
                }
                return it;
            },
            // onUnsubscribe: release the Kafka consumer
            consumerRecordIterator -> {
                LOGGER.info("closing Kafka consumer on unsubscribe" + mantisKafkaConsumer.toString());
                mantisKafkaConsumer.close();
            });

        return Observable.create(syncOnSubscribe)
            // each consumer polls on its own dedicated thread (KafkaConsumer is not thread-safe)
            .subscribeOn(Schedulers.newThread())
            .doOnUnsubscribe(() -> LOGGER.info("consumer {} stopped due to unsubscribe", mantisKafkaConsumerId))
            .doOnError((t) -> {
                LOGGER.error("consumer {} stopped due to error", mantisKafkaConsumerId, t);
                consumerMetrics.incrementErrorCount();
            })
            .doOnTerminate(() -> LOGGER.info("consumer {} terminated", mantisKafkaConsumerId));
    }

    /**
     * Entry point of the source: creates the consumers for this worker and emits one inner
     * Observable of KafkaAckable per consumer instance.
     */
    @Override
    public Observable<Observable<KafkaAckable>> call(Context context, Index index) {
        final int totalNumWorkers = index.getTotalNumWorkers();
        MantisKafkaSourceConfig mantisKafkaSourceConfig = new MantisKafkaSourceConfig(context);
        startAckProcessor();

        return Observable.create((Observable.OnSubscribe<Observable<KafkaAckable>>) child -> {
            final Observable<MantisKafkaConsumer<?>> consumers =
                createConsumers(context, mantisKafkaSourceConfig, totalNumWorkers);
            consumers.subscribe(consumer -> {
                final Observable<KafkaAckable> mantisKafkaAckableObs =
                    createBackPressuredConsumerObs(consumer, mantisKafkaSourceConfig);
                child.onNext(mantisKafkaAckableObs);
            });
        })
        .doOnUnsubscribe(() -> {
            LOGGER.info("unsubscribed");
            done.set(true);
        }).doOnSubscribe(() -> {
            LOGGER.info("subscribed");
            done.set(false);
        });
    }

    @Override
    public void close() throws IOException {
        done.set(true);
        stopAckProcessor();
    }

    /**
     * Handles an ack/nack notification from the processing stage by recording the acknowledged
     * offset with the consumer that originally read the message.
     */
    private void processAckNotification(final KafkaDataNotification notification) {
        final KafkaData kafkaData = notification.getValue();
        final TopicPartition topicPartition = new TopicPartition(kafkaData.getTopic(), kafkaData.getPartition());
        MantisKafkaConsumer<?> mantisKafkaConsumer = idToConsumerMap.get(kafkaData.getMantisKafkaConsumerId());
        if (mantisKafkaConsumer != null) {
            mantisKafkaConsumer.getPartitionStateManager().recordMessageAck(topicPartition, kafkaData.getOffset());
            if (!notification.isSuccess()) {
                // TODO provide a hook for the user to add handling for messages that could not be processed
                LOGGER.debug("Got negative acknowledgement {}", notification);
            }
            mantisKafkaConsumer.getConsumerMetrics().incrementProcessedCount();
        } else {
            // can happen if a rebalance or restart removed the consumer before the ack arrived
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("got Ack for consumer id {} not in idToConsumerMap (topic {})", kafkaData.getMantisKafkaConsumerId(), kafkaData.getTopic());
            }
        }
    }

    private void startAckProcessor() {
        LOGGER.info("Acknowledgement processor started");
        ackSubjectSubscription = ackSubject.subscribe((KafkaDataNotification notification) -> processAckNotification(notification));
    }

    private void stopAckProcessor() {
        if (ackSubjectSubscription != null) {
            ackSubjectSubscription.unsubscribe();
            ackSubjectSubscription = null;
        }
    }

    /**
     * Declares the job parameters accepted by this source, plus the consumer-level parameters
     * contributed by MantisKafkaConsumerConfig.
     */
    @Override
    public List<ParameterDefinition<?>> getParameters() {
        final List<ParameterDefinition<?>> params = new ArrayList<>();
        params.add(new StringParameter()
                       .name(KafkaSourceParameters.TOPIC)
                       .description("Kafka topic to connect to")
                       .validator(Validators.notNullOrEmpty())
                       .required()
                       .build());
        // Optional parameters
        params.add(new StringParameter()
                       .name(KafkaSourceParameters.CHECKPOINT_STRATEGY)
                       .description("checkpoint strategy one of " + CheckpointStrategyOptions.values() + " (ensure enable.auto.commit param is set to false when enabling this)")
                       .defaultValue(CheckpointStrategyOptions.NONE)
                       .validator(Validators.alwaysPass())
                       .build());
        params.add(new IntParameter()
                       .name(KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER)
                       .description("No. of Kafka consumer instances per Mantis worker")
                       .validator(Validators.range(1, 16))
                       .defaultValue(DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER)
                       .build());
        params.add(new IntParameter()
                       .name(KafkaSourceParameters.MAX_BYTES_IN_PROCESSING)
                       .description("The maximum amount of data per-consumer awaiting acks to trigger an offsets commit. " +
                                        "These commits are in addition to any commits triggered by commitIntervalMs timer")
                       .defaultValue(DEFAULT_MAX_BYTES_IN_PROCESSING)
                       .validator(Validators.range(1, Integer.MAX_VALUE))
                       .build());
        params.add(new IntParameter()
                       .name(KafkaSourceParameters.CONSUMER_POLL_TIMEOUT_MS)
                       .validator(Validators.range(100, 10_000))
                       .defaultValue(250)
                       .build());
        params.add(new StringParameter()
                       .name(KafkaSourceParameters.PARSER_TYPE)
                       .validator(Validators.notNullOrEmpty())
                       .defaultValue(ParserType.SIMPLE_JSON.getPropName())
                       .build());
        params.add(new BooleanParameter()
                       .name(KafkaSourceParameters.PARSE_MSG_IN_SOURCE)
                       .validator(Validators.alwaysPass())
                       .defaultValue(DEFAULT_PARSE_MSG_IN_SOURCE)
                       .build());
        params.add(new BooleanParameter()
                       .name(KafkaSourceParameters.ENABLE_STATIC_PARTITION_ASSIGN)
                       .validator(Validators.alwaysPass())
                       .defaultValue(DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN)
                       .description("Disable Kafka's default consumer group management and statically assign partitions to job workers. When enabling static partition assignments, disable auto-scaling and set the numPartitionsPerTopic job parameter")
                       .build());
        params.add(new StringParameter()
                       .name(KafkaSourceParameters.TOPIC_PARTITION_COUNTS)
                       .validator(Validators.alwaysPass())
                       .defaultValue("")
                       .description("Configures number of partitions on a kafka topic when static partition assignment is enabled. Format <topic1>:<numPartitions Topic1>,<topic2>:<numPartitions Topic2> Example: nf_errors_log:9,clevent:450")
                       .build());
        params.addAll(MantisKafkaConsumerConfig.getJobParameterDefinitions());
        return params;
    }
}
| 8,545 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/KafkaConsumerRebalanceListener.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategy;
import java.util.Collection;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Rebalance listener that clears per-partition tracking state when partitions are revoked
 * and seeks newly assigned partitions to their last checkpointed offsets.
 */
public class KafkaConsumerRebalanceListener<S> implements ConsumerRebalanceListener {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaConsumerRebalanceListener.class);

    private final KafkaConsumer<?, ?> consumer;
    private final TopicPartitionStateManager partitionStateManager;
    private final CheckpointStrategy<S> checkpointStrategy;

    public KafkaConsumerRebalanceListener(final KafkaConsumer<?, ?> consumer,
                                          final TopicPartitionStateManager partitionStateManager,
                                          final CheckpointStrategy<S> checkpointStrategy) {
        this.consumer = consumer;
        this.partitionStateManager = partitionStateManager;
        this.checkpointStrategy = checkpointStrategy;
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // When partitions are revoked, clear all partition state. We don't try to checkpoint here
        // because waiting for every in-flight message to be acked could block this callback
        // indefinitely when processing is slow.
        LOGGER.info("partitions revoked, resetting partition state: {}", partitions.toString());
        for (TopicPartition revoked : partitions) {
            partitionStateManager.resetCounters(revoked);
        }
    }

    /**
     * Assumption is onPartitionsRevoked will always be called before onPartitionsAssigned
     */
    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        LOGGER.info("new partitions assigned: {}", partitions.toString());
        try {
            for (TopicPartition assigned : partitions) {
                checkpointStrategy.loadCheckpoint(assigned)
                    .filter(state -> state instanceof OffsetAndMetadata)
                    .map(OffsetAndMetadata.class::cast)
                    .ifPresent(offsetAndMetadata -> seekTo(assigned, offsetAndMetadata.offset()));
            }
        } catch (Exception e) {
            LOGGER.error("caught exception on partition assignment {}", partitions, e);
        }
    }

    // Seek the consumer to the checkpointed offset, logging (not propagating) any failure.
    private void seekTo(final TopicPartition tp, final long offset) {
        LOGGER.info("seeking consumer to checkpoint'ed offset {} for partition {} on assignment", offset, tp);
        try {
            consumer.seek(tp, offset);
        } catch (Exception e) {
            LOGGER.error("caught exception seeking consumer to offset {} on topic partition {}", offset, tp, e);
        }
    }
}
| 8,546 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/TopicPartitionStateManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import io.netty.util.internal.ConcurrentSet;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tracks, per topic-partition, the last offset read from Kafka and the set of offsets handed
 * downstream but not yet acknowledged. Checkpoints created here only cover offsets whose
 * messages have all been fully processed.
 */
public class TopicPartitionStateManager {

    private static final Logger LOGGER = LoggerFactory.getLogger(TopicPartitionStateManager.class);

    public static final long DEFAULT_LAST_READ_OFFSET = 0;

    // Delay between re-checks while waiting for in-flight messages to be acked (default 20ms
    // per the original comment; the actual value is injected).
    private final int checkpointReadyCheckDelayMs;
    private final Counter waitingForAckCount;

    public TopicPartitionStateManager(Registry registry, String kafkaClientId, int checkpointReadyCheckDelayMs) {
        this.checkpointReadyCheckDelayMs = checkpointReadyCheckDelayMs;
        this.waitingForAckCount = registry.counter("waitingOnAck", "client-id", kafkaClientId);
    }

    // Per-partition tracking state. Made static: it does not use the enclosing instance.
    private static class State {
        private final AtomicLong lastReadOffset = new AtomicLong(DEFAULT_LAST_READ_OFFSET);
        // ConcurrentHashMap-backed set replaces netty's deprecated ConcurrentSet; the
        // KeySetView type keeps this edit free of new imports.
        private final ConcurrentHashMap.KeySetView<Long, Boolean> unAckedOffsets = ConcurrentHashMap.newKeySet();
    }

    private final ConcurrentMap<TopicPartition, State> partitionState = new ConcurrentHashMap<>();

    /**
     * Track the message with this offset as read from Kafka but waiting on acknowledgement from the processing stage.
     *
     * @param tp TopicPartition the message was read from
     * @param offset kafka offset for the message
     */
    public void recordMessageRead(final TopicPartition tp, final long offset) {
        // computeIfAbsent is atomic, unlike the previous containsKey/putIfAbsent sequence
        final State state = partitionState.computeIfAbsent(tp, k -> new State());
        state.unAckedOffsets.add(offset);
        state.lastReadOffset.set(offset);
    }

    /**
     * Records the message identified by this offset has been processed and ack'ed by the processing stage.
     *
     * @param tp TopicPartition the message was read from
     * @param offset kafka offset for the message
     */
    public void recordMessageAck(final TopicPartition tp, final long offset) {
        // single get avoids the containsKey-then-get window where the entry could disappear
        final State state = partitionState.get(tp);
        if (state != null) {
            state.unAckedOffsets.remove(offset);
        }
    }

    /**
     * Get last read offset from this topic partition.
     *
     * @param tp TopicPartition
     * @return last offset read from given TopicPartition, empty if nothing was read yet
     */
    public Optional<Long> getLastOffset(final TopicPartition tp) {
        final State state = partitionState.get(tp);
        return state == null ? Optional.empty() : Optional.of(state.lastReadOffset.get());
    }

    // True when no messages for this partition are awaiting acknowledgement.
    private boolean allMessagesAcked(final TopicPartition tp) {
        final State state = partitionState.get(tp);
        // no messages, no acks needed
        return state == null || state.unAckedOffsets.isEmpty();
    }

    /**
     * Builds an offsets checkpoint for the given partitions, blocking until every in-flight
     * message has been acked so the committed offset never skips unprocessed messages.
     *
     * @throws RuntimeException if the calling thread is interrupted while waiting for acks
     */
    public Map<TopicPartition, OffsetAndMetadata> createCheckpoint(final Collection<TopicPartition> partitions) {
        if (partitionState.isEmpty()) {
            return Collections.emptyMap();
        }
        final Map<TopicPartition, OffsetAndMetadata> checkpoint = new HashMap<>(partitions.size());
        for (TopicPartition tp : partitions) {
            while (!allMessagesAcked(tp)) {
                try {
                    waitingForAckCount.increment();
                    Thread.sleep(checkpointReadyCheckDelayMs);
                } catch (InterruptedException e) {
                    LOGGER.info("thread interrupted when creating checkpoint for {}", tp);
                    Thread.currentThread().interrupt();
                    throw new RuntimeException("thread interrupted when creating checkpoint", e);
                }
            }
            final State pState = partitionState.get(tp);
            if (pState != null) {
                final long lastOffset = pState.lastReadOffset.get();
                if (lastOffset != DEFAULT_LAST_READ_OFFSET) {
                    // commit offset is last processed + 1, i.e. the next offset to consume
                    checkpoint.put(tp, new OffsetAndMetadata(lastOffset + 1, String.valueOf(System.currentTimeMillis())));
                }
            }
        }
        return checkpoint;
    }

    /* reset partition counters */
    public void resetCounters(final TopicPartition tp) {
        final State state = partitionState.get(tp);
        if (state == null) {
            return;
        }
        state.unAckedOffsets.clear();
        state.lastReadOffset.set(DEFAULT_LAST_READ_OFFSET);
    }

    /* reset all counters */
    public void resetCounters() {
        LOGGER.info("resetting all counters");
        if (partitionState.isEmpty()) {
            return;
        }
        for (State state : partitionState.values()) {
            state.unAckedOffsets.clear();
            state.lastReadOffset.set(DEFAULT_LAST_READ_OFFSET);
        }
    }
}
| 8,547 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/MantisKafkaConsumer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.connector.kafka.source.assignor.StaticPartitionAssignor;
import io.mantisrx.connector.kafka.source.assignor.StaticPartitionAssignorImpl;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategy;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyFactory;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.connector.kafka.source.checkpoint.trigger.CheckpointTrigger;
import io.mantisrx.connector.kafka.source.checkpoint.trigger.CheckpointTriggerFactory;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import io.mantisrx.runtime.Context;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Subscription;
/**
 * Wraps a single {@link KafkaConsumer} together with its per-partition state manager,
 * checkpoint strategy, checkpoint trigger and metrics. One instance corresponds to one
 * Kafka consumer inside a Mantis worker.
 *
 * @param <S> checkpoint state type used by the {@link CheckpointStrategy}
 */
public class MantisKafkaConsumer<S> {

    private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaConsumer.class);

    private final int consumerId;
    private final KafkaConsumer<String, byte[]> consumer;
    private final CheckpointStrategy<S> strategy;
    private final CheckpointTrigger trigger;
    private final ConsumerMetrics consumerMetrics;
    private final TopicPartitionStateManager partitionStateManager;
    // wall-clock time of the last poll() call and of the last poll() that returned records
    private final AtomicLong pollTimestamp = new AtomicLong(System.currentTimeMillis());
    private final AtomicLong pollReturnedDataTimestamp = new AtomicLong(System.currentTimeMillis());
    private volatile Subscription metricSubscription = null;

    public MantisKafkaConsumer(final int consumerId,
                               final KafkaConsumer<String, byte[]> consumer,
                               final TopicPartitionStateManager partitionStateManager,
                               final CheckpointStrategy<S> strategy,
                               final CheckpointTrigger trigger,
                               final ConsumerMetrics metrics) {
        this.consumerId = consumerId;
        this.consumerMetrics = metrics;
        this.consumer = consumer;
        this.partitionStateManager = partitionStateManager;
        this.strategy = strategy;
        this.trigger = trigger;
        setupMetricPublish();
    }

    // publishes poll-staleness gauges once per second for the lifetime of this consumer
    private void setupMetricPublish() {
        if (metricSubscription == null) {
            this.metricSubscription = Observable.interval(1, TimeUnit.SECONDS).subscribe((tick) -> {
                consumerMetrics.recordTimeSinceLastPollMs(timeSinceLastPollMs());
                consumerMetrics.recordTimeSinceLastPollWithDataMs(timeSinceLastPollWithDataMs());
            });
        }
    }

    public int getConsumerId() {
        return consumerId;
    }

    public KafkaConsumer<String, byte[]> getConsumer() {
        return consumer;
    }

    public CheckpointStrategy<S> getStrategy() {
        return strategy;
    }

    public CheckpointTrigger getTrigger() {
        return trigger;
    }

    public TopicPartitionStateManager getPartitionStateManager() {
        return partitionStateManager;
    }

    /** @return milliseconds elapsed since the last call to {@link #poll(long)} */
    public long timeSinceLastPollMs() {
        return (System.currentTimeMillis() - pollTimestamp.get());
    }

    /** @return milliseconds elapsed since the last poll that returned at least one record */
    public long timeSinceLastPollWithDataMs() {
        return (System.currentTimeMillis() - pollReturnedDataTimestamp.get());
    }

    public ConsumerMetrics getConsumerMetrics() {
        return consumerMetrics;
    }

    /**
     * Stops the metric publisher, resets per-partition state and shuts down the checkpoint
     * trigger when it is still active, and closes the underlying Kafka consumer.
     */
    public void close() {
        if (metricSubscription != null && !metricSubscription.isUnsubscribed()) {
            metricSubscription.unsubscribe();
        }
        if (trigger.isActive()) {
            final Set<TopicPartition> partitions = consumer.assignment();
            LOGGER.warn("clearing partition state when closing consumer {}, partitions {}", this.toString(), partitions.toString());
            partitions.forEach(partitionStateManager::resetCounters);
            trigger.shutdown();
        }
        // BUGFIX: close the consumer unconditionally — previously it was only closed inside the
        // trigger.isActive() branch, leaking the KafkaConsumer when the trigger was inactive.
        consumer.close();
    }

    /**
     * {@link KafkaConsumer#poll(Duration)} ()}
     */
    public ConsumerRecords<String, byte[]> poll(final long consumerPollTimeoutMs) {
        final long now = System.currentTimeMillis();
        pollTimestamp.set(now);
        final ConsumerRecords<String, byte[]> consumerRecords = consumer.poll(Duration.ofMillis(consumerPollTimeoutMs));
        if (consumerRecords.count() > 0) {
            pollReturnedDataTimestamp.set(now);
        }
        return consumerRecords;
    }

    /**
     * {@link KafkaConsumer#assignment()}
     */
    public Set<TopicPartition> assignment() {
        return consumer.assignment();
    }

    /**
     * {@link KafkaConsumer#listTopics()}
     */
    public Map<String, List<PartitionInfo>> listTopics() {
        return consumer.listTopics();
    }

    /**
     * {@link KafkaConsumer#position(TopicPartition)} ()}
     */
    public long position(TopicPartition partition) {
        return consumer.position(partition);
    }

    /**
     * {@link KafkaConsumer#seekToBeginning(Collection)} ()}
     */
    public void seekToBeginning(TopicPartition... partitions) {
        consumer.seekToBeginning(Arrays.asList(partitions));
    }

    /**
     * {@link KafkaConsumer#pause(Collection)} ()}
     */
    public void pause(TopicPartition... partitions) {
        LOGGER.debug("pausing {} partitions", partitions.length);
        consumer.pause(Arrays.asList(partitions));
        consumerMetrics.incrementPausePartitionCount();
    }

    /**
     * {@link KafkaConsumer#resume(Collection)} ()}
     */
    public void resume(TopicPartition... partitions) {
        try {
            LOGGER.debug("resuming {} partitions", partitions.length);
            consumer.resume(Arrays.asList(partitions));
            consumerMetrics.incrementResumePartitionCount();
        } catch (IllegalStateException e) {
            // a partition may no longer be assigned to this consumer; treat as best-effort
            LOGGER.warn("resuming partitions failed", e);
        }
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        // wildcard instead of raw type; getClass() check above makes this cast safe
        MantisKafkaConsumer<?> that = (MantisKafkaConsumer<?>) o;
        return consumerId == that.consumerId &&
            consumer.equals(that.consumer) &&
            strategy.equals(that.strategy) &&
            trigger.equals(that.trigger) &&
            consumerMetrics.equals(that.consumerMetrics) &&
            partitionStateManager.equals(that.partitionStateManager);
    }

    @Override
    public int hashCode() {
        return Objects.hash(consumerId, consumer, strategy, trigger, consumerMetrics, partitionStateManager);
    }

    @Override
    public String toString() {
        return "MantisKafkaConsumer{" +
            "consumerId=" + consumerId +
            ", consumer=" + consumer +
            ", strategy=" + strategy +
            ", trigger=" + trigger +
            '}';
    }

    /** Builder that wires up a fully configured {@link MantisKafkaConsumer} for one worker. */
    static class Builder {
        private Context context;
        private int consumerIndex;
        private int totalNumConsumersForJob;
        private Registry registry;
        private MantisKafkaSourceConfig kafkaSourceConfig;
        // monotonically increasing id shared across all consumers built in this JVM
        private static final AtomicInteger consumerId = new AtomicInteger(0);
        private final StaticPartitionAssignor staticPartitionAssignor = new StaticPartitionAssignorImpl();

        public Builder withContext(Context context) {
            this.context = context;
            return this;
        }

        public Builder withKafkaSourceConfig(MantisKafkaSourceConfig kafkaSourceConfig) {
            this.kafkaSourceConfig = kafkaSourceConfig;
            return this;
        }

        public Builder withConsumerIndex(int consumerIndex) {
            this.consumerIndex = consumerIndex;
            return this;
        }

        public Builder withTotalNumConsumersForJob(int totalNumConsumersForJob) {
            this.totalNumConsumersForJob = totalNumConsumersForJob;
            return this;
        }

        public Builder withRegistry(Registry registry) {
            this.registry = registry;
            return this;
        }

        // Assigns a fixed subset of partitions to this consumer instead of using Kafka's
        // group rebalancing; exits the job on invalid consumer index/count.
        private void doStaticPartitionAssignment(final KafkaConsumer<String, byte[]> consumer,
                                                 final ConsumerRebalanceListener rebalanceListener,
                                                 final int consumerIndex,
                                                 final int totalNumConsumers,
                                                 final Map<String, Integer> topicPartitionCounts,
                                                 final Registry registry) {
            if (totalNumConsumers <= 0) {
                LOGGER.error("total num consumers {} is invalid", totalNumConsumers);
                context.completeAndExit();
                return;
            }
            if (consumerIndex < 0 || consumerIndex >= totalNumConsumers) {
                LOGGER.error("consumerIndex {} is invalid (numConsumers: {})", consumerIndex, totalNumConsumers);
                context.completeAndExit();
                return;
            }
            final List<TopicPartition> topicPartitions = staticPartitionAssignor.assignPartitionsToConsumer(consumerIndex, topicPartitionCounts, totalNumConsumers);
            if (topicPartitions.isEmpty()) {
                LOGGER.error("topic partitions to assign list is empty");
                throw new RuntimeException("static partition assignment is enabled and no topic partitions were assigned, please check numPartitionsPerTopic job param is set correctly and the job has num(kafka consumer) <= num(partition)");
            } else {
                LOGGER.info("Statically assigned topic partitions(): {}", topicPartitions);
                topicPartitions.forEach(tp ->
                    registry.gauge("staticPartitionAssigned",
                        "topic", tp.topic(), "partition", String.valueOf(tp.partition())).set(1.0));
                consumer.assign(topicPartitions);
                // reuse onPartitionsAssigned() so the consumer can seek to checkpoint'ed offset from offset store
                rebalanceListener.onPartitionsAssigned(topicPartitions);
            }
        }

        /**
         * Validates the builder inputs and creates the KafkaConsumer, partition state manager,
         * checkpoint strategy/trigger and metrics for one consumer instance.
         */
        public MantisKafkaConsumer<?> build() {
            Preconditions.checkNotNull(context, "context");
            Preconditions.checkNotNull(kafkaSourceConfig, "kafkaSourceConfig");
            Preconditions.checkNotNull(registry, "registry");
            Preconditions.checkArg(consumerIndex >= 0, "consumerIndex must be greater than or equal to 0");
            Preconditions.checkArg(totalNumConsumersForJob > 0, "total number of consumers for job must be greater than 0");
            final int kafkaConsumerId = consumerId.incrementAndGet();
            Map<String, Object> consumerProps = kafkaSourceConfig.getConsumerConfig().getConsumerProperties();
            // unique client id per job/worker/consumer so broker-side metrics are distinguishable
            final String clientId = String.format("%s-%d-%d", context.getJobId(), context.getWorkerInfo().getWorkerNumber(), kafkaConsumerId);
            consumerProps.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
            // hard-coding key to String type and value to byte[]
            final KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps);
            final TopicPartitionStateManager partitionStateManager = new TopicPartitionStateManager(registry, clientId, kafkaSourceConfig.getRetryCheckpointCheckDelayMs());
            final ConsumerMetrics metrics = new ConsumerMetrics(registry, kafkaConsumerId, context);
            final CheckpointStrategy<?> strategy = CheckpointStrategyFactory.getNewInstance(context, consumer, kafkaSourceConfig.getCheckpointStrategy(), metrics);
            if (kafkaSourceConfig.getStaticPartitionAssignmentEnabled()) {
                final KafkaConsumerRebalanceListener kafkaConsumerRebalanceListener = new KafkaConsumerRebalanceListener(consumer, partitionStateManager, strategy);
                kafkaSourceConfig.getTopicPartitionCounts().ifPresent(topicPartitionCounts -> {
                    doStaticPartitionAssignment(consumer, kafkaConsumerRebalanceListener, consumerIndex, totalNumConsumersForJob, topicPartitionCounts, registry);
                });
            } else {
                // a rebalance listener is only needed when checkpointing, to seek to stored offsets
                if (kafkaSourceConfig.getCheckpointStrategy() != CheckpointStrategyOptions.NONE) {
                    consumer.subscribe(kafkaSourceConfig.getTopics(),
                        new KafkaConsumerRebalanceListener(consumer, partitionStateManager, strategy));
                } else {
                    consumer.subscribe(kafkaSourceConfig.getTopics());
                }
            }
            final CheckpointTrigger trigger = CheckpointTriggerFactory.getNewInstance(kafkaSourceConfig);
            return new MantisKafkaConsumer<>(kafkaConsumerId, consumer, partitionStateManager, strategy, trigger, metrics);
        }
    }
}
| 8,548 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/MantisKafkaSourceConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.connector.kafka.source.serde.ParserType;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import io.mantisrx.shaded.com.google.common.base.Splitter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Holder for all Kafka source settings derived from Mantis Job parameters: topics,
 * per-worker consumer count, poll/processing limits, message parsing options,
 * checkpointing options and (optional) static partition assignment.
 */
public class MantisKafkaSourceConfig {
private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaSourceConfig.class);
// Defaults applied when the corresponding Job parameter is not supplied.
public static final int DEFAULT_CONSUMER_POLL_TIMEOUT_MS = 100;
public static final int DEFAULT_RETRY_CHECKPOINT_CHECK_DELAY_MS = 20;
public static final boolean DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN = false;
// NOTE(review): presumably a per-record byte overhead estimate used by callers when budgeting
// against maxBytesInProcessing — not referenced inside this class; confirm at call sites.
public static final int CONSUMER_RECORD_OVERHEAD_BYTES = 100;
public static final int DEFAULT_MAX_BYTES_IN_PROCESSING = 128_000_000;
public static final int DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER = 1;
public static final boolean DEFAULT_PARSE_MSG_IN_SOURCE = true;
private final List<String> topics;
private final int numConsumerInstances;
private final int consumerPollTimeoutMs;
private final int maxBytesInProcessing;
private final String messageParserType;
private final String checkpointStrategy;
private final Boolean parseMessageInSource;
private final int retryCheckpointCheckDelayMs;
private final int checkpointIntervalMs;
private final Boolean staticPartitionAssignmentEnabled;
// populated only when staticPartitionAssignmentEnabled is true
private final Optional<Map<String, Integer>> topicPartitionCounts;
private final MantisKafkaConsumerConfig consumerConfig;
/**
 * Reads all Kafka-source Job parameters from the given context. When static partition
 * assignment is enabled, also parses and validates the topic:partitionCount list.
 *
 * @param context Mantis job context supplying the job {@link Parameters}
 */
public MantisKafkaSourceConfig(Context context) {
final Parameters parameters = context.getParameters();
final String topicStr = (String) parameters.get(KafkaSourceParameters.TOPIC);
// comma-separated topic list; entries are trimmed and empty entries dropped
this.topics = Splitter.on(',').trimResults().omitEmptyStrings().splitToList(topicStr);
this.numConsumerInstances = (int) parameters.get(KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER, DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER);
this.consumerPollTimeoutMs = (int) parameters.get(KafkaSourceParameters.CONSUMER_POLL_TIMEOUT_MS, DEFAULT_CONSUMER_POLL_TIMEOUT_MS);
this.maxBytesInProcessing = (int) parameters.get(KafkaSourceParameters.MAX_BYTES_IN_PROCESSING, DEFAULT_MAX_BYTES_IN_PROCESSING);
this.messageParserType = (String) parameters.get(KafkaSourceParameters.PARSER_TYPE, ParserType.SIMPLE_JSON.getPropName());
this.checkpointStrategy = (String) parameters.get(KafkaSourceParameters.CHECKPOINT_STRATEGY, CheckpointStrategyOptions.NONE);
this.parseMessageInSource = (boolean) parameters.get(KafkaSourceParameters.PARSE_MSG_IN_SOURCE, DEFAULT_PARSE_MSG_IN_SOURCE);
this.retryCheckpointCheckDelayMs = (int) parameters.get(KafkaSourceParameters.RETRY_CHECKPOINT_CHECK_DELAY_MS, DEFAULT_RETRY_CHECKPOINT_CHECK_DELAY_MS);
this.checkpointIntervalMs = (int) parameters.get(KafkaSourceParameters.CHECKPOINT_INTERVAL_MS, MantisKafkaConsumerConfig.DEFAULT_CHECKPOINT_INTERVAL_MS);
this.staticPartitionAssignmentEnabled = (boolean) parameters.get(KafkaSourceParameters.ENABLE_STATIC_PARTITION_ASSIGN, DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN);
if (staticPartitionAssignmentEnabled) {
// partition counts are required (and validated) only when static assignment is on
final String topicPartitionsStr = (String) parameters.get(KafkaSourceParameters.TOPIC_PARTITION_COUNTS, "");
this.topicPartitionCounts = Optional.ofNullable(getTopicPartitionCounts(topicPartitionsStr, topics));
} else {
this.topicPartitionCounts = Optional.empty();
}
consumerConfig = new MantisKafkaConsumerConfig(context);
LOGGER.info("checkpointStrategy: {} numConsumerInstances: {} topics: {} consumerPollTimeoutMs: {} retryCheckpointCheckDelayMs {} consumer config: {}",
checkpointStrategy, numConsumerInstances, topics, consumerPollTimeoutMs, retryCheckpointCheckDelayMs, consumerConfig.values().toString());
}
/**
 * Parses a comma-separated "topic:partitionCount" list and validates that the topic set
 * exactly matches the topics configured via the TOPIC Job parameter.
 *
 * @param topicPartitionsStr raw Job parameter value, e.g. "topicA:8,topicB:4"
 * @param topicList topics configured for this source
 *
 * @return map of topic name to partition count
 *
 * @throws RuntimeException if an entry is malformed, names an unknown topic, or the topic
 *         sets do not match; NumberFormatException if a partition count is not an integer
 */
private Map<String, Integer> getTopicPartitionCounts(String topicPartitionsStr, List<String> topicList) {
final List<String> topicPartitionCountList = Splitter.on(',').trimResults().omitEmptyStrings().splitToList(topicPartitionsStr);
final Map<String, Integer> topicPartitionCounts = new HashMap<>();
// parse topic partition counts only if Static partition assignment is enabled
for (String tp : topicPartitionCountList) {
final String[] topicPartitionCount = tp.split(":");
if (topicPartitionCount.length == 2) {
final String topic = topicPartitionCount[0];
if (topicList.contains(topic)) {
topicPartitionCounts.put(topic, Integer.parseInt(topicPartitionCount[1]));
} else {
final String errorMsg = String.format("topic %s specified in Job Parameter '%s' does not match topics specified for Job Parameter '%s'",
topic, KafkaSourceParameters.TOPIC_PARTITION_COUNTS, KafkaSourceParameters.TOPIC);
LOGGER.error(errorMsg);
throw new RuntimeException(errorMsg);
}
} else {
final String errorMsg = String.format("failed to parse topic partition count string %s", tp);
LOGGER.error(errorMsg);
throw new RuntimeException(errorMsg);
}
}
// validate all topics have partition counts specified
final Set<String> partitionCountTopics = topicPartitionCounts.keySet();
if (!partitionCountTopics.containsAll(topicList) ||
!topicList.containsAll(partitionCountTopics)) {
final String errorMsg = String.format("topics '%s' specified for Job Parameter '%s' don't match topics '%s' specified for Job Parameter '%s'",
partitionCountTopics, KafkaSourceParameters.TOPIC_PARTITION_COUNTS, topicList, KafkaSourceParameters.TOPIC);
LOGGER.error(errorMsg);
throw new RuntimeException(errorMsg);
}
LOGGER.info("enableStaticPartitionAssignment: {} [ topic partition counts: {} ]", staticPartitionAssignmentEnabled, topicPartitionCounts);
return topicPartitionCounts;
}
// --- simple accessors for the parsed configuration values ---
public List<String> getTopics() {
return topics;
}
public int getNumConsumerInstances() {
return numConsumerInstances;
}
public int getConsumerPollTimeoutMs() {
return consumerPollTimeoutMs;
}
public int getMaxBytesInProcessing() {
return maxBytesInProcessing;
}
public String getMessageParserType() {
return messageParserType;
}
public String getCheckpointStrategy() {
return checkpointStrategy;
}
public Boolean getParseMessageInSource() {
return parseMessageInSource;
}
public int getRetryCheckpointCheckDelayMs() {
return retryCheckpointCheckDelayMs;
}
public int getCheckpointIntervalMs() {
return checkpointIntervalMs;
}
public Boolean getStaticPartitionAssignmentEnabled() {
return staticPartitionAssignmentEnabled;
}
/** @return per-topic partition counts; empty unless static partition assignment is enabled */
public Optional<Map<String, Integer>> getTopicPartitionCounts() {
return topicPartitionCounts;
}
public MantisKafkaConsumerConfig getConsumerConfig() {
return consumerConfig;
}
}
| 8,549 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/MantisKafkaConsumerConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import io.mantisrx.common.MantisProperties;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.Parameters;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.shaded.com.google.common.annotations.VisibleForTesting;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.RangeAssignor;
import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utility Class for handling Kafka ConsumerConfig defaults and Job parameter overrides
*/
public class MantisKafkaConsumerConfig extends ConsumerConfig {
private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaConsumerConfig.class);
public MantisKafkaConsumerConfig(Map<String, Object> props,
Context context) {
super(applyJobParamOverrides(context, props));
}
public MantisKafkaConsumerConfig(Context context) {
this(defaultProps(), context);
}
public static final String DEFAULT_AUTO_OFFSET_RESET = "latest";
public static final String DEFAULT_AUTO_COMMIT_ENABLED = "false";
public static final String DEFAULT_BOOTSTRAP_SERVERS_CONFIG = "localhost:9092";
public static final int DEFAULT_AUTO_COMMIT_INTERVAL_MS = 5000;
public static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 3000;
public static final int DEFAULT_SESSION_TIMEOUT_MS = 10_000;
public static final int DEFAULT_FETCH_MIN_BYTES = 1024;
public static final int DEFAULT_FETCH_MAX_WAIT_MS = 100;
public static final int DEFAULT_REQUEST_TIMEOUT_MS = 40000;
public static final int DEFAULT_CHECKPOINT_INTERVAL_MS = 5_000;
public static final int DEFAULT_MAX_POLL_INTERVAL_MS = 300_000;
public static final int DEFAULT_MAX_POLL_RECORDS = 500;
public static final int DEFAULT_MAX_PARTITION_FETCH_BYTES = 10_000_000;
public static final int DEFAULT_RECEIVE_BUFFER_BYTES = 32768;
public static final int DEFAULT_SEND_BUFFER_BYTES = 131072;
public static final Class<StringDeserializer> DEFAULT_KEY_DESERIALIZER = StringDeserializer.class;
public static final Class<ByteArrayDeserializer> DEFAULT_VALUE_DESERIALIZER = ByteArrayDeserializer.class;
public static Map<String, Object> defaultProps() {
final Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, DEFAULT_AUTO_COMMIT_ENABLED);
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_AUTO_COMMIT_INTERVAL_MS));
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, DEFAULT_AUTO_OFFSET_RESET);
props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, String.valueOf(DEFAULT_FETCH_MAX_WAIT_MS));
props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, String.valueOf(DEFAULT_FETCH_MIN_BYTES));
props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_HEARTBEAT_INTERVAL_MS));
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, String.valueOf(DEFAULT_SESSION_TIMEOUT_MS));
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, DEFAULT_KEY_DESERIALIZER);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, DEFAULT_VALUE_DESERIALIZER);
props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, String.valueOf(DEFAULT_MAX_PARTITION_FETCH_BYTES));
props.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, String.valueOf(DEFAULT_RECEIVE_BUFFER_BYTES));
props.put(ConsumerConfig.SEND_BUFFER_CONFIG, String.valueOf(DEFAULT_SEND_BUFFER_BYTES));
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, DEFAULT_BOOTSTRAP_SERVERS_CONFIG);
props.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, JmxReporter.class.getName());
props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(DEFAULT_REQUEST_TIMEOUT_MS));
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, String.valueOf(DEFAULT_MAX_POLL_RECORDS));
props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_MAX_POLL_INTERVAL_MS));
props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RangeAssignor.class.getName());
return props;
}
/**
* Get kafka consumer group ID to use by default.
* @return default group ID to use for kafka consumer based on Mantis Job Id when running in cloud, else default to local consumer id
*/
@VisibleForTesting
static String getGroupId() {
String jobId = MantisProperties.getProperty("JOB_ID");
if (jobId != null && !jobId.isEmpty()) {
LOGGER.info("default consumer groupId to {} if not overridden by job param", "mantis-kafka-source-" + jobId);
return "mantis-kafka-source-" + jobId;
}
return "mantis-kafka-source-fallback-consumer-id";
}
private static Map<String, Object> applyJobParamOverrides(Context context, Map<String, Object> parsedValues) {
final Parameters parameters = context.getParameters();
if (!parsedValues.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
// set consumerGroupId if not already set
final String consumerGroupId = (String) parameters.get(KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, getGroupId());
parsedValues.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
}
for (String key : configNames()) {
Object value = parameters.get(KafkaSourceParameters.PREFIX + key, null);
if (value != null) {
LOGGER.info("job param override for key {} -> {}", key, value);
parsedValues.put(key, value);
}
}
return parsedValues;
}
/**
* Helper class to get all Kafka Consumer configs as Job Parameters to allow overriding Kafka consumer config settings at Job submit time.
*
* @return
*/
public static List<ParameterDefinition<?>> getJobParameterDefinitions() {
List<ParameterDefinition<?>> params = new ArrayList<>();
Map<String, Object> defaultProps = defaultProps();
for (String key : configNames()) {
ParameterDefinition.Builder<String> builder = new StringParameter()
.name(KafkaSourceParameters.PREFIX + key)
.validator(Validators.alwaysPass())
.description(KafkaSourceParameters.PREFIX + key);
if (defaultProps.containsKey(key)) {
Object value = defaultProps.get(key);
if (value instanceof Class) {
builder = builder.defaultValue(((Class) value).getCanonicalName());
} else {
builder = builder.defaultValue((String) value);
}
}
params.add(builder.build());
}
return params;
}
public String getConsumerConfigStr() {
return values().toString();
}
public Map<String, Object> getConsumerProperties() {
return values().entrySet().stream()
.filter(x -> x.getKey() != null && x.getValue() != null)
.collect(Collectors.toMap(x -> x.getKey(),
x -> (Object) x.getValue()));
}
}
| 8,550 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/metrics/ConsumerMetrics.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.metrics;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import io.mantisrx.runtime.Context;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
/**
 * Spectator metrics published by a Mantis Kafka consumer instance. Every metric id
 * carries a common tag set identifying the Mantis job, worker and consumer, plus
 * per topic-partition gauges for committed and read offsets that are created lazily.
 */
public class ConsumerMetrics {
    private static final String METRICS_PREFIX = "MantisKafkaConsumer_";
    private static final String METRIC_KAFKA_IN_COUNT = "kafkaInCount";
    private static final String METRIC_KAFKA_PROCESSED_COUNT = "kafkaProcessedCount";
    private static final String METRIC_KAFKA_ERROR_COUNT = "kafkaErrorCount";
    private static final String METRIC_KAFKA_WAIT_FOR_DATA_COUNT = "kafkaWaitForDataCount";
    private static final String METRIC_KAFKA_COMMIT_COUNT = "kafkaCommitCount";
    private static final String METRIC_CHECKPOINT_DELAY = "checkpointDelay";
    private static final String METRIC_PARSE_FAILURE_COUNT = "parseFailureCount";
    private static final String METRIC_KAFKA_MSG_VALUE_NULL_COUNT = "kafkaMessageValueNull";
    private static final String METRIC_TIME_SINCE_LAST_POLL_MS = "timeSinceLastPollMs";
    private static final String METRIC_TIME_SINCE_LAST_POLL_WITH_DATA_MS = "timeSinceLastPollWithDataMs";
    private static final String METRIC_KAFKA_PAUSE_PARTITIONS = "kafkaPausePartitions";
    private static final String METRIC_KAFKA_RESUME_PARTITIONS = "kafkaResumePartitions";
    private final Registry registry;
    // Tags applied to every metric created by this instance (job/worker/consumer identity).
    private final List<Tag> commonTags;
    private final Counter kafkaInCount;
    private final Counter kafkaProcessedCount;
    private final Counter kafkaErrorCount;
    private final Counter kafkaWaitForDataCount;
    private final Counter kafkaCommitCount;
    private final Counter parseFailureCount;
    private final Counter kafkaPausePartitions;
    private final Counter kafkaResumePartitions;
    private final Counter kafkaMsgValueNullCount;
    private final Gauge checkpointDelay;
    private final Gauge timeSinceLastPollMs;
    private final Gauge timeSinceLastPollWithDataMs;
    // Per topic-partition gauges, created on first record for that partition.
    private final ConcurrentMap<TopicPartition, Gauge> committedOffsets = new ConcurrentHashMap<>();
    private final ConcurrentMap<TopicPartition, Gauge> readOffsets = new ConcurrentHashMap<>();

    /**
     * @param registry   Spectator registry used to create all metrics
     * @param consumerId id distinguishing this consumer from siblings on the same worker
     * @param context    Mantis runtime context supplying worker/job identity for tags
     */
    public ConsumerMetrics(final Registry registry, final int consumerId, final Context context) {
        this.registry = registry;
        this.commonTags = createCommonTags(context, consumerId);
        this.kafkaErrorCount = registry.counter(createId(METRIC_KAFKA_ERROR_COUNT));
        this.kafkaInCount = registry.counter(createId(METRIC_KAFKA_IN_COUNT));
        this.kafkaProcessedCount = registry.counter(createId(METRIC_KAFKA_PROCESSED_COUNT));
        this.kafkaWaitForDataCount = registry.counter(createId(METRIC_KAFKA_WAIT_FOR_DATA_COUNT));
        this.kafkaCommitCount = registry.counter(createId(METRIC_KAFKA_COMMIT_COUNT));
        this.checkpointDelay = registry.gauge(createId(METRIC_CHECKPOINT_DELAY));
        this.timeSinceLastPollMs = registry.gauge(createId(METRIC_TIME_SINCE_LAST_POLL_MS));
        this.timeSinceLastPollWithDataMs = registry.gauge(createId(METRIC_TIME_SINCE_LAST_POLL_WITH_DATA_MS));
        this.parseFailureCount = registry.counter(createId(METRIC_PARSE_FAILURE_COUNT));
        this.kafkaPausePartitions = registry.counter(createId(METRIC_KAFKA_PAUSE_PARTITIONS));
        this.kafkaResumePartitions = registry.counter(createId(METRIC_KAFKA_RESUME_PARTITIONS));
        this.kafkaMsgValueNullCount = registry.counter(createId(METRIC_KAFKA_MSG_VALUE_NULL_COUNT));
    }

    /** Builds the identity tags shared by every metric of this consumer. */
    private List<Tag> createCommonTags(final Context context, final int consumerId) {
        return Arrays.asList(Tag.of("mantisWorkerNum", Integer.toString(context.getWorkerInfo().getWorkerNumber())),
                             Tag.of("mantisWorkerIndex", Integer.toString(context.getWorkerInfo().getWorkerIndex())),
                             Tag.of("mantisJobName", context.getWorkerInfo().getJobClusterName()),
                             Tag.of("mantisJobId", context.getJobId()),
                             Tag.of("consumerId", String.valueOf(consumerId)));
    }

    /** Creates a prefixed metric id carrying the common tags. */
    private Id createId(final String metricName) {
        return registry.createId(METRICS_PREFIX + metricName, commonTags);
    }

    /** Common tags plus topic/partition tags for per-partition offset gauges. */
    private List<Tag> perPartitionTags(final TopicPartition tp) {
        final List<Tag> tags = new ArrayList<>(commonTags);
        tags.add(Tag.of("topic", tp.topic()));
        tags.add(Tag.of("partition", String.valueOf(tp.partition())));
        return tags;
    }

    /** Records the delay (presumably ms; units follow the caller — TODO confirm) of the last checkpoint. */
    public void recordCheckpointDelay(final long value) {
        checkpointDelay.set(value);
    }

    public void recordTimeSinceLastPollMs(long value) {
        timeSinceLastPollMs.set(value);
    }

    public void recordTimeSinceLastPollWithDataMs(long value) {
        timeSinceLastPollWithDataMs.set(value);
    }

    public void incrementInCount() {
        kafkaInCount.increment();
    }

    public void incrementProcessedCount() {
        kafkaProcessedCount.increment();
    }

    public void incrementErrorCount() {
        kafkaErrorCount.increment();
    }

    public void incrementWaitForDataCount() {
        kafkaWaitForDataCount.increment();
    }

    public void incrementCommitCount() {
        kafkaCommitCount.increment();
    }

    public void incrementParseFailureCount() {
        parseFailureCount.increment();
    }

    public void incrementPausePartitionCount() {
        kafkaPausePartitions.increment();
    }

    public void incrementResumePartitionCount() {
        kafkaResumePartitions.increment();
    }

    public void incrementKafkaMessageValueNullCount() {
        kafkaMsgValueNullCount.increment();
    }

    /**
     * Publishes the committed offset for each topic-partition in the checkpoint.
     *
     * @param checkpoint map of topic-partition to its just-committed offset metadata
     */
    public void recordCommittedOffset(final Map<TopicPartition, OffsetAndMetadata> checkpoint) {
        for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : checkpoint.entrySet()) {
            // computeIfAbsent replaces the original containsKey/putIfAbsent/get sequence:
            // it is atomic and avoids redundant map lookups.
            committedOffsets
                    .computeIfAbsent(entry.getKey(), tp -> registry.gauge("committedOffsets", perPartitionTags(tp)))
                    .set(entry.getValue().offset());
        }
    }

    /**
     * Publishes the latest read offset for a topic-partition.
     *
     * @param tp     topic-partition the offset belongs to
     * @param offset offset most recently read
     */
    public void recordReadOffset(final TopicPartition tp, final long offset) {
        readOffsets
                .computeIfAbsent(tp, key -> registry.gauge("minReadOffsets", perPartitionTags(key)))
                .set(offset);
    }
}
| 8,551 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CheckpointTrigger.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
/**
 * Decides when accumulated consumer progress should be checkpointed. Implementations
 * accumulate counts fed in via {@link #update(int)} and report readiness through
 * {@link #shouldCheckpoint()}.
 */
public interface CheckpointTrigger {
    /**
     * Whether a checkpoint should be taken now.
     *
     * @return true to checkpoint now, false otherwise
     */
    boolean shouldCheckpoint();
    /**
     * Update internal state based on the provided count (typically current message size).
     *
     * @param count amount to accumulate toward the checkpoint threshold
     */
    void update(int count);
    /**
     * Hook to reset all internal state after a checkpoint is persisted.
     */
    void reset();
    /**
     * Whether the trigger is in an active and valid state.
     *
     * @return true until {@link #shutdown()} has been invoked
     */
    boolean isActive();
    /**
     * Cleanup resources held by the trigger.
     */
    void shutdown();
}
| 8,552 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CheckpointingDisabledTrigger.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * CheckpointTrigger used when checkpointing is disabled: it never requests a
 * checkpoint and ignores all accumulated counts.
 */
public class CheckpointingDisabledTrigger implements CheckpointTrigger {

    // Flips to false exactly once, when shutdown() runs.
    private final AtomicBoolean active;

    public CheckpointingDisabledTrigger() {
        this.active = new AtomicBoolean(true);
    }

    /** Always false — checkpointing is disabled. */
    @Override
    public boolean shouldCheckpoint() {
        return false;
    }

    /** No state to accumulate. */
    @Override
    public void update(final int count) {
        // intentionally empty
    }

    /** No state to reset. */
    @Override
    public void reset() {
        // intentionally empty
    }

    @Override
    public boolean isActive() {
        return active.get();
    }

    @Override
    public void shutdown() {
        // compareAndSet makes repeated shutdown calls harmless
        active.compareAndSet(true, false);
    }
}
| 8,553 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CheckpointTriggerFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
import io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
/**
 * Creates the {@link CheckpointTrigger} matching the configured checkpoint strategy.
 */
public final class CheckpointTriggerFactory {

    private CheckpointTriggerFactory() { }

    /**
     * Factory method to create an instance of {@link CheckpointTrigger}.
     *
     * @param kafkaSourceConfig mantis kafka source configuration
     * @return {@link CheckpointTrigger} instance based on config
     */
    public static CheckpointTrigger getNewInstance(final MantisKafkaSourceConfig kafkaSourceConfig) {
        final String strategy = kafkaSourceConfig.getCheckpointStrategy();
        if (CheckpointStrategyOptions.OFFSETS_ONLY_DEFAULT.equals(strategy)) {
            return new CountingCheckpointTrigger(kafkaSourceConfig.getMaxBytesInProcessing(),
                                                 kafkaSourceConfig.getCheckpointIntervalMs());
        }
        // CheckpointStrategyOptions.NONE and any unrecognized value disable checkpointing
        return new CheckpointingDisabledTrigger();
    }
}
| 8,554 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CountingCheckpointTrigger.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import rx.Observable;
import rx.Subscription;
import rx.functions.Action1;
/**
 * Time and count based CheckpointTrigger: requests a checkpoint either when the
 * accumulated count exceeds the configured threshold, or when the configured
 * interval has elapsed since the last reset.
 */
public class CountingCheckpointTrigger implements CheckpointTrigger {

    private final int threshold;
    // Count accumulated via update() since the last reset().
    private final AtomicInteger accumulated;
    // Set by the periodic timer; cleared on reset().
    private final AtomicBoolean intervalElapsed = new AtomicBoolean(false);
    private final AtomicBoolean active;
    private final Subscription timerSubscription;

    /**
     * @param threshold         accumulated count above which a checkpoint is requested
     * @param triggerIntervalMs period after which a checkpoint is requested regardless of count
     */
    public CountingCheckpointTrigger(final int threshold, final int triggerIntervalMs) {
        this.threshold = threshold;
        this.accumulated = new AtomicInteger(0);
        this.active = new AtomicBoolean(true);
        this.timerSubscription = Observable
                .interval(triggerIntervalMs, TimeUnit.MILLISECONDS)
                .subscribe(tick -> intervalElapsed.set(true));
    }

    @Override
    public boolean shouldCheckpoint() {
        return accumulated.get() > threshold || intervalElapsed.get();
    }

    @Override
    public void update(final int count) {
        accumulated.addAndGet(count);
    }

    @Override
    public void reset() {
        accumulated.set(0);
        intervalElapsed.set(false);
    }

    @Override
    public boolean isActive() {
        return active.get();
    }

    @Override
    public void shutdown() {
        if (isActive()) {
            timerSubscription.unsubscribe();
            reset();
            active.compareAndSet(true, false);
        }
    }
}
| 8,555 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/NoopCheckpointStrategy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import io.mantisrx.runtime.Context;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.apache.kafka.common.TopicPartition;
/**
 * CheckpointStrategy that persists nothing and loads nothing; used when
 * checkpointing is disabled.
 */
public class NoopCheckpointStrategy implements CheckpointStrategy<Void> {

    @Override
    public void init(Context context) {
        // nothing to initialize
    }

    @Override
    public void init(Map<String, String> properties) {
        // nothing to initialize
    }

    /** Reports success without writing anything anywhere. */
    @Override
    public boolean persistCheckpoint(Map<TopicPartition, Void> checkpoint) {
        return true;
    }

    /** There is never a stored checkpoint to load. */
    @Override
    public Optional<Void> loadCheckpoint(TopicPartition tp) {
        return Optional.empty();
    }

    @Override
    public Map<TopicPartition, Optional<Void>> loadCheckpoints(List<TopicPartition> tpList) {
        return Collections.emptyMap();
    }

    @Override
    public String type() {
        return CheckpointStrategyOptions.NONE;
    }
}
| 8,556 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/CheckpointStrategyOptions.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
/**
 * Names of the supported checkpoint strategies for the Mantis Kafka source.
 */
public final class CheckpointStrategyOptions {

    /** Leverages Kafka for committing offsets. */
    public static final String OFFSETS_ONLY_DEFAULT = "offsetsOnlyDefaultKafka";

    /**
     * Disables committing offsets. Note this disables at-least-once semantics, since
     * there are no committed offsets to resume from after a worker/process failure.
     */
    public static final String NONE = "disableCheckpointing";

    private CheckpointStrategyOptions() {
        // constants holder — never instantiated
    }

    /** Returns all supported option names as a comma-separated string. */
    public static String values() {
        return String.join(", ", OFFSETS_ONLY_DEFAULT, NONE);
    }
}
| 8,557 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/KafkaOffsetCheckpointStrategy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import io.mantisrx.runtime.Context;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.apache.kafka.clients.consumer.InvalidOffsetException;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Leverages the default Kafka facilities to commit offsets to Kafka using
 * {@link KafkaConsumer#commitSync(Map) commitSync(Map)}.
 */
public class KafkaOffsetCheckpointStrategy implements CheckpointStrategy<OffsetAndMetadata> {

    // static final per SLF4J convention (original field was non-final)
    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaOffsetCheckpointStrategy.class);

    private final KafkaConsumer<?, ?> consumer;
    private final ConsumerMetrics consumerMetrics;

    public KafkaOffsetCheckpointStrategy(KafkaConsumer<?, ?> consumer, ConsumerMetrics metrics) {
        this.consumer = consumer;
        this.consumerMetrics = metrics;
    }

    @Override
    public void init(Map<String, String> properties) {
        // nothing to configure from properties
    }

    @Override
    public void init(Context context) {
        // no-op
    }

    /**
     * Synchronously commits the given offsets to Kafka. On {@link InvalidOffsetException}
     * the affected partitions are rewound to the beginning; on any other
     * {@link KafkaException} the failure is reported as unrecoverable.
     *
     * @param checkpoint offsets to commit, keyed by topic-partition
     * @return true if the commit succeeded (or the checkpoint was empty), false on an
     *         unrecoverable KafkaException
     */
    @Override
    public boolean persistCheckpoint(final Map<TopicPartition, OffsetAndMetadata> checkpoint) {
        if (checkpoint.isEmpty()) {
            return true;
        }
        try {
            // parameterized logging avoids string concatenation when debug is disabled
            LOGGER.debug("committing offsets {}", checkpoint);
            consumer.commitSync(checkpoint);
            consumerMetrics.recordCommittedOffset(checkpoint);
        } catch (InvalidOffsetException ioe) {
            LOGGER.warn("failed to commit offsets {} will seek to beginning", checkpoint, ioe);
            final Set<TopicPartition> topicPartitionSet = ioe.partitions();
            for (TopicPartition tp : topicPartitionSet) {
                LOGGER.info("partition {} consumer position {}", tp, consumer.position(tp));
            }
            consumer.seekToBeginning(ioe.partitions());
        } catch (KafkaException cfe) {
            // should not be retried
            LOGGER.warn("unrecoverable exception on commit offsets {}", checkpoint, cfe);
            return false;
        }
        return true;
    }

    /**
     * Always empty: this strategy relies on the default Kafka protocol to seek to the
     * last committed offset on assignment.
     */
    @Override
    public Optional<OffsetAndMetadata> loadCheckpoint(TopicPartition tp) {
        LOGGER.trace("rely on default kafka protocol to seek to last committed offset");
        return Optional.empty();
    }

    @Override
    public Map<TopicPartition, Optional<OffsetAndMetadata>> loadCheckpoints(List<TopicPartition> tpList) {
        final Map<TopicPartition, Optional<OffsetAndMetadata>> mp = new HashMap<>();
        for (TopicPartition tp : tpList) {
            mp.put(tp, loadCheckpoint(tp));
        }
        return mp;
    }

    @Override
    public String type() {
        return CheckpointStrategyOptions.OFFSETS_ONLY_DEFAULT;
    }
}
| 8,558 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/CheckpointStrategyFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import io.mantisrx.runtime.Context;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Creates the {@link CheckpointStrategy} matching a checkpoint strategy name.
 */
public final class CheckpointStrategyFactory {

    private CheckpointStrategyFactory() { }

    private static final Logger LOGGER = LoggerFactory.getLogger(CheckpointStrategyFactory.class);

    /**
     * Factory method to create an instance of {@link CheckpointStrategy}.
     *
     * @param context  Mantis runtime context
     * @param consumer Kafka consumer
     * @param strategy checkpoint strategy string
     * @param metrics  consumer metrics
     * @return instance of {@link CheckpointStrategy}
     */
    public static CheckpointStrategy<?> getNewInstance(final Context context,
                                                       final KafkaConsumer<?, ?> consumer,
                                                       final String strategy,
                                                       final ConsumerMetrics metrics) {
        if (CheckpointStrategyOptions.OFFSETS_ONLY_DEFAULT.equals(strategy)) {
            final KafkaOffsetCheckpointStrategy kafkaStrategy = new KafkaOffsetCheckpointStrategy(consumer, metrics);
            kafkaStrategy.init(context);
            return kafkaStrategy;
        }
        // CheckpointStrategyOptions.NONE and any unrecognized value fall back to no checkpointing
        return new NoopCheckpointStrategy();
    }
}
| 8,559 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/CheckpointStrategy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import io.mantisrx.runtime.Context;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.apache.kafka.common.TopicPartition;
/**
 * Strategy for persisting and loading per topic-partition checkpoint state of type
 * {@code S}.
 */
public interface CheckpointStrategy<S> {
    /**
     * Initialization hook invoked when creating the strategy.
     *
     * @param context Mantis runtime context
     */
    void init(Context context);
    /**
     * Initialization hook invoked when creating the strategy.
     *
     * @param initParams strategy-specific initialization parameters
     */
    void init(Map<String, String> initParams);
    /**
     * Persist checkpoint state by TopicPartition.
     *
     * @param checkpoint map of topic-partition to the state to persist
     * @return true on persist success, false otherwise
     */
    boolean persistCheckpoint(Map<TopicPartition, S> checkpoint);
    /**
     * Return the persisted checkpoint state for a topic-partition (if it exists).
     *
     * @param tp topic-partition
     *
     * @return checkpoint state if persisted, else empty Optional
     */
    Optional<S> loadCheckpoint(TopicPartition tp);
    /**
     * Bulk API to load checkpoints.
     *
     * @param tpList list of TopicPartitions to load checkpoint state for
     * @return map of each requested topic-partition to its (possibly absent) state
     */
    Map<TopicPartition, Optional<S>> loadCheckpoints(List<TopicPartition> tpList);
    /**
     * Get the checkpoint strategy type, one of {@link CheckpointStrategyOptions}.
     *
     * @return {@link CheckpointStrategyOptions checkpointStrategy} implemented
     */
    String type();
}
| 8,560 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/assignor/StaticPartitionAssignorImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.assignor;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Is invoked during initialization of the KafkaSource if static partitioning is enabled.
 */
public class StaticPartitionAssignorImpl implements StaticPartitionAssignor {

    private static final Logger LOGGER = LoggerFactory.getLogger(StaticPartitionAssignorImpl.class);

    /**
     * Round-robins every TopicName-PartitionNumber combination across all consumers and
     * returns only the assignments belonging to the current consumer.
     *
     * @param consumerIndex        current worker's consumer index
     * @param topicPartitionCounts map of topic -> number of partitions
     * @param totalNumConsumers    total number of consumers
     * @return list of topic-partitions owned by this consumer
     */
    @Override
    public List<TopicPartition> assignPartitionsToConsumer(int consumerIndex, Map<String, Integer> topicPartitionCounts, int totalNumConsumers) {
        Objects.requireNonNull(topicPartitionCounts, "TopicPartitionCount Map cannot be null");
        if (consumerIndex < 0) {
            throw new IllegalArgumentException("Consumer Index cannot be negative " + consumerIndex);
        }
        if (totalNumConsumers < 0) {
            throw new IllegalArgumentException("Total Number of consumers cannot be negative " + totalNumConsumers);
        }
        if (consumerIndex >= totalNumConsumers) {
            throw new IllegalArgumentException("Consumer Index " + consumerIndex + " cannot be greater than or equal to Total Number of consumers " + totalNumConsumers);
        }
        final List<TopicPartition> assigned = new ArrayList<>();
        // Global round-robin slot; deliberately carries over between topics so the
        // assignment stays balanced across the whole topic set.
        int slot = 0;
        for (Map.Entry<String, Integer> entry : topicPartitionCounts.entrySet()) {
            final String topic = entry.getKey();
            final Integer numPartitions = entry.getValue();
            if (numPartitions <= 0) {
                LOGGER.warn("Number of partitions is " + numPartitions + " for Topic " + topic + " skipping");
                continue;
            }
            for (int partition = 0; partition < numPartitions; partition++, slot++) {
                // totalNumConsumers > 0 is guaranteed by the validation above
                if (slot % totalNumConsumers == consumerIndex) {
                    assigned.add(new TopicPartition(topic, partition));
                }
            }
        }
        return assigned;
    }
}
| 8,561 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/assignor/StaticPartitionAssignor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.assignor;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
/**
 * Statically assigns Kafka topic partitions to consumers, instead of relying on
 * Kafka's group-based partition rebalancing.
 */
public interface StaticPartitionAssignor {
    /**
     * Computes the partitions owned by the consumer at the given index.
     *
     * @param consumerIndex        zero-based index of the current consumer
     * @param topicPartitionCounts map of topic name to its number of partitions
     * @param totalNumConsumers    total number of consumers partitions are spread across
     * @return list of topic-partitions assigned to this consumer
     */
    List<TopicPartition> assignPartitionsToConsumer(int consumerIndex,
                                                    Map<String, Integer> topicPartitionCounts,
                                                    int totalNumConsumers);
}
| 8,562 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/MapDeserializerBase.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import java.util.Map;
import org.apache.kafka.common.serialization.Deserializer;
/**
 * Base Kafka {@link Deserializer} that delegates payload decoding to the
 * {@link Parser} contract implemented by subclasses, producing a map of parsed fields.
 */
public abstract class MapDeserializerBase implements Parser, Deserializer<Map<String, Object>> {

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        // no configuration required
    }

    @Override
    public Map<String, Object> deserialize(String topic, byte[] data) {
        // Kafka convention: a null payload deserializes to null.
        if (data == null) {
            return null;
        }
        if (!canParse(data)) {
            throw new UnsupportedOperationException("Message cannot be deserialized with parser");
        }
        return parseMessage(data);
    }

    @Override
    public void close() {
        // no resources held
    }
}
| 8,563 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/Parser.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
/**
 * Parses raw Kafka message payloads into a {@code Map<String, Object>}.
 */
public interface Parser {
    /**
     * Determine if the payload byte array is parsable.
     *
     * @param message raw message payload
     *
     * @return boolean indicating if payload is parsable
     */
    boolean canParse(byte[] message);
    /**
     * Parse a payload byte array into a map.
     *
     * @param message raw message payload
     *
     * @return map of parsed fields
     *
     * @throws ParseException if the payload cannot be parsed
     */
    Map<String, Object> parseMessage(byte[] message) throws ParseException;
    /**
     * Returns a partial, human-readable rendering of the payload for logging,
     * decoded as UTF-8 and truncated to at most 128 characters.
     * (NOTE(review): earlier javadoc claimed a Base64 fallback and a possible
     * UnsupportedEncodingException; the implementation below does neither.)
     *
     * @param payload raw message payload
     *
     * @return truncated string form of the payload
     */
    default String getPartialPayLoadForLogging(byte[] payload) {
        String msg = new String(payload, StandardCharsets.UTF_8);
        return msg.length() <= 128 ? msg : msg.substring(0, 127);
    }
}
| 8,564 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/ParseException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
/**
 * Unchecked exception thrown when a raw message payload cannot be parsed.
 */
public class ParseException extends RuntimeException {

    // generated serialization id
    private static final long serialVersionUID = 7066656417880807188L;

    /** @param message description of the parse failure */
    public ParseException(String message) {
        super(message);
    }

    /** @param cause underlying failure */
    public ParseException(Throwable cause) {
        super(cause);
    }

    /**
     * @param message description of the parse failure
     * @param cause   underlying failure
     */
    public ParseException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 8,565 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/SimpleJsonDeserializer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Parser/deserializer that decodes a payload as a JSON object into a
 * {@code Map<String, Object>} using Jackson.
 */
public class SimpleJsonDeserializer extends MapDeserializerBase {

    private static final Logger LOGGER = LoggerFactory.getLogger(SimpleJsonDeserializer.class);

    private final ObjectMapper mapper = new ObjectMapper();
    private final TypeReference<Map<String, Object>> mapType =
            new TypeReference<Map<String, Object>>() {};

    @Override
    public boolean canParse(byte[] message) {
        // There is no cheap way to pre-validate JSON without actually parsing it,
        // so always report parsable and let parseMessage() surface failures.
        return true;
    }

    @Override
    public Map<String, Object> parseMessage(byte[] message) throws ParseException {
        try {
            return mapper.readValue(message, mapType);
        } catch (Exception ex) {
            LOGGER.error("Json parser failed to parse message! PAYLOAD:" + getPartialPayLoadForLogging(message), ex);
            throw new ParseException("Json not able to parse raw message", ex);
        }
    }
}
| 8,566 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/ParserType.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
/**
 * Parser types supported for Kafka message payloads.
 *
 * <p>Each constant carries the property-file friendly name used in job
 * configuration and a shared, stateless {@link Parser} instance.
 */
public enum ParserType {
    SIMPLE_JSON("simplejson", new SimpleJsonDeserializer());

    // config-friendly name for this parser type
    private final String propName;
    // shared parser instance; implementations are expected to be stateless/thread-safe
    private final Parser parser;

    ParserType(String propName, Parser parserInstance) {
        this.propName = propName;
        this.parser = parserInstance;
    }

    public String getPropName() {
        return propName;
    }

    public Parser getParser() {
        return parser;
    }

    /** @return true iff {@code otherName} equals this type's property name (null-safe). */
    public boolean equalsName(String otherName) {
        return (otherName != null) && propName.equals(otherName);
    }

    @Override
    public String toString() {
        return this.propName;
    }

    /**
     * Resolves a property name (e.g. {@code "simplejson"}) to its {@link ParserType}.
     * Iterates over {@link #values()} via {@link #equalsName(String)} so new enum
     * constants are picked up automatically instead of duplicating name literals.
     *
     * @throws IllegalArgumentException if {@code parserType} is null or unknown
     */
    public static ParserType parser(String parserType) {
        for (ParserType type : values()) {
            if (type.equalsName(parserType)) {
                return type;
            }
        }
        throw new IllegalArgumentException("Invalid parser type: " + parserType);
    }
}
| 8,567 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/sink/KafkaSinkJobParameters.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
/**
 * Job parameter name constants for the Kafka sink.
 */
public class KafkaSinkJobParameters {
    /** Prefix applied to all Kafka producer related job parameter names. */
    public static final String PREFIX = "kafka.sink.producer.";
    /** Job parameter naming the Kafka topic the sink writes to. */
    public static final String TOPIC = PREFIX + "topic";

    private KafkaSinkJobParameters() {
        // constants holder; not instantiable
    }
}
| 8,568 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/sink/KafkaSink.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
import com.netflix.spectator.api.Registry;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.Parameters;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.sink.SelfDocumentingSink;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Subscription;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
/**
 * Mantis sink that encodes each event to bytes and publishes it to the Kafka
 * topic named by the {@link KafkaSinkJobParameters#TOPIC} job parameter.
 *
 * <p>The underlying {@link KafkaProducer} is created lazily on first
 * {@link #call} and shared thereafter.
 *
 * @param <T> event type produced by the upstream stage
 */
public class KafkaSink<T> implements SelfDocumentingSink<T> {
    private static final Logger logger = LoggerFactory.getLogger(KafkaSink.class);
    private final Func1<T, byte[]> encoder;
    private final Registry registry;
    private final AtomicReference<KafkaProducer<byte[], byte[]>> kafkaProducerAtomicRef = new AtomicReference<>(null);
    private Subscription subscription;

    KafkaSink(Registry registry, Func1<T, byte[]> encoder) {
        this.encoder = encoder;
        this.registry = registry;
    }

    @Override
    public void call(Context context, PortRequest ignore, Observable<T> dataO) {
        if (kafkaProducerAtomicRef.get() == null) {
            MantisKafkaProducerConfig mantisKafkaProducerConfig = new MantisKafkaProducerConfig(context);
            Map<String, Object> producerProperties = mantisKafkaProducerConfig.getProducerProperties();
            KafkaProducer<byte[], byte[]> newProducer = new KafkaProducer<>(producerProperties);
            if (!kafkaProducerAtomicRef.compareAndSet(null, newProducer)) {
                // another thread won the initialization race; close the redundant
                // producer so its network threads and buffers are not leaked
                newProducer.close();
            }
            logger.info("Kafka Producer initialized");
        }
        KafkaProducer<byte[], byte[]> kafkaProducer = kafkaProducerAtomicRef.get();
        Parameters parameters = context.getParameters();
        String topic = (String) parameters.get(KafkaSinkJobParameters.TOPIC);
        // send() returns a Future; subscribe on the io scheduler so awaiting the
        // broker ack does not block the event stream
        subscription = dataO.map(encoder::call)
            .flatMap((dataBytes) ->
                         Observable.from(kafkaProducer.send(new ProducerRecord<>(topic, dataBytes)))
                             .subscribeOn(Schedulers.io()))
            .subscribe();
    }

    @Override
    public List<ParameterDefinition<?>> getParameters() {
        final List<ParameterDefinition<?>> params = new ArrayList<>();
        params.add(new StringParameter()
                       .name(KafkaSinkJobParameters.TOPIC)
                       .description("Kafka topic to write to")
                       .validator(Validators.notNullOrEmpty())
                       .required()
                       .build());
        // expose every Kafka producer config as an overridable job parameter
        params.addAll(MantisKafkaProducerConfig.getJobParameterDefinitions());
        return params;
    }

    @Override
    public Metadata metadata() {
        StringBuilder description = new StringBuilder();
        description.append("Writes the output of the job into the configured Kafka topic");
        return new Metadata.Builder()
            .name("Mantis Kafka Sink")
            .description(description.toString())
            .build();
    }

    @Override
    public void close() {
        // guard against close() before call(): subscription may never have been set
        if (subscription != null && !subscription.isUnsubscribed()) {
            subscription.unsubscribe();
        }
        // release the producer's sockets/buffers; getAndSet allows a later call() to re-init
        KafkaProducer<byte[], byte[]> producer = kafkaProducerAtomicRef.getAndSet(null);
        if (producer != null) {
            producer.close();
        }
    }
}
| 8,569 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/sink/MantisKafkaProducerConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.Parameters;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Mantis-aware {@link ProducerConfig} that layers job-parameter overrides
 * (keys prefixed with {@link KafkaSinkJobParameters#PREFIX}) on top of a set
 * of sensible producer defaults.
 */
public class MantisKafkaProducerConfig extends ProducerConfig {
    private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaProducerConfig.class);
    public static final String DEFAULT_BOOTSTRAP_SERVERS_CONFIG = "localhost:9092";
    public static final String DEFAULT_ACKS_CONFIG = "all";
    public static final int DEFAULT_RETRIES_CONFIG = 1;

    public MantisKafkaProducerConfig(Map<String, Object> props,
                                     Context context) {
        super(applyJobParamOverrides(context, props));
    }

    public MantisKafkaProducerConfig(Context context) {
        this(defaultProps(), context);
    }

    @Override
    protected Map<String, Object> postProcessParsedConfig(Map<String, Object> parsedValues) {
        return super.postProcessParsedConfig(parsedValues);
    }

    /**
     * Default producer properties: byte-array serializers, local broker,
     * JMX metric reporting, {@code acks=all} and a single retry.
     */
    public static Map<String, Object> defaultProps() {
        final Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, DEFAULT_BOOTSTRAP_SERVERS_CONFIG);
        props.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, JmxReporter.class.getName());
        props.put(ProducerConfig.ACKS_CONFIG, DEFAULT_ACKS_CONFIG);
        props.put(ProducerConfig.RETRIES_CONFIG, DEFAULT_RETRIES_CONFIG);
        return props;
    }

    /**
     * Applies any job-parameter overrides (prefixed keys) to {@code parsedValues}.
     * Bootstrap servers and client id get explicit defaults: the configured
     * default broker list and the job id respectively.
     */
    private static Map<String, Object> applyJobParamOverrides(Context context, Map<String, Object> parsedValues) {
        final Parameters parameters = context.getParameters();
        Map<String, Object> defaultProps = defaultProps();
        for (String key : configNames()) {
            Object value = parameters.get(KafkaSinkJobParameters.PREFIX + key, null);
            if (value != null) {
                LOGGER.info("job param override for key {} -> {}", key, value);
                parsedValues.put(key, value);
            }
        }
        final String bootstrapBrokers = (String) parameters.get(KafkaSinkJobParameters.PREFIX + ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, defaultProps.get(BOOTSTRAP_SERVERS_CONFIG));
        parsedValues.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
        final String clientId = (String) parameters.get(KafkaSinkJobParameters.PREFIX + ProducerConfig.CLIENT_ID_CONFIG, context.getJobId());
        parsedValues.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
        return parsedValues;
    }

    /** @return the parsed producer config values with null keys/values dropped. */
    public Map<String, Object> getProducerProperties() {
        return values().entrySet().stream()
            .filter(x -> x.getKey() != null && x.getValue() != null)
            .collect(Collectors.toMap(Map.Entry::getKey,
                                      x -> (Object) x.getValue()));
    }

    /**
     * Helper class to get all Kafka Producer configs as Job Parameters to allow overriding Kafka producer config settings at Job submit time.
     *
     * @return one {@link ParameterDefinition} per known producer config key
     */
    public static List<ParameterDefinition<?>> getJobParameterDefinitions() {
        List<ParameterDefinition<?>> params = new ArrayList<>();
        Map<String, Object> defaultProps = defaultProps();
        for (String key : configNames()) {
            ParameterDefinition.Builder<String> builder = new StringParameter()
                .name(KafkaSinkJobParameters.PREFIX + key)
                .validator(Validators.alwaysPass())
                .description(KafkaSinkJobParameters.PREFIX + key);
            if (defaultProps.containsKey(key)) {
                Object value = defaultProps.get(key);
                if (value instanceof Class) {
                    // class-valued defaults (e.g. serializers) surface as their canonical name
                    builder = builder.defaultValue(((Class<?>) value).getCanonicalName());
                } else {
                    builder = builder.defaultValue((String) value);
                }
            }
            params.add(builder.build());
        }
        return params;
    }
}
| 8,570 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/TestWorkerInfo.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
import io.mantisrx.common.WorkerPorts;
/**
 * {@link WorkerInfo} test fixture that supplies a fixed {@link WorkerPorts}
 * set (ports 1-5) so tests do not need to allocate real ports.
 */
public class TestWorkerInfo extends WorkerInfo {
    public TestWorkerInfo(
        String jobName,
        String jobId,
        int stageNumber,
        int workerIndex,
        int workerNumber,
        MantisJobDurationType durationType,
        String host) {
        super(
            jobName,
            jobId,
            stageNumber,
            workerIndex,
            workerNumber,
            durationType,
            host,
            // fixed dummy ports: metrics, debug, console, custom, sink
            new WorkerPorts(1, 2, 3, 4, 5));
    }
}
| 8,571 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/core/MantisStreamImplTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.core;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.graph.ImmutableValueGraph;
import io.mantisrx.shaded.com.google.common.graph.ValueGraphBuilder;
import java.util.Set;
import junit.framework.TestCase;
import org.junit.Test;
/**
 * Tests covering the Guava value-graph API usage and the topological sort
 * traversal performed by {@code MantisStreamImpl}.
 */
public class MantisStreamImplTest extends TestCase {

    // Sanity check of ImmutableValueGraph construction and the
    // successor/adjacency/edge-value query API (output is printed, not asserted).
    @Test
    public void testGraphApi() {
        ImmutableValueGraph.Builder<String, Integer> graphBuilder = ValueGraphBuilder.directed().allowsSelfLoops(true).immutable();
        graphBuilder.putEdgeValue("a", "b", 10);
        graphBuilder.putEdgeValue("a", "a", 10);
        graphBuilder.putEdgeValue("b", "c", 20);
        graphBuilder.putEdgeValue("a", "d", 10);
        graphBuilder.putEdgeValue("d", "b", 10);
        ImmutableValueGraph<String, Integer> graph = graphBuilder.build();
        Set<String> anbrs = graph.successors("a");
        for (String a : graph.nodes()) {
            System.out.printf("node %s, adjnodes %s, nbrs %s, edges %s\n", a, graph.adjacentNodes(a), graph.successors(a), graph.incidentEdges(a));
            graph.successors(a).forEach(nbr -> System.out.printf("edge for %s -> %s ::: %s\n", a, nbr, graph.edgeValue(a, nbr)));
        }
        System.out.println("done!");
    }

    // Verifies topSortTraversal yields a valid topological order for a DAG
    // that also contains a self-loop on "a" (which must not break the sort).
    @Test
    public void testTopSort() {
        /**
         * For the following graph,
         *    c
         *    ^
         * a^ -> d -> b
         * |          ^
         * + -------- |
         */
        ImmutableValueGraph.Builder<String, Integer> graphBuilder = ValueGraphBuilder.directed().allowsSelfLoops(true).immutable();
        graphBuilder.putEdgeValue("a", "b", 10);
        graphBuilder.putEdgeValue("a", "a", 10);
        graphBuilder.putEdgeValue("a", "d", 10);
        graphBuilder.putEdgeValue("b", "c", 20);
        graphBuilder.putEdgeValue("d", "b", 10);
        ImmutableValueGraph<String, Integer> graph = graphBuilder.build();
        Iterable<String> nodes = MantisStreamImpl.topSortTraversal(graph);
        assertEquals(ImmutableList.of("a", "d", "b", "c"), nodes);
    }
}
| 8,572 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/core | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/core/functions/FunctionCombinatorTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.core.functions;
import static org.junit.Assert.assertEquals;
import io.mantisrx.common.MantisGroup;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.computation.GroupToScalarComputation;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.core.WindowSpec;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import org.junit.Test;
import rx.Observable;
/**
 * Tests for {@code FunctionCombinator}: composing map/filter/window/reduce
 * functions into scalar and group-to-scalar Mantis stages.
 */
public class FunctionCombinatorTest {

    // Scalar pipeline: (+1) -> keep evens -> square -> append "1" -> parse.
    // For inputs 0..6 the surviving values are 41, 161, 361.
    @Test
    public void testSimple() {
        FunctionCombinator<Integer, Integer> fns = new FunctionCombinator<Integer, Void>(false)
            .add((MapFunction<Integer, Long>) e -> e + 1L)
            .add((FilterFunction<Long>) e -> e % 2 == 0)
            .add((MapFunction<Long, Long>) e -> e * e)
            .add((MapFunction<Long, String>) e -> e + "1")
            .add((MapFunction<String, Integer>) Integer::parseInt);
        ScalarComputation<Integer, Integer> scalar = fns.makeScalarStage();
        ImmutableList<Integer> items = ImmutableList.of(0, 1, 2, 3, 4, 5, 6);
        Observable<Integer> result = scalar.call(new Context(), Observable.from(items));
        List<Integer> collect = new ArrayList<>();
        result.forEach(collect::add);
        assertEquals(ImmutableList.of(41, 161, 361), collect);
    }

    // Same pipeline applied per key ("k1" gets 0..6, "k2" gets 10..16);
    // the group-to-scalar stage processes each key's stream independently.
    @Test
    public void testKeyedFunction() {
        FunctionCombinator<Integer, Integer> fns = new FunctionCombinator<Integer, Void>(true)
            .add((MapFunction<Integer, Long>) e -> e + 1L)
            .add((FilterFunction<Long>) e -> e % 2 == 0)
            .add((MapFunction<Long, Long>) e -> e * e)
            .add((MapFunction<Long, String>) e -> e + "1")
            .add((MapFunction<String, Integer>) Integer::parseInt);
        GroupToScalarComputation<String, Integer, Integer> scalar = fns.makeGroupToScalarStage();
        List<Integer> elems = ImmutableList.of(0, 1, 2, 3, 4, 5, 6);
        List<MantisGroup<String, Integer>> build = ImmutableList.<MantisGroup<String, Integer>>builder()
            .addAll(elems.stream().map(x -> new MantisGroup<>("k1", x)).collect(Collectors.toList()))
            .addAll(elems.stream().map(x -> new MantisGroup<>("k2", x + 10)).collect(Collectors.toList()))
            .build();
        Observable<Integer> result = scalar.call(new Context(), Observable.from(build));
        List<Integer> collect = new ArrayList<>();
        result.forEach(collect::add);
        assertEquals(ImmutableList.of(41, 161, 361, 1441, 1961, 2561), collect);
    }

    // Extends the keyed pipeline with a count-2 window followed by a sum
    // reduction, so each emitted value is the sum of a per-key window.
    @Test
    public void testKeyedWindowWithReduce() {
        FunctionCombinator<Integer, Integer> fns = new FunctionCombinator<Integer, Void>(true)
            .add((MapFunction<Integer, Long>) e -> e + 1L)
            .add((FilterFunction<Long>) e -> e % 2 == 0)
            .add((MapFunction<Long, Long>) e -> e * e)
            .add((MapFunction<Long, String>) e -> e + "1")
            .add((MapFunction<String, Integer>) Integer::parseInt)
            .add(new WindowFunction<>(WindowSpec.count(2)))
            .add(new ReduceFunction<Integer, Integer>() {
                @Override
                public Integer initialValue() {
                    return 0;
                }

                @Override
                public Integer reduce(Integer acc, Integer elem) {
                    return acc + elem;
                }
            });
        GroupToScalarComputation<String, Integer, Integer> scalar = fns.makeGroupToScalarStage();
        List<Integer> elems = ImmutableList.of(0, 1, 2, 3, 4, 5, 6);
        List<MantisGroup<String, Integer>> build = ImmutableList.<MantisGroup<String, Integer>>builder()
            .addAll(elems.stream().map(x -> new MantisGroup<>("k1", x)).collect(Collectors.toList()))
            .addAll(elems.stream().map(x -> new MantisGroup<>("k2", x + 10)).collect(Collectors.toList()))
            .build();
        Observable<Integer> result = scalar.call(new Context(), Observable.from(build));
        List<Integer> collect = new ArrayList<>();
        result.forEach(collect::add);
        assertEquals(ImmutableList.of(202, 3402, 361, 2561), collect);
    }
}
| 8,573 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http/RequestProcessor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.source.http;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.QueryStringDecoder;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import mantis.io.reactivex.netty.protocol.http.server.HttpServerRequest;
import mantis.io.reactivex.netty.protocol.http.server.HttpServerResponse;
import mantis.io.reactivex.netty.protocol.http.server.RequestHandler;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Scheduler.Worker;
import rx.Subscriber;
import rx.functions.Action0;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
/**
 * RxNetty request handler used by HTTP source tests. Routes on URI prefixes
 * under {@code test/} and serves single-entity responses, SSE streams (small,
 * large, finite, infinite), redirects, POST echoes, and connection-control
 * responses (close / keep-alive timeout).
 */
public class RequestProcessor implements RequestHandler<ByteBuf, ByteBuf> {

    // Canned stream payloads shared by the streaming endpoints.
    public static final List<String> smallStreamContent;
    public static final List<String> largeStreamContent;
    public static final String SINGLE_ENTITY_RESPONSE = "Hello world";

    static {
        List<String> smallStreamListLocal = new ArrayList<String>();
        for (int i = 0; i < 3; i++) {
            smallStreamListLocal.add("line " + i);
        }
        smallStreamContent = Collections.unmodifiableList(smallStreamListLocal);
        List<String> largeStreamListLocal = new ArrayList<String>();
        for (int i = 0; i < 1000; i++) {
            largeStreamListLocal.add("line " + i);
        }
        largeStreamContent = Collections.unmodifiableList(largeStreamListLocal);
    }

    // Writes each entry of `data` as a chunked SSE "data:" event and flushes once.
    private static Observable<Void> sendStreamingResponse(HttpServerResponse<ByteBuf> response, List<String> data) {
        response.getHeaders().add(HttpHeaders.Names.CONTENT_TYPE, "text/event-stream");
        response.getHeaders().add(HttpHeaders.Names.TRANSFER_ENCODING, "chunked");
        for (String line : data) {
            byte[] contentBytes = ("data:" + line + "\n\n").getBytes();
            response.writeBytes(contentBytes);
        }
        return response.flush();
    }

    // Single non-streaming response body.
    public Observable<Void> handleSingleEntity(HttpServerResponse<ByteBuf> response) {
        byte[] responseBytes = SINGLE_ENTITY_RESPONSE.getBytes();
        return response.writeBytesAndFlush(responseBytes);
    }

    // SSE stream sent without the chunked transfer-encoding header.
    public Observable<Void> handleStreamWithoutChunking(HttpServerResponse<ByteBuf> response) {
        response.getHeaders().add(HttpHeaders.Names.CONTENT_TYPE, "text/event-stream");
        for (String contentPart : smallStreamContent) {
            response.writeString("data:");
            response.writeString(contentPart);
            response.writeString("\n\n");
        }
        return response.flush();
    }

    public Observable<Void> handleStream(HttpServerResponse<ByteBuf> response) {
        return sendStreamingResponse(response, smallStreamContent);
    }

    public Observable<Void> handleLargeStream(HttpServerResponse<ByteBuf> response) {
        return sendStreamingResponse(response, largeStreamContent);
    }

    // Sleeps for the requested "timeout" query-param milliseconds before replying,
    // so clients can exercise their read-timeout handling.
    public Observable<Void> simulateTimeout(HttpServerRequest<ByteBuf> httpRequest, HttpServerResponse<ByteBuf> response) {
        String uri = httpRequest.getUri();
        QueryStringDecoder decoder = new QueryStringDecoder(uri);
        List<String> timeout = decoder.parameters().get("timeout");
        byte[] contentBytes;
        HttpResponseStatus status = HttpResponseStatus.NO_CONTENT;
        if (null != timeout && !timeout.isEmpty()) {
            try {
                Thread.sleep(Integer.parseInt(timeout.get(0)));
                contentBytes = "".getBytes();
            } catch (Exception e) {
                contentBytes = e.getMessage().getBytes();
                status = HttpResponseStatus.INTERNAL_SERVER_ERROR;
            }
        } else {
            status = HttpResponseStatus.BAD_REQUEST;
            contentBytes = "Please provide a timeout parameter.".getBytes();
        }
        response.setStatus(status);
        return response.writeBytesAndFlush(contentBytes);
    }

    // Echoes the POSTed body back as a single SSE event.
    public Observable<Void> handlePost(final HttpServerRequest<ByteBuf> request, final HttpServerResponse<ByteBuf> response) {
        return request.getContent().flatMap(new Func1<ByteBuf, Observable<Void>>() {
                                                @Override
                                                public Observable<Void> call(ByteBuf t1) {
                                                    String content = t1.toString(Charset.defaultCharset());
                                                    response.getHeaders().add(HttpHeaders.Names.CONTENT_TYPE, "text/event-stream");
                                                    response.getHeaders().add(HttpHeaders.Names.TRANSFER_ENCODING, "chunked");
                                                    return response.writeBytesAndFlush(("data: " + content + "\n\n").getBytes());
                                                }
                                            }
        );
    }

    // Replies with "Connection: close" so the client must not reuse the connection.
    public Observable<Void> handleCloseConnection(final HttpServerResponse<ByteBuf> response) {
        response.getHeaders().add("Connection", "close");
        byte[] responseBytes = SINGLE_ENTITY_RESPONSE.getBytes();
        return response.writeBytesAndFlush(responseBytes);
    }

    // Advertises a 1-second keep-alive timeout for idle-connection tests.
    public Observable<Void> handleKeepAliveTimeout(final HttpServerResponse<ByteBuf> response) {
        response.getHeaders().add("Keep-Alive", "timeout=1");
        byte[] responseBytes = SINGLE_ENTITY_RESPONSE.getBytes();
        return response.writeBytesAndFlush(responseBytes);
    }

    // 301 redirect for GET; target port is taken from the "port" query parameter.
    public Observable<Void> redirectGet(HttpServerRequest<ByteBuf> request, final HttpServerResponse<ByteBuf> response) {
        response.getHeaders().add("Location", "http://localhost:" + request.getQueryParameters().get("port").get(0) + "/test/singleEntity");
        response.setStatus(HttpResponseStatus.MOVED_PERMANENTLY);
        return response.writeAndFlush(Unpooled.EMPTY_BUFFER);
    }

    // 301 redirect for POST; target port is taken from the "port" query parameter.
    public Observable<Void> redirectPost(HttpServerRequest<ByteBuf> request, final HttpServerResponse<ByteBuf> response) {
        response.getHeaders().add("Location", "http://localhost:" + request.getQueryParameters().get("port").get(0) + "/test/post");
        response.setStatus(HttpResponseStatus.MOVED_PERMANENTLY);
        return response.writeAndFlush(Unpooled.EMPTY_BUFFER);
    }

    // Emits a numbered SSE event every 100ms forever (until the client disconnects).
    public Observable<Void> sendInfiniteStream(final HttpServerResponse<ByteBuf> response) {
        response.getHeaders().add(HttpHeaders.Names.CONTENT_TYPE, "text/event-stream");
        response.getHeaders().add(HttpHeaders.Names.TRANSFER_ENCODING, "chunked");
        return Observable.create(new OnSubscribe<Void>() {
            final AtomicLong counter = new AtomicLong();
            Worker worker = Schedulers.computation().createWorker();

            public void call(Subscriber<? super Void> subscriber) {
                worker.schedulePeriodically(
                    new Action0() {
                        @Override
                        public void call() {
                            System.out.println("In infinte stream");
                            byte[] contentBytes = ("data:" + "line " + counter.getAndIncrement() + "\n\n").getBytes();
                            response.writeBytes(contentBytes);
                            response.flush();
                        }
                    },
                    0,
                    100,
                    TimeUnit.MILLISECONDS
                );
            }
        });
    }

    // Emits "count" numbered SSE events (10ms apart after a 100ms delay) then
    // unsubscribes; 400 if the required "count" query parameter is missing.
    private Observable<Void> sendFiniteStream(final HttpServerRequest<ByteBuf> request, final HttpServerResponse<ByteBuf> response) {
        String uri = request.getUri();
        QueryStringDecoder decoder = new QueryStringDecoder(uri);
        List<String> maxCounts = decoder.parameters().get("count");
        if (null != maxCounts && !maxCounts.isEmpty()) {
            final int maxCount = Integer.parseInt(maxCounts.get(0));
            response.getHeaders().add(HttpHeaders.Names.CONTENT_TYPE, "text/event-stream");
            response.getHeaders().add(HttpHeaders.Names.TRANSFER_ENCODING, "chunked");
            return Observable.create(new OnSubscribe<Void>() {
                final AtomicLong counter = new AtomicLong();
                Worker worker = Schedulers.computation().createWorker();

                public void call(final Subscriber<? super Void> subscriber) {
                    worker.schedule(
                        new Action0() {
                            @Override
                            public void call() {
                                byte[] contentBytes = ("data:" + "line " + counter.getAndIncrement() + "\n\n").getBytes();
                                response.writeBytes(contentBytes);
                                response.flush();
                                if (counter.get() < maxCount) {
                                    // re-schedule this action until maxCount events have been sent
                                    worker.schedule(this, 10, TimeUnit.MILLISECONDS);
                                } else {
                                    subscriber.unsubscribe();
                                }
                            }
                        },
                        100,
                        TimeUnit.MILLISECONDS
                    );
                }
            });
        } else {
            HttpResponseStatus status = HttpResponseStatus.BAD_REQUEST;
            byte[] contentBytes = "Please provide a 'count' parameter to specify how many events to emit before causing an error.".getBytes();
            response.setStatus(status);
            return response.writeBytesAndFlush(contentBytes);
        }
    }

    /**
     * Routes the request to a handler by URI prefix. Note the mix of
     * {@code contains} and {@code startsWith} checks: redirected URIs arrive
     * with a leading "/", so some routes match with {@code contains}.
     */
    @Override
    public Observable<Void> handle(HttpServerRequest<ByteBuf> request, HttpServerResponse<ByteBuf> response) {
        String uri = request.getUri();
        if (uri.contains("test/singleEntity")) {
            // in case of redirect, uri starts with /test/singleEntity
            return handleSingleEntity(response);
        } else if (uri.startsWith("test/stream")) {
            return handleStream(response);
        } else if (uri.startsWith("test/nochunk_stream")) {
            return handleStreamWithoutChunking(response);
        } else if (uri.startsWith("test/largeStream")) {
            return handleLargeStream(response);
        } else if (uri.startsWith("test/timeout")) {
            return simulateTimeout(request, response);
        } else if (uri.startsWith("test/postContent") && request.getHttpMethod().equals(HttpMethod.POST)) {
            //            String content = request.getContent().toBlocking().first().toString(Charset.defaultCharset());
            //            if(content == null || ! content.equals("test/postContent")) {
            //                throw new IllegalArgumentException("the posted content should be same as the URI: "+uri);
            //            }
            //            return handleStream(response);
            return handlePost(request, response);
        } else if (uri.startsWith("test/postStream") && request.getHttpMethod().equals(HttpMethod.POST)) {
            return handleStream(response);
        } else if (uri.contains("test/post")) {
            return handlePost(request, response);
        } else if (uri.startsWith("test/closeConnection")) {
            return handleCloseConnection(response);
        } else if (uri.startsWith("test/keepAliveTimeout")) {
            return handleKeepAliveTimeout(response);
        } else if (uri.startsWith("test/redirect") && request.getHttpMethod().equals(HttpMethod.GET)) {
            return redirectGet(request, response);
        } else if (uri.startsWith("test/redirectPost") && request.getHttpMethod().equals(HttpMethod.POST)) {
            return redirectPost(request, response);
        } else if (uri.startsWith("test/infStream")) {
            return sendInfiniteStream(response);
        } else if (uri.startsWith("test/finiteStream")) {
            return sendFiniteStream(request, response);
        } else {
            response.setStatus(HttpResponseStatus.NOT_FOUND);
            return response.flush();
        }
    }
}
| 8,574 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http/HttpSourceTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.source.http;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.*;
import static org.junit.Assert.*;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.*;
import io.mantisrx.runtime.Metadata.Builder;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.sink.ServerSentEventsSink;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import io.mantisrx.runtime.source.http.impl.HttpClientFactories;
import io.mantisrx.runtime.source.http.impl.HttpRequestFactories;
import io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import mantis.io.reactivex.netty.client.RxClient.ServerInfo;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.junit.*;
import rx.Observable;
import rx.Subscription;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
/**
 * Tests for {@link HttpSource}: streaming GET/POST from a set of locally launched HTTP
 * servers, a polling source, and re-subscription behavior. Servers are provided by
 * {@link LocalServerProvider} and lifecycle events are captured by {@link TestSourceObserver}.
 */
public class HttpSourceTest {

    private final static int SEED_PORT = 4000;
    private final static int PORT_RANGE = 1000;
    private static LocalServerProvider localServerProvider;
    // Each of these lifecycle events is expected exactly once per server for a finite stream.
    private static EventType[] expectedEvents = new EventType[] {CONNECTION_ATTEMPTED, SUBSCRIPTION_ESTABLISHED, CONNECTION_UNSUBSCRIBED, CONNECTION_ESTABLISHED, SERVER_FOUND, SOURCE_COMPLETED, SUBSCRIPTION_ENDED};
    private static Set<EventType> EXPECTED_EVENTS_SETS = new HashSet<>(Arrays.asList(expectedEvents));

    static {
        // Route debug logging to the console so failures in the async pipelines are diagnosable.
        Logger rootLogger = Logger.getRootLogger();
        rootLogger.setLevel(Level.DEBUG);
        rootLogger.addAppender(new ConsoleAppender(
                new PatternLayout("%-6r [%p] %c - %m%n")));
    }

    private TestSourceObserver sourceObserver = new TestSourceObserver();

    @BeforeClass
    public static void init() {
        // Randomize the starting port within [SEED_PORT, SEED_PORT + PORT_RANGE) so that
        // concurrent test runs are unlikely to collide on the same ports.
        int portStart = new Random().nextInt(PORT_RANGE) + SEED_PORT;
        localServerProvider = new LocalServerProvider();
        localServerProvider.start(3, portStart);
    }

    @AfterClass
    public static void shutdown() throws Exception {
        localServerProvider.shutDown();
    }

    @Before
    public void setup() {
        // Fresh observer per test so event counts never leak between test cases.
        sourceObserver = new TestSourceObserver();
    }

    @Test
    public void canStreamFromMultipleServers() throws Exception {
        HttpSource<ServerSentEvent, ServerSentEvent> source = HttpSources
                .source(
                        HttpClientFactories.sseClientFactory(),
                        HttpRequestFactories.createGetFactory("test/stream"))
                .withServerProvider(localServerProvider)
                .withActivityObserver(sourceObserver)
                .build();
        sourcingStream(source);
    }

    /**
     * Subscribes to the given POST-backed source and verifies that every server echoes
     * {@code postContent} exactly once, then checks the per-server lifecycle event counts.
     */
    private void sourceEchoStreamFromPost(HttpSource<ServerSentEvent, ServerSentEvent> source, String postContent) throws Exception {
        final AtomicInteger counter = new AtomicInteger();
        final CountDownLatch done = new CountDownLatch(1);
        final ConcurrentHashMap<String, AtomicInteger> result = new ConcurrentHashMap<>();
        Observable.merge(source.call(new Context(), new Index(1, 1)))
                .doOnNext(new Action1<ServerSentEvent>() {
                    @Override
                    public void call(ServerSentEvent event) {
                        counter.incrementAndGet();
                        String msg = event.contentAsString();
                        result.putIfAbsent(msg, new AtomicInteger());
                        result.get(msg).incrementAndGet();
                    }
                })
                .doOnError(new Action1<Throwable>() {
                    @Override
                    public void call(Throwable throwable) {
                        fail("Unexpected failure: " + throwable);
                    }
                })
                .doAfterTerminate(new Action0() {
                    @Override
                    public void call() {
                        done.countDown();
                    }
                })
                .subscribe();

        // BUGFIX: was 30000 while awaiting in TimeUnit.SECONDS (i.e. > 8 hours);
        // 30 seconds matches the intent of the failure message below.
        long waitSeconds = 30;
        boolean timedout = !done.await(waitSeconds, TimeUnit.SECONDS);
        if (timedout) {
            fail(String.format("Waited at least %d seconds for the test to finish. Something is wrong", waitSeconds));
        }
        assertEquals(
                String.format("%d servers => the result has %d times of a single echo",
                        localServerProvider.serverSize(),
                        localServerProvider.serverSize()),
                localServerProvider.serverSize(),
                counter.get());
        assertEquals(String.format("%d servers => %d identical copies per message",
                localServerProvider.serverSize(),
                localServerProvider.serverSize()),
                localServerProvider.serverSize(),
                result.get(postContent).get());
        for (ServerInfo server : localServerProvider.getServerInfos()) {
            assertEquals("There should be one completion per server", 1, sourceObserver.getCount(server, EventType.SOURCE_COMPLETED));
            assertEquals("There should be one un-subscription per server", 1, sourceObserver.getCount(server, EventType.CONNECTION_UNSUBSCRIBED));
            assertEquals("There should be no error", 0, sourceObserver.getCount(server, EventType.SUBSCRIPTION_FAILED));
            assertEquals("There should be one connection per server", 1, sourceObserver.getCount(server, EventType.CONNECTION_ESTABLISHED));
        }
        assertEquals(1, sourceObserver.getCompletionCount());
        assertEquals(0, sourceObserver.getErrorCount());
        Set<EventType> events = sourceObserver.getEvents();
        assertEquals(EXPECTED_EVENTS_SETS, events);
        for (EventType event : events) {
            assertEquals("Each event should be recorded exactly once per server", localServerProvider.serverSize(), sourceObserver.getEventCount(event));
        }
    }

    /**
     * Subscribes to the given streaming source and verifies that every server contributed
     * one full copy of {@link RequestProcessor#smallStreamContent}, then checks the
     * per-server lifecycle event counts.
     */
    private void sourcingStream(HttpSource<ServerSentEvent, ServerSentEvent> source) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger();
        final CountDownLatch done = new CountDownLatch(1);
        final ConcurrentHashMap<String, AtomicInteger> result = new ConcurrentHashMap<>();
        Observable.merge(source.call(new Context(), new Index(1, 1)))
                .doOnNext(new Action1<ServerSentEvent>() {
                    @Override
                    public void call(ServerSentEvent event) {
                        counter.incrementAndGet();
                        String msg = event.contentAsString();
                        result.putIfAbsent(msg, new AtomicInteger());
                        result.get(msg).incrementAndGet();
                    }
                })
                .doOnError(new Action1<Throwable>() {
                    @Override
                    public void call(Throwable throwable) {
                        fail("Unexpected failure: " + throwable);
                    }
                })
                .doAfterTerminate(new Action0() {
                    @Override
                    public void call() {
                        done.countDown();
                    }
                })
                .subscribe();

        // BUGFIX: was 30000 while awaiting in TimeUnit.SECONDS (i.e. > 8 hours);
        // 30 seconds matches the intent of the failure message below.
        long waitSeconds = 30;
        boolean timedout = !done.await(waitSeconds, TimeUnit.SECONDS);
        if (timedout) {
            fail(String.format("Waited at least %d seconds for the test to finish. Something is wrong", waitSeconds));
        }
        // BUGFIX: JUnit's signature is (message, expected, actual); the expected product and
        // the observed counter were swapped here, which garbles the failure message.
        assertEquals(
                String.format("%d servers => the result has %d times of a single stream",
                        localServerProvider.serverSize(),
                        localServerProvider.serverSize()),
                RequestProcessor.smallStreamContent.size() * localServerProvider.serverSize(),
                counter.get());
        for (String data : RequestProcessor.smallStreamContent) {
            assertEquals(String.format("%d servers => %d identical copies per message", localServerProvider.serverSize(), localServerProvider.serverSize()), localServerProvider.serverSize(), result.get(data).get());
        }
        for (ServerInfo server : localServerProvider.getServerInfos()) {
            assertEquals("There should be one completion per server", 1, sourceObserver.getCount(server, EventType.SOURCE_COMPLETED));
            assertEquals("There should be one un-subscription per server", 1, sourceObserver.getCount(server, EventType.CONNECTION_UNSUBSCRIBED));
            assertEquals("There should be no error", 0, sourceObserver.getCount(server, EventType.SUBSCRIPTION_FAILED));
            assertEquals("There should be one connection per server", 1, sourceObserver.getCount(server, EventType.CONNECTION_ESTABLISHED));
        }
        assertEquals(1, sourceObserver.getCompletionCount());
        assertEquals(0, sourceObserver.getErrorCount());
        Set<EventType> events = sourceObserver.getEvents();
        assertEquals(EXPECTED_EVENTS_SETS, events);
        for (EventType event : events) {
            assertEquals("Each event should be recorded exactly once per server", localServerProvider.serverSize(), sourceObserver.getEventCount(event));
        }
    }

    @Test
    public void canStreamFromPostingToMultipleServers() throws Exception {
        HttpSource<ServerSentEvent, ServerSentEvent> source = HttpSources
                .source(
                        HttpClientFactories.sseClientFactory(),
                        HttpRequestFactories.createPostFactory("test/postStream"))
                .withServerProvider(localServerProvider)
                .withActivityObserver(sourceObserver)
                .build();
        sourcingStream(source);
    }

    @Test
    public void canStreamFromPostingWithContentToMultipleServers() throws Exception {
        String postContent = "test/postcontent";
        HttpSource<ServerSentEvent, ServerSentEvent> source = HttpSources
                .source(
                        HttpClientFactories.sseClientFactory(),
                        HttpRequestFactories.createPostFactory("test/postContent", postContent.getBytes()))
                .withServerProvider(localServerProvider)
                .withActivityObserver(sourceObserver)
                .build();
        sourceEchoStreamFromPost(source, postContent);
    }

    @Test
    public void pollingSourceWillWork() throws Exception {
        ServerInfo server = localServerProvider.getServerInfos().get(0);
        HttpSource<ByteBuf, String> source = HttpSources
                .pollingSource(server.getHost(), server.getPort(), "test/singleEntity")
                .withActivityObserver(sourceObserver)
                .build();

        final AtomicInteger counter = new AtomicInteger();
        final int maxRepeat = 10;
        final CountDownLatch done = new CountDownLatch(maxRepeat);
        final ConcurrentHashMap<String, AtomicInteger> result = new ConcurrentHashMap<>();
        Subscription subscription =
                Observable.merge(source.call(new Context(), new Index(1, 1)))
                        .doOnNext(new Action1<String>() {
                            @Override
                            public void call(String content) {
                                assertEquals(RequestProcessor.SINGLE_ENTITY_RESPONSE, content);
                                counter.incrementAndGet();
                                result.putIfAbsent(content, new AtomicInteger());
                                result.get(content).incrementAndGet();
                                done.countDown();
                            }
                        })
                        .doOnError(new Action1<Throwable>() {
                            @Override
                            public void call(Throwable throwable) {
                                fail("Unexpected failure: " + throwable);
                            }
                        })
                        .subscribe();
        long waitSeconds = 3;
        boolean timedout = !done.await(waitSeconds, TimeUnit.SECONDS);
        if (timedout) {
            fail(String.format("Waited at least %d seconds for the test to finish. Something is wrong", waitSeconds));
        }
        // BUGFIX: stop the poller once enough events were observed; previously the
        // subscription was never released, so polling continued while (and after) the
        // assertions below ran, making the exact-count checks racy.
        subscription.unsubscribe();
        // BUGFIX: (message, expected, actual) order — maxRepeat is the expected value.
        assertEquals(String.format("%d servers => the result has %d times of a single stream", localServerProvider.serverSize(), localServerProvider.serverSize()), maxRepeat, counter.get());
        assertTrue(
                String.format("There should be at least %d completions after %d repeats (The last one may not have completion. Actual completion count: %d",
                        maxRepeat - 1,
                        maxRepeat,
                        sourceObserver.getCount(server, EventType.SOURCE_COMPLETED)),
                maxRepeat - 1 <= sourceObserver.getCount(server, EventType.SOURCE_COMPLETED));
        assertEquals("There should be no error", 0, sourceObserver.getCount(server, EventType.SUBSCRIPTION_FAILED));
        assertEquals("There should be " + maxRepeat + " connection establishment in total", maxRepeat, sourceObserver.getCount(server, EventType.CONNECTION_ESTABLISHED));
        assertEquals("There should no final completion", 0, sourceObserver.getCompletionCount());
        assertEquals(0, sourceObserver.getErrorCount());
        Set<EventType> events = sourceObserver.getEvents();
        assertTrue("Polling Source always has subscriptions, so there won't be subscription_ended event. But other events should all be there", EXPECTED_EVENTS_SETS.containsAll(events));
    }

    @Test
    public void testResubscribeShouldAlwaysWork() throws Exception {
        HttpSource<ServerSentEvent, ServerSentEvent> source = HttpSources
                .source(
                        HttpClientFactories.sseClientFactory(),
                        HttpRequestFactories.createGetFactory("test/stream"))
                .withServerProvider(localServerProvider)
                .withActivityObserver(sourceObserver)
                .build();

        int totalCount = 5;
        final CountDownLatch latch = new CountDownLatch(totalCount);
        Observable<ServerSentEvent> stream = Observable.merge(source.call(new Context(), new Index(1, 1)));
        Subscription sub = stream.subscribe(new Action1<ServerSentEvent>() {
            @Override
            public void call(ServerSentEvent event) {
                latch.countDown();
            }
        });
        long waitSeconds = 10;
        boolean countedDown = latch.await(waitSeconds, TimeUnit.SECONDS);
        if (!countedDown) {
            fail(String.format("Waited too long to receive %d events within %d seconds. Total counted: %d", totalCount, waitSeconds, latch.getCount()));
        }
        sub.unsubscribe();

        // Re-subscribe to the same stream and expect it to deliver events again.
        final CountDownLatch newLatch = new CountDownLatch(totalCount);
        sub = stream.subscribe(new Action1<ServerSentEvent>() {
            @Override
            public void call(ServerSentEvent event) {
                newLatch.countDown();
            }
        });
        countedDown = newLatch.await(5, TimeUnit.SECONDS);
        if (!countedDown) {
            // BUGFIX: this failure concerns the second subscription, so report newLatch's
            // remaining count (latch is already at 0 here and was always reported as 0).
            fail("Waited too long to receive enough events. Counted: " + newLatch.getCount());
        }
        sub.unsubscribe();
    }

    /**
     * TODO: To test the resubscription, we need to run curl http://localhost:port. When we see stream of lines,
     * terminate the curl connection, and then rerun curl. The second curl command should get stuck. This
     * test is not automated yet because running an HTTP client (including a raw URLConnection) in the test
     * can't reproduce the test for some reason.
     *
     * @throws Exception
     */
    @Test
    @Ignore(value = "This is meant to be run as an integration test. " +
            "Also has test exclusion set for this package")
    public void testWithJobExecutionWillWorkForResubscription() throws Exception {
        final HttpSource<ServerSentEvent, ServerSentEvent> source = HttpSources
                .source(
                        HttpClientFactories.sseClientFactory(),
                        HttpRequestFactories.createGetFactory("test/infStream"))
                .withServerProvider(localServerProvider)
                .withActivityObserver(sourceObserver)
                .build();
        final ServerSentEventsSink<String> sink = new ServerSentEventsSink<>(
                new Func1<String, String>() {
                    @Override
                    public String call(String o) {
                        return o;
                    }
                });
        LocalJobExecutorNetworked.execute(new HttpEchoJob(source, sink).getJobInstance());
    }

    @Test
    @Ignore(value = "This is meant to be run as an integration test. " +
            "Also has test exclusion set for this package")
    public void testDummySource() throws Exception {
        // A source that just emits an incrementing counter once per second.
        Source<String> dummySource = new Source<String>() {
            @Override
            public Observable<Observable<String>> call(Context context, Index index) {
                return Observable.just(Observable.interval(1, TimeUnit.SECONDS)
                        .map(new Func1<Long, String>() {
                            @Override
                            public String call(Long aLong) {
                                return aLong.toString();
                            }
                        }));
            }

            @Override
            public void close() throws IOException {
            }
        };
        LocalJobExecutorNetworked.execute(new DummyEchoJob(dummySource).getJobInstance());
    }

    /** A single-stage Mantis job that echoes whatever its (injected) source emits. */
    public static class DummyEchoJob extends MantisJobProvider<String> {

        private final Source<String> source;

        public DummyEchoJob(Source<String> source) {
            this.source = source;
        }

        @Override
        public Job<String> getJobInstance() {
            return MantisJob
                    .source(source)
                    .stage(
                            new ScalarComputation<String, String>() {
                                @Override
                                public Observable<String> call(Context context, Observable<String> stream) {
                                    return stream.map(new Func1<String, String>() {
                                        @Override
                                        public String call(String event) {
                                            return "echoed: " + event;
                                        }
                                    });
                                }
                            },
                            new ScalarToScalar.Config<String, String>()
                                    .codec(Codecs.string())
                                    .description("Just a test config"))
                    .sink(new ServerSentEventsSink<>(
                            new Func1<String, String>() {
                                @Override
                                public String call(String o) {
                                    return o;
                                }
                            }))
                    .metadata(new Builder()
                            .description("Counts frequency of words as they are observed.")
                            .build())
                    .create();
        }
    }

    /** A single-stage Mantis job that relays SSE content from an HTTP source to an SSE sink. */
    public static class HttpEchoJob extends MantisJobProvider<String> {

        private final HttpSource<ServerSentEvent, ServerSentEvent> source;
        private final ServerSentEventsSink<String> sink;

        public HttpEchoJob(HttpSource<ServerSentEvent, ServerSentEvent> source, ServerSentEventsSink<String> sink) {
            this.source = source;
            this.sink = sink;
        }

        public int getSinkPort() {
            return sink.getServerPort();
        }

        @Override
        public Job<String> getJobInstance() {
            return MantisJob
                    .source(source)
                    .stage(
                            new ScalarComputation<ServerSentEvent, String>() {
                                @Override
                                public Observable<String> call(Context context, Observable<ServerSentEvent> stream) {
                                    return stream.map(new Func1<ServerSentEvent, String>() {
                                        @Override
                                        public String call(ServerSentEvent event) {
                                            return event.contentAsString();
                                        }
                                    });
                                }
                            },
                            new ScalarToScalar.Config<ServerSentEvent, String>()
                                    .codec(Codecs.string())
                                    .description("Just a test config")
                    )
                    .sink(sink)
                    .metadata(new Builder()
                            .description("Counts frequency of words as they are observed.")
                            .build())
                    .create();
        }
    }
}
| 8,575 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http/ContextualHttpSourceTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.source.http;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.CONNECTION_ATTEMPTED;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.CONNECTION_ESTABLISHED;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.CONNECTION_UNSUBSCRIBED;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.SERVER_FOUND;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.SOURCE_COMPLETED;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.SUBSCRIPTION_ENDED;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.SUBSCRIPTION_ESTABLISHED;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.http.impl.HttpClientFactories;
import io.mantisrx.runtime.source.http.impl.HttpRequestFactories;
import io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType;
import io.mantisrx.runtime.source.http.impl.ServerContext;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import mantis.io.reactivex.netty.client.RxClient.ServerInfo;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import rx.Observable;
import rx.functions.Action0;
import rx.functions.Action1;
/**
 * Tests for {@link ContextualHttpSource}: verifies that events streamed from multiple
 * local servers arrive paired with the {@link ServerInfo} of the server that produced
 * them, and that the per-server lifecycle events are observed the expected number of times.
 */
public class ContextualHttpSourceTest {

    private final static int SEED_PORT = 4000;
    private final static int PORT_RANGE = 1000;
    private static LocalServerProvider localServerProvider;
    // Each of these lifecycle events is expected exactly once per server for a finite stream.
    private static EventType[] expectedEvents = new EventType[] {CONNECTION_ATTEMPTED, SUBSCRIPTION_ESTABLISHED, CONNECTION_UNSUBSCRIBED, CONNECTION_ESTABLISHED, SERVER_FOUND, SOURCE_COMPLETED, SUBSCRIPTION_ENDED};
    private static Set<EventType> EXPECTED_EVENTS_SETS = new HashSet<>(Arrays.asList(expectedEvents));
    private TestSourceObserver sourceObserver = new TestSourceObserver();
    // Just make sure the unused port is out side the range of possible ports: [SEED_PORT, SEED_PORT + PORT_RANGE)
    @BeforeClass
    public static void init() {
        // Randomize the starting port within [SEED_PORT, SEED_PORT + PORT_RANGE) so that
        // concurrent test runs are unlikely to collide on the same ports.
        int portStart = new Random().nextInt(PORT_RANGE) + SEED_PORT;
        localServerProvider = new LocalServerProvider();
        localServerProvider.start(3, portStart);
    }
    @AfterClass
    public static void shutdown() throws Exception {
        localServerProvider.shutDown();
    }
    @Before
    public void setup() {
        // Fresh observer per test so event counts never leak between test cases.
        sourceObserver = new TestSourceObserver();
    }
    @Test
    public void canStreamFromMultipleServersWithCorrectContext() throws Exception {
        ContextualHttpSource<ServerSentEvent> source = HttpSources
                .contextualSource(
                        HttpClientFactories.sseClientFactory(),
                        HttpRequestFactories.createGetFactory("test/stream"))
                .withServerProvider(localServerProvider)
                .withActivityObserver(sourceObserver)
                .build();
        final AtomicInteger counter = new AtomicInteger();
        final CountDownLatch done = new CountDownLatch(1);
        final ConcurrentHashMap<String, AtomicInteger> result = new ConcurrentHashMap<>();
        // Collects the distinct ServerInfo values seen on emitted pairs; must end up equal
        // to the number of servers the provider launched.
        final CopyOnWriteArraySet<ServerInfo> connectedServers = new CopyOnWriteArraySet<>();
        Observable.merge(source.call(new Context(), new Index(1, 1)))
                .doOnNext(new Action1<ServerContext<ServerSentEvent>>() {
                    @Override
                    public void call(ServerContext<ServerSentEvent> pair) {
                        // Every test/stream payload line contains the word "line".
                        assertTrue(pair.getValue().contentAsString().contains("line"));
                        counter.incrementAndGet();
                        String msg = pair.getValue().contentAsString();
                        result.putIfAbsent(msg, new AtomicInteger());
                        result.get(msg).incrementAndGet();
                        connectedServers.add(pair.getServer());
                    }
                })
                .doOnError(new Action1<Throwable>() {
                    @Override
                    public void call(Throwable throwable) {
                        fail("Unexpected failure: " + throwable);
                    }
                })
                .doOnCompleted(new Action0() {
                    @Override
                    public void call() {
                        System.out.println("completed");
                    }
                })
                .doAfterTerminate(new Action0() {
                    @Override
                    public void call() {
                        // Released on either completion or error; unblocks the awaiting test thread.
                        done.countDown();
                    }
                })
                .subscribe();
        long waitSeconds = 3;
        boolean timedout = !done.await(waitSeconds, TimeUnit.SECONDS);
        if (timedout) {
            fail(String.format("Waited at least %d seconds for the test to finish. Something is wrong", waitSeconds));
        }
        assertEquals("There should be as many as provided servers", localServerProvider.serverSize(), connectedServers.size());
        // NOTE(review): JUnit's convention is (message, expected, actual); the arguments here
        // appear swapped (counter.get() is the observed value) — harmless for pass/fail,
        // but failure messages would label the values backwards.
        Assert.assertEquals(String.format("%d servers => the result has %d times of a single stream", localServerProvider.serverSize(), localServerProvider.serverSize()), counter.get(), RequestProcessor.smallStreamContent.size() * localServerProvider.serverSize());
        for (String data : RequestProcessor.smallStreamContent) {
            assertEquals(String.format("%d servers => %d identical copies per message", localServerProvider.serverSize(), localServerProvider.serverSize()), localServerProvider.serverSize(), result.get(data).get());
        }
        for (ServerInfo server : localServerProvider.getServerInfos()) {
            assertEquals("There should be one completion per server", 1, sourceObserver.getCount(server, EventType.SOURCE_COMPLETED));
            assertEquals("There should be one un-subscription per server", 1, sourceObserver.getCount(server, EventType.CONNECTION_UNSUBSCRIBED));
            assertEquals("There should be no error", 0, sourceObserver.getCount(server, EventType.SUBSCRIPTION_FAILED));
            assertEquals("There should be one connection per server", 1, sourceObserver.getCount(server, EventType.CONNECTION_ESTABLISHED));
        }
        assertEquals("There should be one completions", 1, sourceObserver.getCompletionCount());
        assertEquals(0, sourceObserver.getErrorCount());
        Set<EventType> events = sourceObserver.getEvents();
        assertEquals(EXPECTED_EVENTS_SETS, events);
        for (EventType event : events) {
            assertEquals("Each event should be recorded exactly once per server", localServerProvider.serverSize(), sourceObserver.getEventCount(event));
        }
    }
}
| 8,576 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http/LocalServerProvider.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.source.http;
import static org.junit.Assert.fail;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.ByteBuf;
import io.netty.channel.nio.NioEventLoopGroup;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import mantis.io.reactivex.netty.client.RxClient.ServerInfo;
import mantis.io.reactivex.netty.protocol.http.server.HttpServer;
import mantis.io.reactivex.netty.protocol.http.server.HttpServerBuilder;
import mantis.io.reactivex.netty.server.RxServerThreadFactory;
import rx.Observable;
import rx.functions.Func1;
import rx.subjects.PublishSubject;
public class LocalServerProvider implements HttpServerProvider {
private final List<Server> servers = new ArrayList<>();
private final ConcurrentMap<String, AtomicInteger> subscriptionCount = new ConcurrentHashMap<>();
private final PublishSubject<ServerInfo> serversToRemove = PublishSubject.create();
public LocalServerProvider() {
}
private List<Server> launchServers(int serverCount, int portStart) {
List<Server> servers = new ArrayList<>();
int maxRange = 1000;
int port = portStart;
Exception lastException = new Exception("InitialException");
for (int i = 0; i < serverCount; ++i) {
int count = 0;
while (count < maxRange) {
try {
HttpServerBuilder<ByteBuf, ByteBuf> builder = new HttpServerBuilder<>(
new ServerBootstrap().group(new NioEventLoopGroup(10, new RxServerThreadFactory())),
port,
new RequestProcessor());
HttpServer<ByteBuf, ByteBuf> server = builder.build();
server.start();
servers.add(new Server("localhost", port, server));
port += 1;
break;
} catch (Exception e) {
lastException = e;
}
}
if (count >= maxRange) {
fail(String.format("Can't obtain %d ports ranging from %d to %d. Last exception: %s",
serverCount,
portStart,
port,
lastException.getMessage()));
}
}
return servers;
}
@Override
public Observable<ServerInfo> getServersToAdd() {
return Observable.from(servers)
.map(new Func1<Server, ServerInfo>() {
@Override
public ServerInfo call(Server pair) {
return new ServerInfo(pair.getHost(), pair.getPort());
}
});
}
public void removeServer(ServerInfo server) {
serversToRemove.onNext(server);
}
@Override
public Observable<ServerInfo> getServersToRemove() {
return serversToRemove;
}
public int getSubscriptionCount(Server server) {
return subscriptionCount.get(server.getKey()).get();
}
public void shutDown() throws Exception {
for (Server server : servers) {
server.getServer().shutdown();
}
}
public void start(int serverCount, int portStart) {
this.servers.addAll(launchServers(3, portStart));
}
public List<Server> getServers() {
return this.servers;
}
public List<ServerInfo> getServerInfos() {
return Observable.from(this.servers).map(new Func1<Server, ServerInfo>() {
@Override
public ServerInfo call(Server server) {
return new ServerInfo(server.getHost(), server.getPort());
}
}).toList().toBlocking().first();
}
public int serverSize() {
return getServers().size();
}
public static class Server {
private final String host;
private final int port;
private final HttpServer<ByteBuf, ByteBuf> server;
public Server(String host, int port, HttpServer<ByteBuf, ByteBuf> server) {
this.host = host;
this.port = port;
this.server = server;
}
private static String getKey(String host, int port) {
return String.format("%s:%d", host, port);
}
public static String getKey(ServerInfo server) {
return getKey(server.getHost(), server.getPort());
}
public String getHost() {
return host;
}
public int getPort() {
return port;
}
public HttpServer<ByteBuf, ByteBuf> getServer() {
return server;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Server server = (Server) o;
if (port != server.port) return false;
if (!host.equals(server.host)) return false;
return true;
}
@Override
public int hashCode() {
int result = host.hashCode();
result = 31 * result + port;
return result;
}
public String getKey() {
return getKey(getHost(), getPort());
}
}
}
| 8,577 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http/ClientResumePoliciesTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.source.http;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent;
import io.mantisrx.runtime.source.http.impl.ServerClientContext;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import mantis.io.reactivex.netty.client.RxClient.ServerInfo;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import org.junit.Before;
import org.junit.Test;
import rx.Observable;
import rx.Observer;
import rx.Subscriber;
import rx.functions.Func0;
public class ClientResumePoliciesTest {
public static final String RESPONSE_CONTENT = "value";
private Observer<HttpSourceEvent> observer;
private HttpClient<String, String> client;
private HttpRequestFactory<String> factory;
private HttpClientResponse<String> response;
private HttpClientRequest<String> request;
    @SuppressWarnings("unchecked")
    @Before
    public void setup() {
        // Fresh mocks per test so stubbing and interaction state never leak between cases.
        observer = new TestSourceObserver();
        client = mock(HttpClient.class);
        factory = mock(HttpRequestFactory.class);
        response = mock(HttpClientResponse.class);
        request = mock(HttpClientRequest.class);
        // Wiring: factory hands out a canned request; any submission to the client yields
        // a single response whose content is RESPONSE_CONTENT.
        when(factory.create()).thenReturn(request);
        when(response.getContent()).thenReturn(Observable.just(RESPONSE_CONTENT));
        when(client.submit(any(HttpClientRequest.class))).thenReturn(Observable.just(response));
    }
@Test
public void testMaxRepeatOnCompletionAndError() throws Exception {
int max = 10;
ClientResumePolicy<String, String> policy = ClientResumePolicies.maxRepeat(max);
ServerClientContext<String, String> context = new ServerClientContext<>(new ServerInfo("localhost", 1000), client, factory, observer);
for (int i = 0; i < 20; ++i) {
Observable<HttpClientResponse<String>> resumedOnCompleted = policy.onCompleted(context, i);
Observable<HttpClientResponse<String>> resumedOnError = policy.onError(context, i, new Throwable("error"));
if (i <= max) {
assertNotNull(resumedOnCompleted);
assertEquals(RESPONSE_CONTENT, resumedOnCompleted.toBlocking().first().getContent().toBlocking().first());
assertNotNull(resumedOnError);
assertEquals(RESPONSE_CONTENT, resumedOnError.toBlocking().first().getContent().toBlocking().first());
} else {
assertNull("The resumed on completion should be null as max repeat is passed", resumedOnCompleted);
assertNull("The resumed on error should be null as max repeat is passed", resumedOnError);
}
}
}
@Test
public void testMaxCombinator() throws Exception {
    final AtomicLong start = new AtomicLong();
    final AtomicLong end = new AtomicLong();
    final long delay = 100;
    final int repeat = 20;
    final CountDownLatch done = new CountDownLatch(repeat);
    // Fixed-delay policy: every resume attempt is delayed by the same 100ms.
    ClientResumePolicy<String, String> delayedPolicy = ClientResumePolicies.delayed(new Func0<Long>() {
        @Override
        public Long call() {
            return delay;
        }
    }, TimeUnit.MILLISECONDS);
    // Combine with maxRepeat so resumption stops after `repeat` attempts.
    ClientResumePolicy<String, String> policy = ClientResumePolicies.maxRepeat(delayedPolicy, repeat);
    ServerClientContext<String, String> context = new ServerClientContext<>(new ServerInfo("localhost", 1000), client, factory, observer);
    start.set(System.currentTimeMillis());
    end.set(0);
    for (int i = 0; i < repeat; ++i) {
        Observable<HttpClientResponse<String>> resumedOnCompleted = policy.onCompleted(context, i);
        Observable<HttpClientResponse<String>> resumedOnError = policy.onError(context, i, new Throwable("error"));
        // The loop never exceeds `repeat` attempts, so the policy must always resume.
        // (The original code carried a dead `else` branch for i > repeat that could never run.)
        assertNotNull(resumedOnCompleted);
        assertNotNull(resumedOnError);
        resumedOnCompleted.subscribe(new Subscriber<HttpClientResponse<String>>() {
            @Override
            public void onCompleted() {
                // Accumulate per-subscription elapsed time; the delayed resumes run concurrently.
                end.getAndAdd(System.currentTimeMillis() - start.get());
                done.countDown();
            }

            @Override
            public void onError(Throwable e) {
            }

            @Override
            public void onNext(HttpClientResponse<String> stringHttpClientResponse) {
            }
        });
    }
    long wait = 5;
    // BUG FIX: await previously hard-coded 5 instead of using the declared `wait`.
    if (!done.await(wait, TimeUnit.SECONDS)) {
        fail("It should take far less than " + wait + " seconds to run the test. ");
    }
    long elapsed = end.get();
    // Allow up to 50% scheduling slack per delayed resume.
    long maxDelay = delay + delay / 2;
    assertTrue(String.format("The delay should be more than %d milliseconds, but no more than %d milliseconds. The actual: %d", repeat * delay, repeat * maxDelay, elapsed), elapsed >= repeat * delay && elapsed <= repeat * maxDelay);
}
@Test
public void testDelayedOnCompletion() throws Exception {
    final AtomicLong start = new AtomicLong();
    final AtomicLong end = new AtomicLong();
    final long delay = 100;
    final int repeat = 20;
    final CountDownLatch done = new CountDownLatch(repeat);
    // Policy that delays every resume-on-completion by a fixed 100ms.
    ClientResumePolicy<String, String> policy = ClientResumePolicies.delayed(new Func0<Long>() {
        @Override
        public Long call() {
            return delay;
        }
    }, TimeUnit.MILLISECONDS);
    ServerClientContext<String, String> context = new ServerClientContext<>(new ServerInfo("localhost", 1000), client, factory, observer);
    start.set(System.currentTimeMillis());
    end.set(0);
    for (int i = 0; i < repeat; ++i) {
        Observable<HttpClientResponse<String>> resumedOnCompleted = policy.onCompleted(context, i);
        resumedOnCompleted.subscribe(new Subscriber<HttpClientResponse<String>>() {
            @Override
            public void onCompleted() {
                // Sum per-subscription elapsed times; all delayed resumes run concurrently.
                end.getAndAdd(System.currentTimeMillis() - start.get());
                done.countDown();
            }

            @Override
            public void onError(Throwable e) {
            }

            @Override
            public void onNext(HttpClientResponse<String> stringHttpClientResponse) {
            }
        });
    }
    long wait = 5;
    // BUG FIX: await previously hard-coded 5 instead of using the declared `wait`.
    if (!done.await(wait, TimeUnit.SECONDS)) {
        fail("It should take far less than " + wait + " seconds to run the test. ");
    }
    long elapsed = end.get();
    // Allow up to 50% scheduling slack per delayed resume.
    long maxDelay = delay + delay / 2;
    assertTrue(String.format("The delay should be more than %d milliseconds, but no more than %d milliseconds. The actual: %d", repeat * delay, repeat * maxDelay, elapsed), elapsed >= repeat * delay && elapsed <= repeat * maxDelay);
}
@Test
public void testDelayedOnError() throws Exception {
    final AtomicLong start = new AtomicLong();
    final AtomicLong end = new AtomicLong();
    final long delay = 100;
    final int repeat = 20;
    final CountDownLatch done = new CountDownLatch(repeat);
    // Policy that delays every resume-on-error by a fixed 100ms.
    ClientResumePolicy<String, String> policy = ClientResumePolicies.delayed(new Func0<Long>() {
        @Override
        public Long call() {
            return delay;
        }
    }, TimeUnit.MILLISECONDS);
    ServerClientContext<String, String> context = new ServerClientContext<>(new ServerInfo("localhost", 1000), client, factory, observer);
    start.set(System.currentTimeMillis());
    end.set(0);
    for (int i = 0; i < repeat; ++i) {
        // Renamed from `resumedOnCompleted`: this test exercises the onError path.
        Observable<HttpClientResponse<String>> resumedOnError = policy.onError(context, i, new Throwable("error"));
        resumedOnError.subscribe(new Subscriber<HttpClientResponse<String>>() {
            @Override
            public void onCompleted() {
                // Sum per-subscription elapsed times; all delayed resumes run concurrently.
                end.getAndAdd(System.currentTimeMillis() - start.get());
                done.countDown();
            }

            @Override
            public void onError(Throwable e) {
            }

            @Override
            public void onNext(HttpClientResponse<String> stringHttpClientResponse) {
            }
        });
    }
    long wait = 5;
    // BUG FIX: await previously hard-coded 5 instead of using the declared `wait`.
    if (!done.await(wait, TimeUnit.SECONDS)) {
        fail("It should take far less than " + wait + " seconds to run the test. ");
    }
    long elapsed = end.get();
    // Allow up to 50% scheduling slack per delayed resume.
    long maxDelay = delay + delay / 2;
    // BUG FIX: assertion message previously said "millionseconds".
    assertTrue(String.format("The delay should be more than %d milliseconds, but no more than %d milliseconds. The actual: %d", repeat * delay, repeat * maxDelay, elapsed), elapsed >= repeat * delay && elapsed <= repeat * maxDelay);
}
}
| 8,578 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http/TestSourceObserver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.source.http;
import io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent;
import io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType;
import io.mantisrx.runtime.source.http.impl.OperatorResumeOnCompleted;
import io.mantisrx.runtime.source.http.impl.ResumeOnCompletedPolicy;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import mantis.io.reactivex.netty.client.RxClient.ServerInfo;
import rx.Observable;
import rx.Observer;
import rx.Subscriber;
import rx.subjects.PublishSubject;
/**
 * Test helper that records every {@link HttpSourceEvent} it observes, keeping
 * counts globally, per event type, and per (event type, server) pair so tests
 * can assert on the exact lifecycle events emitted by an HTTP source.
 */
public class TestSourceObserver implements Observer<HttpSourceEvent> {

    private final AtomicInteger completionCount = new AtomicInteger();
    private final AtomicInteger errorCount = new AtomicInteger();
    // Per-event-type counters across all servers; populated lazily in onNext.
    private final ConcurrentMap<EventType, AtomicInteger> sourceEventCounters = new ConcurrentHashMap<>();
    // Per-event-type, per-server counters; the outer map is pre-populated for
    // every event type in the constructor so onNext never sees a missing entry.
    private final ConcurrentHashMap<EventType, ConcurrentHashMap<ServerInfo, AtomicInteger>> serverEventCounters;

    public TestSourceObserver() {
        serverEventCounters = new ConcurrentHashMap<>();
        for (EventType type : EventType.values()) {
            serverEventCounters.put(type, new ConcurrentHashMap<ServerInfo, AtomicInteger>());
        }
    }

    /**
     * Manual scratch driver demonstrating {@code OperatorResumeOnCompleted}
     * with a takeUntil cutoff; not part of the automated test suite.
     */
    public static void main(String[] args) throws Exception {
        final PublishSubject<Object> subject = PublishSubject.create();
        final CountDownLatch done = new CountDownLatch(1);
        Observable.interval(10, TimeUnit.MILLISECONDS)
                .lift(new OperatorResumeOnCompleted<>(
                        new ResumeOnCompletedPolicy<Long>() {
                            @Override
                            public Observable<Long> call(Integer attempts) {
                                return Observable.just(99L);
                            }
                        }
                ))
                .takeUntil(subject)
                .subscribe(new Subscriber<Long>() {
                    @Override
                    public void onCompleted() {
                        System.out.println("completed. ");
                        done.countDown();
                    }

                    @Override
                    public void onError(Throwable e) {
                    }

                    @Override
                    public void onNext(Long aLong) {
                        if (aLong > 10) {
                            subject.onNext("done");
                        }
                    }
                });
        done.await();
        subject.onNext("abc");
    }

    @Override
    public void onCompleted() {
        completionCount.incrementAndGet();
    }

    @Override
    public void onError(Throwable e) {
        // BUG FIX: message previously read "Run into error" with no separator
        // before the exception detail ("Run into errorfoo").
        System.err.println("Ran into error: " + e.getMessage());
        errorCount.incrementAndGet();
    }

    @Override
    public void onNext(HttpSourceEvent event) {
        sourceEventCounters.putIfAbsent(event.getEventType(), new AtomicInteger());
        sourceEventCounters.get(event.getEventType()).incrementAndGet();
        ConcurrentHashMap<ServerInfo, AtomicInteger> counters = serverEventCounters.get(event.getEventType());
        counters.putIfAbsent(event.getServer(), new AtomicInteger());
        counters.get(event.getServer()).incrementAndGet();
        System.out.println(String.format("Event: %s for server %s:%s", event.getEventType(), event.getServer().getHost(), event.getServer().getPort()));
    }

    /** Total number of onCompleted calls observed. */
    public int getCompletionCount() {
        return completionCount.get();
    }

    /** Total number of onError calls observed. */
    public int getErrorCount() {
        return errorCount.get();
    }

    /** Count of occurrences of the given event type across all servers. */
    public int getEventCount(EventType eventType) {
        return sourceEventCounters.get(eventType).get();
    }

    /** The set of event types seen at least once. */
    public Set<EventType> getEvents() {
        return sourceEventCounters.keySet();
    }

    /** Count of occurrences of the given event type for one specific server (0 if never seen). */
    public int getCount(ServerInfo server, EventType eventType) {
        AtomicInteger value = serverEventCounters.get(eventType).get(server);
        if (value == null) {
            return 0;
        }
        return value.get();
    }
}
| 8,579 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http/StaticServerPollerTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.source.http;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import io.mantisrx.runtime.source.http.impl.StaticServerPoller;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import mantis.io.reactivex.netty.client.RxClient.ServerInfo;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import rx.functions.Action1;
public class StaticServerPollerTest {

    // Fixed server set the poller should re-emit on every poll.
    private Set<ServerInfo> servers;
    // Poll period in seconds.
    private int pollingInterval = 1;

    @Before
    public void setUp() throws Exception {
        servers = new HashSet<>();
        for (int i = 0; i < 5; ++i) {
            servers.add(new ServerInfo("host" + i, i));
        }
    }

    @After
    public void tearDown() throws Exception {
    }

    @Test
    public void pollingIsScheduled() throws Exception {
        StaticServerPoller poller = new StaticServerPoller(servers, pollingInterval);
        final AtomicInteger count = new AtomicInteger();
        final CountDownLatch done = new CountDownLatch(5);
        long start = System.currentTimeMillis();
        poller.servers()
                .doOnNext(new Action1<Set<ServerInfo>>() {
                    @Override
                    public void call(Set<ServerInfo> data) {
                        assertEquals("We should always see the same set of servers", servers, data);
                        count.incrementAndGet();
                        done.countDown();
                    }
                })
                .subscribe();
        done.await();
        long elapsed = (System.currentTimeMillis() - start) / 1000;
        System.out.println(elapsed);
        // BUG FIX: the message previously claimed the elapsed time "should be
        // greater than 3" while the code actually checks elapsed <= 6.
        assertTrue("The poller should have polled 5 times and the elapsed time should be no more than 6 seconds",
                count.get() == 5 && elapsed <= 6);
    }
}
| 8,580 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http/impl/DefaultHttpServerProviderTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.source.http.impl;
import static org.junit.Assert.assertEquals;
import io.mantisrx.runtime.source.http.ServerPoller;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import mantis.io.reactivex.netty.client.RxClient.ServerInfo;
import org.junit.Test;
import rx.Observable;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
public class DefaultHttpServerProviderTest {
@Test
public void testOverlappingSetsAreHandled() throws Exception {
final List<ServerInfo> added = new ArrayList<>();
final Queue<ServerInfo> removed = new ConcurrentLinkedQueue<>();
final CountDownLatch done = new CountDownLatch(1);
final int min = 1;
final int max = 8;
ServerPoller poller = new ServerPoller() {
@Override
public Observable<Set<ServerInfo>> servers() {
return Observable.range(min, max)
.buffer(3, 1)
.map(new Func1<List<Integer>, Set<ServerInfo>>() {
@Override
public Set<ServerInfo> call(List<Integer> ports) {
Set<ServerInfo> s = new HashSet<>();
for (int port : ports) {
s.add(new ServerInfo("host", port));
}
return s;
}
})
.doAfterTerminate(new Action0() {
@Override
public void call() {
done.countDown();
}
});
}
@Override
public Set<ServerInfo> getServers() {
// TODO Auto-generated method stub
return null;
}
};
DefaultHttpServerProvider provider = new DefaultHttpServerProvider(poller);
provider.getServersToAdd()
.doOnNext(new Action1<ServerInfo>() {
@Override
public void call(ServerInfo server) {
added.add(server);
}
})
.subscribe();
provider.getServersToRemove()
.doOnNext(new Action1<ServerInfo>() {
@Override
public void call(ServerInfo server) {
removed.offer(server);
}
})
.subscribe();
done.await();
int port = min - 1;
added.sort(new Comparator<ServerInfo>() {
@Override
public int compare(ServerInfo o1, ServerInfo o2) {
return o1.getPort() - o2.getPort();
}
});
for (ServerInfo server : added) {
port += 1;
assertEquals(port, server.getPort());
}
assertEquals(max, port);
port = 0;
for (ServerInfo server : removed) {
port += 1;
assertEquals(port, server.getPort());
}
assertEquals("The very last element should not be removed. ", max - 1, port);
}
@Test
public void testNewSetAlwaysReplacesOldSet() throws Exception {
final List<ServerInfo> added = new ArrayList<>();
final List<ServerInfo> removed = new ArrayList<>();
final CountDownLatch done = new CountDownLatch(1);
final int min = 1;
final int max = 8;
ServerPoller poller = new ServerPoller() {
@Override
public Observable<Set<ServerInfo>> servers() {
return Observable.range(min, max)
.buffer(2)
.map(new Func1<List<Integer>, Set<ServerInfo>>() {
@Override
public Set<ServerInfo> call(List<Integer> ports) {
Set<ServerInfo> s = new HashSet<>();
for (int port : ports) {
s.add(new ServerInfo("host", port));
}
return s;
}
})
.doAfterTerminate(new Action0() {
@Override
public void call() {
done.countDown();
}
});
}
@Override
public Set<ServerInfo> getServers() {
// TODO Auto-generated method stub
return null;
}
};
DefaultHttpServerProvider provider = new DefaultHttpServerProvider(poller);
provider.getServersToAdd()
.doOnNext(new Action1<ServerInfo>() {
@Override
public void call(ServerInfo server) {
added.add(server);
}
})
.subscribe();
provider.getServersToRemove()
.doOnNext(new Action1<ServerInfo>() {
@Override
public void call(ServerInfo server) {
removed.add(server);
}
})
.subscribe();
done.await();
int port = min - 1;
// Have to sort because items in a single batch may not come in order
Collections.sort(added, new Comparator<ServerInfo>() {
@Override
public int compare(ServerInfo o1, ServerInfo o2) {
return o1.getPort() - o2.getPort();
}
});
for (ServerInfo server : added) {
port += 1;
assertEquals(port, server.getPort());
}
assertEquals(max, port);
Collections.sort(removed, new Comparator<ServerInfo>() {
@Override
public int compare(ServerInfo o1, ServerInfo o2) {
return o1.getPort() - o2.getPort();
}
});
port = 0;
for (ServerInfo server : removed) {
port += 1;
assertEquals(port, server.getPort());
}
assertEquals("The very last two elements should not be removed. ", max - 2, port);
}
@Test
public void testTheSameSetWillBeIdempotent() throws Exception {
final List<ServerInfo> added = new ArrayList<>();
final List<ServerInfo> removed = new ArrayList<>();
final CountDownLatch done = new CountDownLatch(1);
final int min = 1;
final int max = 8;
ServerPoller poller = new ServerPoller() {
@Override
public Observable<Set<ServerInfo>> servers() {
return Observable.range(min, max)
.map(new Func1<Integer, Set<ServerInfo>>() {
@Override
public Set<ServerInfo> call(Integer port) {
Set<ServerInfo> s = new HashSet<>();
s.add(new ServerInfo("host", 1));
s.add(new ServerInfo("host", 2));
return s;
}
})
.doAfterTerminate(new Action0() {
@Override
public void call() {
done.countDown();
}
});
}
@Override
public Set<ServerInfo> getServers() {
// TODO Auto-generated method stub
return null;
}
};
DefaultHttpServerProvider provider = new DefaultHttpServerProvider(poller);
provider.getServersToAdd()
.doOnNext(new Action1<ServerInfo>() {
@Override
public void call(ServerInfo server) {
added.add(server);
}
})
.subscribe();
provider.getServersToRemove()
.doOnNext(new Action1<ServerInfo>() {
@Override
public void call(ServerInfo server) {
removed.add(server);
}
})
.subscribe();
done.await();
int port = min - 1;
// Have to sort because items in a single batch may not come in order
Collections.sort(added, new Comparator<ServerInfo>() {
@Override
public int compare(ServerInfo o1, ServerInfo o2) {
return o1.getPort() - o2.getPort();
}
});
for (ServerInfo server : added) {
port += 1;
assertEquals(port, server.getPort());
}
assertEquals(2, port);
assertEquals("No element should be removed. ", 0, removed.size());
}
}
| 8,581 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http/impl/OperatorResumeOnCompletedTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.source.http.impl;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Test;
import rx.Observable;
import rx.Observable.Operator;
import rx.Subscriber;
public class OperatorResumeOnCompletedTest {

    @Test
    public void testCanResumeOnCompletion() throws Exception {
        final int max = 6;
        final Observable<Integer> ints = Observable.range(1, max);
        final int repeat = 5;
        final AtomicInteger retries = new AtomicInteger();
        // Policy resumes the completed stream up to `repeat` times, emitting one
        // extra value (attempts + max) per resumption; returning null stops resumption.
        Operator<Integer, Integer> resumeOperator = new OperatorResumeOnCompleted<>(
                new ResumeOnCompletedPolicy<Integer>() {
                    @Override
                    public Observable<Integer> call(final Integer attempts) {
                        if (attempts > repeat) {
                            return null;
                        }
                        retries.incrementAndGet();
                        return Observable.just(attempts + max);
                    }
                });
        final CountDownLatch done = new CountDownLatch(1);
        final AtomicInteger completionCount = new AtomicInteger();
        final List<Integer> collected = new ArrayList<>();
        ints
                .lift(resumeOperator)
                .subscribe(new Subscriber<Integer>() {
                    @Override
                    public void onCompleted() {
                        completionCount.incrementAndGet();
                        done.countDown();
                    }

                    @Override
                    public void onError(Throwable e) {
                        fail("There should be no error at all");
                        done.countDown();
                    }

                    @Override
                    public void onNext(Integer integer) {
                        collected.add(integer);
                    }
                });
        long timeoutSecs = 5;
        // BUG FIX: await previously hard-coded 5 instead of using `timeoutSecs`.
        if (!done.await(timeoutSecs, TimeUnit.SECONDS)) {
            fail("Should finish within " + timeoutSecs + " seconds");
        }
        assertEquals(String.format("There should be exactly %d retries", repeat), repeat, retries.get());
        assertEquals("There should be exactly one onCompleted call", 1, completionCount.get());
        List<Integer> expected = Observable.range(1, max + repeat).toList().toBlocking().first();
        assertEquals("The collected should include the original stream plus every attempt", expected, collected);
    }
}
| 8,582 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http/impl/HttpSourceImplTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.source.http.impl;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.CONNECTION_ATTEMPTED;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.CONNECTION_ESTABLISHED;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.CONNECTION_UNSUBSCRIBED;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.SERVER_FOUND;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.SOURCE_COMPLETED;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.SUBSCRIPTION_ENDED;
import static io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType.SUBSCRIPTION_ESTABLISHED;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.http.ClientResumePolicy;
import io.mantisrx.runtime.source.http.HttpClientFactory;
import io.mantisrx.runtime.source.http.HttpServerProvider;
import io.mantisrx.runtime.source.http.LocalServerProvider;
import io.mantisrx.runtime.source.http.LocalServerProvider.Server;
import io.mantisrx.runtime.source.http.RequestProcessor;
import io.mantisrx.runtime.source.http.TestSourceObserver;
import io.mantisrx.runtime.source.http.impl.HttpSourceImpl.Builder;
import io.mantisrx.runtime.source.http.impl.HttpSourceImpl.HttpSourceEvent.EventType;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelOption;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import mantis.io.reactivex.netty.client.RxClient.ClientConfig;
import mantis.io.reactivex.netty.client.RxClient.ServerInfo;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientBuilder;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import rx.Observable;
import rx.Subscriber;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
public class HttpSourceImplTest {
private final static int SEED_PORT = 4000;
private final static int PORT_RANGE = 1000;
// Just make sure the unused port is out side the range of possible ports: [SEED_PORT, SEED_PORT + PORT_RANGE)
private final static int UNUSED_PORT = 8182;
private static LocalServerProvider localServerProvider;
private static EventType[] expectedEvents = new EventType[] {CONNECTION_ATTEMPTED, SUBSCRIPTION_ESTABLISHED, CONNECTION_UNSUBSCRIBED, CONNECTION_ESTABLISHED, SERVER_FOUND, SOURCE_COMPLETED, SUBSCRIPTION_ENDED};
private static Set<EventType> EXPECTED_EVENTS_SETS = new HashSet<>(Arrays.asList(expectedEvents));
private TestSourceObserver sourceObserver = new TestSourceObserver();
@BeforeClass
public static void init() {
    // Pick a port inside [SEED_PORT, SEED_PORT + PORT_RANGE) at random.
    final int port = SEED_PORT + new Random().nextInt(PORT_RANGE);
    // Route log4j output to the console for the embedded test servers.
    System.setProperty("log4j.rootLogger", "INFO, CONSOLE");
    System.setProperty("log4j.appender.CONSOLE", "org.apache.log4j.ConsoleAppender");
    System.setProperty("log4j.appender.CONSOLE.layout", "org.apache.log4j.PatternLayout");
    System.setProperty("log4j.appender.CONSOLE.layout.ConversionPattern", "%d{HH:mm:ss,SSS} [%t] %-5p %x %C{1} : %m%n");
    // Start three local servers that every test in this class shares.
    localServerProvider = new LocalServerProvider();
    localServerProvider.start(3, port);
}
@AfterClass
public static void shutDown() throws Exception {
    // Stop the shared local test servers started in init().
    localServerProvider.shutDown();
}
@Before
public void setup() {
    // Fresh observer per test so event counts never leak between tests.
    sourceObserver = new TestSourceObserver();
}
@Test
public void testGettingStreamFromMultipleServers() throws Exception {
    // Streams from every local server and verifies each server contributes one
    // identical copy of the small test stream, plus the expected lifecycle events.
    HttpSourceImpl<ByteBuf, ServerSentEvent, ServerContext<ServerSentEvent>> source = createStreamingSource();
    final AtomicInteger counter = new AtomicInteger();
    final CountDownLatch done = new CountDownLatch(1);
    // Message content -> number of times it was received (expected: once per server).
    final ConcurrentHashMap<String, AtomicInteger> result = new ConcurrentHashMap<>();
    Observable.merge(source.call(new Context(), new Index(1, 1)))
            .doOnNext(new Action1<ServerContext<ServerSentEvent>>() {
                @Override
                public void call(ServerContext<ServerSentEvent> pair) {
                    assertTrue(pair.getValue().contentAsString().contains("line"));
                    counter.incrementAndGet();
                    String msg = pair.getValue().contentAsString();
                    result.putIfAbsent(msg, new AtomicInteger());
                    result.get(msg).incrementAndGet();
                }
            })
            .doOnError(new Action1<Throwable>() {
                @Override
                public void call(Throwable throwable) {
                    fail("Unexpected failure: " + throwable);
                }
            })
            .doAfterTerminate(new Action0() {
                @Override
                public void call() {
                    done.countDown();
                }
            })
            .subscribe();
    long waitSeconds = 3;
    boolean timedout = !done.await(waitSeconds, TimeUnit.SECONDS);
    if (timedout) {
        fail(String.format("Waited at least %d seconds for the test to finish. Something is wrong", waitSeconds));
    }
    // Total event count: full stream content times the number of servers.
    assertEquals(String.format("%d servers => the result has %d times of a single stream", localServerProvider.serverSize(), localServerProvider.serverSize()), counter.get(), RequestProcessor.smallStreamContent.size() * localServerProvider.serverSize());
    for (String data : RequestProcessor.smallStreamContent) {
        assertEquals(String.format("%d servers => %d identical copies per message", localServerProvider.serverSize(), localServerProvider.serverSize()), localServerProvider.serverSize(), result.get(data).get());
    }
    // Per-server lifecycle checks recorded by the source observer.
    for (Server server : localServerProvider.getServers()) {
        assertEquals("There should be one completion per server", 1, sourceObserver.getCount(toServerInfo(server), EventType.SOURCE_COMPLETED));
        assertEquals("There should be one un-subscription per server", 1, sourceObserver.getCount(toServerInfo(server), EventType.CONNECTION_UNSUBSCRIBED));
        assertEquals("There should be no error", 0, sourceObserver.getCount(toServerInfo(server), EventType.SUBSCRIPTION_FAILED));
        assertEquals("There should be one connection per server", 1, sourceObserver.getCount(toServerInfo(server), EventType.CONNECTION_ESTABLISHED));
    }
    assertEquals(1, sourceObserver.getCompletionCount());
    assertEquals(0, sourceObserver.getErrorCount());
    Set<EventType> events = sourceObserver.getEvents();
    assertEquals(EXPECTED_EVENTS_SETS, events);
    for (EventType event : events) {
        assertEquals("Each event should be recorded exactly once per server", localServerProvider.serverSize(), sourceObserver.getEventCount(event));
    }
    assertEquals("completed source should clean up its retry servers", 0, source.getRetryServers().size());
}
@Test
public void testRemovedServerWillBeUnsubscribed() throws Exception {
    // Uses an infinite stream so the subscription only ends when this test
    // removes each server; verifies removal triggers cancellation/unsubscription.
    HttpSourceImpl<ByteBuf, ServerSentEvent, ServerContext<ServerSentEvent>> source = createStreamingSource("test/infStream");
    final AtomicInteger counter = new AtomicInteger();
    final CountDownLatch done = new CountDownLatch(1);
    final ConcurrentHashMap<String, AtomicInteger> result = new ConcurrentHashMap<>();
    // One latch per server: released after that server emits its first event,
    // so we never remove a server before its connection has produced data.
    final ConcurrentMap<ServerInfo, CountDownLatch> serverRemovalLatch = new ConcurrentHashMap<>();
    for (Server server : localServerProvider.getServers()) {
        serverRemovalLatch.put(toServerInfo(server), new CountDownLatch(1));
    }
    Observable.merge(source.call(new Context(), new Index(1, 1)))
            .doOnNext(new Action1<ServerContext<ServerSentEvent>>() {
                @Override
                public void call(ServerContext<ServerSentEvent> pair) {
                    try {
                        assertTrue(pair.getValue().contentAsString().contains("line"));
                        counter.incrementAndGet();
                        String msg = pair.getValue().contentAsString();
                        result.putIfAbsent(msg, new AtomicInteger());
                        result.get(msg).incrementAndGet();
                    } finally {
                        // Release in finally so a failed assertion can't wedge the removal loop.
                        serverRemovalLatch.get(pair.getServer()).countDown();
                    }
                }
            })
            .doOnError(new Action1<Throwable>() {
                @Override
                public void call(Throwable throwable) {
                    fail("Unexpected failure: " + throwable);
                }
            })
            .doAfterTerminate(new Action0() {
                @Override
                public void call() {
                    done.countDown();
                }
            })
            .subscribe();
    // Remove each server only after it has emitted at least one event.
    for (Server server : localServerProvider.getServers()) {
        serverRemovalLatch.get(toServerInfo(server)).await();
        localServerProvider.removeServer(toServerInfo(server));
    }
    long waitSeconds = 5;
    boolean timedout = !done.await(waitSeconds, TimeUnit.SECONDS);
    if (timedout) {
        fail(String.format("Waited at least %d seconds for the test to finish. Connection to at least one server is not unsubscribed.", waitSeconds));
    }
    assertTrue(String.format("Each server should emit at least one event before being canceled. Expected counter >= %d, actual counter: %d", localServerProvider.serverSize(), counter.get()), counter.get() >= localServerProvider.serverSize());
    // Per-server lifecycle checks: exactly one of each event, including cancellation.
    for (Server server : localServerProvider.getServers()) {
        ServerInfo serverInfo = toServerInfo(server);
        assertEquals("There should be one completion per server", 1, sourceObserver.getCount(serverInfo, EventType.SOURCE_COMPLETED));
        assertEquals("There should be one un-subscription per server", 1, sourceObserver.getCount(serverInfo, EventType.CONNECTION_UNSUBSCRIBED));
        assertEquals("There should be no error", 0, sourceObserver.getCount(serverInfo, EventType.SUBSCRIPTION_FAILED));
        assertEquals("There should be one connection per server", 1, sourceObserver.getCount(serverInfo, EventType.SUBSCRIPTION_ESTABLISHED));
        assertEquals(String.format("There should be exactly one cancellation event per server for %d servers. ", localServerProvider.serverSize()), 1, sourceObserver.getCount(serverInfo, EventType.SUBSCRIPTION_CANCELED));
    }
    assertEquals("The source should emit exactly one completion event", 1, sourceObserver.getCompletionCount());
    assertEquals("The server should not have any error event", 0, sourceObserver.getErrorCount());
    Set<EventType> events = sourceObserver.getEvents();
    assertEquals("Each server should have one occurrence per event type except server failure event", (EventType.values().length - 1), events.size());
    for (EventType event : events) {
        assertEquals("Each event should be recorded exactly once", localServerProvider.serverSize(), sourceObserver.getEventCount(event));
    }
}
@Test
public void testGettingSingleEntityFromMultipleServers() throws Exception {
    // Requests a single entity from every local server and verifies one
    // identical response per server plus the expected lifecycle events.
    HttpSourceImpl<ByteBuf, ByteBuf, ServerContext<ByteBuf>> source = createSingleEntitySource();
    final AtomicInteger counter = new AtomicInteger();
    final CountDownLatch done = new CountDownLatch(1);
    Observable.merge(source.call(new Context(), new Index(1, 1)))
            .map(new Func1<ServerContext<ByteBuf>, ServerContext<String>>() {
                @Override
                public ServerContext<String> call(ServerContext<ByteBuf> pair) {
                    return new ServerContext<>(pair.getServer(), pair.getValue().toString(Charset.defaultCharset()));
                }
            })
            .doOnNext(new Action1<ServerContext<String>>() {
                @Override
                public void call(ServerContext<String> pair) {
                    counter.incrementAndGet();
                    assertEquals(RequestProcessor.SINGLE_ENTITY_RESPONSE, pair.getValue());
                }
            })
            .doOnError(new Action1<Throwable>() {
                @Override
                public void call(Throwable throwable) {
                    fail("Unexpected failure: " + throwable);
                }
            })
            .doAfterTerminate(new Action0() {
                @Override
                public void call() {
                    done.countDown();
                }
            })
            .subscribe();
    long waitSeconds = 5;
    // BUG FIX: the original called done.await(3000, TimeUnit.SECONDS) (~50 minutes,
    // presumably meant milliseconds) and ignored the boolean result, so a timeout
    // would silently fall through to the assertions below.
    if (!done.await(waitSeconds, TimeUnit.SECONDS)) {
        fail(String.format("Waited at least %d seconds for the test to finish. Something is wrong", waitSeconds));
    }
    assertEquals(
            String.format("There should be exactly one response from each of the %d servers", localServerProvider.serverSize()),
            localServerProvider.serverSize(),
            counter.get());
    // Per-server lifecycle checks recorded by the source observer.
    for (Server server : localServerProvider.getServers()) {
        ServerInfo serverInfo = toServerInfo(server);
        assertEquals("There should be one completion per server", 1, sourceObserver.getCount(serverInfo, EventType.SOURCE_COMPLETED));
        assertEquals("There should be one un-subscription per server", 1, sourceObserver.getCount(serverInfo, EventType.CONNECTION_UNSUBSCRIBED));
        assertEquals("There should be no error", 0, sourceObserver.getCount(serverInfo, EventType.SUBSCRIPTION_FAILED));
        assertEquals("There should be one connection per server", 1, sourceObserver.getCount(serverInfo, EventType.SUBSCRIPTION_ESTABLISHED));
    }
    assertEquals(1, sourceObserver.getCompletionCount());
    assertEquals(0, sourceObserver.getErrorCount());
    Set<EventType> events = sourceObserver.getEvents();
    assertEquals(EXPECTED_EVENTS_SETS, events);
    for (EventType event : events) {
        assertEquals("Each event should be recorded exactly once per server", localServerProvider.serverSize(), sourceObserver.getEventCount(event));
    }
}
@Test
public void testConnectionFailureShouldBeCaptured() throws Exception {
    // Point the source at a port with no listener and a very short connect
    // timeout; the failure must stay server-scoped so neither an error nor an
    // item surfaces on the merged source observable.
    HttpClientFactory<ByteBuf, ByteBuf> factory = new HttpClientFactory<ByteBuf, ByteBuf>() {
        @Override
        public HttpClient<ByteBuf, ByteBuf> createClient(ServerInfo server) {
            HttpClientBuilder<ByteBuf, ByteBuf> clientBuilder = new HttpClientBuilder<>(server.getHost(), server.getPort());
            // A 100ms connect timeout keeps the test fast.
            return clientBuilder.channelOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, 100).build();
        }
    };
    HttpSourceImpl<ByteBuf, ByteBuf, ServerContext<ByteBuf>> source = HttpSourceImpl
            .builder(
                    factory,
                    HttpRequestFactories.createGetFactory("/"),
                    HttpSourceImpl.<ByteBuf>contextWrapper()
            )
            .withActivityObserver(sourceObserver)
            .withServerProvider(
                    new HttpServerProvider() {
                        @Override
                        public Observable<ServerInfo> getServersToAdd() {
                            // UNUSED_PORT has no listener, so every connect attempt fails.
                            return Observable.just(new ServerInfo("localhost", UNUSED_PORT));
                        }
                        @Override
                        public Observable<ServerInfo> getServersToRemove() {
                            return Observable.empty();
                        }
                    }
            ).build();
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> ex = new AtomicReference<>();
    final AtomicReference<ServerContext<ByteBuf>> items = new AtomicReference<>();
    Observable.merge(source.call(new Context(), new Index(1, 1)))
            .subscribe(new Subscriber<ServerContext<ByteBuf>>() {
                @Override
                public void onCompleted() {
                    latch.countDown();
                }
                @Override
                public void onError(Throwable e) {
                    ex.set(e);
                    latch.countDown();
                }
                @Override
                public void onNext(ServerContext<ByteBuf> pair) {
                    items.set(pair);
                }
            });
    // BUG FIX: the timeout unit was TimeUnit.HOURS while the failure message
    // talks about seconds; a hung test would have blocked for 5 hours.
    if (!latch.await(5, TimeUnit.SECONDS)) {
        fail("The test case should finish way sooner than 5 seconds. ");
    }
    Assert.assertNull("The connection error should be captured per server, therefore not propagated up to the entire source.", ex.get());
    Assert.assertNull("There should be no emitted item due to connection timeout", items.get());
}
// Verifies that a client-side read timeout (10ms) against a deliberately slow
// endpoint (simulated 10s response delay) tears down the server connection:
// connections are attempted but never established or subscribed, and no error
// surfaces on the merged source observable.
@Test
public void testTimeoutShouldUnsubscribeServer() throws Exception {
HttpClientFactory<ByteBuf, ByteBuf> factory = new HttpClientFactory<ByteBuf, ByteBuf>() {
@Override
public HttpClient<ByteBuf, ByteBuf> createClient(ServerInfo server) {
// Read timeout far shorter than the server's response delay below.
ClientConfig clientConfig = new ClientConfig.Builder()
.readTimeout(10, TimeUnit.MILLISECONDS)
.build();
return new HttpClientBuilder<ByteBuf, ByteBuf>(server.getHost(), server.getPort())
.config(clientConfig).build();
}
};
HttpSourceImpl<ByteBuf, ByteBuf, ServerContext<ByteBuf>> source = HttpSourceImpl
.builder(
factory,
// Endpoint holds the response for 10000ms, guaranteeing the 10ms
// client read timeout fires first.
HttpRequestFactories.createGetFactory("test/timeout?timeout=10000"),
HttpSourceImpl.<ByteBuf>contextWrapper()
)
.withActivityObserver(sourceObserver)
// Both policy callbacks return null, i.e. no resumption is attempted.
.resumeWith(new ClientResumePolicy<ByteBuf, ByteBuf>() {
@Override
public Observable<HttpClientResponse<ByteBuf>> onError(
ServerClientContext<ByteBuf, ByteBuf> clientContext,
int attempts, Throwable error) {
// TODO Auto-generated method stub
return null;
}
@Override
public Observable<HttpClientResponse<ByteBuf>> onCompleted(
ServerClientContext<ByteBuf, ByteBuf> clientContext,
int attempts) {
// TODO Auto-generated method stub
return null;
}
})
.withServerProvider(
new HttpServerProvider() {
@Override
public Observable<ServerInfo> getServersToAdd() {
// Advertise every locally started test server.
return Observable.from(localServerProvider.getServers()).map(new Func1<Server, ServerInfo>() {
@Override
public ServerInfo call(Server server) {
return toServerInfo(server);
}
});
}
@Override
public Observable<ServerInfo> getServersToRemove() {
return Observable.empty();
}
}
).build();
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<Throwable> ex = new AtomicReference<>();
final AtomicReference<ServerContext<ByteBuf>> items = new AtomicReference<>();
Observable.merge(source.call(new Context(), new Index(1, 1)))
.subscribe(new Subscriber<ServerContext<ByteBuf>>() {
@Override
public void onCompleted() {
latch.countDown();
}
@Override
public void onError(Throwable e) {
ex.set(e);
latch.countDown();
}
@Override
public void onNext(ServerContext<ByteBuf> pair) {
items.set(pair);
}
});
if (!latch.await(5, TimeUnit.SECONDS)) {
fail("The test case should finish way sooner than 5 seconds. ");
}
// The read timeout is handled per server: nothing propagates and nothing is emitted.
Assert.assertNull("The timeout error should be captured by the client so it does not surface to the source", ex.get());
Assert.assertNull("There should be no emitted item due to connection timeout", items.get());
for (Server server : localServerProvider.getServers()) {
ServerInfo serverInfo = toServerInfo(server);
assertEquals("There should be no source level error", 0, sourceObserver.getErrorCount());
assertEquals("There should be one connection attempt per server", 1, sourceObserver.getCount(serverInfo, EventType.CONNECTION_ATTEMPTED));
assertEquals("There should be no established connection per server due to read timeout. ", 0, sourceObserver.getCount(serverInfo, EventType.CONNECTION_ESTABLISHED));
assertEquals("There should no subscribed server because of read timeout", 0, sourceObserver.getCount(serverInfo, EventType.SUBSCRIPTION_ESTABLISHED));
}
}
@Test
public void testStreamingErrorFromServerWillNotCompleteSource() throws Exception {
    // Each server emits a finite stream (eventCount items) and then fails; the
    // per-server error must not complete or fail the merged source, which is
    // why the latch is expected NOT to fire within the wait window.
    int eventCount = 20;
    HttpSourceImpl<ByteBuf, ServerSentEvent, ServerContext<ServerSentEvent>> source = createStreamingSource("test/finiteStream?count=" + eventCount);
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> ex = new AtomicReference<>();
    final ConcurrentHashMap<ServerInfo, Set<String>> items = new ConcurrentHashMap<>();
    Observable.merge(source.call(new Context(), new Index(1, 1)))
            .subscribe(new Subscriber<ServerContext<ServerSentEvent>>() {
                @Override
                public void onCompleted() {
                    latch.countDown();
                }
                @Override
                public void onError(Throwable e) {
                    ex.set(e);
                    latch.countDown();
                }
                @Override
                public void onNext(ServerContext<ServerSentEvent> pair) {
                    // Collect the distinct payloads observed per server.
                    items.putIfAbsent(pair.getServer(), new HashSet<String>());
                    items.get(pair.getServer()).add(pair.getValue().contentAsString());
                }
            });
    if (latch.await(5, TimeUnit.SECONDS)) {
        fail("The test case should not finish at all");
    }
    // BUG FIX: the assertion messages below were copy-pasted from the read-timeout
    // test and described the wrong scenario; they now match what is asserted.
    Assert.assertNull("The streaming error should be captured by the client so it does not surface to the source", ex.get());
    for (Server server : localServerProvider.getServers()) {
        ServerInfo serverInfo = toServerInfo(server);
        assertEquals("There should be no source level error", 0, sourceObserver.getErrorCount());
        assertEquals("There should be one connection attempt per server", 1, sourceObserver.getCount(serverInfo, EventType.CONNECTION_ATTEMPTED));
        assertEquals("There should be one established connection per server ", 1, sourceObserver.getCount(serverInfo, EventType.CONNECTION_ESTABLISHED));
        assertEquals("There should be one established subscription per server", 1, sourceObserver.getCount(serverInfo, EventType.SUBSCRIPTION_ESTABLISHED));
        assertEquals(String.format("There should be %d item before simulated error per server", eventCount), eventCount, items.get(serverInfo).size());
    }
}
@Test
public void testResumeOnCompletion() throws Exception {
    // Every completed connection is resumed until maxRepeat attempts per server,
    // so each server should deliver the single entity exactly maxRepeat times.
    final int maxRepeat = 10;
    // One count per resume-policy invocation per server; awaited inside
    // doAfterTerminate so the final assertions only run after all resumptions.
    final CountDownLatch countGate = new CountDownLatch(maxRepeat * localServerProvider.serverSize());
    HttpSourceImpl<ByteBuf, ByteBuf, ServerContext<ByteBuf>> source = createSingleEntitySource(new ClientResumePolicy<ByteBuf, ByteBuf>() {
        @Override
        public Observable<HttpClientResponse<ByteBuf>> onError(ServerClientContext<ByteBuf, ByteBuf> clientContext, int attempts, Throwable error) {
            // Errors are not resumed in this test.
            return null;
        }
        @Override
        public Observable<HttpClientResponse<ByteBuf>> onCompleted(ServerClientContext<ByteBuf, ByteBuf> clientContext, int attempts) {
            countGate.countDown();
            if (attempts < maxRepeat) {
                return clientContext.newResponse();
            } else {
                return null;
            }
        }
    });
    final AtomicInteger counter = new AtomicInteger();
    final CountDownLatch done = new CountDownLatch(1);
    Observable.merge(source.call(new Context(), new Index(1, 1)))
            .map(new Func1<ServerContext<ByteBuf>, ServerContext<String>>() {
                @Override
                public ServerContext<String> call(ServerContext<ByteBuf> pair) {
                    try {
                        // retain() keeps the buffer alive while the content is decoded.
                        return new ServerContext<>(pair.getServer(), pair.getValue().retain().toString(Charset.defaultCharset()));
                    } catch (Throwable e) {
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                }
            })
            .doOnNext(new Action1<ServerContext<String>>() {
                @Override
                public void call(ServerContext<String> pair) {
                    counter.incrementAndGet();
                    assertEquals(RequestProcessor.SINGLE_ENTITY_RESPONSE, pair.getValue());
                }
            })
            .doOnError(new Action1<Throwable>() {
                @Override
                public void call(Throwable throwable) {
                    fail("Unexpected failure: " + throwable);
                }
            })
            .doAfterTerminate(new Action0() {
                @Override
                public void call() {
                    try {
                        countGate.await();
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                    done.countDown();
                }
            })
            .subscribe();
    long wait = 5;
    // BUG FIX: previously awaited a hard-coded 5 instead of the wait variable
    // used in the failure message, so the two could silently diverge.
    if (!done.await(wait, TimeUnit.SECONDS)) {
        fail(String.format("All streaming should be done within %d seconds. ", wait));
    }
    assertEquals(
            String.format("There should be exactly %d response from each of the %d servers",
                    maxRepeat,
                    localServerProvider.serverSize()),
            maxRepeat * localServerProvider.serverSize(),
            counter.get());
    for (Server server : localServerProvider.getServers()) {
        ServerInfo serverInfo = toServerInfo(server);
        assertEquals(
                String.format("There should be %d completion per server as resumption function should called %d times",
                        maxRepeat,
                        maxRepeat - 1),
                maxRepeat,
                sourceObserver.getCount(serverInfo, EventType.SOURCE_COMPLETED));
        assertEquals("There should be no error", 0, sourceObserver.getCount(serverInfo, EventType.SUBSCRIPTION_FAILED));
        assertEquals(
                String.format("Connection per server should have been established %d times", maxRepeat),
                // BUG FIX: was a hard-coded 10; use maxRepeat so the assertion
                // tracks the constant its message describes.
                maxRepeat,
                sourceObserver.getCount(serverInfo, EventType.SUBSCRIPTION_ESTABLISHED));
    }
    assertEquals(
            String.format("There are %d repeats, but there should be only one final completion", maxRepeat - 1),
            1,
            sourceObserver.getCompletionCount());
    assertEquals(0, sourceObserver.getErrorCount());
    Set<EventType> events = sourceObserver.getEvents();
    assertEquals(EXPECTED_EVENTS_SETS, events);
    assertEquals("Each connection should be unsubscribed once by the subscriber", localServerProvider.serverSize(), sourceObserver.getEventCount(CONNECTION_UNSUBSCRIBED));
    for (EventType eventType : new EventType[] {EventType.SOURCE_COMPLETED, EventType.SUBSCRIPTION_ESTABLISHED, EventType.CONNECTION_ATTEMPTED, EventType.CONNECTION_ESTABLISHED}) {
        assertEquals(
                String.format("Event %s should be recorded exactly %d times per server", eventType, maxRepeat),
                maxRepeat * localServerProvider.serverSize(),
                sourceObserver.getEventCount(eventType));
    }
    assertEquals(
            "Event SERVER_FOUND should be recorded exactly once per server",
            localServerProvider.serverSize(),
            sourceObserver.getEventCount(EventType.SERVER_FOUND));
}
// Verifies that completed connections are resumed, EXCEPT for servers that get
// removed from the provider mid-stream: once a server has delivered more than
// cutOff responses it is removed, so its resumption count must stay below
// maxRepeat while each server still yields at least cutOff + 1 responses.
@Test
public void testResumeOnCompletionButNotOnRemovedServers() throws Exception {
final int maxRepeat = 10;
final int cutOff = 5;
// Only (cutOff - 1) resumptions per server are guaranteed before removal,
// hence the smaller gate compared to testResumeOnCompletion.
final CountDownLatch countGate = new CountDownLatch(localServerProvider.serverSize() * (cutOff - 1));
final ConcurrentHashMap<ServerInfo, AtomicInteger> resumptionCounts = new ConcurrentHashMap<>();
final ConcurrentHashMap<ServerInfo, AtomicInteger> counterPerServer = new ConcurrentHashMap<>();
for (Server server : localServerProvider.getServers()) {
ServerInfo serverInfo = toServerInfo(server);
resumptionCounts.put(serverInfo, new AtomicInteger(0));
counterPerServer.put(serverInfo, new AtomicInteger(0));
}
HttpSourceImpl<ByteBuf, ByteBuf, ServerContext<ByteBuf>> source = createSingleEntitySource(new ClientResumePolicy<ByteBuf, ByteBuf>() {
@Override
public Observable<HttpClientResponse<ByteBuf>> onError(ServerClientContext<ByteBuf, ByteBuf> clientContext, int attempts, Throwable error) {
// Errors are not resumed in this test.
return null;
}
@Override
public Observable<HttpClientResponse<ByteBuf>> onCompleted(ServerClientContext<ByteBuf, ByteBuf> clientContext, int attempts) {
// Track how often each server is resumed; removal should cap this below maxRepeat.
resumptionCounts.get(clientContext.getServer()).incrementAndGet();
countGate.countDown();
if (attempts < maxRepeat) {
return clientContext.newResponse();
} else {
return null;
}
}
});
final AtomicInteger counter = new AtomicInteger();
final CountDownLatch done = new CountDownLatch(1);
Observable.merge(source.call(new Context(), new Index(1, 1)))
.map(new Func1<ServerContext<ByteBuf>, ServerContext<String>>() {
@Override
public ServerContext<String> call(ServerContext<ByteBuf> pair) {
return new ServerContext<>(pair.getServer(), pair.getValue().toString(Charset.defaultCharset()));
}
})
.doOnNext(new Action1<ServerContext<String>>() {
@Override
public void call(ServerContext<String> context) {
counter.incrementAndGet();
assertEquals(RequestProcessor.SINGLE_ENTITY_RESPONSE, context.getValue());
ServerInfo server = context.getServer();
counterPerServer.get(server).incrementAndGet();
// Once a server has served more than cutOff responses, remove it so
// its connection is no longer resumed.
if (counterPerServer.get(server).get() > cutOff) {
localServerProvider.removeServer(server);
}
}
})
.doOnError(new Action1<Throwable>() {
@Override
public void call(Throwable throwable) {
fail("Unexpected failure: " + throwable);
}
})
.doAfterTerminate(new Action0() {
@Override
public void call() {
try {
countGate.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
done.countDown();
}
})
.subscribe();
long wait = 5;
if (!done.await(wait, TimeUnit.SECONDS)) {
fail(String.format("All streaming should be done within %d seconds. ", wait));
}
assertEquals("All server should be resumed", localServerProvider.serverSize(), resumptionCounts.size());
for (ServerInfo server : resumptionCounts.keySet()) {
// Removal must have stopped resumption before the maxRepeat ceiling.
assertTrue(
String.format("The server %s:%s should be resumed fewer than %d times", server.getHost(), server.getPort(), maxRepeat),
maxRepeat > resumptionCounts.get(server).get());
}
assertTrue(
String.format("There should be at least %d response from each of the %d servers",
cutOff + 1,
localServerProvider.serverSize()),
(cutOff + 1) * localServerProvider.serverSize() <= counter.get());
for (Server server : localServerProvider.getServers()) {
ServerInfo serverInfo = toServerInfo(server);
assertEquals("There should be no error", 0, sourceObserver.getCount(serverInfo, EventType.SUBSCRIPTION_FAILED));
}
assertEquals(
String.format("There are %d repeats, but there should be only one final completion", maxRepeat - 1),
1,
sourceObserver.getCompletionCount());
assertEquals("There should be no error", 0, sourceObserver.getErrorCount());
Set<EventType> events = sourceObserver.getEvents();
// Unlike the plain resume test, SUBSCRIPTION_CANCELED is expected here
// because servers are actively removed from the provider.
Set<EventType> expectedEvents = new HashSet<>();
expectedEvents.addAll(Arrays.asList(
EventType.SUBSCRIPTION_CANCELED,
EventType.SERVER_FOUND,
EventType.CONNECTION_ATTEMPTED,
EventType.CONNECTION_ESTABLISHED,
EventType.SUBSCRIPTION_ESTABLISHED,
EventType.SOURCE_COMPLETED,
EventType.SUBSCRIPTION_ENDED,
EventType.CONNECTION_UNSUBSCRIBED));
assertEquals(expectedEvents, events);
assertEquals("Each connection should be unsubscribed once by the subscriber", localServerProvider.serverSize(), sourceObserver.getEventCount(CONNECTION_UNSUBSCRIBED));
for (EventType eventType : new EventType[] {EventType.SOURCE_COMPLETED, EventType.SUBSCRIPTION_ESTABLISHED, EventType.CONNECTION_ATTEMPTED, EventType.CONNECTION_ESTABLISHED}) {
assertTrue(
String.format("Event %s should be recorded at least %d times per server", eventType, cutOff),
(cutOff + 1) * localServerProvider.serverSize() <= sourceObserver.getEventCount(eventType));
}
for (EventType eventType : new EventType[] {EventType.SERVER_FOUND, EventType.SUBSCRIPTION_CANCELED}) {
assertEquals(
String.format("Event %s should be recorded exactly once per server", eventType),
localServerProvider.serverSize(),
sourceObserver.getEventCount(eventType));
}
}
// Verifies that read-timeout errors ARE resumed by the policy: each server is
// retried maxRepeat times (maxRepeat + 1 connection attempts in total), yet no
// connection is ever established and nothing is emitted or propagated.
@Test
public void testResumeOnTimeout() throws Exception {
HttpClientFactory<ByteBuf, ByteBuf> factory = new HttpClientFactory<ByteBuf, ByteBuf>() {
@Override
public HttpClient<ByteBuf, ByteBuf> createClient(ServerInfo server) {
// 10ms read timeout against a 10s-delay endpoint guarantees a timeout.
ClientConfig clientConfig = new ClientConfig.Builder()
.readTimeout(10, TimeUnit.MILLISECONDS)
.build();
return new HttpClientBuilder<ByteBuf, ByteBuf>(server.getHost(), server.getPort())
.config(clientConfig).build();
}
};
// Per-server tally of how many times the resume policy re-issued a request.
final ConcurrentMap<ServerInfo, AtomicInteger> resumptions = new ConcurrentHashMap<>();
for (ServerInfo server : localServerProvider.getServerInfos()) {
resumptions.put(server, new AtomicInteger());
}
final int maxRepeat = 5;
HttpSourceImpl<ByteBuf, ByteBuf, ServerContext<ByteBuf>> source = HttpSourceImpl
.builder(
factory,
HttpRequestFactories.createGetFactory("test/timeout?timeout=10000"),
HttpSourceImpl.<ByteBuf>contextWrapper()
)
.withActivityObserver(sourceObserver)
.resumeWith(new ClientResumePolicy<ByteBuf, ByteBuf>() {
@Override
public Observable<HttpClientResponse<ByteBuf>> onError(ServerClientContext<ByteBuf, ByteBuf> clientContext, int attempts, Throwable error) {
// Resume on error up to maxRepeat attempts; null stops retrying.
if (attempts <= maxRepeat) {
resumptions.get(clientContext.getServer()).incrementAndGet();
return clientContext.newResponse();
}
return null;
}
@Override
public Observable<HttpClientResponse<ByteBuf>> onCompleted(ServerClientContext<ByteBuf, ByteBuf> clientContext, int attempts) {
// Completions are not resumed in this test.
return null;
}
})
.withServerProvider(localServerProvider).build();
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<Throwable> ex = new AtomicReference<>();
final AtomicReference<ServerContext<ByteBuf>> items = new AtomicReference<>();
Observable.merge(source.call(new Context(), new Index(1, 1)))
.subscribe(new Subscriber<ServerContext<ByteBuf>>() {
@Override
public void onCompleted() {
latch.countDown();
}
@Override
public void onError(Throwable e) {
ex.set(e);
latch.countDown();
}
@Override
public void onNext(ServerContext<ByteBuf> pair) {
items.set(pair);
}
});
if (!latch.await(10, TimeUnit.SECONDS)) {
fail("The test case should finish way sooner than 10 seconds. ");
}
Assert.assertNull("The timeout error should be captured by the client so it does not surface to the source", ex.get());
Assert.assertNull("There should be no emitted item due to connection timeout", items.get());
for (ServerInfo serverInfo : localServerProvider.getServerInfos()) {
assertEquals("There should be no source level error", 0, sourceObserver.getErrorCount());
// Initial attempt + maxRepeat resumptions.
assertEquals("There should be one connection attempt per server per retry", maxRepeat + 1, sourceObserver.getCount(serverInfo, EventType.CONNECTION_ATTEMPTED));
assertEquals("There should be no established connection per server due to read timeout. ", 0, sourceObserver.getCount(serverInfo, EventType.CONNECTION_ESTABLISHED));
assertEquals("There should no subscribed server because of read timeout", 0, sourceObserver.getCount(serverInfo, EventType.SUBSCRIPTION_ESTABLISHED));
assertEquals("Each server will repeat exactly " + maxRepeat + " times", maxRepeat, resumptions.get(serverInfo).get());
}
}
// Adapts a running test Server to the ServerInfo the source API expects.
private ServerInfo toServerInfo(Server server) {
    final String host = server.getHost();
    final int port = server.getPort();
    return new ServerInfo(host, port);
}
// Builds the single-entity source with a policy that never resumes,
// neither on error nor on completion.
private HttpSourceImpl<ByteBuf, ByteBuf, ServerContext<ByteBuf>> createSingleEntitySource() {
    ClientResumePolicy<ByteBuf, ByteBuf> noResumePolicy = new ClientResumePolicy<ByteBuf, ByteBuf>() {
        @Override
        public Observable<HttpClientResponse<ByteBuf>> onError(ServerClientContext<ByteBuf, ByteBuf> clientContext, int attempts, Throwable error) {
            return null;
        }
        @Override
        public Observable<HttpClientResponse<ByteBuf>> onCompleted(ServerClientContext<ByteBuf, ByteBuf> clientContext, int attempts) {
            return null;
        }
    };
    return createSingleEntitySource(noResumePolicy);
}
// Builds a source issuing a GET against the single-entity endpoint of every
// local test server, recording activity on sourceObserver and resuming
// according to the supplied policy.
private HttpSourceImpl<ByteBuf, ByteBuf, ServerContext<ByteBuf>> createSingleEntitySource(ClientResumePolicy<ByteBuf, ByteBuf> resumePolicy) {
    return HttpSourceImpl
            .builder(
                    HttpClientFactories.<ByteBuf>defaultFactory(),
                    HttpRequestFactories.createGetFactory("test/singleEntity"),
                    HttpSourceImpl.<ByteBuf>contextWrapper())
            .withActivityObserver(sourceObserver)
            .withServerProvider(localServerProvider)
            .resumeWith(resumePolicy)
            .build();
}
// Convenience overload: streaming source against the default SSE endpoint.
private HttpSourceImpl<ByteBuf, ServerSentEvent, ServerContext<ServerSentEvent>> createStreamingSource() {
    return createStreamingSource("test/stream");
}
// Builds an SSE streaming source against the given URI, backed by all local
// test servers. The resume policy consists of TODO stubs returning null,
// i.e. neither errors nor completions are ever resumed.
private HttpSourceImpl<ByteBuf, ServerSentEvent, ServerContext<ServerSentEvent>> createStreamingSource(String uri) {
Builder<ByteBuf, ServerSentEvent, ServerContext<ServerSentEvent>> builder =
HttpSourceImpl.builder(
HttpClientFactories.sseClientFactory(),
HttpRequestFactories.createGetFactory(uri),
HttpSourceImpl.<ServerSentEvent>contextWrapper()
);
return builder
.withActivityObserver(sourceObserver)
.withServerProvider(localServerProvider)
.resumeWith(new ClientResumePolicy<ByteBuf, ServerSentEvent>() {
@Override
public Observable<HttpClientResponse<ServerSentEvent>> onError(
ServerClientContext<ByteBuf, ServerSentEvent> clientContext,
int attempts, Throwable error) {
// TODO Auto-generated method stub
return null;
}
@Override
public Observable<HttpClientResponse<ServerSentEvent>> onCompleted(
ServerClientContext<ByteBuf, ServerSentEvent> clientContext,
int attempts) {
// TODO Auto-generated method stub
return null;
}
})
.build();
}
}
| 8,583 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/source/http/impl/OperatorResumeOnErrorTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.source.http.impl;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Test;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Observable.Operator;
import rx.Subscriber;
import rx.functions.Action1;
public class OperatorResumeOnErrorTest {
    /**
     * Verifies that {@code OperatorResumeOnError} re-subscribes the source each time
     * it errors, until the resume policy gives up (returns null), after which the
     * original error is propagated exactly once to the subscriber.
     */
    @Test
    public void testCanResumeOnError() throws Exception {
        final int threshold = 6;
        final Observable<Integer> ints = createIntegerStreamThatFailsOnThresholdValue(threshold);
        final int repeat = 5;
        final AtomicInteger retries = new AtomicInteger();
        Operator<Integer, Integer> resumeOperator = resumeWithFixNumberOfRetries(ints, repeat, retries);
        final CountDownLatch done = new CountDownLatch(1);
        final AtomicInteger errorCount = new AtomicInteger();
        final AtomicReference<Throwable> error = new AtomicReference<>();
        ints
                .lift(resumeOperator)
                .subscribe(new Subscriber<Integer>() {
                    @Override
                    public void onCompleted() {
                        done.countDown();
                    }
                    @Override
                    public void onError(Throwable e) {
                        errorCount.incrementAndGet();
                        error.set(e);
                        done.countDown();
                    }
                    @Override
                    public void onNext(Integer integer) {
                        // Values at or above the threshold are replaced by the error.
                        assertTrue("The integer should not be over the threshold. The integer value: " + integer, integer < threshold);
                    }
                });
        long timeoutSecs = 5;
        // BUG FIX: the await previously hard-coded 5 instead of timeoutSecs, so
        // changing the variable would silently desynchronize the failure message.
        if (!done.await(timeoutSecs, TimeUnit.SECONDS)) {
            fail("Should finish within " + timeoutSecs + " seconds");
        }
        assertEquals(String.format("There should be exactly %d retries", repeat), repeat, retries.get());
        assertEquals("There should be exactly one onError", 1, errorCount.get());
        assertEquals("The error should be the user created one. ", TestException.class, error.get().getClass());
    }

    /**
     * Builds a resume policy that re-subscribes to {@code ints} at most {@code repeat}
     * times, counting each retry in {@code retries}; returns null once exhausted.
     */
    private OperatorResumeOnError<Integer> resumeWithFixNumberOfRetries(final Observable<Integer> ints, final int repeat, final AtomicInteger retries) {
        return new OperatorResumeOnError<>(
                new ResumeOnErrorPolicy<Integer>() {
                    @Override
                    public Observable<Integer> call(final Integer attempts, final Throwable error) {
                        if (attempts > repeat) {
                            return null;
                        }
                        retries.incrementAndGet();
                        return ints;
                    }
                });
    }

    /**
     * Emits the integers below {@code threshold} and signals a {@link TestException}
     * when the threshold value is reached.
     */
    private Observable<Integer> createIntegerStreamThatFailsOnThresholdValue(final int threshold) {
        return Observable.create(new OnSubscribe<Integer>() {
            @Override
            public void call(final Subscriber<? super Integer> subscriber) {
                Observable
                        .just(1, 2, 3, 4, 5, 6)
                        .doOnNext(new Action1<Integer>() {
                            @Override
                            public void call(Integer value) {
                                if (value == threshold) {
                                    subscriber.onError(new TestException("Failed on value " + value));
                                } else {
                                    subscriber.onNext(value);
                                }
                            }
                        }).subscribe();
            }
        });
    }

    /** Marker exception used to verify the original error surfaces after retries are exhausted. */
    public static class TestException extends RuntimeException {
        public TestException(String message) {
            super(message);
        }
    }
}
| 8,584 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/scheduler/MantisRxSingleThreadSchedulerTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.scheduler;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import io.mantisrx.common.metrics.rx.MonitorOperator;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Subscription;
import rx.internal.util.RxThreadFactory;
// Verifies that MantisRxSingleThreadScheduler keeps processing events observed
// on it even after some inner observables have completed, and that the
// schedulers' workers remain subscribed (i.e. the schedulers stay usable).
public class MantisRxSingleThreadSchedulerTest {
private static final Logger logger = LoggerFactory.getLogger(MantisRxSingleThreadSchedulerTest.class);
// Builds numInnerObs inner observables: the first ticks every 100ms and never
// completes, all others tick every 10ms and complete after
// valuesPerCompletingInnerObs items — so completions interleave with live traffic.
Observable<Observable<String>> createSourceObs(int numInnerObs, int valuesPerCompletingInnerObs) {
return Observable.range(1, numInnerObs)
.map(x -> {
if (x != 1) {
return Observable.interval(10, TimeUnit.MILLISECONDS)
.map(l -> String.format("I%d: %d", x, l.intValue()))
.take(valuesPerCompletingInnerObs);
} else {
return Observable.interval(100, TimeUnit.MILLISECONDS)
.map(l -> String.format("I%d: %d", x, l.intValue()));
}
});
}
@Test
public void testObserveOnAfterOnCompleteMantisRxScheduler() throws InterruptedException {
int nThreads = 6;
final MantisRxSingleThreadScheduler[] mantisRxSingleThreadSchedulers = new MantisRxSingleThreadScheduler[nThreads];
RxThreadFactory rxThreadFactory = new RxThreadFactory("MantisRxScheduler-");
logger.info("creating {} Mantis threads", nThreads);
for (int i = 0; i < nThreads; i++) {
mantisRxSingleThreadSchedulers[i] = new MantisRxSingleThreadScheduler(rxThreadFactory);
}
int numInnerObs = 10;
int valuesPerCompletingInnerObs = 2;
// Wait for all finite-stream values plus 10 more from the never-ending
// inner observable, proving the schedulers keep working after completions.
int valuesToWaitFor = numInnerObs*valuesPerCompletingInnerObs + 10;
Observable<Observable<String>> oo = createSourceObs(numInnerObs, valuesPerCompletingInnerObs);
final CountDownLatch latch = new CountDownLatch(valuesToWaitFor);
Observable<String> map = oo
.lift(new MonitorOperator<>("worker_stage_outer"))
.map(observable -> observable
// Spread events across the schedulers by hashing on nanoTime.
.groupBy(e -> System.nanoTime() % nThreads)
.flatMap(go -> go
.observeOn(mantisRxSingleThreadSchedulers[go.getKey().intValue()])
.doOnNext(x -> {
logger.info("processing {} on thread {}", x, Thread.currentThread().getName());
latch.countDown();
})
)
).flatMap(x -> x);
Subscription subscription = map.subscribe();
assertTrue(latch.await(5, TimeUnit.SECONDS));
// Each scheduler should still hand out live (subscribed) workers.
for (int i = 0; i < nThreads; i++) {
assertFalse(mantisRxSingleThreadSchedulers[i].createWorker().isUnsubscribed());
}
subscription.unsubscribe();
}
}
| 8,585 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/parameter/SourceJobParametersTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.parameter;
import static org.junit.Assert.assertEquals;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.util.Collections;
import java.util.List;
import org.junit.Test;
public class SourceJobParametersTest {
@Test
public void shouldParseTargetInfoJson() {
    // Recognized fields must round-trip into TargetInfo, while unknown
    // properties (e.g. "unknownProperty") are silently ignored by the parser.
    String json = "{\"targets\":[" +
            "{" +
            " \"sourceJobName\":\"TestSource\"," +
            " \"criterion\":\"select * from stream\"," +
            " \"unknownProperty\":\"value\"" +
            "}," +
            "{" +
            " \"sourceJobName\":\"TestSource2\"," +
            " \"criterion\":\"select * from stream2\"," +
            " \"clientId\":\"TestClientId2\"," +
            " \"sample\":10," +
            " \"isBroadcastMode\":true," +
            " \"enableMetaMessages\":true," +
            " \"mantis.EnableCompressedBinary\":true," +
            " \"enableMetaMessages\":true," +
            " \"mantis.CompressionDelimiter\":\"Delimiter2\"" +
            "}" +
            "]}";
    SourceJobParameters.TargetInfo minimalTarget = new SourceJobParameters.TargetInfoBuilder()
            .withSourceJobName("TestSource")
            .withQuery("select * from stream")
            .build();
    SourceJobParameters.TargetInfo fullTarget = new SourceJobParameters.TargetInfoBuilder()
            .withSourceJobName("TestSource2")
            .withQuery("select * from stream2")
            .withClientId("TestClientId2")
            .withSamplePerSec(10)
            .withBroadCastMode()
            .withMetaMessagesEnabled()
            .withBinaryCompressionEnabled()
            .withDelimiter("Delimiter2")
            .build();
    assertEquals(ImmutableList.of(minimalTarget, fullTarget), SourceJobParameters.parseTargetInfo(json));
}
@Test
public void shouldParseEmptyJson() {
    // An empty JSON object yields no targets.
    List<SourceJobParameters.TargetInfo> parsed = SourceJobParameters.parseTargetInfo("{}");
    assertEquals(0, parsed.size());
    // Malformed input is tolerated and likewise yields no targets.
    parsed = SourceJobParameters.parseTargetInfo("invalid_json");
    assertEquals(0, parsed.size());
}
@Test
public void shouldInsertDefaultClientIdIfNoneIsPresent() {
    // A target without an explicit client id should inherit the supplied default.
    SourceJobParameters.TargetInfo target = new SourceJobParameters.TargetInfoBuilder()
            .withSourceJobName("TestSource")
            .withQuery("select * from stream")
            .build();
    List<SourceJobParameters.TargetInfo> result = SourceJobParameters
            .enforceClientIdConsistency(Collections.singletonList(target), "defaultId");
    SourceJobParameters.TargetInfo firstResult = result.get(0);
    // BUG FIX: JUnit's assertEquals takes (expected, actual); the arguments were
    // swapped, which produces a misleading message on failure.
    assertEquals("defaultId", firstResult.clientId);
}
@Test
public void shouldNotChangeSingleSourceWithClientId() {
    // A target that already carries a client id keeps it untouched.
    SourceJobParameters.TargetInfo target = new SourceJobParameters.TargetInfoBuilder()
            .withSourceJobName("TestSource")
            .withQuery("select * from stream")
            .withClientId("myClient")
            .build();
    List<SourceJobParameters.TargetInfo> result = SourceJobParameters
            .enforceClientIdConsistency(Collections.singletonList(target), "defaultId");
    SourceJobParameters.TargetInfo firstResult = result.get(0);
    // BUG FIX: JUnit's assertEquals takes (expected, actual); the arguments were
    // swapped, which produces a misleading message on failure.
    assertEquals("myClient", firstResult.clientId);
}
@Test
public void shouldChangeSecondTargetId() {
SourceJobParameters.TargetInfo target = new SourceJobParameters.TargetInfoBuilder().withSourceJobName("TestSource").withQuery("select * from stream").withClientId("myClient").build();
SourceJobParameters.TargetInfo target2 = new SourceJobParameters.TargetInfoBuilder().withSourceJobName("TestSource").withQuery("select * from stream").withClientId("myClient").build();
List<SourceJobParameters.TargetInfo> result = SourceJobParameters
.enforceClientIdConsistency(Lists.newArrayList(target, target2), "defaultId");
assertEquals("myClient", result.get(0).clientId);
assertEquals("myClient_1", result.get(1).clientId);
}
@Test
public void shouldChangeSecondTargetIdWithDefaults() {
SourceJobParameters.TargetInfo target = new SourceJobParameters.TargetInfoBuilder().withSourceJobName("TestSource").withQuery("select * from stream").build();
SourceJobParameters.TargetInfo target2 = new SourceJobParameters.TargetInfoBuilder().withSourceJobName("TestSource").withQuery("select * from stream").build();
List<SourceJobParameters.TargetInfo> result = SourceJobParameters
.enforceClientIdConsistency(Lists.newArrayList(target, target2), "defaultId");
assertEquals("defaultId", result.get(0).clientId);
assertEquals("defaultId_1", result.get(1).clientId);
}
@Test
public void shouldNotImpactUnrelatedSource() {
SourceJobParameters.TargetInfo target = new SourceJobParameters.TargetInfoBuilder().withSourceJobName("TestSource").withQuery("select * from stream").withClientId("myClient").build();
SourceJobParameters.TargetInfo target2 = new SourceJobParameters.TargetInfoBuilder().withSourceJobName("TestSource").withQuery("select * from streamz").withClientId("myClient").build();
SourceJobParameters.TargetInfo target3 = new SourceJobParameters.TargetInfoBuilder().withSourceJobName("UnrelatedSource").withQuery("select * from streamzz").withClientId("myUnrelatedClient").build();
List<SourceJobParameters.TargetInfo> result = SourceJobParameters
.enforceClientIdConsistency(Lists.newArrayList(target, target2, target3), "defaultId");
assertEquals("myClient", result.get(0).clientId);
assertEquals("myClient_1", result.get(1).clientId);
assertEquals("myUnrelatedClient", result.get(2).clientId);
}
}
| 8,586 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/parameter/ParametersTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.parameter;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.junit.Before;
import org.junit.Test;
/**
 * Tests for {@link Parameters}: lookups of required, optional, undefined and
 * null-valued parameters, with and without a caller-supplied default.
 */
public class ParametersTest {

    private Parameters parameters;

    @Before
    public void setup() {
        final Set<String> requiredParameters = new HashSet<>();
        final Set<String> parameterDefinitions = new HashSet<>();
        final Map<String, Object> state = new HashMap<>();
        // required parameter r1 -> v1
        requiredParameters.add("r1");
        parameterDefinitions.add("r1");
        state.put("r1", "v1");
        // required parameter r2 -> no value supplied
        requiredParameters.add("r2");
        parameterDefinitions.add("r2");
        // optional parameter o1 -> v1
        parameterDefinitions.add("o1");
        state.put("o1", "v1");
        // optional parameter o2 -> explicit null value
        parameterDefinitions.add("o2");
        state.put("o2", null);
        parameters = new Parameters(state, requiredParameters, parameterDefinitions);
    }

    @Test
    public void testGet() {
        // Get required parameter r1 should succeed
        assertEquals("v1", parameters.get("r1"));
        // Get required parameter r2 should fail because it does not have a value
        assertThrows(ParameterException.class, () -> parameters.get("r2"));
        // Get optional parameter o1 should succeed
        assertEquals("v1", parameters.get("o1"));
        // Get optional parameter o2 should succeed with null value
        assertNull(parameters.get("o2"));
        // Get undefined parameter u1 should fail
        assertThrows(ParameterException.class, () -> parameters.get("u1"));
    }

    @Test
    public void testGetWithDefaultValue() {
        // Get required parameter r1 should return value in state
        assertEquals("v1", parameters.get("r1", "defaultValue"));
        // Get required parameter r2 should return the provided default value
        assertEquals("defaultValue", parameters.get("r2", "defaultValue"));
        // Get optional parameter o1 should return value in state
        assertEquals("v1", parameters.get("o1", "defaultValue"));
        // Get optional parameter o2 should return the provided default value instead of null
        assertEquals("defaultValue", parameters.get("o2", "defaultValue"));
        // Get undefined parameter u1 should return the provided default value
        // (was "u2" before; "u1" matches the comment and the undefined key used in testGet)
        assertEquals("defaultValue", parameters.get("u1", "defaultValue"));
    }

    /**
     * Minimal stand-in for JUnit 4.13's {@code Assert.assertThrows}.
     * Fails with a descriptive message both when nothing is thrown and when the
     * thrown type differs from the expected one (the original failed with the
     * same generic message in both cases, hiding the actual exception).
     */
    static <T extends Throwable> void assertThrows(Class<T> expectedType, Runnable runnable) {
        try {
            runnable.run();
        } catch (Throwable t) {
            if (expectedType.isInstance(t)) {
                return;
            }
            fail("Expected " + expectedType.getName() + " but caught " + t.getClass().getName());
        }
        fail("Expected " + expectedType.getName() + " but no exception was thrown");
    }
}
| 8,587 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/parameter/SinkParameterTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.parameter;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
/**
 * Tests URI query-string generation by {@link SinkParameters}: plain parameters,
 * null values, URL encoding of special characters, and the empty case.
 */
public class SinkParameterTest {

    @Test
    public void testGenerateURI() throws Exception {
        final SinkParameters params = new SinkParameters.Builder()
                .withParameter("p1", "v1")
                .withParameter("p2", "v2")
                .withParameter("p3", "v3")
                .build();
        assertEquals("?p1=v1&p2=v2&p3=v3", params.toString());
    }

    @Test
    public void testGenerateURI2() throws Exception {
        // A null value renders as an empty string after the '='.
        final SinkParameters params = new SinkParameters.Builder()
                .withParameter("p1", "v1")
                .withParameter("p2", null)
                .withParameter("p3", "v3")
                .build();
        assertEquals("?p1=v1&p2=&p3=v3", params.toString());
    }

    @Test
    public void testGenerateURI3() throws Exception {
        // Spaces, quotes, brackets and slashes must be percent/plus encoded.
        final SinkParameters params = new SinkParameters.Builder()
                .withParameter("p1", "select esn, country where e[\"response.header.x-netflix.api-script-endpoint\"]==\"/account/geo\"")
                .build();
        assertEquals("?p1=select+esn%2C+country+where+e%5B%22response.header.x-netflix.api-script-endpoint%22%5D%3D%3D%22%2Faccount%2Fgeo%22", params.toString());
    }

    @Test
    public void testGenerateURI4() {
        // No parameters at all: no '?' prefix, just an empty string.
        assertEquals("", new SinkParameters.Builder().build().toString());
    }
}
| 8,588 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/executor/StageExecutorsTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.executor;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.common.network.Endpoint;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.StageConfig;
import io.reactivex.mantis.remote.observable.ConnectToObservable;
import io.reactivex.mantis.remote.observable.EndpointChange;
import io.reactivex.mantis.remote.observable.EndpointInjector;
import io.reactivex.mantis.remote.observable.PortSelectorWithinRange;
import io.reactivex.mantis.remote.observable.RemoteObservable;
import io.reactivex.mantis.remote.observable.RemoteRxServer;
import io.reactivex.mantis.remote.observable.RxMetrics;
import java.util.Iterator;
import java.util.List;
import junit.framework.Assert;
import org.junit.Test;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Subscriber;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.subjects.BehaviorSubject;
/**
 * Integration-style tests for {@link StageExecutors}: each test runs one stage of
 * {@link TestJob} against real remote-observable endpoints on locally acquired ports.
 */
public class StageExecutorsTest {

    /**
     * Runs the source stage and reads its published output over a remote
     * observable connection. TestJob's first stage maps each value t to t * t.
     */
    @SuppressWarnings( {"rawtypes", "unchecked"})
    @Test
    public void testExecuteSource() {
        TestJob provider = new TestJob();
        Job<Integer> job = provider.getJobInstance();
        List<StageConfig<?, ?>> stages = job.getStages();
        // Acquire a free local port in [8000, 9000) to publish the stage output on.
        PortSelectorWithinRange portSelector = new PortSelectorWithinRange(8000, 9000);
        int serverPort = portSelector.acquirePort();
        WorkerPublisher producer = new WorkerPublisherRemoteObservable(serverPort, null,
                Observable.just(1), null);
        // execute source
        BehaviorSubject<Integer> workersInStageOneObservable = BehaviorSubject.create(1);
        StageExecutors.executeSource(0, job.getSource(), stages.get(0), producer,
                new Context(), workersInStageOneObservable);
        // Connect as a downstream consumer and pull the published values.
        Iterator<Integer> iter = RemoteObservable.connect(new ConnectToObservable.Builder<Integer>()
                .host("localhost")
                .slotId("0")
                .port(serverPort)
                .decoder(Codecs.integer())
                .build())
                .getObservable()
                .toBlocking()
                .getIterator();
        // verify numbers are squared (0*0, 1*1, 2*2, 3*3)
        Assert.assertEquals(0, iter.next().intValue());
        Assert.assertEquals(1, iter.next().intValue());
        Assert.assertEquals(4, iter.next().intValue());
        Assert.assertEquals(9, iter.next().intValue());
    }

    /**
     * Runs the intermediate (second) stage between a locally served upstream
     * observable and a downstream connection, verifying the even-filter output.
     */
    @SuppressWarnings( {"rawtypes", "unchecked"})
    @Test
    public void testExecuteIntermediatStage() throws InterruptedException {
        TestJob provider = new TestJob();
        Job<Integer> job = provider.getJobInstance();
        List<StageConfig<?, ?>> stages = job.getStages();
        PortSelectorWithinRange portSelector = new PortSelectorWithinRange(8000, 9000);
        final int publishPort = portSelector.acquirePort();
        final int consumerPort = portSelector.acquirePort();
        // mimic previous stage with a server
        RemoteRxServer server1 = RemoteObservable.serve(consumerPort, Observable.range(0, 10), Codecs.integer());
        server1.start();
        // Static endpoint injector pointing the consumer at the server above.
        EndpointInjector staticEndpoints = new EndpointInjector() {
            @Override
            public Observable<EndpointChange> deltas() {
                return Observable.create(new OnSubscribe<EndpointChange>() {
                    @Override
                    public void call(Subscriber<? super EndpointChange> subscriber) {
                        subscriber.onNext(new EndpointChange(EndpointChange.Type.add, new Endpoint("localhost", consumerPort, "1")));
                        subscriber.onCompleted();
                    }
                });
            }
        };
        WorkerConsumer consumer = new WorkerConsumerRemoteObservable(null, staticEndpoints);
        WorkerPublisher producer = new WorkerPublisherRemoteObservable(publishPort, null,
                Observable.just(1), null);
        // execute intermediate, flatten results
        StageExecutors.executeIntermediate(consumer, stages.get(1), producer,
                new Context());
        Iterator<Integer> iter = RemoteObservable.connect(new ConnectToObservable.Builder<Integer>()
                .host("localhost")
                .slotId("0")
                .port(publishPort)
                .decoder(Codecs.integer())
                .build())
                .getObservable()
                .toBlocking()
                .getIterator();
        // verify numbers are even (stage 2 of TestJob filters to t % 2 == 0)
        Assert.assertEquals(0, iter.next().intValue());
        Assert.assertEquals(2, iter.next().intValue());
        Assert.assertEquals(4, iter.next().intValue());
    }

    /**
     * Runs the sink stage fed from a locally served upstream observable and
     * asserts on what TestJob's sink recorded.
     */
    @SuppressWarnings( {"rawtypes", "unchecked"})
    @Test
    public void testExecuteSink() throws InterruptedException {
        TestJob provider = new TestJob();
        Job<Integer> job = provider.getJobInstance();
        List<StageConfig<?, ?>> stages = job.getStages();
        PortSelectorWithinRange portSelector = new PortSelectorWithinRange(8000, 9000);
        final int consumerPort = portSelector.acquirePort();
        // mimic previous stage with a server
        RemoteRxServer server1 = RemoteObservable.serve(consumerPort, Observable.range(0, 10), Codecs.integer());
        server1.start();
        // Static endpoint injector pointing the consumer at the server above.
        EndpointInjector staticEndpoints = new EndpointInjector() {
            @Override
            public Observable<EndpointChange> deltas() {
                return Observable.create(new OnSubscribe<EndpointChange>() {
                    @Override
                    public void call(Subscriber<? super EndpointChange> subscriber) {
                        subscriber.onNext(new EndpointChange(EndpointChange.Type.add, new Endpoint("localhost", consumerPort, "1")));
                        subscriber.onCompleted();
                    }
                });
            }
        };
        // No-op lifecycle callbacks; this test only cares about the data path.
        Action0 noOpAction = new Action0() {
            @Override
            public void call() {}
        };
        Action1<Throwable> noOpError = new Action1<Throwable>() {
            @Override
            public void call(Throwable t) {}
        };
        WorkerConsumer consumer = new WorkerConsumerRemoteObservable(null, staticEndpoints);
        // execute sink
        StageExecutors.executeSink(consumer, stages.get(1), job.getSink(), new TestPortSelector(), new RxMetrics(),
                new Context(),
                noOpAction, null, null, noOpAction, noOpError);
        Iterator<Integer> iter = provider.getItemsWritten().iterator();
        // verify numbers are even (stage 2 of TestJob filters to t % 2 == 0)
        Assert.assertEquals(0, iter.next().intValue());
        Assert.assertEquals(2, iter.next().intValue());
        Assert.assertEquals(4, iter.next().intValue());
    }
}
| 8,589 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/executor/Pair.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.executor;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
/**
 * Simple immutable key/value pair used as a test payload, deserializable by
 * Jackson via the annotated creator constructor.
 */
// ignoreUnknown moved from the creator to the class, where it conventionally
// applies to the whole type during deserialization.
@JsonIgnoreProperties(ignoreUnknown = true)
public class Pair {

    private final String key;
    private final Integer value;

    /**
     * Creates a pair from the JSON properties {@code key} and {@code value}.
     */
    @JsonCreator
    public Pair(@JsonProperty("key") String key, @JsonProperty("value") Integer value) {
        this.key = key;
        this.value = value;
    }

    @Override
    public String toString() {
        return "Pair [key=" + key + ", value=" + value + "]";
    }

    public String getKey() {
        return key;
    }

    public Integer getValue() {
        return value;
    }

    /**
     * Value equality on both key and value, so pairs compare correctly in
     * assertions and hash-based collections (the class already had toString
     * but no equals/hashCode).
     */
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof Pair)) {
            return false;
        }
        Pair other = (Pair) o;
        return Objects.equals(key, other.key) && Objects.equals(value, other.value);
    }

    @Override
    public int hashCode() {
        return Objects.hash(key, value);
    }
}
| 8,590 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/executor/TestJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.executor;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.sink.Sink;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import rx.Observable;
import rx.functions.Action1;
import rx.functions.Func1;
/**
 * Two-stage test job: a source emitting 0..9, a stage that squares each value,
 * a stage that keeps only even values, and a sink that records everything it
 * receives in {@link #getItemsWritten()} so tests can assert on the output.
 */
public class TestJob extends MantisJobProvider<Integer> {

    // Values observed by the sink, in arrival order; read by tests after execution.
    private final List<Integer> itemsWritten = new LinkedList<Integer>();

    public static void main(String[] args) {
        LocalJobExecutorNetworked.execute(new TestJob().getJobInstance());
    }

    /** Returns the values the sink received, in arrival order. */
    public List<Integer> getItemsWritten() {
        return itemsWritten;
    }

    @Override
    public Job<Integer> getJobInstance() {
        return MantisJob
                .<Integer>
                        source(new Source<Integer>() {
                    @Override
                    public Observable<Observable<Integer>> call(Context t1,
                                                                Index t2) {
                        // Single inner observable emitting 0..9.
                        return Observable.just(Observable.range(0, 10));
                    }

                    @Override
                    public void close() throws IOException {
                    }
                })
                // squares each number (t -> t * t)
                .stage(new ScalarComputation<Integer, Integer>() {
                    @Override
                    public Observable<Integer> call(Context context, Observable<Integer> t1) {
                        return t1.map(new Func1<Integer, Integer>() {
                            @Override
                            public Integer call(Integer t1) {
                                return t1 * t1;
                            }
                        });
                    }
                }, new ScalarToScalar.Config<Integer, Integer>()
                        .codec(Codecs.integer()))
                // return only even numbers
                .stage(new ScalarComputation<Integer, Integer>() {
                    @Override
                    public Observable<Integer> call(Context context, Observable<Integer> t1) {
                        return t1.filter(new Func1<Integer, Boolean>() {
                            @Override
                            public Boolean call(Integer t1) {
                                return ((t1 % 2) == 0);
                            }
                        });
                    }
                }, new ScalarToScalar.Config<Integer, Integer>()
                        .codec(Codecs.integer()))
                .sink(new Sink<Integer>() {
                    @Override
                    public void call(Context context, PortRequest p, Observable<Integer> o) {
                        // Blocks until the stream completes, capturing each item.
                        o
                                .toBlocking().forEach(new Action1<Integer>() {
                            @Override
                            public void call(Integer t1) {
                                System.out.println(t1);
                                itemsWritten.add(t1);
                            }
                        });
                    }

                    @Override
                    public void close() throws IOException {
                    }
                })
                .create();
    }
}
| 8,591 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/executor/StageExecutorsSingleStageTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.executor;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.StageConfig;
import io.reactivex.mantis.remote.observable.RxMetrics;
import java.util.Iterator;
import junit.framework.Assert;
import org.junit.Test;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.subjects.BehaviorSubject;
/**
 * Runs a complete single-stage job through
 * {@link StageExecutors#executeSingleStageJob} and asserts on the values the
 * job's sink recorded.
 */
public class StageExecutorsSingleStageTest {

    @SuppressWarnings("rawtypes")
    @Test
    public void testSingleStageJob() {
        // No-op lifecycle callbacks; this test only cares about the data path.
        final Action0 doNothing = new Action0() {
            @Override
            public void call() {}
        };
        final Action1<Throwable> swallowError = new Action1<Throwable>() {
            @Override
            public void call(Throwable t) {}
        };
        TestJobSingleStage jobProvider = new TestJobSingleStage();
        Job mantisJob = jobProvider.getJobInstance();
        PortSelector ports = new PortSelectorInRange(8000, 9000);
        StageConfig<?, ?> onlyStage = (StageConfig<?, ?>) mantisJob.getStages().get(0);
        BehaviorSubject<Integer> numWorkersSubject = BehaviorSubject.create(1);
        StageExecutors.executeSingleStageJob(mantisJob.getSource(), onlyStage,
                mantisJob.getSink(), ports, new RxMetrics(), new Context(),
                doNothing, 0, numWorkersSubject, null, null, doNothing, swallowError);
        // The sink should have seen the squares of 0..3 first.
        Iterator<Integer> written = jobProvider.getItemsWritten().iterator();
        Assert.assertEquals(0, written.next().intValue());
        Assert.assertEquals(1, written.next().intValue());
        Assert.assertEquals(4, written.next().intValue());
        Assert.assertEquals(9, written.next().intValue());
    }
}
| 8,592 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/executor/TestPortSelector.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.executor;
/**
 * {@link PortSelector} stub for tests that never need a real port; always
 * returns -1 as a "no port" sentinel.
 */
public class TestPortSelector implements PortSelector {

    // NOTE(review): callers presumably never bind this value to a socket —
    // confirm before reusing this stub in a test that opens connections.
    @Override
    public int acquirePort() {
        return -1;
    }
}
| 8,593 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/executor/LocalJobExecutorNetworkedTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.executor;
import io.mantisrx.runtime.MachineDefinitions;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import java.util.Iterator;
import junit.framework.Assert;
import org.junit.Test;
/**
 * End-to-end tests executing {@link TestJobSingleStage}, {@link TestJob} and
 * {@link TestJobThreeStage} locally via {@link LocalJobExecutorNetworked}
 * under various stage/worker topologies.
 */
public class LocalJobExecutorNetworkedTest {

    @Test
    public void testSingleStage() {
        TestJobSingleStage provider = new TestJobSingleStage();
        LocalJobExecutorNetworked.execute(provider.getJobInstance());
        // Manual iterator counting replaced with size(), consistent with the
        // topology tests below.
        Assert.assertEquals(10, provider.getItemsWritten().size());
    }

    @Test
    public void testSingleStageMultiWorker() {
        TestJobSingleStage provider = new TestJobSingleStage();
        SchedulingInfo scheduling = new SchedulingInfo.Builder()
                .numberOfStages(1)
                .multiWorkerStage(2, MachineDefinitions.micro())
                .build();
        LocalJobExecutorNetworked.execute(provider.getJobInstance(), scheduling);
        // With two workers the expected item count doubles to 20.
        Assert.assertEquals(20, provider.getItemsWritten().size());
    }

    @Test
    public void testTwoStage() {
        TestJob provider = new TestJob();
        LocalJobExecutorNetworked.execute(provider.getJobInstance());
        // TestJob squares 0..9 then keeps even results: 0, 4, 16, ...
        Iterator<Integer> iter = provider.getItemsWritten().iterator();
        Assert.assertEquals(0, iter.next().intValue());
        Assert.assertEquals(4, iter.next().intValue());
        Assert.assertEquals(16, iter.next().intValue());
    }

    @Test
    public void testThreeStage() {
        TestJobThreeStage provider = new TestJobThreeStage(); // 1 instance per stage
        SchedulingInfo scheduling = new SchedulingInfo.Builder()
                .numberOfStages(3)
                .singleWorkerStage(MachineDefinitions.micro())
                .singleWorkerStage(MachineDefinitions.micro())
                .singleWorkerStage(MachineDefinitions.micro())
                .build();
        LocalJobExecutorNetworked.execute(provider.getJobInstance(), scheduling);
        Iterator<Integer> iter = provider.getItemsWritten().iterator();
        Assert.assertEquals(0, iter.next().intValue());
        Assert.assertEquals(16, iter.next().intValue());
        Assert.assertEquals(256, iter.next().intValue());
    }

    @Test
    public void testThreeStageTopology1_2_1() {
        TestJobThreeStage provider = new TestJobThreeStage(); // 1,2,1 topology
        SchedulingInfo scheduling = new SchedulingInfo.Builder()
                .numberOfStages(3)
                .singleWorkerStage(MachineDefinitions.micro())
                .multiWorkerStage(2, MachineDefinitions.micro())
                .singleWorkerStage(MachineDefinitions.micro())
                .build();
        LocalJobExecutorNetworked.execute(provider.getJobInstance(), scheduling);
        Iterator<Integer> iter = provider.getItemsWritten().iterator();
        Assert.assertEquals(0, iter.next().intValue());
        Assert.assertEquals(16, iter.next().intValue());
        Assert.assertEquals(256, iter.next().intValue());
    }

    @Test
    public void testThreeStageTopology2_1_1() {
        TestJobThreeStage provider = new TestJobThreeStage(); // 2,1,1 topology
        SchedulingInfo scheduling = new SchedulingInfo.Builder()
                .numberOfStages(3)
                .multiWorkerStage(2, MachineDefinitions.micro())
                .singleWorkerStage(MachineDefinitions.micro())
                .singleWorkerStage(MachineDefinitions.micro())
                .build();
        LocalJobExecutorNetworked.execute(provider.getJobInstance(), scheduling);
        // NOTE(review): the original comment claimed two source workers should
        // double the input, yet the assertion expects 10 — confirm the intended
        // count before changing this value.
        Assert.assertEquals(10, provider.getItemsWritten().size());
    }

    @Test
    public void testThreeStageTopology2_2_1() {
        // Note, fails due to onComplete timing issue
        TestJobThreeStage provider = new TestJobThreeStage(); // 2,2,1 topology
        SchedulingInfo scheduling = new SchedulingInfo.Builder()
                .numberOfStages(3)
                .multiWorkerStage(2, MachineDefinitions.micro())
                .multiWorkerStage(2, MachineDefinitions.micro())
                .singleWorkerStage(MachineDefinitions.micro())
                .build();
        LocalJobExecutorNetworked.execute(provider.getJobInstance(), scheduling);
        // NOTE(review): as above, the "double the expected input" comment in the
        // original contradicts the asserted count of 10 — confirm intent.
        Assert.assertEquals(10, provider.getItemsWritten().size());
    }
}
| 8,594 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/executor/StageExecutorsGroupByTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.executor;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.common.network.Endpoint;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.StageConfig;
import io.reactivex.mantis.remote.observable.ConnectToGroupedObservable;
import io.reactivex.mantis.remote.observable.EndpointChange;
import io.reactivex.mantis.remote.observable.EndpointInjector;
import io.reactivex.mantis.remote.observable.PortSelectorWithinRange;
import io.reactivex.mantis.remote.observable.RemoteObservable;
import io.reactivex.mantis.remote.observable.RemoteRxServer;
import io.reactivex.mantis.remote.observable.ServeGroupedObservable;
import java.util.Iterator;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Subscriber;
import rx.functions.Func1;
import rx.observables.GroupedObservable;
import rx.subjects.BehaviorSubject;
/**
 * Tests {@link StageExecutors} with a grouped (keyed) job: values are grouped
 * into "even"/"odd" streams and served/consumed over remote grouped observables
 * on locally acquired ports.
 */
public class StageExecutorsGroupByTest {

    /**
     * Runs the source stage of {@link TestGroupByJob} and verifies the published
     * output arrives as two groups keyed "even" and "odd".
     */
    @SuppressWarnings( {"rawtypes", "unchecked"})
    @Test
    public void testExecuteSource() {
        TestGroupByJob provider = new TestGroupByJob();
        Job<Pair> job = provider.getJobInstance();
        List<StageConfig<?, ?>> stages = job.getStages();
        // Acquire a free local port to publish the grouped output on.
        PortSelectorWithinRange portSelector = new PortSelectorWithinRange(8000, 9000);
        int serverPort = portSelector.acquirePort();
        WorkerPublisher producer = new WorkerPublisherRemoteObservable(serverPort, null,
                Observable.just(1), null);
        // execute source
        BehaviorSubject<Integer> workersInStageOneObservable = BehaviorSubject.create(1);
        StageExecutors.executeSource(0, job.getSource(), stages.get(0), producer,
                new Context(), workersInStageOneObservable);
        // Connect as a grouped consumer: String keys, Integer values.
        ConnectToGroupedObservable<String, Integer> config = new ConnectToGroupedObservable
                .Builder<String, Integer>()
                .slotId("0")
                .host("localhost")
                .port(serverPort)
                .keyDecoder(Codecs.string())
                .valueDecoder(Codecs.integer())
                .build();
        Iterator<GroupedObservable<String, Integer>> iter =
                RemoteObservable.connect(config).getObservable()
                        .toBlocking()
                        .getIterator();
        Assert.assertTrue(iter.hasNext());
        // verify numbers are grouped by even/odd
        GroupedObservable<String, Integer> even = iter.next(); // even is first due to zero
        Assert.assertEquals("even", even.getKey());
        Iterator<Integer> evenIter = even.toBlocking().getIterator();
        Assert.assertEquals(0, evenIter.next().intValue());
        Assert.assertEquals(2, evenIter.next().intValue());
        Assert.assertEquals(4, evenIter.next().intValue());
        Assert.assertEquals(6, evenIter.next().intValue());
        GroupedObservable<String, Integer> odd = iter.next();
        Assert.assertEquals("odd", odd.getKey());
        Iterator<Integer> oddIter = odd.toBlocking().getIterator();
        Assert.assertEquals(1, oddIter.next().intValue());
        Assert.assertEquals(3, oddIter.next().intValue());
        Assert.assertEquals(5, oddIter.next().intValue());
        Assert.assertEquals(7, oddIter.next().intValue());
        Assert.assertEquals(false, iter.hasNext()); // should only have two groups
    }

    /**
     * Runs the intermediate stage fed from a locally served grouped observable
     * and verifies each group's values are squared downstream.
     */
    @SuppressWarnings( {"rawtypes", "unchecked"})
    @Test
    public void testExecuteIntermediatStage() throws InterruptedException {
        // Note, this test has a timing issue, client starts
        // sending data before server is ready, resulting
        // in a RST (connection reset by peer)
        TestGroupByJob provider = new TestGroupByJob();
        Job<Pair> job = provider.getJobInstance();
        List<StageConfig<?, ?>> stages = job.getStages();
        PortSelectorWithinRange portSelector = new PortSelectorWithinRange(8000, 9000);
        final int publishPort = portSelector.acquirePort();
        final int consumerPort = portSelector.acquirePort();
        // Upstream data: 0..9 grouped into "even"/"odd".
        Observable<Observable<GroupedObservable<String, Integer>>> go = Observable.just(Observable.range(0, 10)
                .groupBy(new Func1<Integer, String>() {
                    @Override
                    public String call(Integer t1) {
                        if ((t1 % 2) == 0) {
                            return "even";
                        } else {
                            return "odd";
                        }
                    }
                }));
        // mimic previous stage with a server
        ServeGroupedObservable<String, Integer> config = new ServeGroupedObservable.Builder<String, Integer>()
                .keyEncoder(Codecs.string())
                .valueEncoder(Codecs.integer())
                .observable(go)
                .build();
        RemoteRxServer server = new RemoteRxServer.Builder()
                .addObservable(config)
                .port(consumerPort)
                .build();
        server.start();
        // Static endpoint injector exposing the server above under two slot ids.
        EndpointInjector staticEndpoints = new EndpointInjector() {
            @Override
            public Observable<EndpointChange> deltas() {
                return Observable.create(new OnSubscribe<EndpointChange>() {
                    @Override
                    public void call(Subscriber<? super EndpointChange> subscriber) {
                        subscriber.onNext(new EndpointChange(EndpointChange.Type.add, new Endpoint("localhost", consumerPort, "0")));
                        subscriber.onNext(new EndpointChange(EndpointChange.Type.add, new Endpoint("localhost", consumerPort, "1")));
                        subscriber.onCompleted();
                    }
                });
            }
        };
        WorkerConsumer consumer = new WorkerConsumerRemoteObservable(null, staticEndpoints);
        WorkerPublisher producer = new WorkerPublisherRemoteObservable(publishPort, null,
                Observable.just(1), null);
        // execute intermediate stage
        StageExecutors.executeIntermediate(consumer, stages.get(1), producer,
                new Context());
        // Connect downstream and verify the squared, still-grouped output.
        ConnectToGroupedObservable<String, Integer> connectConfig = new ConnectToGroupedObservable
                .Builder<String, Integer>()
                .host("localhost")
                .port(publishPort)
                .keyDecoder(Codecs.string())
                .valueDecoder(Codecs.integer())
                .build();
        Iterator<GroupedObservable<String, Integer>> iter =
                RemoteObservable.connect(connectConfig).getObservable()
                        .toBlocking()
                        .getIterator();
        // verify numbers are grouped by even/odd
        GroupedObservable<String, Integer> even = iter.next(); // even is first due to zero
        Assert.assertEquals("even", even.getKey());
        Iterator<Integer> evenIter = even.toBlocking().getIterator();
        Assert.assertEquals(0, evenIter.next().intValue());
        Assert.assertEquals(4, evenIter.next().intValue());
        Assert.assertEquals(16, evenIter.next().intValue());
        Assert.assertEquals(36, evenIter.next().intValue());
        GroupedObservable<String, Integer> odd = iter.next();
        Assert.assertEquals("odd", odd.getKey());
        Iterator<Integer> oddIter = odd.toBlocking().getIterator();
        Assert.assertEquals(1, oddIter.next().intValue());
        Assert.assertEquals(9, oddIter.next().intValue());
        Assert.assertEquals(25, oddIter.next().intValue());
        Assert.assertEquals(49, oddIter.next().intValue());
        Assert.assertEquals(false, iter.hasNext()); // should only have two groups
    }
}
| 8,595 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/executor/TestJobSingleStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.executor;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.sink.Sink;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import rx.Observable;
import rx.functions.Action1;
import rx.functions.Func1;
/**
 * Minimal single-stage Mantis test job: emits 0..9, squares each value,
 * and records the results so a driving test can assert on them.
 */
public class TestJobSingleStage extends MantisJobProvider<Integer> {

    // Values observed by the sink, in arrival order; read back via getItemsWritten().
    private List<Integer> itemsWritten = new LinkedList<Integer>();

    public static void main(String[] args) throws InterruptedException {
        LocalJobExecutorNetworked.execute(new TestJobSingleStage().getJobInstance());
    }

    /** @return the values the sink has recorded so far. */
    public List<Integer> getItemsWritten() {
        return itemsWritten;
    }

    @Override
    public Job<Integer> getJobInstance() {
        // Source: a single inner observable emitting the integers 0..9.
        Source<Integer> rangeSource = new Source<Integer>() {
            @Override
            public Observable<Observable<Integer>> call(Context context, Index index) {
                return Observable.just(Observable.range(0, 10));
            }

            @Override
            public void close() throws IOException {
            }
        };

        // Single scalar stage: squares each incoming value.
        ScalarComputation<Integer, Integer> square = new ScalarComputation<Integer, Integer>() {
            @Override
            public Observable<Integer> call(Context context, Observable<Integer> values) {
                return values.map(v -> v * v);
            }
        };

        // Sink: prints each result and records it for test verification.
        Sink<Integer> collectingSink = new Sink<Integer>() {
            @Override
            public void call(Context context, PortRequest port, Observable<Integer> results) {
                results.toBlocking().forEach(item -> {
                    System.out.println(item);
                    itemsWritten.add(item);
                });
            }

            @Override
            public void close() {
            }
        };

        return MantisJob
                .<Integer>source(rangeSource)
                .stage(square, new ScalarToScalar.Config<Integer, Integer>().codec(Codecs.integer()))
                .sink(collectingSink)
                .create();
    }
}
| 8,596 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/executor/TestJobParameterized.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.executor;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.sink.Sink;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import rx.Observable;
import rx.functions.Action1;
import rx.functions.Func1;
/**
 * Test job demonstrating Mantis job parameters: the source range, the scale
 * factor, and the sink's message prefix are all supplied as parameters.
 * Results are recorded so a driving test can assert on them.
 */
public class TestJobParameterized extends MantisJobProvider<Integer> {

    // Values observed by the sink, in arrival order; read back via getItemsWritten().
    private List<Integer> itemsWritten = new LinkedList<Integer>();

    public static void main(String[] args) throws InterruptedException {
        Job<Integer> job = new TestJobParameterized().getJobInstance();
        LocalJobExecutorNetworked.execute(job,
                new Parameter("start-range", "1"),
                new Parameter("end-range", "100"),
                new Parameter("scale-by", "2"));
    }

    /** @return the values the sink has recorded so far. */
    public List<Integer> getItemsWritten() {
        return itemsWritten;
    }

    @Override
    public Job<Integer> getJobInstance() {
        // Source: emits a range driven by the "start-range"/"end-range" parameters.
        Source<Integer> parameterizedRange = new Source<Integer>() {
            @Override
            public Observable<Observable<Integer>> call(Context context, Index index) {
                Integer start = (Integer) context.getParameters().get("start-range");
                Integer end = (Integer) context.getParameters().get("end-range");
                // NOTE(review): Observable.range(start, count) treats the second
                // argument as a COUNT, so "end-range" effectively acts as the
                // number of items, not an upper bound — confirm against callers.
                return Observable.just(Observable.range(start, end));
            }

            @Override
            public void close() throws IOException {
            }
        };

        // Stage: multiplies every value by the "scale-by" parameter.
        ScalarComputation<Integer, Integer> scaler = new ScalarComputation<Integer, Integer>() {
            @Override
            public Observable<Integer> call(Context context, Observable<Integer> values) {
                final Integer scale = (Integer) context.getParameters().get("scale-by");
                return values.map(v -> v * scale);
            }
        };

        // Sink: prefixes each value with the "sink-message" parameter, prints it,
        // and records the raw value for assertions.
        Sink<Integer> messageSink = new Sink<Integer>() {
            @Override
            public void init(Context context) {
                System.out.println("sink init called");
            }

            @Override
            public void call(Context context, PortRequest port, Observable<Integer> results) {
                final String message = (String) context.getParameters().get("sink-message");
                results.toBlocking().forEach(item -> {
                    System.out.println(message + item);
                    itemsWritten.add(item);
                });
            }

            @Override
            public void close() {
            }
        };

        return MantisJob
                .<Integer>source(parameterizedRange)
                .stage(scaler, new ScalarToScalar.Config<Integer, Integer>().codec(Codecs.integer()))
                .sink(messageSink)
                .metadata(new Metadata.Builder()
                        .name("test job")
                        .description("showcase parameters")
                        .build())
                .parameterDefinition(new IntParameter()
                        .name("start-range")
                        .validator(Validators.range(0, 100))
                        .build())
                .parameterDefinition(new IntParameter()
                        .name("end-range")
                        .validator(Validators.range(100, 1000))
                        .build())
                .parameterDefinition(new IntParameter()
                        .name("scale-by")
                        .description("scale each value from the range")
                        .validator(Validators.range(1, 10))
                        .build())
                .parameterDefinition(new StringParameter()
                        .name("sink-message")
                        .defaultValue("hello: ")
                        .description("concat with results")
                        .validator(Validators.notNullOrEmpty())
                        .build())
                .create();
    }
}
| 8,597 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/executor/TestJobThreeStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.executor;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.sink.Sink;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import rx.Observable;
import rx.functions.Action1;
import rx.functions.Func1;
/**
 * Three-stage Mantis test job: emits 0..9, squares twice (net x -> x^4),
 * then keeps only even results. The sink records every value it sees so a
 * driving test can assert on them.
 */
public class TestJobThreeStage extends MantisJobProvider<Integer> {

    // Values observed by the sink, in arrival order; read back via getItemsWritten().
    private List<Integer> itemsWritten = new LinkedList<Integer>();

    public static void main(String[] args) {
        LocalJobExecutorNetworked.execute(new TestJobThreeStage().getJobInstance());
    }

    /** @return the values the sink has recorded so far. */
    public List<Integer> getItemsWritten() {
        return itemsWritten;
    }

    @Override
    public Job<Integer> getJobInstance() {
        // Source: a single inner observable emitting the integers 0..9.
        Source<Integer> rangeSource = new Source<Integer>() {
            @Override
            public Observable<Observable<Integer>> call(Context context, Index index) {
                return Observable.just(Observable.range(0, 10));
            }

            @Override
            public void close() throws IOException {
            }
        };

        // Stage 1: squares each incoming value.
        ScalarComputation<Integer, Integer> firstSquare = new ScalarComputation<Integer, Integer>() {
            @Override
            public Observable<Integer> call(Context context, Observable<Integer> values) {
                return values.map(v -> v * v);
            }
        };

        // Stage 2: squares again (distinct instance, same computation).
        ScalarComputation<Integer, Integer> secondSquare = new ScalarComputation<Integer, Integer>() {
            @Override
            public Observable<Integer> call(Context context, Observable<Integer> values) {
                return values.map(v -> v * v);
            }
        };

        // Stage 3: passes through only even values.
        ScalarComputation<Integer, Integer> evensOnly = new ScalarComputation<Integer, Integer>() {
            @Override
            public Observable<Integer> call(Context context, Observable<Integer> values) {
                return values.filter(v -> (v % 2) == 0);
            }
        };

        // Sink: prints each result and records it for test verification.
        Sink<Integer> collectingSink = new Sink<Integer>() {
            @Override
            public void call(Context context, PortRequest port, Observable<Integer> results) {
                results.toBlocking().forEach(item -> {
                    System.out.println(item);
                    itemsWritten.add(item);
                });
            }

            @Override
            public void close() {
            }
        };

        return MantisJob
                .<Integer>source(rangeSource)
                .stage(firstSquare, new ScalarToScalar.Config<Integer, Integer>().codec(Codecs.integer()))
                .stage(secondSquare, new ScalarToScalar.Config<Integer, Integer>().codec(Codecs.integer()))
                .stage(evensOnly, new ScalarToScalar.Config<Integer, Integer>().codec(Codecs.integer()))
                .sink(collectingSink)
                .create();
    }
}
| 8,598 |
0 | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-runtime/src/test/java/io/mantisrx/runtime/executor/TestGroupByJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.executor;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.KeyToKey;
import io.mantisrx.runtime.KeyToScalar;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.ScalarToKey;
import io.mantisrx.runtime.codec.JacksonCodecs;
import io.mantisrx.runtime.computation.KeyComputation;
import io.mantisrx.runtime.computation.ToKeyComputation;
import io.mantisrx.runtime.computation.ToScalarComputation;
import io.mantisrx.runtime.sink.Sink;
import io.mantisrx.runtime.source.Sources;
import io.reactivx.mantis.operators.GroupedObservableUtils;
import java.util.LinkedList;
import java.util.List;
import rx.Observable;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.observables.GroupedObservable;
import rx.schedulers.Schedulers;
/**
 * Grouped-stage Mantis test job: emits 0..99, groups values by parity
 * ("even"/"odd"), squares each value within its group, then collapses the
 * groups back to (key, value) {@link Pair}s. The sink records every pair so a
 * driving test can assert on them.
 */
public class TestGroupByJob extends MantisJobProvider<Pair> {

    // Pairs observed by the sink, in arrival order; read back via getItemsWritten().
    private List<Pair> itemsWritten = new LinkedList<>();

    public static void main(String[] args) {
        LocalJobExecutorNetworked.execute(new TestGroupByJob().getJobInstance());
    }

    /** @return the pairs the sink has recorded so far. */
    public List<Pair> getItemsWritten() {
        return itemsWritten;
    }

    @Override
    public Job<Pair> getJobInstance() {
        // Stage 1: split the scalar stream into "even"/"odd" groups.
        ToKeyComputation<Integer, String, Integer> groupByParity =
                new ToKeyComputation<Integer, String, Integer>() {
                    @Override
                    public Observable<GroupedObservable<String, Integer>> call(
                            Context context,
                            Observable<Integer> values) {
                        return values.groupBy(v -> ((v % 2) == 0) ? "even" : "odd");
                    }
                };

        // Stage 2: keyed stage that squares every value, keeping its group key.
        KeyComputation<String, Integer, String, Integer> squareWithinGroup =
                new KeyComputation<String, Integer, String, Integer>() {
                    @Override
                    public Observable<GroupedObservable<String, Integer>> call(
                            Context context,
                            final GroupedObservable<String, Integer> group) {
                        // Re-wrap the transformed stream under the original key.
                        return Observable.just(GroupedObservableUtils.createGroupedObservable(
                                group.getKey(),
                                group.map(v -> v * v)));
                    }
                };

        // Stage 3: collapse each group back to scalars by pairing key with value.
        ToScalarComputation<String, Integer, Pair> toPairs =
                new ToScalarComputation<String, Integer, Pair>() {
                    @Override
                    public Observable<Pair> call(Context context,
                                                 final GroupedObservable<String, Integer> group) {
                        System.out.println("group computation running on thread: " + Thread.currentThread().getName() + " group: " + group.getKey());
                        return group.map(v -> new Pair(group.getKey(), v));
                    }
                };

        // Sink: prints each pair and records it for test verification.
        Sink<Pair> collectingSink = new Sink<Pair>() {
            @Override
            public void call(Context context, PortRequest port, Observable<Pair> pairs) {
                pairs.toBlocking().forEach(pair -> {
                    System.out.println(pair);
                    itemsWritten.add(pair);
                });
            }

            @Override
            public void close() {
            }
        };

        return MantisJob
                .<Integer>source(Sources.observable(Observable.range(0, 100).subscribeOn(Schedulers.io())))
                .stage(groupByParity, new ScalarToKey.Config<Integer, String, Integer>()
                        .codec(Codecs.integer()))
                .stage(squareWithinGroup, new KeyToKey.Config<String, Integer, String, Integer>()
                        .codec(Codecs.integer()))
                .stage(toPairs, new KeyToScalar.Config<String, Integer, Pair>()
                        .codec(JacksonCodecs.pojo(Pair.class)))
                .sink(collectingSink)
                .create();
    }
}
| 8,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.