index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/fork/InstrumentedForkOperator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.fork;
import java.util.List;
import org.apache.gobblin.configuration.WorkUnitState;
/**
* Instrumented {@link org.apache.gobblin.fork.ForkOperator} automatically capturing certain metrics.
* Subclasses should implement forkDataRecordImpl instead of forkDataRecord.
*
* @author ibuenros
*/
public abstract class InstrumentedForkOperator<S, D> extends InstrumentedForkOperatorBase<S, D> {

  @Override
  public final List<Boolean> forkDataRecord(WorkUnitState workUnitState, D input) {
    // Route through the instrumented base implementation; subclasses customize
    // behavior by overriding forkDataRecordImpl instead (this method is final).
    List<Boolean> forks = super.forkDataRecord(workUnitState, input);
    return forks;
  }
}
| 4,500 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/fork/InstrumentedForkOperatorBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.fork;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.fork.ForkOperator;
import org.apache.gobblin.instrumented.Instrumentable;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.MetricNames;
import org.apache.gobblin.metrics.Tag;
/**
* Package-private implementation of instrumentation for {@link org.apache.gobblin.fork.ForkOperator}.
*
* @see org.apache.gobblin.instrumented.fork.InstrumentedForkOperator for extensible class.
*/
abstract class InstrumentedForkOperatorBase<S, D> implements Instrumentable, ForkOperator<S, D> {

  // True when GobblinMetrics is enabled for the work unit; refreshed in init().
  private boolean instrumentationEnabled;
  private MetricContext metricContext;
  // Optional class used to tag the metric context; absent means the runtime class is used.
  // Assigned exactly once (both constructors funnel through one assignment), so it is final.
  private final Optional<Class<?>> classTag;
  private Optional<Meter> inputMeter;
  private Optional<Meter> outputForks;
  private Optional<Timer> forkOperatorTimer;
  protected final Closer closer;

  public InstrumentedForkOperatorBase() {
    this(Optional.<Class<?>>absent());
  }

  protected InstrumentedForkOperatorBase(Optional<Class<?>> classTag) {
    this.closer = Closer.create();
    this.classTag = classTag;
    regenerateMetrics();
  }

  @Override
  public void init(WorkUnitState workUnitState) throws Exception {
    init(workUnitState, this.getClass());
  }

  /**
   * Initializes instrumentation for this operator.
   *
   * <p>The constructor-supplied class tag, when present, takes precedence over
   * {@code classTag} for naming the metric context.
   *
   * @param workUnitState work unit state used to configure metrics
   * @param classTag fallback class for tagging the metric context
   * @throws Exception if initialization fails
   */
  protected void init(WorkUnitState workUnitState, Class<?> classTag) throws Exception {
    this.instrumentationEnabled = GobblinMetrics.isEnabled(workUnitState);
    this.metricContext = this.closer.register(Instrumented.getMetricContext(workUnitState, this.classTag.or(classTag)));
    regenerateMetrics();
  }

  @Override
  public void switchMetricContext(List<Tag<?>> tags) {
    this.metricContext = this.closer
        .register(Instrumented.newContextFromReferenceContext(this.metricContext, tags, Optional.<String>absent()));
    regenerateMetrics();
  }

  @Override
  public void switchMetricContext(MetricContext context) {
    this.metricContext = context;
    regenerateMetrics();
  }

  /**
   * Generates metrics for the instrumentation of this class. When instrumentation is
   * disabled, all metric holders are reset to absent so no stale context is retained.
   */
  protected void regenerateMetrics() {
    if (isInstrumentationEnabled()) {
      this.inputMeter = Optional.of(this.metricContext.meter(MetricNames.ForkOperatorMetrics.RECORDS_IN_METER));
      this.outputForks = Optional.of(this.metricContext.meter(MetricNames.ForkOperatorMetrics.FORKS_OUT_METER));
      // Explicit <Timer> type witness dropped; inferred from the argument.
      this.forkOperatorTimer = Optional.of(this.metricContext.timer(MetricNames.ForkOperatorMetrics.FORK_TIMER));
    } else {
      this.inputMeter = Optional.absent();
      this.outputForks = Optional.absent();
      this.forkOperatorTimer = Optional.absent();
    }
  }

  /** Default with no additional tags. */
  @Override
  public List<Tag<?>> generateTags(State state) {
    return Lists.newArrayList();
  }

  @Override
  public boolean isInstrumentationEnabled() {
    return this.instrumentationEnabled;
  }

  @Override
  public List<Boolean> forkDataRecord(WorkUnitState workUnitState, D input) {
    if (!isInstrumentationEnabled()) {
      return forkDataRecordImpl(workUnitState, input);
    }
    long startTimeNanos = System.nanoTime();
    beforeFork(input);
    List<Boolean> result = forkDataRecordImpl(workUnitState, input);
    afterFork(result, startTimeNanos);
    return result;
  }

  /**
   * Called before forkDataRecord.
   *
   * @param input an input data record
   */
  protected void beforeFork(D input) {
    Instrumented.markMeter(this.inputMeter);
  }

  /**
   * Called after forkDataRecord.
   *
   * @param forks result from forkDataRecord.
   * @param startTimeNanos start time of forkDataRecord.
   */
  protected void afterFork(List<Boolean> forks, long startTimeNanos) {
    // Count only the branches that actually receive the record (true entries).
    int forksGenerated = 0;
    for (Boolean fork : forks) {
      forksGenerated += fork ? 1 : 0;
    }
    Instrumented.markMeter(this.outputForks, forksGenerated);
    Instrumented.updateTimer(this.forkOperatorTimer, System.nanoTime() - startTimeNanos, TimeUnit.NANOSECONDS);
  }

  /**
   * Subclasses should implement this instead of {@link org.apache.gobblin.fork.ForkOperator#forkDataRecord}.
   */
  public abstract List<Boolean> forkDataRecordImpl(WorkUnitState workUnitState, D input);

  @Override
  public MetricContext getMetricContext() {
    return this.metricContext;
  }

  @Override
  public void close()
      throws IOException {
    this.closer.close();
  }
}
| 4,501 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/fork/InstrumentedForkOperatorDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.fork;
import java.util.List;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.fork.ForkOperator;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.util.DecoratorUtils;
/**
* Decorator that automatically instruments {@link org.apache.gobblin.fork.ForkOperator}.
* Handles already instrumented {@link org.apache.gobblin.instrumented.fork.InstrumentedForkOperator}
* appropriately to avoid double metric reporting.
*/
public class InstrumentedForkOperatorDecorator<S, D> extends InstrumentedForkOperatorBase<S, D> implements Decorator {

  // Wrapped operator; registered with the base-class closer so it is closed with this decorator.
  private final ForkOperator<S, D> embeddedForkOperator;
  // True when the wrapped operator already reports its own metrics; used to avoid
  // double metric reporting. Both fields are assigned once in the constructor, so final.
  private final boolean isEmbeddedInstrumented;

  public InstrumentedForkOperatorDecorator(ForkOperator<S, D> forkOperator) {
    super(Optional.<Class<?>> of(DecoratorUtils.resolveUnderlyingObject(forkOperator).getClass()));
    this.embeddedForkOperator = this.closer.register(forkOperator);
    this.isEmbeddedInstrumented = Instrumented.isLineageInstrumented(forkOperator);
  }

  @Override
  public void init(WorkUnitState workUnitState) throws Exception {
    // Initialize the wrapped operator first, then this decorator's instrumentation,
    // tagging the metric context with the underlying (undecorated) class.
    this.embeddedForkOperator.init(workUnitState);
    super.init(workUnitState, DecoratorUtils.resolveUnderlyingObject(this).getClass());
  }

  @Override
  public MetricContext getMetricContext() {
    // Prefer the embedded operator's context when it is itself instrumented.
    return this.isEmbeddedInstrumented
        ? ((InstrumentedForkOperatorBase<S, D>) this.embeddedForkOperator).getMetricContext()
        : super.getMetricContext();
  }

  @Override
  public List<Boolean> forkDataRecord(WorkUnitState workUnitState, D input) {
    // Skip this decorator's instrumentation when the embedded operator already reports metrics.
    return this.isEmbeddedInstrumented ? forkDataRecordImpl(workUnitState, input)
        : super.forkDataRecord(workUnitState, input);
  }

  @Override
  public List<Boolean> forkDataRecordImpl(WorkUnitState workUnitState, D input) {
    return this.embeddedForkOperator.forkDataRecord(workUnitState, input);
  }

  @Override
  public int getBranches(WorkUnitState workUnitState) {
    return this.embeddedForkOperator.getBranches(workUnitState);
  }

  @Override
  public List<Boolean> forkSchema(WorkUnitState workUnitState, S input) {
    return this.embeddedForkOperator.forkSchema(workUnitState, input);
  }

  @Override
  public Object getDecoratedObject() {
    return this.embeddedForkOperator;
  }
}
| 4,502 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/qualitychecker/InstrumentedRowLevelPolicyBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.qualitychecker;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumentable;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.MetricNames;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicy;
/**
* package-private implementation of instrumentation for {@link org.apache.gobblin.qualitychecker.row.RowLevelPolicy}.
* See {@link org.apache.gobblin.instrumented.qualitychecker.InstrumentedRowLevelPolicy} for extensible class.
*/
abstract class InstrumentedRowLevelPolicyBase extends RowLevelPolicy implements Instrumentable, Closeable {

  // True when GobblinMetrics is enabled for the given state; fixed at construction.
  private final boolean instrumentationEnabled;
  private MetricContext metricContext;
  private Optional<Meter> recordsMeter;
  private Optional<Meter> passedRecordsMeter;
  private Optional<Meter> failedRecordsMeter;
  private Optional<Timer> policyTimer;
  protected final Closer closer;

  public InstrumentedRowLevelPolicyBase(State state, Type type) {
    this(state, type, Optional.<Class<?>>absent());
  }

  protected InstrumentedRowLevelPolicyBase(State state, Type type, Optional<Class<?>> classTag) {
    super(state, type);
    this.instrumentationEnabled = GobblinMetrics.isEnabled(state);
    this.closer = Closer.create();
    this.metricContext =
        this.closer.register(Instrumented.getMetricContext(state, classTag.or(this.getClass())));
    regenerateMetrics();
  }

  @Override
  public void switchMetricContext(List<Tag<?>> tags) {
    this.metricContext = this.closer.register(Instrumented.newContextFromReferenceContext(this.metricContext, tags,
        Optional.<String>absent()));
    regenerateMetrics();
  }

  @Override
  public void switchMetricContext(MetricContext context) {
    this.metricContext = context;
    regenerateMetrics();
  }

  /**
   * Generates metrics for the instrumentation of this class. When instrumentation is
   * disabled, all metric holders are reset to absent.
   */
  protected void regenerateMetrics() {
    if (isInstrumentationEnabled()) {
      this.recordsMeter = Optional.of(this.metricContext.meter(MetricNames.RowLevelPolicyMetrics.RECORDS_IN_METER));
      this.passedRecordsMeter = Optional.of(
          this.metricContext.meter(MetricNames.RowLevelPolicyMetrics.RECORDS_PASSED_METER));
      this.failedRecordsMeter = Optional.of(
          this.metricContext.meter(MetricNames.RowLevelPolicyMetrics.RECORDS_FAILED_METER));
      // Explicit <Timer> type witness dropped; inferred from the argument.
      this.policyTimer = Optional.of(
          this.metricContext.timer(MetricNames.RowLevelPolicyMetrics.CHECK_TIMER));
    } else {
      this.recordsMeter = Optional.absent();
      this.passedRecordsMeter = Optional.absent();
      this.failedRecordsMeter = Optional.absent();
      this.policyTimer = Optional.absent();
    }
  }

  /** Default with no additional tags */
  @Override
  public List<Tag<?>> generateTags(State state) {
    return Lists.newArrayList();
  }

  @Override
  public boolean isInstrumentationEnabled() {
    return this.instrumentationEnabled;
  }

  @Override
  public Result executePolicy(Object record) {
    if (!isInstrumentationEnabled()) {
      return executePolicyImpl(record);
    }
    long startTime = System.nanoTime();
    beforeCheck(record);
    Result result = executePolicyImpl(record);
    afterCheck(result, startTime);
    return result;
  }

  /**
   * Called before check is run.
   * @param record record about to be checked
   */
  public void beforeCheck(Object record) {
    Instrumented.markMeter(this.recordsMeter);
  }

  /**
   * Called after check is run.
   * @param result result from check.
   * @param startTimeNanos start time of check.
   */
  public void afterCheck(Result result, long startTimeNanos) {
    switch (result) {
      case FAILED:
        Instrumented.markMeter(this.failedRecordsMeter);
        break;
      case PASSED:
        Instrumented.markMeter(this.passedRecordsMeter);
        break;
      default:
        // Other results are timed but not metered separately.
    }
    Instrumented.updateTimer(this.policyTimer, System.nanoTime() - startTimeNanos, TimeUnit.NANOSECONDS);
  }

  /**
   * Subclasses should implement this instead of {@link org.apache.gobblin.qualitychecker.row.RowLevelPolicy#executePolicy}.
   */
  public abstract Result executePolicyImpl(Object record);

  @Override
  public void close()
      throws IOException {
    this.closer.close();
  }

  @Override
  public MetricContext getMetricContext() {
    return this.metricContext;
  }
}
| 4,503 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/qualitychecker/InstrumentedRowLevelPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.qualitychecker;
import org.apache.gobblin.configuration.State;
/**
* Instrumented {@link org.apache.gobblin.qualitychecker.row.RowLevelPolicy} automatically capturing certain metrics.
* Subclasses should implement executePolicyImpl instead of executePolicy.
*
* @author ibuenros
*/
public abstract class InstrumentedRowLevelPolicy extends InstrumentedRowLevelPolicyBase {

  public InstrumentedRowLevelPolicy(State state, Type type) {
    super(state, type);
  }

  @Override
  public final Result executePolicy(Object record) {
    // Route through the instrumented base class; subclasses supply the actual
    // check via executePolicyImpl (this method is final).
    Result result = super.executePolicy(record);
    return result;
  }
}
| 4,504 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/qualitychecker/InstrumentedRowLevelPolicyDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.qualitychecker;
import java.io.IOException;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.qualitychecker.row.RowLevelPolicy;
import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.util.DecoratorUtils;
/**
* Decorator that automatically instruments {@link org.apache.gobblin.qualitychecker.row.RowLevelPolicy}.
* Handles already instrumented {@link org.apache.gobblin.instrumented.qualitychecker.InstrumentedRowLevelPolicy}
* appropriately to avoid double metric reporting.
*/
public class InstrumentedRowLevelPolicyDecorator extends InstrumentedRowLevelPolicyBase implements Decorator {

  // Wrapped policy; assigned once in the constructor, so final.
  private final RowLevelPolicy embeddedPolicy;
  // True when the wrapped policy already reports its own metrics (avoids double reporting).
  private final boolean isEmbeddedInstrumented;

  public InstrumentedRowLevelPolicyDecorator(RowLevelPolicy policy) {
    super(policy.getTaskState(), policy.getType(),
        Optional.<Class<?>> of(DecoratorUtils.resolveUnderlyingObject(policy).getClass()));
    this.embeddedPolicy = policy;
    this.isEmbeddedInstrumented = Instrumented.isLineageInstrumented(policy);
  }

  @Override
  public MetricContext getMetricContext() {
    // Prefer the embedded policy's context when it is itself instrumented.
    return this.isEmbeddedInstrumented ? ((InstrumentedRowLevelPolicyBase) this.embeddedPolicy).getMetricContext()
        : super.getMetricContext();
  }

  @Override
  public Result executePolicy(Object record) {
    return this.isEmbeddedInstrumented ? executePolicyImpl(record) : super.executePolicy(record);
  }

  @Override
  public Result executePolicyImpl(Object record) {
    return this.embeddedPolicy.executePolicy(record);
  }

  @Override
  public void close() throws IOException {
    // Close the wrapped policy first, then release this decorator's own resources:
    // the base class registered its metric context with a Closer, which the previous
    // implementation never closed (resource leak).
    try {
      this.embeddedPolicy.close();
    } finally {
      super.close();
    }
  }

  @Override
  public Object getDecoratedObject() {
    return this.embeddedPolicy;
  }

  @Override
  public State getFinalState() {
    return this.embeddedPolicy.getFinalState();
  }
}
| 4,505 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/converter/InstrumentedConverterDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.converter;
import java.io.IOException;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.records.ControlMessageHandler;
import org.apache.gobblin.records.RecordStreamWithMetadata;
import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.util.DecoratorUtils;
/**
* Decorator that automatically instruments {@link org.apache.gobblin.converter.Converter}.
* Handles already instrumented {@link org.apache.gobblin.instrumented.converter.InstrumentedConverter}
* appropriately to avoid double metric reporting.
*/
public class InstrumentedConverterDecorator<SI, SO, DI, DO> extends InstrumentedConverterBase<SI, SO, DI, DO>
    implements Decorator {

  // Wrapped converter; not final because init() may replace it with the value
  // returned by the embedded converter's own init().
  private Converter<SI, SO, DI, DO> embeddedConverter;
  // True when the wrapped converter already reports its own metrics (avoids double reporting).
  private final boolean isEmbeddedInstrumented;

  public InstrumentedConverterDecorator(Converter<SI, SO, DI, DO> converter) {
    this.embeddedConverter = converter;
    this.isEmbeddedInstrumented = Instrumented.isLineageInstrumented(converter);
  }

  @Override
  public Converter<SI, SO, DI, DO> init(WorkUnitState workUnit) {
    this.embeddedConverter = this.embeddedConverter.init(workUnit);
    return super.init(workUnit, DecoratorUtils.resolveUnderlyingObject(this).getClass());
  }

  @Override
  public MetricContext getMetricContext() {
    // Prefer the embedded converter's context when it is itself instrumented.
    return this.isEmbeddedInstrumented
        ? ((InstrumentedConverterBase<SI, SO, DI, DO>) this.embeddedConverter).getMetricContext()
        : super.getMetricContext();
  }

  @Override
  public Iterable<DO> convertRecord(SO outputSchema, DI inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    return this.isEmbeddedInstrumented ? convertRecordImpl(outputSchema, inputRecord, workUnit)
        : super.convertRecord(outputSchema, inputRecord, workUnit);
  }

  @Override
  public Iterable<DO> convertRecordImpl(SO outputSchema, DI inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    return this.embeddedConverter.convertRecord(outputSchema, inputRecord, workUnit);
  }

  @Override
  public SO convertSchema(SI inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return this.embeddedConverter.convertSchema(inputSchema, workUnit);
  }

  @Override
  public void close() throws IOException {
    // Close the wrapped converter first, then release this decorator's own resources:
    // the base class registered its metric context with a Closer in init(), which the
    // previous implementation never closed (resource leak).
    try {
      this.embeddedConverter.close();
    } finally {
      super.close();
    }
  }

  @Override
  public State getFinalState() {
    return this.embeddedConverter.getFinalState();
  }

  @Override
  public Object getDecoratedObject() {
    return this.embeddedConverter;
  }

  /**
   * This workarounds the issue that {@link Converter#processStream(RecordStreamWithMetadata, WorkUnitState)} will invoke
   * {@link org.apache.gobblin.converter.AsyncConverter1to1#convertRecord(Object, Object, WorkUnitState)} directly, which is an unsupported method.
   */
  @Override
  public RecordStreamWithMetadata<DO, SO> processStream(RecordStreamWithMetadata<DI, SI> inputStream,
      WorkUnitState workUnitState) throws SchemaConversionException {
    return this.embeddedConverter.processStream(inputStream, workUnitState);
  }

  @Override
  public ControlMessageHandler getMessageHandler() {
    return this.embeddedConverter.getMessageHandler();
  }
}
| 4,506 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/converter/InstrumentedConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.converter;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
/**
* Instrumented converter that automatically captures certain metrics.
* Subclasses should implement convertRecordImpl instead of convertRecord.
*
* See {@link org.apache.gobblin.converter.Converter}.
*
* @author ibuenros
*/
public abstract class InstrumentedConverter<SI, SO, DI, DO> extends InstrumentedConverterBase<SI, SO, DI, DO> {

  @Override
  public final Iterable<DO> convertRecord(SO outputSchema, DI inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    // Route through the instrumented base; subclasses implement convertRecordImpl
    // instead (this method is final).
    Iterable<DO> converted = super.convertRecord(outputSchema, inputRecord, workUnit);
    return converted;
  }
}
| 4,507 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/converter/InstrumentedConverterBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.converter;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.instrumented.Instrumentable;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.MetricNames;
import org.apache.gobblin.metrics.Tag;
/**
* package-private implementation of instrumentation for {@link org.apache.gobblin.converter.Converter}.
* See {@link org.apache.gobblin.instrumented.converter.InstrumentedConverter} for extensible class.
*/
abstract class InstrumentedConverterBase<SI, SO, DI, DO> extends Converter<SI, SO, DI, DO>
implements Instrumentable, Closeable {
private boolean instrumentationEnabled = false;
private MetricContext metricContext;
private Optional<Meter> recordsInMeter = Optional.absent();
private Optional<Meter> recordsOutMeter = Optional.absent();
private Optional<Meter> recordsExceptionMeter = Optional.absent();
private Optional<Timer> converterTimer = Optional.absent();
protected final Closer closer = Closer.create();
@Override
public Converter<SI, SO, DI, DO> init(WorkUnitState workUnit) {
return init(workUnit, this.getClass());
}
protected Converter<SI, SO, DI, DO> init(WorkUnitState workUnit, Class<?> classTag) {
Converter<SI, SO, DI, DO> converter = super.init(workUnit);
this.instrumentationEnabled = GobblinMetrics.isEnabled(workUnit);
this.metricContext = this.closer.register(Instrumented.getMetricContext(workUnit, classTag));
regenerateMetrics();
return converter;
}
@Override
public void switchMetricContext(List<Tag<?>> tags) {
this.metricContext = this.closer.register(Instrumented.newContextFromReferenceContext(this.metricContext, tags,
Optional.<String>absent()));
regenerateMetrics();
}
@Override
public void switchMetricContext(MetricContext context) {
this.metricContext = context;
regenerateMetrics();
}
/**
* Generates metrics for the instrumentation of this class.
*/
protected void regenerateMetrics() {
if (isInstrumentationEnabled()) {
this.recordsInMeter = Optional.of(this.metricContext.meter(MetricNames.ConverterMetrics.RECORDS_IN_METER));
this.recordsOutMeter = Optional.of(this.metricContext.meter(MetricNames.ConverterMetrics.RECORDS_OUT_METER));
this.recordsExceptionMeter = Optional.of(
this.metricContext.meter(MetricNames.ConverterMetrics.RECORDS_FAILED_METER));
this.converterTimer = Optional.<Timer>of(this.metricContext.timer(MetricNames.ConverterMetrics.CONVERT_TIMER));
}
}
/** Default with no additional tags */
@Override
public List<Tag<?>> generateTags(State state) {
return Lists.newArrayList();
}
@Override
public boolean isInstrumentationEnabled() {
return this.instrumentationEnabled;
}
@Override
public Iterable<DO> convertRecord(SO outputSchema, DI inputRecord, WorkUnitState workUnit)
throws DataConversionException {
if(!isInstrumentationEnabled()) {
return convertRecordImpl(outputSchema, inputRecord, workUnit);
}
try {
long startTime = System.nanoTime();
beforeConvert(outputSchema, inputRecord, workUnit);
final Iterable<DO> it = convertRecordImpl(outputSchema, inputRecord, workUnit);
afterConvert(it, startTime);
return Iterables.transform(it, new Function<DO, DO>() {
@Override
public DO apply(DO input) {
onIterableNext(input);
return input;
}
});
} catch(DataConversionException exception) {
onException(exception);
throw exception;
}
}
/**
* Called before conversion.
* @param outputSchema output schema of the {@link #convertSchema(Object, WorkUnitState)} method
* @param inputRecord an input data record
* @param workUnit a {@link WorkUnitState} instance
*/
public void beforeConvert(SO outputSchema, DI inputRecord, WorkUnitState workUnit) {
Instrumented.markMeter(this.recordsInMeter);
}
  /**
   * Called after conversion; updates the conversion timer with the elapsed wall-clock time.
   * Note the records-out meter is updated lazily in {@link #onIterableNext} as the returned
   * iterable is consumed, not here.
   * @param iterable conversion result.
   * @param startTimeNanos start time of conversion.
   */
  public void afterConvert(Iterable<DO> iterable, long startTimeNanos) {
    Instrumented.updateTimer(this.converterTimer, System.nanoTime() - startTimeNanos, TimeUnit.NANOSECONDS);
  }
  /**
   * Called every time next() method in iterable is called; marks the records-out meter.
   * @param next next value in iterable.
   */
  public void onIterableNext(DO next) {
    Instrumented.markMeter(this.recordsOutMeter);
  }
/**
* Called when converter throws an exception.
* @param exception exception thrown.
*/
public void onException(Exception exception) {
if(DataConversionException.class.isInstance(exception)) {
Instrumented.markMeter(this.recordsExceptionMeter);
}
}
  /**
   * Subclasses should implement this method instead of convertRecord; instrumentation is applied
   * around it by {@link #convertRecord}.
   *
   * See {@link org.apache.gobblin.converter.Converter#convertRecord}.
   */
  public abstract Iterable<DO> convertRecordImpl(SO outputSchema, DI inputRecord, WorkUnitState workUnit)
      throws DataConversionException;
  /** Closes everything registered with this instance's closer (e.g. the metric context). */
  @Override
  public void close()
      throws IOException {
    this.closer.close();
  }
  /** @return the {@link MetricContext} currently in use by this converter. */
  @Override
  public MetricContext getMetricContext() {
    return this.metricContext;
  }
}
| 4,508 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/extractor/InstrumentedExtractorBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.extractor;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.instrumented.Instrumentable;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metadata.GlobalMetadata;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.MetricNames;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.records.RecordStreamWithMetadata;
import org.apache.gobblin.runtime.JobShutdownException;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.stream.StreamEntity;
import org.apache.gobblin.util.FinalState;
import edu.umd.cs.findbugs.annotations.SuppressWarnings;
import io.reactivex.Emitter;
import io.reactivex.Flowable;
import io.reactivex.functions.BiConsumer;
import javax.annotation.Nullable;
/**
* package-private implementation of instrumentation for {@link org.apache.gobblin.source.extractor.Extractor}.
* See {@link org.apache.gobblin.instrumented.extractor.InstrumentedExtractor} for extensible class.
*/
public abstract class InstrumentedExtractorBase<S, D>
    implements Extractor<S, D>, Instrumentable, Closeable, FinalState {
  private final boolean instrumentationEnabled;
  private MetricContext metricContext;
  // Meters/timer are absent when instrumentation is disabled.
  private Optional<Meter> readRecordsMeter;
  private Optional<Meter> dataRecordExceptionsMeter;
  private Optional<Timer> extractorTimer;
  protected final Closer closer;
  public InstrumentedExtractorBase(WorkUnitState workUnitState) {
    this(workUnitState, Optional.<Class<?>> absent());
  }
  protected InstrumentedExtractorBase(WorkUnitState workUnitState, Optional<Class<?>> classTag) {
    super();
    this.closer = Closer.create();
    this.instrumentationEnabled = GobblinMetrics.isEnabled(workUnitState);
    this.metricContext = this.closer.register(
        Instrumented.getMetricContext(workUnitState, classTag.or(this.getClass()), generateTags(workUnitState)));
    regenerateMetrics();
  }
  @Override
  public void switchMetricContext(List<Tag<?>> tags) {
    this.metricContext = this.closer
        .register(Instrumented.newContextFromReferenceContext(this.metricContext, tags, Optional.<String> absent()));
    regenerateMetrics();
  }
  @Override
  public void switchMetricContext(MetricContext context) {
    this.metricContext = context;
    regenerateMetrics();
  }
  /**
   * Generates metrics for the instrumentation of this class.
   */
  protected void regenerateMetrics() {
    if (isInstrumentationEnabled()) {
      this.readRecordsMeter = Optional.of(this.metricContext.meter(MetricNames.ExtractorMetrics.RECORDS_READ_METER));
      this.dataRecordExceptionsMeter =
          Optional.of(this.metricContext.meter(MetricNames.ExtractorMetrics.RECORDS_FAILED_METER));
      this.extractorTimer = Optional.<Timer>of(this.metricContext.timer(MetricNames.ExtractorMetrics.EXTRACT_TIMER));
    } else {
      this.readRecordsMeter = Optional.absent();
      this.dataRecordExceptionsMeter = Optional.absent();
      this.extractorTimer = Optional.absent();
    }
  }
  @Override
  public boolean isInstrumentationEnabled() {
    return this.instrumentationEnabled;
  }
  /** Default with no additional tags */
  @Override
  public List<Tag<?>> generateTags(State state) {
    return Lists.newArrayList();
  }
  @Override
  public RecordEnvelope<D> readRecordEnvelope() throws DataRecordException, IOException {
    if (!isInstrumentationEnabled()) {
      return readRecordEnvelopeImpl();
    }
    try {
      long startTimeNanos = System.nanoTime();
      beforeRead();
      RecordEnvelope<D> record = readRecordEnvelopeImpl();
      // A null record signals end of data; only non-null records are metered in afterRead.
      afterRead(record == null ? null : record.getRecord(), startTimeNanos);
      return record;
    } catch (DataRecordException exception) {
      onException(exception);
      throw exception;
    } catch (IOException exception) {
      onException(exception);
      throw exception;
    }
  }
  /**
   * @param shutdownRequest an {@link AtomicBoolean} that becomes true when a shutdown has been requested.
   * @return a {@link Flowable} with the records from this source. Note the flowable should honor downstream backpressure.
   */
  @Override
  public RecordStreamWithMetadata<D, S> recordStream(AtomicBoolean shutdownRequest) throws IOException {
    S schema = getSchema();
    Flowable<StreamEntity<D>> recordStream = Flowable.generate(() -> shutdownRequest, (BiConsumer<AtomicBoolean, Emitter<StreamEntity<D>>>) (state, emitter) -> {
      if (state.get()) {
        // shutdown requested
        try {
          shutdown();
        } catch (JobShutdownException exc) {
          emitter.onError(exc);
        }
      }
      try {
        long startTimeNanos = 0;
        if (isInstrumentationEnabled()) {
          startTimeNanos = System.nanoTime();
          beforeRead();
        }
        StreamEntity<D> record = readStreamEntityImpl();
        if (isInstrumentationEnabled()) {
          D unwrappedRecord = null;
          if (record instanceof RecordEnvelope) {
            unwrappedRecord = ((RecordEnvelope<D>) record).getRecord();
          }
          afterRead(unwrappedRecord, startTimeNanos);
        }
        if (record != null) {
          emitter.onNext(record);
        } else {
          // null record means the extractor is exhausted
          emitter.onComplete();
        }
      } catch (DataRecordException | IOException exc) {
        if (isInstrumentationEnabled()) {
          onException(exc);
        }
        emitter.onError(exc);
      }
    });
    recordStream = recordStream.doFinally(this::close);
    return new RecordStreamWithMetadata<>(recordStream, GlobalMetadata.<S>builder().schema(schema).build());
  }
  /**
   * Called before each record is read.
   */
  public void beforeRead() {}
  /**
   * Called after each record is read.
   * @param record record read.
   * @param startTime reading start time.
   */
  public void afterRead(D record, long startTime) {
    Instrumented.updateTimer(this.extractorTimer, System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
    if (record != null) {
      Instrumented.markMeter(this.readRecordsMeter);
    }
  }
  /**
   * Called on exception when trying to read. Only {@link DataRecordException}s are counted as
   * failed records; other exception types are not metered here.
   * @param exception exception thrown.
   */
  public void onException(Exception exception) {
    if (exception instanceof DataRecordException) {
      Instrumented.markMeter(this.dataRecordExceptionsMeter);
    }
  }
  /**
   * Subclasses should implement this or {@link #readRecordEnvelopeImpl()}
   * instead of {@link org.apache.gobblin.source.extractor.Extractor#readRecord}
   */
  protected StreamEntity<D> readStreamEntityImpl() throws DataRecordException, IOException {
    return readRecordEnvelopeImpl();
  }
  /**
   * Subclasses should implement this or {@link #readRecordImpl(Object)}
   * instead of {@link org.apache.gobblin.source.extractor.Extractor#readRecord}
   */
  @SuppressWarnings(value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE",
      justification = "Findbugs believes readRecord(null) is non-null. This is not true.")
  protected RecordEnvelope<D> readRecordEnvelopeImpl() throws DataRecordException, IOException {
    D record = readRecordImpl(null);
    return record == null ? null : new RecordEnvelope<>(record);
  }
  /**
   * Subclasses should implement this or {@link #readRecordEnvelopeImpl()}
   * instead of {@link org.apache.gobblin.source.extractor.Extractor#readRecord}
   */
  @Nullable
  protected D readRecordImpl(D reuse) throws DataRecordException, IOException {
    throw new UnsupportedOperationException();
  }
  /**
   * Get final state for this object. By default this returns an empty {@link org.apache.gobblin.configuration.State}, but
   * concrete subclasses can add information that will be added to the task state.
   * @return Empty {@link org.apache.gobblin.configuration.State}.
   */
  @Override
  public State getFinalState() {
    return new State();
  }
  @Override
  public void close() throws IOException {
    this.closer.close();
  }
  @Override
  public MetricContext getMetricContext() {
    return this.metricContext;
  }
}
| 4,509 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/extractor/InstrumentedExtractorDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.extractor;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.records.RecordStreamWithMetadata;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.util.DecoratorUtils;
import org.apache.gobblin.util.FinalState;
/**
* Decorator that automatically instruments {@link org.apache.gobblin.source.extractor.Extractor}.
* Handles already instrumented {@link org.apache.gobblin.instrumented.extractor.InstrumentedExtractor}
* appropriately to avoid double metric reporting.
*/
public class InstrumentedExtractorDecorator<S, D> extends InstrumentedExtractorBase<S, D> implements Decorator {
  // The wrapped extractor all calls are delegated to.
  private final Extractor<S, D> embeddedExtractor;
  // True when the wrapped extractor is itself instrumented; in that case instrumentation is
  // delegated to it to avoid double metric reporting.
  private final boolean isEmbeddedInstrumented;
  // Nano timestamp of the previous record seen on the record stream; 0 until the first record.
  private volatile long lastRecordTime;
  public InstrumentedExtractorDecorator(WorkUnitState workUnitState, Extractor<S, D> extractor) {
    super(workUnitState, Optional.<Class<?>> of(DecoratorUtils.resolveUnderlyingObject(extractor).getClass()));
    this.embeddedExtractor = this.closer.register(extractor);
    this.isEmbeddedInstrumented = Instrumented.isLineageInstrumented(extractor);
  }
  @Override
  public MetricContext getMetricContext() {
    return this.isEmbeddedInstrumented ? ((InstrumentedExtractorBase<S, D>) this.embeddedExtractor).getMetricContext()
        : super.getMetricContext();
  }
  @Override
  public RecordEnvelope<D> readRecordEnvelope() throws DataRecordException, IOException {
    return this.isEmbeddedInstrumented ? this.embeddedExtractor.readRecordEnvelope() : super.readRecordEnvelope();
  }
  @Override
  protected RecordEnvelope<D> readRecordEnvelopeImpl() throws DataRecordException, IOException {
    return this.embeddedExtractor.readRecordEnvelope();
  }
  @Override
  public RecordStreamWithMetadata<D, S> recordStream(AtomicBoolean shutdownRequest) throws IOException {
    if (this.isEmbeddedInstrumented) {
      return this.embeddedExtractor.recordStream(shutdownRequest);
    }
    RecordStreamWithMetadata<D, S> stream = this.embeddedExtractor.recordStream(shutdownRequest);
    stream = stream.mapRecords(r -> {
      if (this.lastRecordTime == 0) {
        this.lastRecordTime = System.nanoTime();
      }
      // NOTE(review): afterRead receives the timestamp of the *previous* record, so the extract
      // timer here measures the inter-record interval (~0 for the first record) — confirm intended.
      afterRead(r.getRecord(), this.lastRecordTime);
      this.lastRecordTime = System.nanoTime();
      return r;
    });
    return stream;
  }
  @Override
  public S getSchema() throws IOException {
    return this.embeddedExtractor.getSchema();
  }
  @Override
  public long getExpectedRecordCount() {
    return this.embeddedExtractor.getExpectedRecordCount();
  }
  @Override
  public long getHighWatermark() {
    return this.embeddedExtractor.getHighWatermark();
  }
  @Override
  public State getFinalState() {
    // Prefer the embedded extractor's final state when it exposes one.
    if (this.embeddedExtractor instanceof FinalState) {
      return ((FinalState) this.embeddedExtractor).getFinalState();
    }
    return super.getFinalState();
  }
  @Override
  public Object getDecoratedObject() {
    return this.embeddedExtractor;
  }
}
| 4,510 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/extractor/InstrumentedExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.extractor;
import java.io.IOException;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.stream.RecordEnvelope;
/**
* Instrumented version of {@link org.apache.gobblin.source.extractor.Extractor} automatically captures certain metrics.
* Subclasses should implement readRecordImpl instead of readRecord.
*/
public abstract class InstrumentedExtractor<S, D> extends InstrumentedExtractorBase<S, D> {
  public InstrumentedExtractor(WorkUnitState workUnitState) {
    super(workUnitState);
  }
  /**
   * Delegates directly to {@link #readRecordImpl(Object)}, bypassing instrumentation; the
   * {@code reuse} argument is ignored.
   * @deprecated provided for test compatibility.
   */
  @Deprecated
  @Override
  public D readRecord(@Deprecated D reuse) throws DataRecordException, IOException {
    return this.readRecordImpl(null);
  }
  // Final so subclasses cannot bypass the instrumentation applied by the base class.
  @Override
  public final RecordEnvelope<D> readRecordEnvelope()
      throws DataRecordException, IOException {
    return super.readRecordEnvelope();
  }
}
| 4,511 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/writer/InstrumentedPartitionedDataWriterDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.writer;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.writer.DataWriter;
/**
* {@link InstrumentedDataWriterDecorator} which add partition tags to the metric context.
*/
public class InstrumentedPartitionedDataWriterDecorator<D> extends InstrumentedDataWriterDecorator<D> {
  public static final String PARTITION = "Partition";
  /** The partition this writer instance is dedicated to; contributes per-field metric tags. */
  private final GenericRecord partition;
  public InstrumentedPartitionedDataWriterDecorator(DataWriter<D> writer, State state, GenericRecord partition) {
    super(writer, state);
    this.partition = partition;
  }
  /**
   * Adds the parent's tags, a tag for the whole partition record, and one tag per partition field.
   */
  @Override
  public List<Tag<?>> generateTags(State state) {
    ImmutableList.Builder<Tag<?>> tagBuilder = ImmutableList.builder();
    tagBuilder.addAll(super.generateTags(state));
    tagBuilder.add(new Tag<>(PARTITION, this.partition));
    for (Schema.Field partitionField : this.partition.getSchema().getFields()) {
      String fieldName = partitionField.name();
      tagBuilder.add(new Tag<>(fieldName, this.partition.get(fieldName)));
    }
    return tagBuilder.build();
  }
}
| 4,512 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/writer/InstrumentedDataWriterBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.writer;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumentable;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.MetricNames;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.FinalState;
import org.apache.gobblin.writer.DataWriter;
/**
 * Package-private implementation of instrumentation for {@link org.apache.gobblin.writer.DataWriter}.
 *
 * @see org.apache.gobblin.instrumented.writer.InstrumentedDataWriter for extensible class.
 */
@Slf4j
abstract class InstrumentedDataWriterBase<D> implements DataWriter<D>, Instrumentable, Closeable, FinalState {
  // Periodically refreshes records/bytes-written meters; present only when instrumentation is on.
  private final Optional<ScheduledThreadPoolExecutor> writerMetricsUpdateExecutor;
  private final boolean instrumentationEnabled;
  private MetricContext metricContext;
  private Optional<Meter> recordsInMeter;
  private Optional<Meter> successfulWritesMeter;
  private Optional<Meter> failedWritesMeter;
  private Optional<Timer> dataWriterTimer;
  private Optional<Meter> recordsWrittenMeter;
  private Optional<Meter> bytesWrittenMeter;
  protected final Closer closer;
  public static final String WRITER_METRICS_UPDATER_INTERVAL = "gobblin.writer.metrics.updater.interval";
  public static final long DEFAULT_WRITER_METRICS_UPDATER_INTERVAL = 30000;
  public InstrumentedDataWriterBase(State state) {
    this(state, Optional.<Class<?>> absent());
  }
  protected InstrumentedDataWriterBase(State state, Optional<Class<?>> classTag) {
    this.closer = Closer.create();
    this.instrumentationEnabled = GobblinMetrics.isEnabled(state);
    this.metricContext = this.closer.register(Instrumented.getMetricContext(state, classTag.or(this.getClass())));
    if (this.instrumentationEnabled) {
      this.writerMetricsUpdateExecutor = Optional.of(buildWriterMetricsUpdateExecutor());
      scheduleWriterMetricsUpdater(this.writerMetricsUpdateExecutor.get(), getWriterMetricsUpdaterInterval(state));
    } else {
      this.writerMetricsUpdateExecutor = Optional.absent();
    }
    regenerateMetrics();
  }
  @Override
  public void switchMetricContext(List<Tag<?>> tags) {
    this.metricContext = this.closer
        .register(Instrumented.newContextFromReferenceContext(this.metricContext, tags, Optional.<String> absent()));
    regenerateMetrics();
  }
  @Override
  public void switchMetricContext(MetricContext context) {
    this.metricContext = context;
    regenerateMetrics();
  }
  /**
   * Generates metrics for the instrumentation of this class.
   */
  protected void regenerateMetrics() {
    if (isInstrumentationEnabled()) {
      this.recordsInMeter = Optional.of(this.metricContext.meter(MetricNames.DataWriterMetrics.RECORDS_IN_METER));
      this.successfulWritesMeter =
          Optional.of(this.metricContext.meter(MetricNames.DataWriterMetrics.SUCCESSFUL_WRITES_METER));
      this.failedWritesMeter = Optional.of(this.metricContext.meter(MetricNames.DataWriterMetrics.FAILED_WRITES_METER));
      setRecordsWrittenMeter(isInstrumentationEnabled());
      setBytesWrittenMeter(isInstrumentationEnabled());
      this.dataWriterTimer = Optional.<Timer>of(this.metricContext.timer(MetricNames.DataWriterMetrics.WRITE_TIMER));
    } else {
      this.recordsInMeter = Optional.absent();
      this.successfulWritesMeter = Optional.absent();
      this.failedWritesMeter = Optional.absent();
      setRecordsWrittenMeter(isInstrumentationEnabled());
      setBytesWrittenMeter(isInstrumentationEnabled());
      this.dataWriterTimer = Optional.absent();
    }
  }
  private synchronized void setRecordsWrittenMeter(boolean isInstrumentationEnabled) {
    if (isInstrumentationEnabled) {
      this.recordsWrittenMeter =
          Optional.of(this.metricContext.meter(MetricNames.DataWriterMetrics.RECORDS_WRITTEN_METER));
    } else {
      this.recordsWrittenMeter = Optional.absent();
    }
  }
  private synchronized void setBytesWrittenMeter(boolean isInstrumentationEnabled) {
    if (isInstrumentationEnabled) {
      this.bytesWrittenMeter = Optional.of(this.metricContext.meter(MetricNames.DataWriterMetrics.BYTES_WRITTEN_METER));
    } else {
      this.bytesWrittenMeter = Optional.absent();
    }
  }
  /** Default with no additional tags */
  @Override
  public List<Tag<?>> generateTags(State state) {
    return Lists.newArrayList();
  }
  @Override
  public boolean isInstrumentationEnabled() {
    return this.instrumentationEnabled;
  }
  @Override
  public void write(D record) throws IOException {
    if (!isInstrumentationEnabled()) {
      writeImpl(record);
      return;
    }
    try {
      long startTimeNanos = System.nanoTime();
      beforeWrite(record);
      writeImpl(record);
      onSuccessfulWrite(startTimeNanos);
    } catch (IOException exception) {
      onException(exception);
      throw exception;
    }
  }
  /**
   * Called before writing a record; marks the records-in meter.
   * @param record record to write.
   */
  public void beforeWrite(D record) {
    Instrumented.markMeter(this.recordsInMeter);
  }
  /**
   * Called after a successful write of a record.
   * @param startTimeNanos time at which writing started.
   */
  public void onSuccessfulWrite(long startTimeNanos) {
    Instrumented.updateTimer(this.dataWriterTimer, System.nanoTime() - startTimeNanos, TimeUnit.NANOSECONDS);
    Instrumented.markMeter(this.successfulWritesMeter);
  }
  /** Called after a failed writing of a record.
   * @param exception exception thrown.
   */
  public void onException(Exception exception) {
    Instrumented.markMeter(this.failedWritesMeter);
  }
  /**
   * Subclasses should implement this instead of {@link org.apache.gobblin.writer.DataWriter#write}
   */
  public abstract void writeImpl(D record) throws IOException;
  /**
   * Get final state for this object. By default this returns an empty {@link org.apache.gobblin.configuration.State}, but
   * concrete subclasses can add information that will be added to the task state.
   * @return Empty {@link org.apache.gobblin.configuration.State}.
   */
  @Override
  public State getFinalState() {
    return new State();
  }
  @Override
  public void close() throws IOException {
    try {
      this.closer.close();
    } finally {
      // Always stop the background metrics updater, even if closing delegates failed.
      if (this.writerMetricsUpdateExecutor.isPresent()) {
        ExecutorsUtils.shutdownExecutorService(this.writerMetricsUpdateExecutor.get(), Optional.of(log));
      }
    }
  }
  @Override
  public MetricContext getMetricContext() {
    return this.metricContext;
  }
  /**
   * Update the {@link #recordsWrittenMeter} and {@link #bytesWrittenMeter}. This method should be invoked after the
   * wrapped {@link DataWriter#commit()} is invoked. This ensures that the record-level and byte-level meters are
   * updated at least once.
   */
  @Override
  public void commit() throws IOException {
    updateRecordsWrittenMeter();
    updateBytesWrittenMeter();
  }
  /**
   * Update the {@link #recordsWrittenMeter} using the {@link DataWriter#recordsWritten()} method..
   */
  private synchronized void updateRecordsWrittenMeter() {
    if (this.recordsWrittenMeter.isPresent()) {
      // Mark only the delta since the last update so the meter's count tracks recordsWritten().
      this.recordsWrittenMeter.get().mark(recordsWritten() - this.recordsWrittenMeter.get().getCount());
    }
  }
  /**
   * Update the {@link #bytesWrittenMeter} using the {@link DataWriter#bytesWritten()} method.
   */
  private synchronized void updateBytesWrittenMeter() {
    if (this.bytesWrittenMeter.isPresent()) {
      try {
        this.bytesWrittenMeter.get().mark(bytesWritten() - this.bytesWrittenMeter.get().getCount());
      } catch (IOException e) {
        log.error("Cannot get bytesWritten for DataWriter, will not update " + this.bytesWrittenMeter.get().toString(),
            e);
      }
    }
  }
  /**
   * Build a {@link ScheduledThreadPoolExecutor} that updates record-level and byte-level metrics.
   */
  private static ScheduledThreadPoolExecutor buildWriterMetricsUpdateExecutor() {
    return new ScheduledThreadPoolExecutor(1,
        ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("WriterMetricsUpdater-%d")));
  }
  /**
   * Get the interval that the Writer Metrics Updater should be scheduled on.
   */
  private static long getWriterMetricsUpdaterInterval(State state) {
    return state.getPropAsLong(WRITER_METRICS_UPDATER_INTERVAL, DEFAULT_WRITER_METRICS_UPDATER_INTERVAL);
  }
  /**
   * Schedule the given {@link ScheduledThreadPoolExecutor} to run at the given interval.
   */
  private ScheduledFuture<?> scheduleWriterMetricsUpdater(ScheduledThreadPoolExecutor writerMetricsUpdater,
      long scheduleInterval) {
    return writerMetricsUpdater.scheduleAtFixedRate(new WriterMetricsUpdater(), scheduleInterval, scheduleInterval,
        TimeUnit.MILLISECONDS);
  }
  /**
   * An implementation of {@link Runnable} that updates record-level and byte-level metrics.
   */
  private class WriterMetricsUpdater implements Runnable {
    @Override
    public void run() {
      updateRecordsWrittenMeter();
      updateBytesWrittenMeter();
    }
  }
}
| 4,513 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/writer/InstrumentedDataWriterDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.writer;
import java.io.IOException;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.Descriptor;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.records.ControlMessageHandler;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.util.DecoratorUtils;
import org.apache.gobblin.util.FinalState;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.WatermarkAwareWriter;
/**
* Decorator that automatically instruments {@link org.apache.gobblin.writer.DataWriter}. Handles already instrumented
* {@link org.apache.gobblin.instrumented.writer.InstrumentedDataWriter} appropriately to avoid double metric reporting.
*/
public class InstrumentedDataWriterDecorator<D> extends InstrumentedDataWriterBase<D> implements Decorator, WatermarkAwareWriter<D> {
  /** The wrapped writer all calls are delegated to. Final: assigned once in the constructor. */
  private final DataWriter<D> embeddedWriter;
  /** True when the wrapped writer is itself instrumented; avoids double metric reporting. */
  private final boolean isEmbeddedInstrumented;
  /** Present when the underlying (fully unwrapped) writer is watermark-aware. */
  private final Optional<WatermarkAwareWriter> watermarkAwareWriter;
  public InstrumentedDataWriterDecorator(DataWriter<D> writer, State state) {
    super(state, Optional.<Class<?>> of(DecoratorUtils.resolveUnderlyingObject(writer).getClass()));
    this.embeddedWriter = this.closer.register(writer);
    this.isEmbeddedInstrumented = Instrumented.isLineageInstrumented(writer);
    Object underlying = DecoratorUtils.resolveUnderlyingObject(embeddedWriter);
    if (underlying instanceof WatermarkAwareWriter) {
      this.watermarkAwareWriter = Optional.of((WatermarkAwareWriter) underlying);
    } else {
      this.watermarkAwareWriter = Optional.absent();
    }
  }
  @Override
  public MetricContext getMetricContext() {
    return this.isEmbeddedInstrumented ? ((InstrumentedDataWriterBase<D>) this.embeddedWriter).getMetricContext()
        : super.getMetricContext();
  }
  /** Unsupported: records must flow through {@link #writeEnvelope(RecordEnvelope)}. */
  @Override
  public final void write(D record) throws IOException {
    throw new UnsupportedOperationException();
  }
  @Override
  public void writeEnvelope(RecordEnvelope<D> record) throws IOException {
    if (this.isEmbeddedInstrumented) {
      // Embedded writer reports its own metrics; just delegate.
      this.embeddedWriter.writeEnvelope(record);
    } else {
      if (!isInstrumentationEnabled()) {
        this.embeddedWriter.writeEnvelope(record);
        return;
      }
      try {
        long startTimeNanos = System.nanoTime();
        beforeWrite(record.getRecord());
        this.embeddedWriter.writeEnvelope(record);
        onSuccessfulWrite(startTimeNanos);
      } catch (IOException exception) {
        onException(exception);
        throw exception;
      }
    }
  }
  @Override
  public void writeImpl(D record) throws IOException {
    this.embeddedWriter.write(record);
  }
  @Override
  public void commit() throws IOException {
    // Commit the delegate first, then let the base class refresh records/bytes-written meters.
    this.embeddedWriter.commit();
    super.commit();
  }
  @Override
  public void cleanup() throws IOException {
    this.embeddedWriter.cleanup();
  }
  @Override
  public long recordsWritten() {
    return this.embeddedWriter.recordsWritten();
  }
  @Override
  public long bytesWritten() throws IOException {
    return this.embeddedWriter.bytesWritten();
  }
  @Override
  public State getFinalState() {
    if (this.embeddedWriter instanceof FinalState) {
      return ((FinalState) this.embeddedWriter).getFinalState();
    }
    return super.getFinalState();
  }
  @Override
  public Descriptor getDataDescriptor() {
    return this.embeddedWriter.getDataDescriptor();
  }
  @Override
  public Object getDecoratedObject() {
    return this.embeddedWriter;
  }
  @Override
  public boolean isWatermarkCapable() {
    return watermarkAwareWriter.isPresent() && watermarkAwareWriter.get().isWatermarkCapable();
  }
  @Override
  public ControlMessageHandler getMessageHandler() {
    return this.embeddedWriter.getMessageHandler();
  }
  @Override
  public void flush() throws IOException {
    this.embeddedWriter.flush();
  }
}
| 4,514 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.instrumented.writer;
import java.io.IOException;
import org.apache.gobblin.configuration.State;
/**
* Instrumented version of {@link org.apache.gobblin.writer.DataWriter} automatically capturing certain metrics.
* Subclasses should implement writeImpl instead of write.
*/
public abstract class InstrumentedDataWriter<D> extends InstrumentedDataWriterBase<D> {

  /**
   * @param state task/job state used to configure instrumentation for this writer
   */
  public InstrumentedDataWriter(State state) {
    super(state);
  }

  // Final so subclasses cannot bypass metric capture; implement writeImpl instead.
  @Override
  public final void write(D record)
      throws IOException {
    super.write(record);
  }
}
| 4,515 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.crypto;
import java.util.Map;
import java.util.ServiceLoader;
import lombok.Synchronized;
import lombok.extern.slf4j.Slf4j;
/**
* This class knows how to build encryption algorithms based on configuration parameters. To add your own
* encryption implementation, please add another implementation of {@link EncryptionProvider}
* in Gobblin's classpath as well as referencing the new implementation in META-INF/services/gobblin.crypto.EncryptionProvider
* of the containing JAR. (See {@link ServiceLoader} documentation for more details).
*/
@Slf4j
public class CredentialStoreFactory {

  // ServiceLoader iteration is not thread-safe, hence @Synchronized on the build method below.
  private static final ServiceLoader<CredentialStoreProvider> credentialStoreProviderLoader =
      ServiceLoader.load(CredentialStoreProvider.class);

  /** Utility class; not meant to be instantiated. */
  private CredentialStoreFactory() {
  }

  /**
   * Build a CredentialStore with the given config parameters. The type will be extracted from the parameters.
   * See {@link EncryptionConfigParser} for a set of standard configuration parameters, although
   * each encryption provider may have its own arbitrary set.
   *
   * @param parameters configuration parameters describing the credential store to build
   * @return A CredentialStore for the given parameters
   * @throws IllegalArgumentException If no provider exists that can build the requested credential store
   */
  @Synchronized
  public static CredentialStore buildCredentialStore(Map<String, Object> parameters) {
    String credType = EncryptionConfigParser.getKeystoreType(parameters);

    // Ask each discovered provider in turn; the first non-null result wins.
    for (CredentialStoreProvider provider : credentialStoreProviderLoader) {
      log.debug("Looking for cred store type {} in provider {}", credType, provider.getClass().getName());
      CredentialStore credStore = provider.buildCredentialStore(parameters);
      if (credStore != null) {
        log.debug("Found cred store type {} in provider {}", credType, provider.getClass().getName());
        return credStore;
      }
    }

    throw new IllegalArgumentException("Could not find a provider to build algorithm " + credType + " - is gobblin-crypto-provider in classpath?");
  }
}
| 4,516 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.crypto;
import java.util.Map;
import java.util.ServiceLoader;
import lombok.Synchronized;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.codec.StreamCodec;
/**
* This class knows how to build encryption algorithms based on configuration parameters. To add your own
* encryption implementation, please add another implementation of {@link org.apache.gobblin.crypto.EncryptionProvider}
* in Gobblin's classpath as well as referencing the new implementation in META-INF/services/gobblin.crypto.EncryptionProvider
* of the containing JAR. (See {@link java.util.ServiceLoader} documentation for more details).
*/
@Slf4j
public class EncryptionFactory {

  // ServiceLoader iteration is not thread-safe, hence @Synchronized on the provider lookup below.
  private static final ServiceLoader<EncryptionProvider> encryptionProviderLoader =
      ServiceLoader.load(EncryptionProvider.class);

  /** Utility class; not meant to be instantiated. */
  private EncryptionFactory() {
  }

  /**
   * Build a StreamCodec with the given config parameters. The type will be extracted from the parameters.
   * See {@link org.apache.gobblin.crypto.EncryptionConfigParser} for a set of standard configuration parameters,
   * although each encryption provider may have its own arbitrary set.
   *
   * @param parameters configuration parameters; must contain an encryption type
   * @return A StreamCodec for the given parameters
   * @throws IllegalArgumentException If the encryption type is missing, or no provider exists that can
   *         build the requested encryption codec
   */
  public static StreamCodec buildStreamCryptoProvider(Map<String, Object> parameters) {
    String encryptionType = EncryptionConfigParser.getEncryptionType(parameters);
    if (encryptionType == null) {
      throw new IllegalArgumentException("Encryption type not present in parameters!");
    }

    return buildStreamCryptoProvider(encryptionType, parameters);
  }

  /**
   * Return a StreamCodec for the given algorithm and with appropriate parameters.
   *
   * @param algorithm Algorithm to build
   * @param parameters Parameters for algorithm
   * @return A StreamCodec for that algorithm
   * @throws IllegalArgumentException If the given algorithm/parameter pair cannot be built
   */
  @Synchronized
  public static StreamCodec buildStreamCryptoProvider(String algorithm, Map<String, Object> parameters) {
    // Ask each discovered provider in turn; the first non-null codec wins.
    for (EncryptionProvider provider : encryptionProviderLoader) {
      log.debug("Looking for algorithm {} in provider {}", algorithm, provider.getClass().getName());
      StreamCodec codec = provider.buildStreamCryptoProvider(algorithm, parameters);
      if (codec != null) {
        log.debug("Found algorithm {} in provider {}", algorithm, provider.getClass().getName());
        return codec;
      }
    }

    throw new IllegalArgumentException("Could not find a provider to build algorithm " + algorithm + " - is gobblin-crypto-provider in classpath?");
  }
}
| 4,517 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.crypto;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
* Extract encryption related information from taskState
*/
@Slf4j
public class EncryptionConfigParser {
  /**
   * Encryption parameters for converters and writers.
   * Algorithm: Encryption algorithm. Can be 'any' to let the system choose one.
   *
   * Keystore parameters:
   *  keystore_type: Type of keystore to build. Gobblin supports 'java' (a JCEKSKeystoreCredentialStore)
   *                 and 'json' (a JSONCredentialStore).
   *  keystore_path: Location for the java keystore where encryption keys can be found
   *  keystore_password: Password the keystore is encrypted with
   *  keystore_encoding: Encoding of the key value in a file. Could be 'hex', 'base64', or
   *                     other implementation defined values.
   *
   * Note that some of these parameters may be optional depending on the type of keystore -- for example,
   * a java keystore does not look at the encoding parameter.
   */

  // Per-entity configuration prefixes; see EntityType below for how they are selected.
  static final String WRITER_ENCRYPT_PREFIX = ConfigurationKeys.WRITER_PREFIX + ".encrypt";
  static final String CONVERTER_ENCRYPT_PREFIX = "converter.encrypt";
  static final String CONVERTER_DECRYPT_PREFIX = "converter.decrypt";

  // Sub-keys recognized under each prefix once the prefix and branch suffix are stripped.
  public static final String ENCRYPTION_ALGORITHM_KEY = "algorithm";
  public static final String ENCRYPTION_KEYSTORE_PATH_KEY = "keystore_path";
  public static final String ENCRYPTION_KEYSTORE_PASSWORD_KEY = "keystore_password";
  public static final String ENCRYPTION_KEY_NAME = "key_name";
  public static final String ENCRYPTION_KEYSTORE_TYPE_KEY = "keystore_type";
  public static final String ENCRYPTION_KEYSTORE_TYPE_KEY_DEFAULT = "java";
  public static final String ENCRYPTION_KEYSTORE_ENCODING_KEY = "keystore_encoding";
  public static final String ENCRYPTION_KEYSTORE_ENCODING_DEFAULT = "hex";
  public static final String ENCRYPTION_TYPE_ANY = "any";

  /**
   * Some algorithms can be configured with an underlying cipher, like the symmetric cipher used with GPG
   */
  public static final String ENCRYPTION_CIPHER_KEY = "cipher";

  /**
   * Represents the entity we are trying to retrieve configuration for. Internally this
   * enum maps entity type to a configuration prefix.
   */
  public enum EntityType {
    CONVERTER_ENCRYPT(CONVERTER_ENCRYPT_PREFIX),
    CONVERTER_DECRYPT(CONVERTER_DECRYPT_PREFIX),
    WRITER(WRITER_ENCRYPT_PREFIX);

    // Property prefix (e.g. "converter.encrypt") under which this entity's config lives.
    private final String configPrefix;

    private EntityType(String configPrefix) {
      this.configPrefix = configPrefix;
    }

    public String getConfigPrefix() {
      return configPrefix;
    }
  }

  /**
   * Retrieve encryption configuration for the branch the WorkUnitState represents
   *
   * @param entityType Type of entity we are retrieving config for
   * @param workUnitState State for the object querying config
   * @return A list of encryption properties or null if encryption isn't configured
   */
  public static Map<String, Object> getConfigForBranch(EntityType entityType, WorkUnitState workUnitState) {
    return getConfigForBranch(entityType, null, workUnitState);
  }

  /**
   * Retrieve encryption configuration for the branch the WorkUnitState represents. This will first retrieve
   * config for a given entity type (converter.encrypt.*) and then merge it with any entity-specific config
   * (eg converter.FieldEncryptionConverter.*).
   *
   * @param entityType Type of entity we are retrieving config for
   * @param entityName Optional entity-specific name (e.g. a converter class's simple name); may be null
   * @param workUnitState State for the object querying config
   * @return A list of encryption properties or null if encryption isn't configured
   */
  public static Map<String, Object> getConfigForBranch(EntityType entityType, String entityName, WorkUnitState workUnitState) {
    Map<String, Object> config = getConfigForBranch(workUnitState.getJobState(),
        entityType.getConfigPrefix(),
        ForkOperatorUtils.getPropertyNameForBranch(workUnitState, ""));
    if (entityName != null) {
      final String entityPrefix = entityType.getConfigPrefix() + "." + entityName;
      Map<String, Object> entitySpecificConfig = getConfigForBranch(workUnitState.getJobState(), entityPrefix,
          ForkOperatorUtils.getPropertyNameForBranch(workUnitState, ""));
      if (config == null) {
        config = entitySpecificConfig;
      } else if (entitySpecificConfig != null) {
        // Remove keys that would have been picked up twice - eg converter.FooConverter.encrypt would first
        // be picked up by the converter.* check
        config.keySet().removeIf(s -> s.startsWith(entityName + "."));
        // Entity-specific values override the entity-type-level values.
        config.putAll(entitySpecificConfig);
      }
    }

    return config;
  }

  /**
   * Retrieve encryption config for a given branch of a task
   *
   * @param entityType Entity type we are retrieving config for
   * @param taskState State of the task
   * @param numBranches Number of branches overall
   * @param branch Branch # of the current object
   * @return A list of encryption properties or null if encryption isn't configured
   */
  public static Map<String, Object> getConfigForBranch(EntityType entityType, State taskState, int numBranches, int branch) {
    return getConfigForBranch(taskState,
        entityType.getConfigPrefix(),
        ForkOperatorUtils.getPropertyNameForBranch("", numBranches, branch));
  }

  /**
   * Extracts and validates the per-branch properties under {@code prefix}, resolving the
   * keystore password through the PasswordManager (supports encrypted/aliased passwords).
   * Returns null when nothing is configured or when no algorithm is present.
   */
  private static Map<String, Object> getConfigForBranch(State taskState, String prefix, String branchSuffix) {
    Map<String, Object> properties =
        extractPropertiesForBranch(taskState.getProperties(), prefix, branchSuffix);
    if (properties.isEmpty()) {
      return null;
    }

    // An algorithm is mandatory; without it the rest of the settings are meaningless.
    if (getEncryptionType(properties) == null) {
      log.warn("Encryption algorithm not specified; ignoring other encryption settings");
      return null;
    }

    PasswordManager passwordManager = PasswordManager.getInstance(taskState);
    if (properties.containsKey(ENCRYPTION_KEYSTORE_PASSWORD_KEY)) {
      properties.put(ENCRYPTION_KEYSTORE_PASSWORD_KEY,
          passwordManager.readPassword((String)properties.get(ENCRYPTION_KEYSTORE_PASSWORD_KEY)));
    }

    return properties;
  }

  /** @return the configured encryption algorithm, or null if absent */
  public static String getEncryptionType(Map<String, Object> properties) {
    return (String)properties.get(ENCRYPTION_ALGORITHM_KEY);
  }

  /** @return the configured keystore path, or null if absent */
  public static String getKeystorePath(Map<String, Object> properties) {
    return (String)properties.get(ENCRYPTION_KEYSTORE_PATH_KEY);
  }

  /** @return the (already resolved) keystore password, or null if absent */
  public static String getKeystorePassword(Map<String, Object> properties) {
    return (String)properties.get(ENCRYPTION_KEYSTORE_PASSWORD_KEY);
  }

  /**
   * Get the type of keystore to instantiate
   */
  public static String getKeystoreType(Map<String, Object> parameters) {
    String type = (String)parameters.get(ENCRYPTION_KEYSTORE_TYPE_KEY);
    if (type == null) {
      type = ENCRYPTION_KEYSTORE_TYPE_KEY_DEFAULT;
    }

    return type;
  }

  /** @return the configured key name, or null if absent */
  public static String getKeyName(Map<String, Object> parameters) {
    return (String)parameters.get(ENCRYPTION_KEY_NAME);
  }

  /** @return the configured keystore encoding, defaulting to "hex" */
  public static String getKeystoreEncoding(Map<String, Object> parameters) {
    return (String)parameters.getOrDefault(ENCRYPTION_KEYSTORE_ENCODING_KEY, ENCRYPTION_KEYSTORE_ENCODING_DEFAULT);
  }

  /**
   * Get the underlying cipher name
   * @param parameters parameters map
   * @return the cipher name
   */
  public static String getCipher(Map<String, Object> parameters) {
    return (String)parameters.get(ENCRYPTION_CIPHER_KEY);
  }

  /**
   * Extract a set of properties for a given branch, stripping out the prefix and branch
   * suffix.
   *
   * Eg - original output:
   *  writer.encrypt.1 -> foo
   *  writer.encrypt.something.1 -> bar
   *
   * will transform to
   *
   *  "" -> foo
   *  something -> bar
   *
   * this is very similar to ConfigUtils and typesafe config; need to figure out config story
   * @param properties Properties to extract data from
   * @param prefix Prefix to match; all other properties will be ignored
   * @param branchSuffix Suffix for all config properties
   * @return Transformed properties as described above
   */
  private static Map<String, Object> extractPropertiesForBranch(
      Properties properties, String prefix, String branchSuffix) {

    Map<String, Object> ret = new HashMap<>();

    for (Map.Entry<Object, Object> prop: properties.entrySet()) {
      String key = (String)prop.getKey();
      if (key.startsWith(prefix) && (branchSuffix.length() == 0 || key.endsWith(branchSuffix))) {
        // Start of the stripped key: one character past the '.' that follows the prefix,
        // clamped to key.length() for the case where the key is exactly the prefix.
        int strippedKeyStart = Math.min(key.length(), prefix.length() + 1);

        // filter out subkeys that don't have a '.' -- eg writer.encrypted.foo shouldn't be returned
        // if prefix is writer.encrypt
        if (strippedKeyStart != key.length() && key.charAt(strippedKeyStart - 1) != '.') {
          continue;
        }

        // Drop the branch suffix from the tail; max() guards against the suffix overlapping the prefix.
        int strippedKeyEnd = Math.max(strippedKeyStart, key.length() - branchSuffix.length());
        String strippedKey = key.substring(strippedKeyStart, strippedKeyEnd);
        ret.put(strippedKey, prop.getValue());
      }
    }

    return ret;
  }
}
| 4,518 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.types;
/**
 * Thrown when a field path cannot be resolved against a record by a {@code TypeMapper}.
 */
public class FieldMappingException extends Exception {

  /**
   * @param message description of the field-mapping failure
   */
  public FieldMappingException(String message) {
    super(message);
  }

  /**
   * @param message description of the field-mapping failure
   * @param e the underlying cause
   */
  public FieldMappingException(String message, Exception e) {
    super(message, e);
  }
}
| 4,519 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.types;
import org.apache.avro.generic.GenericRecord;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class AvroGenericRecordTypeMapper implements TypeMapper<GenericRecord> {

  /**
   * Resolves a dot-separated {@code fieldPath} against an Avro {@link GenericRecord},
   * descending through nested records one segment at a time. The special path
   * {@code FIELD_PATH_ALL} ("*") returns the record itself.
   *
   * @throws FieldMappingException if any segment of the path cannot be resolved
   */
  @Override
  public Object getField(GenericRecord record, String fieldPath) throws FieldMappingException {
    if (fieldPath.equals(FIELD_PATH_ALL)) {
      return record;
    }
    try {
      Object current = record;
      for (String segment : fieldPath.split("\\.")) {
        current = ((GenericRecord) current).get(segment);
      }
      return current;
    } catch (Exception e) {
      throw new FieldMappingException("Failed to retrieve fieldPath " + fieldPath + " from record " + record.toString(), e);
    }
  }
}
| 4,520 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.types;
import java.io.Closeable;
import java.io.IOException;
import com.typesafe.config.Config;
/**
* An interface that allows Gobblin constructs to introspect a type T
* @param <T>
*/
public interface TypeMapper<T> extends Closeable {

  /** Sentinel field path meaning "the whole record". */
  String FIELD_PATH_ALL = "*";

  /**
   * Optional configuration hook; the default implementation is a no-op.
   *
   * @param config configuration for this mapper
   */
  default void configure(Config config) {
  }

  /**
   * Retrieve the value at {@code fieldPath} within {@code record}.
   *
   * @param record the record to introspect
   * @param fieldPath a mapper-specific path expression (e.g. dot-separated field names)
   * @return the value found at the path
   * @throws FieldMappingException if the path cannot be resolved
   */
  Object getField(T record, String fieldPath)
      throws FieldMappingException;

  /** Default close is a no-op; override when the mapper holds resources. */
  @Override
  default void close()
      throws IOException {
  }
}
| 4,521 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
/**
* Converts any Object into a String
*/
public class AnyToStringConverter extends Converter<Object, String, Object, String> {
@Override
public String convertSchema(Object inputSchema, WorkUnitState workUnit)
throws SchemaConversionException {
return "";
}
@Override
public Iterable<String> convertRecord(String outputSchema, Object inputRecord, WorkUnitState workUnit)
throws DataConversionException {
return new SingleRecordIterable(String.valueOf(inputRecord));
}
} | 4,522 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
import java.util.Collections;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.util.io.GsonInterfaceAdapter;
/**
* Converts any Object into a Json object
*/
public class AnyToJsonConverter extends Converter<Object, String, Object, JsonElement> {
  private static final Gson GSON = GsonInterfaceAdapter.getGson(Object.class);

  // When true, unwrap the interface-adapter envelope and emit only the payload. TODO: Configure
  private boolean stripTopLevelType = true;

  @Override
  public String convertSchema(Object inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    return "";
  }

  @Override
  public Iterable<JsonElement> convertRecord(String outputSchema, Object inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    JsonElement converted = GSON.toJsonTree(inputRecord);
    if (this.stripTopLevelType) {
      // The interface adapter packs everything into object-type, object-data pairs;
      // keep only the data portion.
      converted = converted.getAsJsonObject().get("object-data");
    }
    return Collections.singletonList(converted);
  }
}
| 4,523 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.google.common.base.Throwables;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.DefaultCheckpointableWatermark;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.StreamingExtractor;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.ExtractFactory;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.test.proto.TestRecordProtos;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.WatermarkStorage;
/**
* A Test source that generates a sequence of records, works in batch and streaming mode.
*/
@Slf4j
public class SequentialTestSource implements Source<Object, Object> {
  // In-memory representation used for the generated test records.
  private enum InMemoryFormat {
    POJO,
    AVRO,
    PROTOBUF
  }
private static final int DEFAULT_NUM_PARALLELISM = 1;
private static final String DEFAULT_NAMESPACE = "TestDB";
private static final String DEFAULT_TABLE = "TestTable";
private static final Integer DEFAULT_NUM_RECORDS_PER_EXTRACT = 100;
public static final String WORK_UNIT_INDEX = "workUnitIndex";
private static final Long DEFAULT_SLEEP_TIME_PER_RECORD_MILLIS = 10L;
public static final String MEMORY_FORMAT_KEY = "inMemoryFormat";
public static final String DEFAULT_IN_MEMORY_FORMAT = InMemoryFormat.POJO.toString();
private final AtomicBoolean configured = new AtomicBoolean(false);
private int num_parallelism;
private String namespace;
private String table;
private int numRecordsPerExtract;
private long sleepTimePerRecord;
private InMemoryFormat inMemFormat;
private final Extract.TableType tableType = Extract.TableType.APPEND_ONLY;
private final ExtractFactory _extractFactory = new ExtractFactory("yyyyMMddHHmmss");
private boolean streaming = false;
  /**
   * Lazily reads source configuration the first time it is invoked; later calls are no-ops.
   * In streaming mode the per-extract record count is made effectively unbounded.
   *
   * NOTE(review): the check-then-set on {@code configured} is not atomic; this assumes
   * single-threaded invocation -- confirm if the source is ever shared across threads.
   */
  private void configureIfNeeded(Config config)
  {
    if (!configured.get()) {
      num_parallelism = ConfigUtils.getInt(config, "source.numParallelism", DEFAULT_NUM_PARALLELISM);
      namespace = ConfigUtils.getString(config, "source.namespace", DEFAULT_NAMESPACE);
      table = ConfigUtils.getString(config, "source.table", DEFAULT_TABLE);
      numRecordsPerExtract = ConfigUtils.getInt(config, "source.numRecordsPerExtract", DEFAULT_NUM_RECORDS_PER_EXTRACT);
      sleepTimePerRecord = ConfigUtils.getLong(config, "source.sleepTimePerRecordMillis",
          DEFAULT_SLEEP_TIME_PER_RECORD_MILLIS);
      streaming = (ConfigUtils.getString(config, "task.executionMode", "BATCH").equalsIgnoreCase("STREAMING"));
      if (streaming) {
        // Streaming runs have no natural end; make each extract effectively unbounded.
        numRecordsPerExtract = Integer.MAX_VALUE;
      }
      inMemFormat = InMemoryFormat.valueOf(ConfigUtils.getString(config, "source." + MEMORY_FORMAT_KEY,
          DEFAULT_IN_MEMORY_FORMAT));
      log.info("Source configured with: num_parallelism: {}, namespace: {}, "
          + "table: {}, numRecordsPerExtract: {}, sleepTimePerRecord: {}, streaming: {}, inMemFormat: {}",
          this.num_parallelism, this.namespace,
          this.table, this.numRecordsPerExtract, this.sleepTimePerRecord, this.streaming, this.inMemFormat);
      configured.set(true);
    }
  }
@Override
public List<WorkUnit> getWorkunits(SourceState state) {
configureIfNeeded(ConfigFactory.parseProperties(state.getProperties()));
final List<WorkUnitState> previousWorkUnitStates = state.getPreviousWorkUnitStates();
if (!previousWorkUnitStates.isEmpty())
{
List<WorkUnit> newWorkUnits = Lists.newArrayListWithCapacity(previousWorkUnitStates.size());
int i = 0;
for (WorkUnitState workUnitState: previousWorkUnitStates)
{
WorkUnit workUnit;
if (workUnitState.getWorkingState().equals(WorkUnitState.WorkingState.COMMITTED))
{
LongWatermark watermark = workUnitState.getActualHighWatermark(LongWatermark.class);
LongWatermark expectedWatermark = new LongWatermark(watermark.getValue() + numRecordsPerExtract);
WatermarkInterval watermarkInterval = new WatermarkInterval(watermark, expectedWatermark);
workUnit = WorkUnit.create(newExtract(tableType, namespace, table), watermarkInterval);
log.debug("Will be setting watermark interval to " + watermarkInterval.toJson());
workUnit.setProp(WORK_UNIT_INDEX, workUnitState.getWorkunit().getProp(WORK_UNIT_INDEX));
workUnit.setProp(MEMORY_FORMAT_KEY, this.inMemFormat.toString());
}
else
{
// retry
LongWatermark watermark = workUnitState.getWorkunit().getLowWatermark(LongWatermark.class);
LongWatermark expectedWatermark = new LongWatermark(watermark.getValue() + numRecordsPerExtract);
WatermarkInterval watermarkInterval = new WatermarkInterval(watermark, expectedWatermark);
workUnit = WorkUnit.create(newExtract(tableType, namespace, table), watermarkInterval);
log.debug("Will be setting watermark interval to " + watermarkInterval.toJson());
workUnit.setProp(WORK_UNIT_INDEX, workUnitState.getWorkunit().getProp(WORK_UNIT_INDEX));
workUnit.setProp(MEMORY_FORMAT_KEY, this.inMemFormat.toString());
}
newWorkUnits.add(workUnit);
}
return newWorkUnits;
}
else {
return initialWorkUnits();
}
}
private List<WorkUnit> initialWorkUnits() {
List<WorkUnit> workUnits = Lists.newArrayList();
for (int i=0; i < num_parallelism; i++)
{
WorkUnit workUnit = WorkUnit.create(newExtract(Extract.TableType.APPEND_ONLY, namespace,
table));
LongWatermark lowWatermark = new LongWatermark(i * numRecordsPerExtract + 1);
LongWatermark expectedHighWatermark = new LongWatermark((i + 1) * numRecordsPerExtract);
workUnit.setWatermarkInterval(new WatermarkInterval(lowWatermark, expectedHighWatermark));
workUnit.setProp(WORK_UNIT_INDEX, i);
workUnit.setProp(MEMORY_FORMAT_KEY, this.inMemFormat.toString());
workUnits.add(workUnit);
}
return workUnits;
}
private Extract newExtract(Extract.TableType tableType, String namespace, String table) {
return _extractFactory.getUniqueExtract(tableType, namespace, table);
}
static class TestBatchExtractor implements Extractor<Object, Object> {
private long recordsExtracted = 0;
private final long numRecordsPerExtract;
private LongWatermark currentWatermark;
private long sleepTimePerRecord;
private int partition;
private final InMemoryFormat inMemoryFormat;
private final Object schema;
WorkUnitState workUnitState;
public TestBatchExtractor(int partition,
LongWatermark lowWatermark,
long numRecordsPerExtract,
long sleepTimePerRecord,
WorkUnitState wuState) {
this.partition = partition;
this.currentWatermark = lowWatermark;
this.numRecordsPerExtract = numRecordsPerExtract;
this.sleepTimePerRecord = sleepTimePerRecord;
this.workUnitState = wuState;
this.inMemoryFormat = InMemoryFormat.valueOf(this.workUnitState.getProp(MEMORY_FORMAT_KEY));
this.schema = getSchema(inMemoryFormat);
}
@Override
public Object getSchema()
throws IOException {
return this.schema;
}
private Object getSchema(InMemoryFormat inMemoryFormat) {
switch (inMemoryFormat) {
case POJO: {
return TestRecord.class;
}
case AVRO: {
return org.apache.gobblin.test.avro.TestRecord.getClassSchema();
}
case PROTOBUF: {
return TestRecordProtos.TestRecord.class;
}
default:
throw new RuntimeException("Not implemented " + inMemoryFormat.name());
}
}
@Override
public RecordEnvelope readRecordEnvelope()
throws DataRecordException, IOException {
if (recordsExtracted < numRecordsPerExtract) {
try {
Thread.sleep(sleepTimePerRecord);
} catch (InterruptedException e) {
Throwables.propagate(e);
}
Object record;
switch (this.inMemoryFormat) {
case POJO: {
record = new TestRecord(this.partition, this.currentWatermark.getValue(), "I am a POJO message");
break;
}
case AVRO: {
record = org.apache.gobblin.test.avro.TestRecord.newBuilder()
.setPartition(this.partition)
.setSequence(this.currentWatermark.getValue())
.setPayload("I am an Avro message")
.build();
break;
}
case PROTOBUF: {
record = TestRecordProtos.TestRecord.newBuilder()
.setPartition(this.partition)
.setSequence(this.currentWatermark.getValue())
.setPayload("I am a Protobuf message")
.build();
break;
}
default: throw new RuntimeException("");
}
log.debug("Extracted record -> {}", record);
RecordEnvelope re = new RecordEnvelope<>(record,
new DefaultCheckpointableWatermark(String.valueOf(this.partition),
new LongWatermark(this.currentWatermark.getValue())));
currentWatermark.increment();
recordsExtracted++;
return re;
} else {
return null;
}
}
@Override
public long getExpectedRecordCount() {
return numRecordsPerExtract;
}
@Override
public long getHighWatermark() {
return workUnitState.getHighWaterMark();
}
@Override
public void close()
throws IOException {
workUnitState.setActualHighWatermark(currentWatermark);
}
public void setCurrentWatermark(LongWatermark currentWatermark) {
this.currentWatermark = currentWatermark;
}
}
static class TestStreamingExtractor implements StreamingExtractor<Object, Object> {
private Optional<WatermarkStorage> watermarkStorage;
private final TestBatchExtractor extractor;
public TestStreamingExtractor(TestBatchExtractor extractor) {
this.extractor = extractor;
}
@Override
public void close()
throws IOException {
extractor.close();
}
@Override
public Object getSchema()
throws IOException {
return extractor.getSchema();
}
@Override
public RecordEnvelope<Object> readRecordEnvelope()
throws DataRecordException, IOException {
return extractor.readRecordEnvelope();
}
@Override
public long getExpectedRecordCount() {
return extractor.getExpectedRecordCount();
}
@Override
public long getHighWatermark() {
return extractor.getHighWatermark();
}
@Override
public void start(WatermarkStorage watermarkStorage)
throws IOException {
this.watermarkStorage = Optional.of(watermarkStorage);
Map<String, CheckpointableWatermark> lastCommitted;
try {
lastCommitted = this.watermarkStorage.get()
.getCommittedWatermarks(DefaultCheckpointableWatermark.class, ImmutableList.of("" + extractor.partition));
} catch (IOException e) {
// failed to get watermarks ... log a warning message
log.warn("Failed to get watermarks... will start from the beginning", e);
lastCommitted = Collections.EMPTY_MAP;
}
for (Map.Entry entry: lastCommitted.entrySet()) {
log.info("{}: Found these committed watermarks: key: {}, value: {}", this, entry.getKey(), entry.getValue());
}
LongWatermark currentWatermark;
if (!lastCommitted.isEmpty() && lastCommitted.containsKey(""+extractor.partition)) {
currentWatermark = (LongWatermark) (lastCommitted.get("" + extractor.partition)).getWatermark();
} else {
// first record starts from 0
currentWatermark = new LongWatermark(-1);
}
extractor.setCurrentWatermark(currentWatermark);
log.info("{}: Set current watermark to : {}", this, currentWatermark);
}
};
@Override
public Extractor<Object, Object> getExtractor(WorkUnitState state)
throws IOException {
Config config = ConfigFactory.parseProperties(state.getProperties());
configureIfNeeded(config);
final LongWatermark lowWatermark = state.getWorkunit().getLowWatermark(LongWatermark.class);
final WorkUnitState workUnitState = state;
final int index = state.getPropAsInt(WORK_UNIT_INDEX);
final TestBatchExtractor extractor = new TestBatchExtractor(index, lowWatermark, numRecordsPerExtract,
sleepTimePerRecord, workUnitState);
if (!streaming) {
return extractor;
} else {
return (Extractor) new TestStreamingExtractor(extractor);
}
}
  /** No-op: this test source holds no external resources that need to be released. */
  @Override
  public void shutdown(SourceState state) {
  }
}
| 4,524 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter/AsyncConverter1to1.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.util.concurrent.CompletableFuture;
import com.google.common.base.Optional;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metadata.GlobalMetadata;
import org.apache.gobblin.stream.ControlMessage;
import org.apache.gobblin.records.RecordStreamWithMetadata;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.stream.StreamEntity;
import io.reactivex.Flowable;
import io.reactivex.Single;
import io.reactivex.SingleObserver;
import io.reactivex.annotations.NonNull;
import lombok.RequiredArgsConstructor;
/**
* A {@link Converter} that allows for pipelining asynchronous conversions.
*
* The number of outstanding conversions is limited by {@link #MAX_CONCURRENT_ASYNC_CONVERSIONS_KEY} and defaults to
* {@link #DEFAULT_MAX_CONCURRENT_ASYNC_CONVERSIONS}.
*
* Subclasses should implement {@link #convertRecordAsync(Object, Object, WorkUnitState)}.
*/
@Alpha
public abstract class AsyncConverter1to1<SI, SO, DI, DO> extends Converter<SI, SO, DI, DO> {
  public static final String MAX_CONCURRENT_ASYNC_CONVERSIONS_KEY = "gobblin.converter.maxConcurrentAsyncConversions";
  public static final int DEFAULT_MAX_CONCURRENT_ASYNC_CONVERSIONS = 20;

  @Override
  public abstract SO convertSchema(SI inputSchema, WorkUnitState workUnit) throws SchemaConversionException;

  /**
   * Synchronous conversion is unsupported for async converters; they only work in stream
   * execution mode.
   *
   * @throws UnsupportedOperationException always
   */
  @Override
  public final Iterable<DO> convertRecord(SO outputSchema, DI inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    throw new UnsupportedOperationException("Async converters are only supported in stream mode. Make sure to set "
        + ConfigurationKeys.TASK_SYNCHRONOUS_EXECUTION_MODEL_KEY + " to false.");
  }

  /**
   * Convert the input record asynchronously. Return a {@link CompletableFuture} for the converted record.
   */
  protected abstract CompletableFuture<DO> convertRecordAsync(SO outputSchema, DI inputRecord, WorkUnitState workUnit)
      throws DataConversionException;

  /**
   * Return a {@link RecordStreamWithMetadata} with the appropriate modifications.
   * Control messages pass through unconverted (after notifying the message handler); records
   * are converted asynchronously with at most the configured number of in-flight conversions.
   * @param inputStream
   * @param workUnitState
   * @return
   * @throws SchemaConversionException
   * @implSpec this processStream does not handle {@link org.apache.gobblin.stream.MetadataUpdateControlMessage}s
   */
  @Override
  public RecordStreamWithMetadata<DO, SO> processStream(RecordStreamWithMetadata<DI, SI> inputStream,
      WorkUnitState workUnitState) throws SchemaConversionException {
    int maxConcurrentAsyncConversions = workUnitState.getPropAsInt(MAX_CONCURRENT_ASYNC_CONVERSIONS_KEY,
        DEFAULT_MAX_CONCURRENT_ASYNC_CONVERSIONS);
    SO outputSchema = convertSchema(inputStream.getGlobalMetadata().getSchema(), workUnitState);
    Flowable<StreamEntity<DO>> outputStream =
        inputStream.getRecordStream()
            .flatMapSingle(in -> {
              if (in instanceof ControlMessage) {
                getMessageHandler().handleMessage((ControlMessage) in);
                return Single.just((ControlMessage<DO>) in);
              } else if (in instanceof RecordEnvelope) {
                RecordEnvelope<DI> recordEnvelope = (RecordEnvelope<DI>) in;
                return new SingleAsync<>(recordEnvelope,
                    convertRecordAsync(outputSchema, recordEnvelope.getRecord(), workUnitState));
              } else {
                throw new IllegalStateException("Expected ControlMessage or RecordEnvelope.");
              }
            }, false, maxConcurrentAsyncConversions);
    return inputStream.withRecordStream(outputStream, GlobalMetadata.<SI, SO>builderWithInput(inputStream.getGlobalMetadata(),
        Optional.fromNullable(outputSchema)).build());
  }

  /**
   * Bridges a {@link CompletableFuture} to an rx {@link Single} that emits the converted
   * record in the original envelope (or propagates the failure). Declared static so instances
   * do not retain a hidden reference to the enclosing converter.
   */
  @RequiredArgsConstructor
  private static class SingleAsync<DI, DO> extends Single<RecordEnvelope<DO>> {
    private final RecordEnvelope<DI> originalRecord;
    private final CompletableFuture<DO> completableFuture;

    @Override
    protected void subscribeActual(@NonNull SingleObserver<? super RecordEnvelope<DO>> observer) {
      this.completableFuture.thenAccept(d -> observer.onSuccess(originalRecord.withRecord(d))).exceptionally(t -> {
        observer.onError(t);
        return null;
      });
    }
  }
}
| 4,525 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter/IdentityConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import org.apache.gobblin.configuration.WorkUnitState;
/**
* Implementation of {@link Converter} that returns the inputSchema unmodified and each inputRecord unmodified
*/
public class IdentityConverter<S, D> extends Converter<S, S, D, D> {

  /**
   * Returns the input schema unchanged.
   *
   * @param inputSchema the schema to pass through
   * @return {@code inputSchema}, untouched
   */
  @Override
  public S convertSchema(S inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return inputSchema;
  }

  /**
   * Wraps the input record, unmodified, in a single-element iterable.
   *
   * @param inputRecord the record to pass through
   * @return a {@link SingleRecordIterable} containing exactly {@code inputRecord}
   */
  @Override
  public Iterable<D> convertRecord(S outputSchema, D inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    return new SingleRecordIterable<>(inputRecord);
  }
}
| 4,526 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter/ToAvroConverterBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.WorkUnitState;
/**
* A base abstract {@link Converter} class for data transformation to Avro.
*
* @param <SI> input schema type
* @param <DI> input data type
*/
public abstract class ToAvroConverterBase<SI, DI> extends Converter<SI, Schema, DI, GenericRecord> {
  /**
   * Converts the source schema into an Avro {@link Schema}.
   *
   * @param schema input schema of type {@code SI}
   * @param workUnit the work unit carrying job configuration
   * @return the equivalent Avro schema
   * @throws SchemaConversionException if the schema cannot be converted
   */
  @Override
  public abstract Schema convertSchema(SI schema, WorkUnitState workUnit)
      throws SchemaConversionException;
  /**
   * Converts one input record into zero or more Avro {@link GenericRecord}s conforming to
   * {@code outputSchema}.
   *
   * @param outputSchema the Avro schema produced by {@link #convertSchema}
   * @param inputRecord input record of type {@code DI}
   * @param workUnit the work unit carrying job configuration
   * @return converted Avro records
   * @throws DataConversionException if the record cannot be converted
   */
  @Override
  public abstract Iterable<GenericRecord> convertRecord(Schema outputSchema, DI inputRecord, WorkUnitState workUnit)
      throws DataConversionException;
}
| 4,527 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter/SamplingConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.util.Collections;
import java.util.Random;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.util.ConfigUtils;
/**
* A converter that samples records based on a configured sampling ratio.
*/
@Slf4j
public class SamplingConverter extends Converter<Object, Object, Object, Object> {
public static final String SAMPLE_RATIO_KEY="converter.sample.ratio";
public static final double DEFAULT_SAMPLE_RATIO=0.01; // Sample 1% by default
private final Random random = new Random();
private double sampleRatio = DEFAULT_SAMPLE_RATIO;
@Override
public Converter<Object, Object, Object, Object> init(WorkUnitState workUnit) {
super.init(workUnit);
try {
Config config = ConfigUtils.propertiesToConfig(workUnit.getProperties());
double sampleRatio = ConfigUtils.getDouble(config, SAMPLE_RATIO_KEY, DEFAULT_SAMPLE_RATIO);
Preconditions.checkState(sampleRatio >= 0 && sampleRatio <= 1.0,
"Sample ratio must be between 0.0 and 1.0. Found " + sampleRatio);
this.sampleRatio = sampleRatio;
} catch (Exception e) {
log.warn("Unable to retrieve config", e);
log.warn("Defaulting to default sample ratio: {}", this.sampleRatio);
}
log.debug("Sample ratio configured: {}", this.sampleRatio);
return this;
}
@Override
public Object convertSchema(Object inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
return inputSchema;
}
@Override
public Iterable<Object> convertRecord(Object outputSchema, Object inputRecord, WorkUnitState workUnit)
throws DataConversionException {
if (random.nextDouble() <= this.sampleRatio) {
return new SingleRecordIterable<>(inputRecord);
} else {
return Collections.EMPTY_LIST;
}
}
} | 4,528 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter/SingleRecordIterable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.util.Collections;
import java.util.Iterator;
import java.util.Objects;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterators;
/**
* A type of {@link Iterable}s for a single non-nullable record.
*
* @author Yinan Li
*
* @param <T> record type
*/
public class SingleRecordIterable<T> implements Iterable<T> {

  private final T value;

  /**
   * @param value the single record to iterate over; must not be null
   * @throws NullPointerException if {@code value} is null
   */
  public SingleRecordIterable(T value) {
    // Objects.requireNonNull throws the same NullPointerException as the former
    // Guava Preconditions.checkNotNull, without the third-party dependency.
    this.value = Objects.requireNonNull(value);
  }

  /** Returns an immutable iterator over exactly one element. */
  @Override
  public Iterator<T> iterator() {
    // Collections.singletonList yields an immutable single-element iterator,
    // equivalent to Guava's Iterators.singletonIterator.
    return Collections.singletonList(this.value).iterator();
  }
}
| 4,529 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter/AvroToAvroConverterBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.instrumented.converter.InstrumentedConverter;
/**
* A base abstract {@link Converter} class for data transformation from Avro to Avro.
*/
public abstract class AvroToAvroConverterBase extends InstrumentedConverter<Schema, Schema, GenericRecord, GenericRecord> {
  /**
   * Converts the input Avro schema to the output Avro schema.
   *
   * @param inputSchema the incoming Avro schema
   * @param workUnit the work unit carrying job configuration
   * @return the transformed Avro schema
   * @throws SchemaConversionException if the schema cannot be converted
   */
  @Override
  public abstract Schema convertSchema(Schema inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException;
}
| 4,530 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter/filter/AvroProjectionConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.filter;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.AvroToAvroConverterBase;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.util.AvroUtils;
/**
* A {@link Converter} that removes certain fields from an Avro schema or an Avro record.
*
* @author Ziyang Liu
*/
public class AvroProjectionConverter extends AvroToAvroConverterBase {

  public static final String REMOVE_FIELDS = ".remove.fields";
  public static final String USE_NAMESPACE = "avroProjectionConverter.useNamespace";

  // Initialized to absent so convertSchema()/convertRecordImpl() never dereference null,
  // even when init() is given a work unit without an extract table name (previously this
  // field was left null in that case, causing an NPE in convertSchema()).
  private Optional<AvroSchemaFieldRemover> fieldRemover = Optional.absent();

  /**
   * To remove certain fields from the Avro schema or records of a topic/table, set property
   * {topic/table name}.remove.fields={comma-separated, fully qualified field names} in workUnit.
   *
   * E.g., PageViewEvent.remove.fields=header.memberId,mobileHeader.osVersion
   */
  @Override
  public AvroProjectionConverter init(WorkUnitState workUnit) {
    if (workUnit.contains(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY)) {
      String removeFieldsPropName = workUnit.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY) + REMOVE_FIELDS;
      if (workUnit.getPropAsBoolean(USE_NAMESPACE)) {
        // Optionally qualify the property name with the extract namespace.
        removeFieldsPropName = String.format("%s.%s",
            workUnit.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY), removeFieldsPropName);
      }
      if (workUnit.contains(removeFieldsPropName)) {
        this.fieldRemover = Optional.of(new AvroSchemaFieldRemover(workUnit.getProp(removeFieldsPropName)));
      } else {
        this.fieldRemover = Optional.absent();
      }
    }
    return this;
  }

  /**
   * Remove the specified fields from inputSchema. When no fields are configured for removal,
   * the input schema is returned unchanged (apart from the schema creation time annotation).
   */
  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    Schema outputSchema = inputSchema;
    if (this.fieldRemover.isPresent()) {
      outputSchema = this.fieldRemover.get().removeFields(inputSchema);
    }
    AvroUtils.addSchemaCreationTime(inputSchema, outputSchema);
    return outputSchema;
  }

  /**
   * Convert the schema of inputRecord to outputSchema.
   *
   * @throws DataConversionException if the record cannot be projected onto the output schema
   */
  @Override
  public Iterable<GenericRecord> convertRecordImpl(Schema outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    try {
      return new SingleRecordIterable<>(AvroUtils.convertRecordSchema(inputRecord, outputSchema));
    } catch (IOException e) {
      throw new DataConversionException(e);
    }
  }
}
| 4,531 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter/filter/AvroSchemaFieldRemover.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.filter;
import java.util.List;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.util.AvroSchemaUtils;
import org.apache.gobblin.util.AvroUtils;
/**
* A class that removes specific fields from a (possibly recursive) Avro schema.
*
* @author Ziyang Liu
*/
public class AvroSchemaFieldRemover {

  private static final Splitter SPLITTER_ON_COMMA = Splitter.on(',').trimResults().omitEmptyStrings();
  private static final Splitter SPLITTER_ON_DOT = Splitter.on('.').trimResults().omitEmptyStrings();
  // Sentinel remover with no children: recurses through a schema without removing anything.
  private static final AvroSchemaFieldRemover DO_NOTHING_INSTANCE = new AvroSchemaFieldRemover();

  // Trie of field-name components: "header.memberId" becomes child "header" -> child "memberId".
  private final Map<String, AvroSchemaFieldRemover> children = Maps.newHashMap();

  /**
   * @param fieldNames Field names to be removed from the Avro schema. Contains comma-separated fully-qualified
   * field names, e.g., "header.memberId,mobileHeader.osVersion".
   */
  public AvroSchemaFieldRemover(String fieldNames) {
    this.addChildren(fieldNames);
  }

  private AvroSchemaFieldRemover() {
    this("");
  }

  /** Splits the comma-separated field list and inserts each dotted path into the trie. */
  private void addChildren(String fieldNames) {
    for (String fieldName : SPLITTER_ON_COMMA.splitToList(fieldNames)) {
      List<String> fieldNameComponents = SPLITTER_ON_DOT.splitToList(fieldName);
      if (!fieldNameComponents.isEmpty()) {
        this.addChildren(fieldNameComponents, 0);
      }
    }
  }

  /** Recursively inserts one dotted path (from component {@code level} onward) into the trie. */
  private void addChildren(List<String> fieldNameComponents, int level) {
    Preconditions.checkArgument(fieldNameComponents.size() > level);

    if (!this.children.containsKey(fieldNameComponents.get(level))) {
      this.children.put(fieldNameComponents.get(level), new AvroSchemaFieldRemover());
    }
    if (level < fieldNameComponents.size() - 1) {
      this.children.get(fieldNameComponents.get(level)).addChildren(fieldNameComponents, level + 1);
    }
  }

  /**
   * @param schema The Avro schema where the specified fields should be removed from.
   * @return A new Avro schema with the specified fields removed.
   */
  public Schema removeFields(Schema schema) {
    return removeFields(schema, Maps.<String, Schema> newHashMap());
  }

  /**
   * Recursive worker; {@code schemaMap} caches records already (or currently being) processed,
   * keyed by full name, so recursive schemas terminate.
   */
  private Schema removeFields(Schema schema, Map<String, Schema> schemaMap) {
    switch (schema.getType()) {
      case RECORD:
        if (schemaMap.containsKey(schema.getFullName())) {
          return schemaMap.get(schema.getFullName());
        }
        return this.removeFieldsFromRecords(schema, schemaMap);
      case UNION:
        return this.removeFieldsFromUnion(schema, schemaMap);
      case ARRAY:
        return this.removeFieldsFromArray(schema, schemaMap);
      case MAP:
        return this.removeFieldsFromMap(schema, schemaMap);
      default:
        // Primitive, enum, and fixed types carry no removable fields.
        return schema;
    }
  }

  private Schema removeFieldsFromRecords(Schema schema, Map<String, Schema> schemaMap) {
    Schema newRecord = Schema.createRecord(schema.getName(), schema.getDoc(), schema.getNamespace(), schema.isError());
    AvroSchemaUtils.copySchemaProperties(schema, newRecord);

    // Put an incomplete schema into schemaMap to avoid re-processing a recursive field.
    // The fields in the incomplete schema will be populated once the current schema is completely processed.
    schemaMap.put(schema.getFullName(), newRecord);

    List<Field> newFields = Lists.newArrayList();
    for (Field field : schema.getFields()) {
      if (!this.shouldRemove(field)) {
        // Recurse with this field's sub-trie when one exists; otherwise with the do-nothing
        // remover, which just rebuilds the subtree (still needed to resolve recursive refs).
        AvroSchemaFieldRemover childRemover = this.children.containsKey(field.name())
            ? this.children.get(field.name()) : DO_NOTHING_INSTANCE;
        Field newField = AvroCompatibilityHelper.createSchemaField(field.name(),
            childRemover.removeFields(field.schema(), schemaMap), field.doc(),
            AvroUtils.getCompatibleDefaultValue(field));
        // Avro 1.9 compatible change - replaced deprecated public api getJsonProps with AvroCompatibilityHelper methods
        AvroSchemaUtils.copyFieldProperties(field, newField);
        newFields.add(newField);
      }
    }
    newRecord.setFields(newFields);
    return newRecord;
  }

  private boolean shouldRemove(Field field) {
    // A field should be removed if it is the last component in a specified field name,
    // e.g., "memberId" in "header.memberId".
    return this.children.containsKey(field.name()) && this.children.get(field.name()).children.isEmpty();
  }

  private Schema removeFieldsFromUnion(Schema schema, Map<String, Schema> schemaMap) {
    List<Schema> newUnion = Lists.newArrayList();
    for (Schema unionType : schema.getTypes()) {
      newUnion.add(this.removeFields(unionType, schemaMap));
    }
    return Schema.createUnion(newUnion);
  }

  private Schema removeFieldsFromArray(Schema schema, Map<String, Schema> schemaMap) {
    return Schema.createArray(this.removeFields(schema.getElementType(), schemaMap));
  }

  private Schema removeFieldsFromMap(Schema schema, Map<String, Schema> schemaMap) {
    return Schema.createMap(this.removeFields(schema.getValueType(), schemaMap));
  }
}
| 4,532 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/converter/filter/GobblinTrackingEventFlattenFilterConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.filter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.AvroToAvroConverterBase;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.ConfigUtils;
/**
* Flatten and filter the map field in GobblinTrackingEvent.
*/
public class GobblinTrackingEventFlattenFilterConverter extends AvroToAvroConverterBase {

  /** Config key: list of map-entry names to promote ("flatten") into top-level string fields. */
  public static final String FIELDS_TO_FLATTEN = "fieldsToFlatten";
  /** Config key: name of the generated output schema; falls back to the input schema's name. */
  public static final String NEW_SCHEMA_NAME = "outputSchemaName";
  /** Config key: list of "oldName:newName" entries that rename fields in the output schema. */
  public static final String FIELDS_RENAME_MAP = "fieldsRenameMap";
  private static final char OLD_NEW_NAME_SEPARATOR = ':';

  // Reference schema parsed from GobblinTrackingEvent.avsc on the classpath.
  private Schema gobblinTrackingEventSchema;
  // Original (pre-rename) names of all non-map fields that are copied through from the input record.
  private Set<String> nonMapFields;
  // Name of the single map-typed field in GobblinTrackingEvent whose entries get flattened.
  private String mapFieldName;
  // Output-schema fields, in order: non-map fields first, then the flattened map entries.
  private List<Field> newFields;
  // This converter's config subtree (keys scoped under the converter's simple class name).
  private Config config;
  // old field name -> new field name; the inverse view maps output names back to input names.
  private BiMap<String, String> fieldsRenameMap;

  /**
   * Loads the reference GobblinTrackingEvent schema, parses the rename map and the
   * flatten list from configuration, and precomputes the output-schema field list.
   *
   * @throws RuntimeException if the bundled GobblinTrackingEvent.avsc cannot be parsed
   * @throws IllegalArgumentException if a rename entry is malformed or the schema does not
   *         contain exactly one map-typed field
   */
  @Override
  public Converter init(WorkUnitState workUnitState) {
    try {
      gobblinTrackingEventSchema =
          new Schema.Parser().parse(getClass().getClassLoader().getResourceAsStream("GobblinTrackingEvent.avsc"));
    } catch (IOException e) {
      throw new RuntimeException("Cannot parse GobblinTrackingEvent schema.", e);
    }
    config = ConfigUtils.propertiesToConfig(workUnitState.getProperties()).getConfig(this.getClass().getSimpleName());
    List<String> entryList = ConfigUtils.getStringList(config, FIELDS_RENAME_MAP);
    this.fieldsRenameMap = HashBiMap.create();
    for (String entry : entryList) {
      // Each entry must be exactly "oldName:newName".
      List<String> oldNewNames = Splitter.on(OLD_NEW_NAME_SEPARATOR).omitEmptyStrings().splitToList(entry);
      Preconditions.checkArgument(oldNewNames.size() == 2, "Wrong format for key " + FIELDS_RENAME_MAP);
      this.fieldsRenameMap.put(oldNewNames.get(0), oldNewNames.get(1));
    }
    this.nonMapFields = new HashSet<>();
    this.newFields = new ArrayList<>();
    List<String> mapFieldNames = new ArrayList<>();
    for (Field field : gobblinTrackingEventSchema.getFields()) {
      String curFieldName = field.name();
      if (!field.schema().getType().equals(Schema.Type.MAP)) {
        // Non-map fields are carried over as-is, under their renamed name when one is configured.
        if (fieldsRenameMap.containsKey(curFieldName)) {
          newFields.add(AvroCompatibilityHelper.createSchemaField(fieldsRenameMap.get(curFieldName), field.schema(),
              field.doc(), AvroUtils.getCompatibleDefaultValue(field)));
        } else {
          newFields.add(AvroCompatibilityHelper.createSchemaField(curFieldName, field.schema(), field.doc(),
              AvroUtils.getCompatibleDefaultValue(field)));
        }
        this.nonMapFields.add(curFieldName);
      } else {
        mapFieldNames.add(curFieldName);
      }
    }
    // GobblinTrackingEvent is expected to have exactly one map field (its metadata map).
    Preconditions.checkArgument(mapFieldNames.size() == 1, "Input schema does not match GobblinTrackingEvent.");
    this.mapFieldName = mapFieldNames.get(0);
    // Flattened map entries become optional-string-like top-level fields (string type, empty doc).
    for (String fieldToFlatten : ConfigUtils.getStringList(config, FIELDS_TO_FLATTEN)) {
      String newFieldName =
          this.fieldsRenameMap.containsKey(fieldToFlatten) ? this.fieldsRenameMap.get(fieldToFlatten) : fieldToFlatten;
      newFields.add(AvroCompatibilityHelper.createSchemaField(newFieldName, Schema.create(Schema.Type.STRING), "", null));
    }
    return this;
  }

  /**
   * Builds the flattened output schema from the precomputed field list.
   * The input schema must be reader/writer-compatible with GobblinTrackingEvent.
   */
  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    Preconditions.checkArgument(AvroUtils.checkReaderWriterCompatibility(gobblinTrackingEventSchema, inputSchema, true));
    Schema outputSchema = Schema
        .createRecord(ConfigUtils.getString(config, NEW_SCHEMA_NAME, inputSchema.getName()), inputSchema.getDoc(),
            inputSchema.getNamespace(), inputSchema.isError());
    outputSchema.setFields(newFields);
    AvroUtils.addSchemaCreationTime(inputSchema, outputSchema);
    return outputSchema;
  }

  /**
   * Populates one output record: non-map fields are copied straight from the input record
   * (under their original names); flattened fields are looked up inside the input's map
   * field and default to the empty string when absent.
   */
  @Override
  public Iterable<GenericRecord> convertRecordImpl(Schema outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    GenericRecord genericRecord = new GenericData.Record(outputSchema);
    BiMap<String, String> inversedViewOfFieldsRenameMap = this.fieldsRenameMap.inverse();
    for (Schema.Field field : outputSchema.getFields()) {
      String curFieldName = field.name();
      // Map the (possibly renamed) output field name back to its name in the input record.
      String originalFieldName =
          inversedViewOfFieldsRenameMap.containsKey(curFieldName) ? inversedViewOfFieldsRenameMap.get(curFieldName)
              : curFieldName;
      if (this.nonMapFields.contains(originalFieldName)) {
        genericRecord.put(curFieldName, inputRecord.get(originalFieldName));
      } else {
        // Flattened field: read "<mapFieldName>.<originalFieldName>" from the input, "" if missing.
        genericRecord.put(curFieldName,
            AvroUtils.getFieldValue(inputRecord, Joiner.on('.').join(this.mapFieldName, originalFieldName)).or(""));
      }
    }
    return new SingleRecordIterable<>(genericRecord);
  }
}
| 4,533 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor/DefaultCheckpointableWatermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import lombok.EqualsAndHashCode;
import org.apache.gobblin.util.io.GsonInterfaceAdapter;
/**
* A {@link CheckpointableWatermark} that wraps a {@link ComparableWatermark} specific to a source.
*/
@EqualsAndHashCode
public class DefaultCheckpointableWatermark implements CheckpointableWatermark {

  private static final Gson GSON = GsonInterfaceAdapter.getGson(Object.class);

  // NOTE: field names are part of the serialized form (GSON.toJsonTree(this) in toJson());
  // renaming them would change the persisted watermark JSON.
  private final String source;
  private final ComparableWatermark comparable;

  /**
   * @param source identifier of the source partition/stream this watermark belongs to
   * @param comparableWatermark the wrapped source-specific watermark value
   */
  public DefaultCheckpointableWatermark(String source, ComparableWatermark comparableWatermark) {
    this.source = source;
    this.comparable = comparableWatermark;
  }

  public String getSource() {
    return this.source;
  }

  @Override
  public ComparableWatermark getWatermark() {
    return this.comparable;
  }

  /**
   * Compares the wrapped watermarks. Watermarks from different sources are not comparable.
   *
   * @throws RuntimeException if {@code o} belongs to a different source
   */
  @Override
  public int compareTo(CheckpointableWatermark o) {
    if (!(this.source.equals(o.getSource()))) {
      throw new RuntimeException("Could not compare two checkpointable watermarks because they have different sources "
          + this.source + ":" + o.getSource());
    }
    return this.comparable.compareTo(o.getWatermark());
  }

  @Override
  public JsonElement toJson() {
    return GSON.toJsonTree(this);
  }

  /** Delegates percent-completion computation to the wrapped watermark. */
  @Override
  public short calculatePercentCompletion(Watermark lowWatermark, Watermark highWatermark) {
    return comparable.calculatePercentCompletion(lowWatermark, highWatermark);
  }

  @Override
  public String toString() {
    return String.format("%s : %s ", getSource(), GSON.toJson(this.comparable.toJson()));
  }
}
| 4,534 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor/JobCommitPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
/**
* An enumeration of policies on how output data of jobs/tasks should be committed.
*
* @author Yinan Li
*/
public enum JobCommitPolicy {

  /**
   * Commit output data of a job if and only if all of its tasks successfully complete.
   */
  COMMIT_ON_FULL_SUCCESS("full"),

  /**
   * Commit a job even if some of its tasks fail. It's up to the {@link org.apache.gobblin.publisher.DataPublisher} to
   * decide whether data of failed tasks of the job should be committed or not.
   *
   * @deprecated Use {@link #COMMIT_SUCCESSFUL_TASKS} instead, which provides a less confusing commit semantics,
   * and should cover most use cases when {@link #COMMIT_ON_FULL_SUCCESS} is not appropriate.
   */
  @Deprecated
  COMMIT_ON_PARTIAL_SUCCESS("partial"),

  /**
   * Commit output data of tasks that successfully complete.
   *
   * It is recommended to use this commit policy in conjunction with task-level data publishing (i.e., when
   * {@link ConfigurationKeys#PUBLISH_DATA_AT_JOB_LEVEL} is set to {@code false}).
   */
  COMMIT_SUCCESSFUL_TASKS("successful");

  // Short configuration name of this policy ("full", "partial", "successful").
  private final String name;

  JobCommitPolicy(String name) {
    this.name = name;
  }

  /**
   * Look up a {@link JobCommitPolicy} by its (case-insensitive) policy name.
   *
   * @param name job commit policy name
   * @return the matching {@link JobCommitPolicy}
   * @throws IllegalArgumentException if no policy has the given name
   */
  public static JobCommitPolicy forName(String name) {
    for (JobCommitPolicy policy : values()) {
      if (policy.name.equalsIgnoreCase(name)) {
        return policy;
      }
    }
    throw new IllegalArgumentException(String.format("Job commit policy with name %s is not supported", name));
  }

  /**
   * Resolve the {@link JobCommitPolicy} named by the configuration property
   * {@link ConfigurationKeys#JOB_COMMIT_POLICY_KEY}.
   *
   * @param jobProps a {@link Properties} instance carrying job configuration properties
   * @return the configured policy, or the default when the property is absent
   */
  public static JobCommitPolicy getCommitPolicy(Properties jobProps) {
    String policyName =
        jobProps.getProperty(ConfigurationKeys.JOB_COMMIT_POLICY_KEY, ConfigurationKeys.DEFAULT_JOB_COMMIT_POLICY);
    return forName(policyName);
  }

  /**
   * Resolve the {@link JobCommitPolicy} named by the configuration property
   * {@link ConfigurationKeys#JOB_COMMIT_POLICY_KEY}.
   *
   * @param state a {@link State} instance carrying job configuration properties
   * @return the configured policy, or the default when the property is absent
   */
  public static JobCommitPolicy getCommitPolicy(State state) {
    String policyName =
        state.getProp(ConfigurationKeys.JOB_COMMIT_POLICY_KEY, ConfigurationKeys.DEFAULT_JOB_COMMIT_POLICY);
    return forName(policyName);
  }
}
| 4,535 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor/WorkUnitRetryPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
/**
* An enumeration of retry policies determining under what conditions the failed work unit should retry
*
* @author Yinan Li
*/
public enum WorkUnitRetryPolicy {

  /**
   * Always retry failed/aborted work units regardless of job commit policies.
   */
  ALWAYS("always"),

  /**
   * Only retry failed/aborted work units when
   * {@link JobCommitPolicy#COMMIT_ON_PARTIAL_SUCCESS} is used.
   * This option is useful for being a global policy for a group of jobs that
   * have different commit policies.
   */
  ON_COMMIT_ON_PARTIAL_SUCCESS("onpartial"),

  /**
   * Only retry failed/aborted work units when
   * {@link JobCommitPolicy#COMMIT_ON_FULL_SUCCESS} is used.
   * This option is useful for being a global policy for a group of jobs that
   * have different commit policies.
   */
  ON_COMMIT_ON_FULL_SUCCESS("onfull"),

  /**
   * Never retry failed/aborted work units.
   */
  NEVER("never");

  // Short configuration name of this policy ("always", "onpartial", "onfull", "never").
  private final String name;

  WorkUnitRetryPolicy(String name) {
    this.name = name;
  }

  /**
   * Look up a {@link WorkUnitRetryPolicy} by its (case-insensitive) policy name.
   *
   * @param name work unit retry policy name; must be non-null and non-empty
   * @return the matching {@link WorkUnitRetryPolicy}
   * @throws IllegalArgumentException if {@code name} is null/empty or matches no policy
   */
  public static WorkUnitRetryPolicy forName(String name) {
    Preconditions.checkArgument(!Strings.isNullOrEmpty(name));
    for (WorkUnitRetryPolicy policy : values()) {
      if (policy.name.equalsIgnoreCase(name)) {
        return policy;
      }
    }
    throw new IllegalArgumentException(String.format("Work unit retry policy with name %s is not supported", name));
  }
}
| 4,536 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor/limiter/LimiterConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.limiter;
/**
 * Configuration keys used by the LimitingExtractorDecorator.
 */
public class LimiterConfigurationKeys {

  /** Comma-separated list of work-unit property keys included in limiter reports. */
  public static final String LIMITER_REPORT_KEY_LIST = "limiter.report.key.list";
  /** Default report key list: empty (report nothing). */
  public static final String DEFAULT_LIMITER_REPORT_KEY_LIST = "";

  /** Constants holder; not meant to be instantiated. */
  private LimiterConfigurationKeys() {
  }
}
| 4,537 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor/extract/FlushingExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.ack.Ackable;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metrics.MetricContextUtils;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.runtime.StateStoreBasedWatermarkStorage;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.stream.FlushControlMessage;
import org.apache.gobblin.stream.FlushRecordEnvelope;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.stream.StreamEntity;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.writer.LastWatermarkTracker;
import org.apache.gobblin.writer.WatermarkStorage;
import org.apache.gobblin.writer.WatermarkTracker;
/**
* An abstract class that implements a {@link EventBasedExtractor}. The {@link FlushingExtractor} injects a
* {@link FlushControlMessage} at a frequency determined by {@value #FLUSH_INTERVAL_SECONDS_KEY}.
* The {@link FlushControlMessage} blocks further messages being pushed until the outstanding {@link FlushControlMessage}
* has been acked by the real writer. On a successful ack, the extractor invokes a publish on the underlying DataPublisher,
* which moves the data from the task output location to the final publish location. After a successful publish, the FlushingExtractor
* commits the watermarks to the {@link WatermarkStorage}. Note the watermark committed to the watermark storage is the
* last successfully acked Watermark. Individual extractor implementations should start seeking from the next watermark
* past this watermark. For example, in the case of a Kafka extractor, the consumer should seek to the offset that is
* one more than the last committed watermark.
*
* Individual extractor implementations that extend the FlushingExtractor need to implement the following method:
* <ul>
* <li>readRecordEnvelopeImpl() - to return the next {@link RecordEnvelope}</li>.
* </ul>
*
* The FlushingExtractor allows applications to plug-in pre and post {@link CommitStep}s as actions to be performed before and after
* each commit.
* @param <D> type of {@link RecordEnvelope}
*/
@Slf4j
public abstract class FlushingExtractor<S, D> extends EventBasedExtractor<S, D> {

  /** Config key: comma-separated aliases of {@link CommitStep}s executed before each flush-publish. */
  public static final String GOBBLIN_EXTRACTOR_PRECOMMIT_STEPS = "gobblin.extractor.precommit.steps";
  /** Config key: comma-separated aliases of {@link CommitStep}s executed after each flush-publish. */
  public static final String GOBBLIN_EXTRACTOR_POSTCOMMIT_STEPS = "gobblin.extractor.postcommit.steps";
  /** Config key: interval (seconds) between injected {@link FlushControlMessage}s. */
  public static final String FLUSH_INTERVAL_SECONDS_KEY = "stream.flush.interval.secs";
  public static final Long DEFAULT_FLUSH_INTERVAL_SECONDS = 60L;
  /** Config key: {@link DataPublisher} implementation used to publish task output on flush. */
  public static final String FLUSH_DATA_PUBLISHER_CLASS = "flush.data.publisher.class";
  public static final String DEFAULT_FLUSH_DATA_PUBLISHER_CLASS = "org.apache.gobblin.publisher.BaseDataPublisher";
  /** Gauge name: time (ms) of the most recent watermark commit to watermark storage. */
  public static final String WATERMARK_COMMIT_TIME_METRIC = "state.store.metrics.watermarkCommitTime";
  public static final String COMMIT_STEP_METRIC_PREFIX = "commit.step.";

  // Cache of the most recently committed watermarks, keyed by source (e.g. topic-partition).
  // FIX: initialized to a mutable map so checkPointWatermarks() can cache entries even when
  // the subclass never calls getCommittedWatermarks() (previously a NullPointerException).
  @Getter
  protected Map<String, CheckpointableWatermark> lastCommittedWatermarks = Maps.newHashMap();

  private final List<String> preCommitSteps;
  private final List<String> postCommitSteps;
  // Lazily-instantiated CommitSteps, keyed by alias.
  private final Map<String, CommitStep> commitStepMap = Maps.newHashMap();
  private final AtomicLong watermarkCommitTime = new AtomicLong(0L);
  // Per-step execution times (ms), parallel to preCommitSteps/postCommitSteps.
  private final List<AtomicLong> preCommitStepTimes = Lists.newArrayList();
  private final List<AtomicLong> postCommitStepTimes = Lists.newArrayList();
  protected Config config;
  @Setter
  private Optional<WatermarkStorage> watermarkStorage;
  @Getter
  protected WatermarkTracker watermarkTracker;
  protected Long flushIntervalMillis;
  protected Long timeOfLastFlush = System.currentTimeMillis();
  // Ackable for the most recently injected flush message; awaited before reading further records.
  private FlushAckable lastFlushAckable;
  private boolean hasOutstandingFlush = false;
  private Optional<DataPublisher> flushPublisher = Optional.absent();
  protected WorkUnitState workUnitState;

  public FlushingExtractor(WorkUnitState state) {
    super(state);
    this.workUnitState = state;
    this.config = ConfigFactory.parseProperties(state.getProperties());
    this.flushIntervalMillis =
        ConfigUtils.getLong(config, FLUSH_INTERVAL_SECONDS_KEY, DEFAULT_FLUSH_INTERVAL_SECONDS) * 1000;
    this.watermarkTracker = new LastWatermarkTracker(false);
    this.watermarkStorage = Optional.of(new StateStoreBasedWatermarkStorage(state));
    this.preCommitSteps = ConfigUtils.getStringList(config, GOBBLIN_EXTRACTOR_PRECOMMIT_STEPS);
    this.postCommitSteps = ConfigUtils.getStringList(config, GOBBLIN_EXTRACTOR_POSTCOMMIT_STEPS);
    preCommitSteps.stream().map(commitStep -> new AtomicLong(0L)).forEach(this.preCommitStepTimes::add);
    postCommitSteps.stream().map(commitStep -> new AtomicLong(0L)).forEach(this.postCommitStepTimes::add);
    initFlushPublisher();
    MetricContextUtils.registerGauge(this.getMetricContext(), WATERMARK_COMMIT_TIME_METRIC, this.watermarkCommitTime);
    initCommitStepMetrics(this.preCommitSteps, this.postCommitSteps);
  }

  /** Registers a "commit.step.<alias>.time" gauge for every configured commit step. */
  private void initCommitStepMetrics(List<String>... commitStepLists) {
    for (List<String> commitSteps : commitStepLists) {
      for (String commitStepAlias : commitSteps) {
        String metricName = COMMIT_STEP_METRIC_PREFIX + commitStepAlias + ".time";
        MetricContextUtils.registerGauge(this.getMetricContext(), metricName, new AtomicLong(0L));
      }
    }
  }

  /** Returns a flush message if the flush interval has elapsed since the last flush; null otherwise. */
  private StreamEntity<D> generateFlushMessageIfNecessary() {
    Long currentTime = System.currentTimeMillis();
    if ((currentTime - timeOfLastFlush) > this.flushIntervalMillis) {
      return generateFlushMessage(currentTime);
    }
    return null;
  }

  /** Builds a {@link FlushControlMessage} and records it as the outstanding flush. */
  private StreamEntity<D> generateFlushMessage(Long currentTime) {
    log.debug("Injecting flush control message");
    FlushControlMessage<D> flushMessage = FlushControlMessage.<D>builder().flushReason("Timed flush").build();
    FlushAckable flushAckable = new FlushAckable();
    // add a flush ackable to wait for the flush to complete before returning from this flush call
    flushMessage.addCallBack(flushAckable);
    //Preserve the latest flushAckable.
    this.lastFlushAckable = flushAckable;
    this.hasOutstandingFlush = true;
    timeOfLastFlush = currentTime;
    return flushMessage;
  }

  /**
   * Create an {@link DataPublisher} for publishing after a flush. The {@link DataPublisher} is created through a
   * DataPublisherFactory which makes requests
   * to a {@link org.apache.gobblin.broker.iface.SharedResourcesBroker} to support sharing
   * {@link DataPublisher} instances when appropriate.
   * @return the {@link DataPublisher}
   */
  private void initFlushPublisher() {
    if (this.flushPublisher.isPresent()) {
      return;
    }
    String publisherClassName =
        ConfigUtils.getString(this.config, FLUSH_DATA_PUBLISHER_CLASS, DEFAULT_FLUSH_DATA_PUBLISHER_CLASS);
    try {
      this.flushPublisher = (Optional<DataPublisher>) Optional.of(
          GobblinConstructorUtils.invokeLongestConstructor(Class.forName(publisherClassName), this.workUnitState));
    } catch (ReflectiveOperationException e) {
      log.error("Error in instantiating Data Publisher");
      throw new RuntimeException(e);
    }
  }

  /**
   * Returns the next stream entity. If a previous flush is outstanding, first blocks until it is
   * acked, then runs pre-commit steps, publishes task output, checkpoints watermarks (via
   * {@link #onFlushAck()}) and runs post-commit steps. A new flush message is injected whenever
   * the flush interval has elapsed or the underlying extractor emits a {@link FlushRecordEnvelope}.
   */
  @Override
  public StreamEntity<D> readStreamEntityImpl() throws DataRecordException, IOException {
    //Block until an outstanding flush has been Ack-ed.
    if (this.hasOutstandingFlush) {
      Throwable error = this.lastFlushAckable.waitForAck();
      if (error != null) {
        throw new RuntimeException("Error waiting for flush ack", error);
      }
      //Reset outstandingFlush flag
      this.hasOutstandingFlush = false;
      //Run pre-commit steps
      doCommitSequence(preCommitSteps, true);
      //Publish task output to final publish location.
      publishTaskOutput();
      //Provide a callback to the underlying extractor to handle logic for flush ack.
      onFlushAck();
      //Run post-commit steps
      doCommitSequence(postCommitSteps, false);
    }
    StreamEntity<D> entity = generateFlushMessageIfNecessary();
    if (entity != null) {
      return entity;
    }
    //return the next read record.
    RecordEnvelope<D> recordEnvelope = readRecordEnvelopeImpl();
    if (recordEnvelope instanceof FlushRecordEnvelope) {
      // The underlying extractor explicitly requested a flush.
      StreamEntity<D> flushMessage = generateFlushMessage(System.currentTimeMillis());
      return flushMessage;
    }
    if (recordEnvelope != null) {
      this.watermarkTracker.unacknowledgedWatermark(recordEnvelope.getWatermark());
    }
    return recordEnvelope;
  }

  /**
   * A method that instantiates a {@link CommitStep} given an alias. Default implementation returns
   * null; subclasses using commit steps are expected to override it.
   * @param commitStepAlias alias or fully qualified class name of the {@link CommitStep}.
   * @throws IOException
   */
  public CommitStep initCommitStep(String commitStepAlias, boolean isPrecommit) throws IOException {
    return null;
  }

  /** Executes the given commit steps in order, instantiating them lazily and recording per-step times. */
  private void doCommitSequence(List<String> commitSteps, boolean isPrecommit) throws IOException {
    for (int i = 0; i < commitSteps.size(); i++) {
      long startTimeMillis = System.currentTimeMillis();
      String commitStepAlias = commitSteps.get(i);
      CommitStep commitStep = commitStepMap.get(commitStepAlias);
      if (commitStep == null) {
        commitStep = initCommitStep(commitSteps.get(i), isPrecommit);
        commitStepMap.put(commitStepAlias, commitStep);
      }
      log.info("Calling commit step: {}", commitStepAlias);
      commitStep.execute();
      long commitStepTime = System.currentTimeMillis() - startTimeMillis;
      if (isPrecommit) {
        preCommitStepTimes.get(i).set(commitStepTime);
      } else {
        postCommitStepTimes.get(i).set(commitStepTime);
      }
    }
  }

  /**
   * A callback for the underlying extractor to implement logic for handling the completion of a flush. Underlying
   * Extractor can override this method
   */
  protected void onFlushAck() throws IOException {
    checkPointWatermarks();
  }

  /**
   * A method that returns the latest committed watermarks back to the caller. This method will be typically called
   * by the underlying extractor during the initialization phase to retrieve the latest watermarks.
   * @param checkPointableWatermarkClass a {@link CheckpointableWatermark} class
   * @param partitions a collection of partitions assigned to the extractor
   * @return the latest committed watermarks as a map of (source, watermark) pairs. For example, in the case of a KafkaStreamingExtractor,
   * this map would be a collection of (TopicPartition, KafkaOffset) pairs.
   */
  public Map<String, CheckpointableWatermark> getCommittedWatermarks(Class checkPointableWatermarkClass,
      Iterable<String> partitions) {
    Preconditions.checkArgument(CheckpointableWatermark.class.isAssignableFrom(checkPointableWatermarkClass),
        "Watermark class " + checkPointableWatermarkClass.toString() + " is not a CheckPointableWatermark class");
    try {
      this.lastCommittedWatermarks =
          this.watermarkStorage.get().getCommittedWatermarks(checkPointableWatermarkClass, partitions);
    } catch (Exception e) {
      // failed to get watermarks ... log a warning message
      log.warn("Failed to get watermarks... will start from the beginning", e);
      // FIX: use a mutable map here. Collections.EMPTY_MAP is immutable, and checkPointWatermarks()
      // later puts entries into this map (previously an UnsupportedOperationException on flush).
      this.lastCommittedWatermarks = Maps.newHashMap();
    }
    return this.lastCommittedWatermarks;
  }

  /**
   * Publish task output to final publish location.
   */
  protected void publishTaskOutput() throws IOException {
    if (!this.flushPublisher.isPresent()) {
      throw new IOException("Publish called without a flush publisher");
    }
    this.flushPublisher.get().publish(Collections.singletonList(workUnitState));
  }

  @Override
  public void shutdown() {
    // In case hasOutstandingFlush, we need to manually nack the ackable to make sure the CountDownLatch not hang
    if (this.hasOutstandingFlush) {
      this.lastFlushAckable.nack(new IOException("Extractor already shutdown"));
    }
  }

  /**
   * Persist the watermarks in {@link WatermarkTracker#unacknowledgedWatermarks(Map)} to {@link WatermarkStorage}.
   * The method is called when after a {@link FlushControlMessage} has been acknowledged. To make retrieval of
   * the last committed watermarks efficient, this method caches the watermarks present in the unacknowledged watermark
   * map.
   *
   * @throws IOException
   */
  private void checkPointWatermarks() throws IOException {
    Map<String, CheckpointableWatermark> unacknowledgedWatermarks =
        this.watermarkTracker.getAllUnacknowledgedWatermarks();
    if (this.watermarkStorage.isPresent()) {
      long commitBeginTime = System.currentTimeMillis();
      this.watermarkStorage.get().commitWatermarks(unacknowledgedWatermarks.values());
      this.watermarkCommitTime.set(System.currentTimeMillis() - commitBeginTime);
      //Cache the last committed watermarks
      for (Map.Entry<String, CheckpointableWatermark> entry : unacknowledgedWatermarks.entrySet()) {
        this.lastCommittedWatermarks.put(entry.getKey(), entry.getValue());
      }
    } else {
      log.warn("No watermarkStorage found; Skipping checkpointing");
    }
  }

  /**
   * A method to be implemented by the underlying extractor that returns the next record as an instance of
   * {@link RecordEnvelope}
   * @return the next {@link RecordEnvelope} instance read from the source
   */
  public abstract RecordEnvelope<D> readRecordEnvelopeImpl() throws DataRecordException, IOException;

  /**
   * {@link Ackable} for waiting for the flush control message to be processed
   */
  private static class FlushAckable implements Ackable {
    private Throwable error;
    private final CountDownLatch processed;

    public FlushAckable() {
      this.processed = new CountDownLatch(1);
    }

    @Override
    public void ack() {
      this.processed.countDown();
    }

    @Override
    public void nack(Throwable error) {
      this.error = error;
      this.processed.countDown();
    }

    /**
     * Wait for ack
     * @return any error encountered
     */
    public Throwable waitForAck() {
      try {
        this.processed.await();
        return this.error;
      } catch (InterruptedException e) {
        throw new RuntimeException("interrupted while waiting for ack");
      }
    }
  }
}
| 4,538 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor/extract/EventBasedSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
import org.apache.gobblin.configuration.SourceState;
/**
* A base implementation of {@link org.apache.gobblin.source.Source} for
* event-based sources.
*/
public abstract class EventBasedSource<S, D> extends AbstractSource<S, D> {

  /** No-op: event-based sources hold no resources that need releasing on shutdown. */
  @Override
  public void shutdown(SourceState state) {
  }
}
| 4,539 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor/extract/EventBasedExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.instrumented.extractor.InstrumentedExtractor;
/**
 * A base {@link org.apache.gobblin.source.extractor.Extractor} class for
 * event-based sources.
 *
 * @param <S> the schema type
 * @param <D> the data record type
 */
public abstract class EventBasedExtractor<S, D> extends InstrumentedExtractor<S, D> {
  /**
   * @param workUnitState state of the work unit this extractor serves; passed through to
   *                      {@link InstrumentedExtractor} for metrics instrumentation
   */
  public EventBasedExtractor(WorkUnitState workUnitState) {
    super(workUnitState);
  }
}
| 4,540 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor/extract/LongWatermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
import java.math.RoundingMode;
import com.google.common.base.Preconditions;
import com.google.common.math.LongMath;
import com.google.common.primitives.Longs;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import org.apache.gobblin.source.extractor.ComparableWatermark;
import org.apache.gobblin.source.extractor.Watermark;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.Setter;
import lombok.ToString;
/**
 * A {@link ComparableWatermark} backed by a single {@code long} value.
 *
 * <p>Serialized to JSON via Gson; equality, hash code and string form are
 * generated by Lombok from the wrapped value.</p>
 */
@ToString
@EqualsAndHashCode
public class LongWatermark implements ComparableWatermark {

  private static final Gson GSON = new Gson();

  @Getter
  @Setter
  private long value;

  public LongWatermark(long value) {
    this.value = value;
  }

  /** Advance this watermark by exactly one unit. */
  public void increment() {
    this.value++;
  }

  @Override
  public JsonElement toJson() {
    return GSON.toJsonTree(this);
  }

  @Override
  public short calculatePercentCompletion(Watermark lowWatermark, Watermark highWatermark) {
    Preconditions.checkArgument(lowWatermark instanceof LongWatermark);
    Preconditions.checkArgument(highWatermark instanceof LongWatermark);

    long low = ((LongWatermark) lowWatermark).value;
    long range = ((LongWatermark) highWatermark).value - low;
    long progress = this.value - low;

    Preconditions.checkState(range >= 0);
    Preconditions.checkState(progress >= 0);

    if (range == 0) {
      return 0;
    }

    // Round half-up, then clamp to 100 in case this watermark ran past the high watermark.
    long clamped = Math.min(100, LongMath.divide(progress * 100, range, RoundingMode.HALF_UP));
    return (short) clamped;
  }

  @Override
  public int compareTo(ComparableWatermark other) {
    Preconditions.checkArgument(other instanceof LongWatermark);
    return Long.compare(this.value, ((LongWatermark) other).value);
  }
}
| 4,541 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/source/extractor/extract/AbstractSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract;
import java.util.List;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.JobCommitPolicy;
import org.apache.gobblin.source.extractor.WorkUnitRetryPolicy;
import org.apache.gobblin.source.workunit.ExtractFactory;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.Extract.TableType;
/**
 * A base implementation of {@link org.apache.gobblin.source.Source} that provides default behavior.
 *
 * @author Yinan Li
 */
public abstract class AbstractSource<S, D> implements Source<S, D> {

  private final ExtractFactory extractFactory = new ExtractFactory("yyyyMMddHHmmss");

  /**
   * Get a list of {@link WorkUnitState}s of previous {@link WorkUnit}s subject for retries.
   *
   * <p>
   * Two keys configure work unit retries. The first simply enables or disables retries and
   * suffices for most jobs. The second, more advanced key selects a retry policy, which is
   * useful as a global setting for a group of jobs with differing job commit policies that
   * should only retry under a specific commit policy.
   * </p>
   *
   * @param state Source state
   * @return list of {@link WorkUnitState}s of previous {@link WorkUnit}s subject for retries
   */
  protected List<WorkUnitState> getPreviousWorkUnitStatesForRetry(SourceState state) {
    if (Iterables.isEmpty(state.getPreviousWorkUnitStates())) {
      return ImmutableList.of();
    }

    WorkUnitRetryPolicy retryPolicy = resolveWorkUnitRetryPolicy(state);
    if (retryPolicy == WorkUnitRetryPolicy.NEVER) {
      return ImmutableList.of();
    }

    List<WorkUnitState> uncommitted = collectUncommittedWorkUnitStates(state);
    if (retryPolicy == WorkUnitRetryPolicy.ALWAYS) {
      return uncommitted;
    }

    JobCommitPolicy commitPolicy = JobCommitPolicy
        .forName(state.getProp(ConfigurationKeys.JOB_COMMIT_POLICY_KEY, ConfigurationKeys.DEFAULT_JOB_COMMIT_POLICY));
    boolean policiesMatch =
        (retryPolicy == WorkUnitRetryPolicy.ON_COMMIT_ON_PARTIAL_SUCCESS
            && commitPolicy == JobCommitPolicy.COMMIT_ON_PARTIAL_SUCCESS)
        || (retryPolicy == WorkUnitRetryPolicy.ON_COMMIT_ON_FULL_SUCCESS
            && commitPolicy == JobCommitPolicy.COMMIT_ON_FULL_SUCCESS);

    // An empty list is returned when the job commit policy and the retry policy do not match.
    return policiesMatch ? uncommitted : ImmutableList.<WorkUnitState>of();
  }

  /** Determine the effective work unit retry policy from the source configuration. */
  private static WorkUnitRetryPolicy resolveWorkUnitRetryPolicy(SourceState state) {
    if (state.contains(ConfigurationKeys.WORK_UNIT_RETRY_POLICY_KEY)) {
      // An explicitly configured policy always wins.
      return WorkUnitRetryPolicy.forName(state.getProp(ConfigurationKeys.WORK_UNIT_RETRY_POLICY_KEY));
    }
    // Otherwise fall back to the simple enabled/disabled flag (enabled by default).
    return state.getPropAsBoolean(ConfigurationKeys.WORK_UNIT_RETRY_ENABLED_KEY, true)
        ? WorkUnitRetryPolicy.ALWAYS : WorkUnitRetryPolicy.NEVER;
  }

  /** Collect previous work unit states that were not successfully committed. */
  private static List<WorkUnitState> collectUncommittedWorkUnitStates(SourceState state) {
    List<WorkUnitState> uncommitted = Lists.newArrayList();
    for (WorkUnitState workUnitState : state.getPreviousWorkUnitStates()) {
      if (workUnitState.getWorkingState() == WorkUnitState.WorkingState.COMMITTED) {
        continue;
      }
      if (state.getPropAsBoolean(ConfigurationKeys.OVERWRITE_CONFIGS_IN_STATESTORE,
          ConfigurationKeys.DEFAULT_OVERWRITE_CONFIGS_IN_STATESTORE)) {
        // getPreviousWorkUnitStates returns ImmutableWorkUnitStates, which do not support
        // addAll, so a mutable copy is made before overriding with the source state.
        WorkUnitState workUnitStateCopy = new WorkUnitState(workUnitState.getWorkunit(), state);
        workUnitStateCopy.addAll(workUnitState);
        workUnitStateCopy.overrideWith(state);
        uncommitted.add(workUnitStateCopy);
      } else {
        uncommitted.add(workUnitState);
      }
    }
    return uncommitted;
  }

  /**
   * Get a list of previous {@link WorkUnit}s subject for retries.
   *
   * <p>
   * This method uses {@link AbstractSource#getPreviousWorkUnitStatesForRetry(SourceState)}.
   * </p>
   *
   * @param state Source state
   * @return list of previous {@link WorkUnit}s subject for retries
   */
  protected List<WorkUnit> getPreviousWorkUnitsForRetry(SourceState state) {
    List<WorkUnit> workUnits = Lists.newArrayList();
    for (WorkUnitState workUnitState : getPreviousWorkUnitStatesForRetry(state)) {
      // getWorkunit() returns an ImmutableWorkUnit, so hand back a mutable copy.
      workUnits.add(WorkUnit.copyOf(workUnitState.getWorkunit()));
    }
    return workUnits;
  }

  /** Create a unique {@link Extract} for the given table. */
  public Extract createExtract(TableType type, String namespace, String table) {
    return this.extractFactory.getUniqueExtract(type, namespace, table);
  }

  @Override
  public boolean isEarlyStopped() {
    return false;
  }
}
| 4,542 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/async/Callback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.async;
/**
 * A generic callback invoked when an asynchronous operation completes.
 *
 * @param <T> the type of the result produced on success
 */
public interface Callback<T> {
  /**
   * Invoked when the operation completes successfully.
   * @param result the operation result
   */
  void onSuccess(T result);

  /**
   * Invoked when the operation fails.
   * @param throwable the cause of the failure
   */
  void onFailure(Throwable throwable);
}
| 4,543 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/AcknowledgableRecordEnvelope.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.gobblin.ack.Ackable;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
/**
 * An envelope for record, watermark pairs that need to be acknowledged.
 * @param <D> the type of the record
 */
@Alpha
public class AcknowledgableRecordEnvelope<D> implements Ackable {
  // Both fields are immutable after construction.
  private final D _record;
  private final AcknowledgableWatermark _watermark;

  public AcknowledgableRecordEnvelope(D record, AcknowledgableWatermark watermark) {
    _record = record;
    _watermark = watermark;
  }

  public D getRecord() {
    return _record;
  }

  /**
   * Create a derived record envelope from this one.
   * Derived envelopes share the same watermark.
   * The original envelope must be acknowledged separately.
   *
   * @param record the record the derived envelope carries
   * @return a new envelope sharing this envelope's watermark
   */
  public AcknowledgableRecordEnvelope<D> derivedEnvelope(D record) {
    // Each derived envelope registers one more expected ack on the shared watermark.
    _watermark.incrementAck();
    // Previously used the raw type; now properly parameterized with <D>.
    return new AcknowledgableRecordEnvelope<>(record, _watermark);
  }

  @Override
  public void ack() {
    _watermark.ack();
  }

  /**
   * Get the original watermark that was attached to this record,
   * typically by a {@link org.apache.gobblin.source.extractor.StreamingExtractor}
   */
  public CheckpointableWatermark getWatermark() {
    return _watermark.getCheckpointableWatermark();
  }
}
| 4,544 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/GenericWriteResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
/**
 * A generic write response to wrap responses from other systems.
 * Provides default values for things expected from write responses.
 * @param <T> the type of the wrapped raw response
 */
public class GenericWriteResponse<T> implements WriteResponse<T> {

  private final T rawResponse;

  public GenericWriteResponse(T innerResponse) {
    this.rawResponse = innerResponse;
  }

  @Override
  public T getRawResponse() {
    return this.rawResponse;
  }

  @Override
  public String getStringResponse() {
    return this.rawResponse.toString();
  }

  @Override
  public long bytesWritten() {
    // Byte counts are not tracked for generic responses.
    return -1;
  }
}
| 4,545 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/BatchAsyncDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.Future;
import javax.annotation.Nullable;
import org.apache.gobblin.annotation.Alpha;
/**
 * An interface for implementing Async Writers for Gobblin.
 * The write only handles batched records
 *
 * @param <D> the type of record contained in a {@link Batch}
 */
@Alpha
public interface BatchAsyncDataWriter<D> extends Closeable {

  /**
   * Asynchronously write a record, execute the callback on success/failure
   *
   * @param batch the batch of records to write
   * @param callback invoked when the batch write succeeds or fails; may be null
   * @return a future tracking the outcome of the batch write
   */
  Future<WriteResponse> write(Batch<D> batch, @Nullable WriteCallback callback);

  /**
   * Flushes all pending batches
   *
   * @throws IOException if the pending batches cannot be flushed
   */
  void flush()
      throws IOException;
}
| 4,546 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/MultiWriterWatermarkTracker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import com.google.common.base.Optional;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
/**
 * A helper class that tracks committed and uncommitted watermarks.
 * Useful for implementing {@link WatermarkAwareWriter}s that wrap other {@link WatermarkAwareWriter}s.
 *
 * Note: The current implementation is not meant to be used in a high-throughput scenario
 * (e.g. in the path of a write or a callback). See {@link LastWatermarkTracker}.
 */
public class MultiWriterWatermarkTracker implements WatermarkTracker {

  // Acknowledged watermarks that are candidates for commit, keyed by source.
  private final ConcurrentHashMap<String, Set<CheckpointableWatermark>> candidateCommittables = new ConcurrentHashMap<>();
  // Watermarks still awaiting acknowledgement, keyed by source.
  private final ConcurrentHashMap<String, Set<CheckpointableWatermark>> unacknowledgedWatermarks = new ConcurrentHashMap<>();

  /**
   * Reset current state
   */
  public synchronized void reset() {
    candidateCommittables.clear();
    unacknowledgedWatermarks.clear();
  }

  /**
   * Get the watermark set for {@code key}, creating an empty sorted set on first access.
   * Note: the returned TreeSet itself is not thread-safe; as documented on the class, this
   * tracker is not intended for concurrent high-throughput use.
   */
  private synchronized Set<CheckpointableWatermark> getOrCreate(Map<String, Set<CheckpointableWatermark>> map, String key) {
    return map.computeIfAbsent(key, k -> new TreeSet<>());
  }

  @Override
  public void committedWatermarks(Map<String, CheckpointableWatermark> committedMap) {
    committedWatermarks(committedMap.values());
  }

  public void committedWatermarks(Iterable<CheckpointableWatermark> committedStream) {
    for (CheckpointableWatermark committed : committedStream) {
      committedWatermark(committed);
    }
  }

  @Override
  public void committedWatermark(CheckpointableWatermark committed) {
    getOrCreate(candidateCommittables, committed.getSource()).add(committed);
  }

  @Override
  public void unacknowledgedWatermark(CheckpointableWatermark unacked) {
    getOrCreate(unacknowledgedWatermarks, unacked.getSource()).add(unacked);
  }

  @Override
  public void unacknowledgedWatermarks(Map<String, CheckpointableWatermark> unackedMap) {
    for (CheckpointableWatermark unacked : unackedMap.values()) {
      unacknowledgedWatermark(unacked);
    }
  }

  @Override
  public Map<String, CheckpointableWatermark> getAllCommitableWatermarks() {
    Map<String, CheckpointableWatermark> commitables = new HashMap<>(candidateCommittables.size());
    for (String source : candidateCommittables.keySet()) {
      Optional<CheckpointableWatermark> commitable = getCommittableWatermark(source);
      if (commitable.isPresent()) {
        commitables.put(commitable.get().getSource(), commitable.get());
      }
    }
    return commitables;
  }

  @Override
  public Map<String, CheckpointableWatermark> getAllUnacknowledgedWatermarks() {
    Map<String, CheckpointableWatermark> unackedMap = new HashMap<>(unacknowledgedWatermarks.size());
    for (String source : unacknowledgedWatermarks.keySet()) {
      Optional<CheckpointableWatermark> unacked = getUnacknowledgedWatermark(source);
      if (unacked.isPresent()) {
        unackedMap.put(unacked.get().getSource(), unacked.get());
      }
    }
    return unackedMap;
  }

  /**
   * Get the highest acknowledged watermark for {@code source} that is still strictly below the
   * lowest unacknowledged watermark (i.e. that is safe to commit).
   *
   * @return the committable watermark, or {@link Optional#absent()} if none qualifies
   */
  public Optional<CheckpointableWatermark> getCommittableWatermark(String source) {
    Set<CheckpointableWatermark> unacked = unacknowledgedWatermarks.get(source);
    CheckpointableWatermark minUnacknowledgedWatermark =
        (unacked == null || unacked.isEmpty()) ? null : unacked.iterator().next();

    Set<CheckpointableWatermark> candidates = candidateCommittables.get(source);
    if (candidates == null) {
      // Guard: a source with no committed watermarks previously caused an NPE here.
      return Optional.absent();
    }

    CheckpointableWatermark highestCommitableWatermark = null;
    for (CheckpointableWatermark commitableWatermark : candidates) {
      if ((minUnacknowledgedWatermark == null) || (commitableWatermark.compareTo(minUnacknowledgedWatermark) < 0)) {
        // commitableWatermark < minUnacknowledgedWatermark
        highestCommitableWatermark = commitableWatermark;
      }
    }

    if (highestCommitableWatermark == null) {
      return Optional.absent();
    }
    return Optional.of(highestCommitableWatermark);
  }

  /**
   * Get the lowest unacknowledged watermark for {@code source}.
   *
   * @return the watermark, or {@link Optional#absent()} if the source has none
   */
  public Optional<CheckpointableWatermark> getUnacknowledgedWatermark(String source) {
    Set<CheckpointableWatermark> unacked = unacknowledgedWatermarks.get(source);
    if (unacked == null || unacked.isEmpty()) {
      // Guard: a source that never registered an unacknowledged watermark previously NPE'd here.
      return Optional.absent();
    }
    return Optional.of(unacked.iterator().next());
  }
}
| 4,547 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/WatermarkAwareWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.stream.RecordEnvelope;
/**
 * A DataWriter that is WatermarkAware. Required for implementing writers that
 * can operate in streaming mode.
 *
 * @param <D> the type of record the writer accepts
 */
@Alpha
public interface WatermarkAwareWriter<D> extends DataWriter<D> {

  /**
   *
   * @return true if the writer can support watermark-bearing record envelopes
   */
  default boolean isWatermarkCapable() {
    return true;
  }

  /**
   * Write a record (possibly asynchronously), ack the envelope on success.
   * @param recordEnvelope: a container for the record and the acknowledgable watermark
   * @throws IOException: if this write (or preceding write failures) have caused a fatal exception.
   */
  default void writeEnvelope(RecordEnvelope<D> recordEnvelope) throws IOException {
    // Default behavior: synchronous write, then immediately acknowledge the watermark.
    write(recordEnvelope.getRecord());
    recordEnvelope.ack();
  }
}
| 4,548 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/FutureWrappedWriteCallback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import lombok.extern.slf4j.Slf4j;
/**
 * A helper class that makes it easy to implement a future that is updated by a callback.
 *
 * <p>The future completes when either {@link #onSuccess(WriteResponse)} or
 * {@link #onFailure(Throwable)} fires; waiters are released via {@code notifyAll()}.
 * This future cannot be cancelled.</p>
 */
@Slf4j
public class FutureWrappedWriteCallback implements WriteCallback<Object>, Future<WriteResponse> {
  // Optional callback to chain to when this one fires; may be null.
  private WriteCallback _innerCallback;
  // Result/failure cause; written while holding the monitor before _callbackFired is set.
  private WriteResponse _writeResponse;
  private Throwable _throwable;
  private volatile boolean _callbackFired;

  public FutureWrappedWriteCallback(WriteCallback innerCallback) {
    _writeResponse = null;
    _throwable = null;
    _innerCallback = innerCallback;
  }

  @Override
  public boolean cancel(boolean mayInterruptIfRunning) {
    return false;
  }

  @Override
  public boolean isCancelled() {
    return false;
  }

  @Override
  public boolean isDone() {
    return _callbackFired;
  }

  @Override
  public WriteResponse get()
      throws InterruptedException, ExecutionException {
    try {
      return get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
      throw new ExecutionException(e);
    }
  }

  @Override
  public WriteResponse get(long timeout, TimeUnit unit)
      throws InterruptedException, ExecutionException, TimeoutException {
    // Track the remaining wait against a deadline so the requested timeout is honored across
    // spurious wakeups. The previous implementation passed the raw timeout value to wait(),
    // which ignored the TimeUnit, treated 0 as "wait forever", and could never actually throw
    // TimeoutException (its loop only exited once the callback fired).
    long remainingNanos = unit.toNanos(timeout);
    long deadline = System.nanoTime() + remainingNanos;
    synchronized (this) {
      while (!_callbackFired) {
        if (remainingNanos <= 0) {
          throw new TimeoutException("Timed out waiting for write callback");
        }
        TimeUnit.NANOSECONDS.timedWait(this, remainingNanos);
        // Wrap-safe under two's complement even if the deadline computation overflowed.
        remainingNanos = deadline - System.nanoTime();
      }
    }
    if (_throwable != null) {
      throw new ExecutionException(_throwable);
    }
    return _writeResponse;
  }

  @Override
  public void onSuccess(WriteResponse writeResponse) {
    synchronized (this) {
      // Publish the response inside the lock (previously outside) so waiters released by
      // notifyAll() are guaranteed to observe it.
      _writeResponse = writeResponse;
      _callbackFired = true;
      if (_innerCallback != null) {
        try {
          _innerCallback.onSuccess(writeResponse);
        } catch (Exception e) {
          log.error("Ignoring error thrown in callback", e);
        }
      }
      notifyAll();
    }
  }

  @Override
  public void onFailure(Throwable throwable) {
    synchronized (this) {
      _throwable = throwable;
      _callbackFired = true;
      if (_innerCallback != null) {
        _innerCallback.onFailure(throwable);
      }
      notifyAll();
    }
  }
}
| 4,549 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/RecordMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.gobblin.annotation.Alpha;
@Alpha
/**
 * Immutable metadata describing a record's position within its wrapping batch.
 */
public class RecordMetadata {
  // Position of the record inside its enclosing batch; fixed at construction.
  private final long batchOffset;

  public RecordMetadata(long offset) {
    this.batchOffset = offset;
  }

  /**
   * Get the offset of current record inside of its wrapping batch
   *
   * @return the record's offset within its batch
   */
  public long getOffset() {
    return this.batchOffset;
  }
}
| 4,550 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/BatchAccumulator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.Closeable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.annotation.Alpha;
/**
* An accumulator which groups multiple records into a batch
* How batching strategy works depends on the real implementation
* One way to do this is scanning all the internal batches through an iterator
*/
@Alpha
public abstract class BatchAccumulator<D> implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(BatchAccumulator.class);
private volatile boolean closed = false;
private CountDownLatch closeComplete;
private final AtomicInteger appendsInProgress;
protected BatchAccumulator() {
this.appendsInProgress = new AtomicInteger(0);
this.closeComplete = new CountDownLatch(1);
}
/**
* Append a record to this accumulator
* <p>
* This method should never fail unless there is an exception. A future object should always be returned
* which can be queried to see if this record has been completed (completion means the wrapped batch has been
* sent and received acknowledgement and callback has been invoked). Internally it tracks how many parallel appends
* are in progress by incrementing appendsInProgress counter. The real append logic is inside {@link BatchAccumulator#enqueue(Object, WriteCallback)}
* </p>
*
* @param record : record needs to be added
* @param callback : A callback which will be invoked when the whole batch gets sent and acknowledged
* @return A future object which contains {@link RecordMetadata}
*/
public final Future<RecordMetadata> append (D record, WriteCallback callback) throws InterruptedException {
appendsInProgress.incrementAndGet();
try {
if (this.closed) {
throw new RuntimeException ("Cannot append after accumulator has been closed");
}
return this.enqueue(record, callback);
} finally {
appendsInProgress.decrementAndGet();
}
}
public final void waitClose() {
try {
this.closeComplete.await();
} catch (InterruptedException e) {
LOG.error ("accumulator close is interrupted");
}
LOG.info ("accumulator is closed");
}
public boolean isClosed () {
return closed;
}
/**
* Add a record to this accumulator
* <p>
* This method should never fail unless there is an exception. A future object should always be returned
* which can be queried to see if this record has been completed (completion means the wrapped batch has been
* sent and received acknowledgement and callback has been invoked).
* </p>
*
* @param record : record needs to be added
* @param callback : A callback which will be invoked when the whole batch gets sent and acknowledged
* @return A future object which contains {@link RecordMetadata}
*/
public abstract Future<RecordMetadata> enqueue (D record, WriteCallback callback) throws InterruptedException;
/**
* Wait until all the incomplete batches to be acknowledged
*/
public abstract void flush ();
/**
 * When close is invoked, all new coming records will be rejected.
 * A busy loop here ensures all the ongoing appends are completed before the
 * close latch is released. If the waiting thread is interrupted, the interrupt
 * status is restored after the loop exits instead of being swallowed.
 */
public void close () {
  closed = true;
  boolean interrupted = false;
  // Busy-wait until every in-flight append (tracked by appendsInProgress) has drained.
  while (appendsInProgress.get() > 0) {
    LOG.info("Append is still going on, wait for a while");
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      LOG.error("close is interrupted while appending data is in progress");
      // Remember the interrupt but keep waiting so close semantics are preserved;
      // the flag is restored below rather than being dropped.
      interrupted = true;
    }
  }
  if (interrupted) {
    Thread.currentThread().interrupt();
  }
  this.closeComplete.countDown();
}
/**
 * Release some resource current batch is allocated
 * @param batch the batch whose resources should be released
 */
public abstract void deallocate (Batch<D> batch);
/**
 * @return the next batch available for appending records
 *         (NOTE(review): exact availability semantics depend on the concrete accumulator — confirm with implementations)
 */
public abstract Batch<D> getNextAvailableBatch();
}
| 4,551 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/SyncDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.Closeable;
import java.io.IOException;
import org.apache.gobblin.annotation.Alpha;
/**
 * An interface to implement Synchronous (Blocking) Data writers
 */
@Alpha
public interface SyncDataWriter<D> extends Closeable {
  /**
   * Synchronously write a record
   * @param record the record to write
   * @return WriteResponse from the write
   * @throws IOException if the write fails
   */
  WriteResponse write(D record)
      throws IOException;
  /**
   * Clean up any state held by this writer.
   * NOTE(review): distinct from {@link java.io.Closeable#close()} — confirm intended
   * ordering with implementations.
   * @throws IOException if cleanup fails
   */
  void cleanup()
      throws IOException;
}
| 4,552 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/RecordTooLargeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
/**
 * Thrown when a record is too large to fit into a batch and the in-effect
 * large-message policy requires the write to fail.
 */
public class RecordTooLargeException extends Exception {
  private static final long serialVersionUID = 1L;

  /** Creates an exception with no detail message (preserves the original no-arg behavior). */
  public RecordTooLargeException() {
    super();
  }

  /**
   * Creates an exception with a detail message describing the offending record.
   * @param message the detail message
   */
  public RecordTooLargeException(String message) {
    super(message);
  }

  /**
   * Creates an exception with a detail message and the underlying cause.
   * @param message the detail message
   * @param cause the underlying cause
   */
  public RecordTooLargeException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 4,553 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/WatermarkTracker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.Map;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
/**
 * An interface for a WatermarkTracker. Implementations are expected to serve as helper
 * classes to track watermarks in different use-cases.
 */
public interface WatermarkTracker {
  /** Clear all tracked state (both committed and unacknowledged watermarks). */
  void reset();
  /** Record a batch of committed watermarks, keyed by source. */
  void committedWatermarks(Map<String, CheckpointableWatermark> committedMap);
  /** Record a single committed watermark. */
  void committedWatermark(CheckpointableWatermark committed);
  /** Record a single watermark that has not yet been acknowledged. */
  void unacknowledgedWatermark(CheckpointableWatermark unacked);
  /** Record a batch of unacknowledged watermarks, keyed by source. */
  void unacknowledgedWatermarks(Map<String, CheckpointableWatermark> unackedMap);
  /** @return all watermarks currently considered safe to commit, keyed by source */
  Map<String, CheckpointableWatermark> getAllCommitableWatermarks();
  /** @return all watermarks still awaiting acknowledgement, keyed by source */
  Map<String, CheckpointableWatermark> getAllUnacknowledgedWatermarks();
}
| 4,554 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/PartitionAwareDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import com.google.common.base.Optional;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.writer.partitioner.WriterPartitioner;
/**
 * A {@link DataWriterBuilder} meant to be used together with a {@link WriterPartitioner}. When a
 * partitioner is configured, Gobblin builds one {@link org.apache.gobblin.writer.DataWriter} per
 * partition; every partition's writer is built from an identical builder, differing only in the
 * partition supplied through {@link #forPartition}.
 *
 * <p>
 * Contract with Gobblin:
 * * {@link #validatePartitionSchema(Schema)} is invoked before build(), though possibly on a
 *   different instance of the same builder class than the one used to build the writer.
 * * Writers may be built for any number of distinct (non-equal) partitions within one job
 *   without causing an exception.
 * * A single fork never builds two writers for equal partitions.
 * </p>
 *
 * <p>
 * In short: return false from {@link #validatePartitionSchema} for schemas the writer cannot
 * handle, keep that method free of side effects on the builder, and make sure distinct
 * partitions yield non-colliding writers.
 * </p>
 */
@Slf4j
public abstract class PartitionAwareDataWriterBuilder<S, D> extends DataWriterBuilder<S, D> {
  protected Optional<GenericRecord> partition = Optional.absent();

  /**
   * Record the partition that the built {@link DataWriter} will handle.
   *
   * @param partition a {@link GenericRecord} describing the partition; may be null
   * @return this builder, for chaining
   */
  public PartitionAwareDataWriterBuilder<S, D> forPartition(GenericRecord partition) {
    Optional<GenericRecord> maybePartition = Optional.fromNullable(partition);
    this.partition = maybePartition;
    log.debug("For partition {}", this.partition);
    return this;
  }

  /**
   * Check whether this builder understands the given partition {@link Schema}. A false return
   * causes the execution to fail with an error; a true return promises that the builder can
   * interpret the partitioning schema and handle it correctly.
   *
   * @param partitionSchema {@link Schema} of the {@link GenericRecord}s passed to {@link #forPartition}
   * @return true if the builder understands the schema and can generate partitions from it
   */
  public abstract boolean validatePartitionSchema(Schema partitionSchema);
}
| 4,555 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/AsyncDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.Future;
import javax.annotation.Nullable;
import org.apache.gobblin.annotation.Alpha;
/**
 * An interface for implementing Async Writers for Gobblin.
 */
@Alpha
public interface AsyncDataWriter<D> extends Closeable {
  /**
   * Asynchronously write a record, execute the callback on success/failure
   *
   * @param record the record to write
   * @param callback invoked when the write completes; may be null when no notification is needed
   * @return a future that resolves to the {@link WriteResponse} for this write
   */
  Future<WriteResponse> write(D record, @Nullable WriteCallback callback);
  /**
   * Flushes all pending writes
   *
   * @throws IOException if the flush fails
   */
  void flush()
      throws IOException;
}
| 4,556 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/LastWatermarkTracker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
/**
 * A {@link WatermarkTracker} that only tracks the last committed watermark and doesn't compare
 * previous existing watermarks. Useful for {@link WatermarkAwareWriter}s that are ordered and synchronous
 * in nature.
 */
public class LastWatermarkTracker implements WatermarkTracker {
  // Last committed watermark per source.
  private final Map<String, CheckpointableWatermark> _committedWatermarkMap;
  // Last unacknowledged watermark per source; null when unacked tracking is disabled.
  private final Map<String, CheckpointableWatermark> _unackedWatermarkMap;
  private final boolean ignoreUnacknowledged;

  /**
   * @param ignoreUnacknowledged when true, unacknowledged watermarks are dropped on arrival and
   *                             {@link #getAllUnacknowledgedWatermarks()} always returns an empty map
   */
  public LastWatermarkTracker(boolean ignoreUnacknowledged) {
    _committedWatermarkMap = new ConcurrentHashMap<>();
    // Avoid allocating the unacked map when it will never be used.
    if (ignoreUnacknowledged) {
      _unackedWatermarkMap = null;
    } else {
      _unackedWatermarkMap = new ConcurrentHashMap<>();
    }
    this.ignoreUnacknowledged = ignoreUnacknowledged;
  }

  @Override
  public void reset() {
    _committedWatermarkMap.clear();
    if (_unackedWatermarkMap != null) {
      _unackedWatermarkMap.clear();
    }
  }

  @Override
  public void committedWatermarks(Map<String, CheckpointableWatermark> committedMap) {
    _committedWatermarkMap.putAll(committedMap);
  }

  @Override
  public void committedWatermark(CheckpointableWatermark committed) {
    _committedWatermarkMap.put(committed.getSource(), committed);
  }

  @Override
  public void unacknowledgedWatermark(CheckpointableWatermark unacked) {
    if (_unackedWatermarkMap != null) {
      _unackedWatermarkMap.put(unacked.getSource(), unacked);
    }
  }

  @Override
  public void unacknowledgedWatermarks(Map<String, CheckpointableWatermark> unackedMap) {
    if (_unackedWatermarkMap != null) {
      _unackedWatermarkMap.putAll(unackedMap);
    }
  }

  @Override
  public Map<String, CheckpointableWatermark> getAllCommitableWatermarks() {
    // Defensive snapshot so callers cannot mutate internal state.
    return new HashMap<>(_committedWatermarkMap);
  }

  @Override
  public Map<String, CheckpointableWatermark> getAllUnacknowledgedWatermarks() {
    if (_unackedWatermarkMap != null) {
      return new HashMap<>(_unackedWatermarkMap);
    } else {
      // Typed empty map instead of the raw Collections.EMPTY_MAP (avoids an unchecked conversion).
      return Collections.emptyMap();
    }
  }
}
| 4,557 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/WriteResponseFuture.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * Adapts a {@code Future} of an arbitrary inner type into a {@code Future<WriteResponse>}
 * by mapping the inner result through a {@link WriteResponseMapper}.
 */
public class WriteResponseFuture<InnerType> implements Future<WriteResponse> {
  // Underlying future produced by the native write system.
  private final Future<InnerType> delegate;
  // Translates the native result into a WriteResponse.
  private final WriteResponseMapper<InnerType> mapper;

  public WriteResponseFuture(Future<InnerType> innerFuture, WriteResponseMapper<InnerType> writeResponseMapper) {
    this.delegate = innerFuture;
    this.mapper = writeResponseMapper;
  }

  @Override
  public boolean cancel(boolean mayInterruptIfRunning) {
    return this.delegate.cancel(mayInterruptIfRunning);
  }

  @Override
  public boolean isCancelled() {
    return this.delegate.isCancelled();
  }

  @Override
  public boolean isDone() {
    return this.delegate.isDone();
  }

  @Override
  public WriteResponse get()
      throws InterruptedException, ExecutionException {
    // Block on the inner future, then translate its result.
    InnerType inner = this.delegate.get();
    return this.mapper.wrap(inner);
  }

  @Override
  public WriteResponse get(long timeout, TimeUnit unit)
      throws InterruptedException, ExecutionException, TimeoutException {
    InnerType inner = this.delegate.get(timeout, unit);
    return this.mapper.wrap(inner);
  }
}
| 4,558 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/TrackerBasedWatermarkManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
import org.apache.gobblin.util.ExecutorsUtils;
/**
 * Responsible for managing continuous commit of watermarks.
 * Uses a {@link FineGrainedWatermarkTracker} to keep track of watermarks.
 * Periodically fetches watermarks from the tracker and commits them to WatermarkStorage.
 * TODO: Add metrics monitoring
 */
public class TrackerBasedWatermarkManager implements WatermarkManager {
  private final FineGrainedWatermarkTracker _watermarkTracker;
  private final WatermarkStorage _watermarkStorage;
  // Delay between successive commit runs, in milliseconds.
  private final long _commitIntervalMillis;
  // Single-threaded scheduler that runs _watermarkCommitter periodically.
  private final ScheduledExecutorService _watermarkCommitThreadPool;
  private final Logger _logger;
  // Status of the most recent watermark retrieval from the tracker.
  private final RetrievalStatus _retrievalStatus;
  // Status of the most recent commit to watermark storage.
  private final CommitStatus _commitStatus;

  /**
   * Periodic task: pull committable watermarks from the tracker and persist them to storage.
   * Also invoked one final time directly from {@link #close()} after the scheduler shuts down.
   */
  @VisibleForTesting
  final Runnable _watermarkCommitter = new
      Runnable() {
        @Override
        public void run() {
          long startTime = System.nanoTime();
          Map<String, CheckpointableWatermark> watermarksToCommit = null;
          try {
            _retrievalStatus.onAttempt();
            watermarksToCommit = _watermarkTracker.getCommittableWatermarks();
            _logger.debug("Retrieved watermark {}", watermarksToCommit);
            _retrievalStatus.onSuccess(watermarksToCommit);
          }
          catch (Exception e) {
            // Retrieval failure is recorded and logged; the commit phase below is skipped
            // this round because watermarksToCommit stays null.
            _retrievalStatus.onFailure(e);
            _logger.error("Failed to get watermark", e);
          }
          // Prevent multiple commits concurrently
          // (the scheduled run and the final run from close() execute the same Runnable,
          //  so locking on `this` serializes them).
          synchronized (this) {
            if (watermarksToCommit != null && !watermarksToCommit.isEmpty()) {
              try {
                _commitStatus.onAttempt();
                _logger.info("Will commit watermark {}", watermarksToCommit.toString());
                //TODO: Not checking if this watermark has already been committed successfully.
                _watermarkStorage.commitWatermarks(watermarksToCommit.values());
                _commitStatus.onSuccess(watermarksToCommit);
              } catch (Exception e) {
                _commitStatus.onFailure(e, watermarksToCommit);
                _logger.error("Failed to write watermark", e);
              }
            } else {
              _logger.info("Nothing to commit");
            }
          }
          // Convert elapsed nanoseconds to milliseconds for logging.
          long duration = (System.nanoTime() - startTime)/1000000;
          _logger.info("Duration of run {} milliseconds", duration);
        }
      };

  /**
   * @param storage destination for committed watermarks; must not be null
   * @param watermarkTracker source of committable watermarks; must not be null
   * @param commitIntervalMillis delay between periodic commit runs, in milliseconds
   * @param logger optional logger; a class-level default is used when absent
   */
  public TrackerBasedWatermarkManager(WatermarkStorage storage, FineGrainedWatermarkTracker watermarkTracker,
      long commitIntervalMillis, Optional<Logger> logger) {
    Preconditions.checkArgument(storage != null, "WatermarkStorage cannot be null");
    Preconditions.checkArgument(watermarkTracker != null, "WatermarkTracker cannot be null");
    _watermarkTracker = watermarkTracker;
    _watermarkStorage = storage;
    _commitIntervalMillis = commitIntervalMillis;
    _logger = logger.or(LoggerFactory.getLogger(TrackerBasedWatermarkManager.class));
    _watermarkCommitThreadPool = new ScheduledThreadPoolExecutor(1, ExecutorsUtils.newThreadFactory(logger,
        Optional.of("WatermarkManager-%d")));
    _retrievalStatus = new RetrievalStatus();
    _commitStatus = new CommitStatus();
  }

  /** Start the periodic commit task; the first run happens immediately (initial delay 0). */
  public void start() {
    _watermarkCommitThreadPool
        .scheduleWithFixedDelay(_watermarkCommitter, 0, _commitIntervalMillis, TimeUnit.MILLISECONDS);
  }

  /**
   * Shut down the commit scheduler, wait up to one second for it to terminate, and then
   * run one final commit so the latest watermarks are persisted.
   *
   * @throws IOException if interrupted while waiting for the scheduler to terminate
   *         (the final commit in the finally block still runs in that case)
   */
  @Override
  public void close()
      throws IOException {
    _logger.info("Watermark committer closing");
    _watermarkCommitThreadPool.shutdown();
    try {
      long startTime = System.nanoTime();
      _watermarkCommitThreadPool.awaitTermination(1000, TimeUnit.MILLISECONDS);
      long duration = (System.nanoTime() - startTime)/ 1000000;
      _logger.info("Duration of termination wait was {} milliseconds", duration);
    }
    catch (InterruptedException ie) {
      throw new IOException("Interrupted while waiting for committer to shutdown", ie);
    }
    finally {
      // final watermark commit
      _logger.info("Watermark committer: one last commit before shutting down");
      _watermarkCommitter.run();
    }
  }

  @Override
  public CommitStatus getCommitStatus() {
    return _commitStatus;
  }

  @Override
  public RetrievalStatus getRetrievalStatus() {
    return _retrievalStatus;
  }
}
| 4,559 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/WriteResponseMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
/**
 * An interface to implement a transformer from a system-native response type into a {@link WriteResponse}.
 * @param <InnerType> the native response type of the underlying write system
 */
public interface WriteResponseMapper<InnerType> {
  /**
   * Wrap a native response into a {@link WriteResponse}.
   * @param innerType the native response to wrap
   * @return the wrapped {@link WriteResponse}
   */
  WriteResponse wrap(InnerType innerType);
}
| 4,560 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/Batch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.List;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.annotation.Alpha;
/**
 * An Batch class that contains all the records pushed by {@link BatchAccumulator}.
 * Internally it maintains a callback list which contains all individual callback related to each record.
 * This class also maintains a countdown latch. This is used to block all the threads waiting on the batch
 * completion event. All the blocked threads will be resumed after {@link Batch#done()} is invoked
 * @param <D> record data type
 */
@Alpha
public abstract class Batch<D>{
  // Config key and default for how long a batch may linger before being sent.
  public static final String BATCH_TTL = "writer.batch.ttl";
  public static final long BATCH_TTL_DEFAULT = 1000; // 1 seconds
  // Config key and default for the maximum batch size in bytes.
  public static final String BATCH_SIZE = "writer.batch.size";
  public static final long BATCH_SIZE_DEFAULT = 256 * 1024; // 256KB
  // Config key and default for the capacity of the batch queue.
  public static final String BATCH_QUEUE_CAPACITY = "writer.batch.queue.capacity";
  public static final long BATCH_QUEUE_CAPACITY_DEFAULT = 100;

  // One Thunk per appended record, in append order.
  private final List<Thunk> thunks;
  // Unique id of this batch, assigned from the shared counter below.
  private final long id;
  // Number of records appended so far; also used as the offset handed to each RecordFuture.
  private long recordCount;
  // Released by done(); await() and every RecordFuture block on it until the batch completes.
  private final CountDownLatch latch = new CountDownLatch(1);
  // Generator for batch ids, shared across all Batch instances.
  private static AtomicInteger identifier = new AtomicInteger(0);
  private static final Logger LOG = LoggerFactory.getLogger(Batch.class);

  public Batch () {
    recordCount = 0;
    thunks = new ArrayList<>();
    id = identifier.incrementAndGet();
  }

  /** Mark this batch as complete and wake up all threads blocked in {@link #await()}. */
  public void done() {
    latch.countDown();
  }

  /** @return the unique id of this batch */
  public long getId() {
    return id;
  }

  /**
   * A helper class which wraps the callback
   * It may contain more information related to each individual record
   */
  final private static class Thunk {
    final WriteCallback callback;
    // Size of the corresponding record in bytes, reported via WriteResponse#bytesWritten().
    final int sizeInBytes;
    public Thunk(WriteCallback callback, int sizeInBytes) {
      this.callback = callback;
      this.sizeInBytes = sizeInBytes;
    }
  }

  /**
   * After batch is sent and get acknowledged successfully, this method will be invoked
   */
  public void onSuccess (final WriteResponse response) {
    for (final Thunk thunk: this.thunks) {
      // Each per-record callback sees the batch-level response, but with
      // bytesWritten() reporting that record's own size.
      thunk.callback.onSuccess(new WriteResponse() {
        @Override
        public Object getRawResponse() {
          return response.getRawResponse();
        }
        @Override
        public String getStringResponse() {
          return response.getStringResponse();
        }
        @Override
        public long bytesWritten() {
          return thunk.sizeInBytes;
        }
      });
    }
  }

  /**
   * When batch is sent with an error return, this method will be invoked
   */
  public void onFailure (Throwable throwable) {
    for (Thunk thunk: this.thunks) {
      thunk.callback.onFailure(throwable);
    }
  }

  /**
   * Return all the added records
   */
  public abstract List<D> getRecords();

  /**
   * Return current batch size in bytes
   */
  public abstract long getCurrentSizeInByte();

  /**
   * A method to check if the batch has the room to add a new record
   * @param record: record needs to be added
   * @param largeMessagePolicy: the policy that is in effect for large messages
   * @return Indicates if this batch still have enough space to hold a new record
   */
  public abstract boolean hasRoom (D record, LargeMessagePolicy largeMessagePolicy);

  /**
   * Add a record to this batch
   * <p>
   * Implementation of this method should always ensure the record can be added successfully
   * The contract between {@link Batch#tryAppend(Object, WriteCallback, LargeMessagePolicy)} and this method is this method
   * is responsible for adding record to internal batch memory and the check for the room space is performed
   * by {@link Batch#hasRoom(Object, LargeMessagePolicy)}. All the potential issues for adding a record should
   * already be resolved before this method is invoked.
   * </p>
   *
   * @param record: record needs to be added
   */
  public abstract void append (D record);

  /**
   * Get input record size in bytes
   */
  public abstract int getRecordSizeInByte(D record) ;

  /**
   * Try to add a record to this batch
   * <p>
   * This method first check room space if a give record can be added
   * If there is no space for new record, a null is returned; otherwise {@link Batch#append(Object)}
   * is invoked and {@link RecordFuture} object is returned. User can call get() method on this object
   * which will block current thread until the batch s fully completed (sent and received acknowledgement).
   * The future object also contains meta information where this new record is located, usually an offset inside this batch.
   * </p>
   *
   * @param record : record needs to be added
   * @param callback : A callback which will be invoked when the whole batch gets sent and acknowledged
   * @param largeMessagePolicy : the {@link LargeMessagePolicy} that is in effect for this batch
   * @return A future object which contains {@link RecordMetadata}
   */
  public Future<RecordMetadata> tryAppend(D record, WriteCallback callback, LargeMessagePolicy largeMessagePolicy)
      throws RecordTooLargeException {
    if (!hasRoom(record, largeMessagePolicy)) {
      LOG.debug ("Cannot add {} to previous batch because the batch already has {} bytes",
          record.toString(), getCurrentSizeInByte());
      if (largeMessagePolicy == LargeMessagePolicy.FAIL) {
        throw new RecordTooLargeException();
      }
      // Not a failure: the caller is expected to retry this record against a fresh batch.
      return null;
    }
    this.append(record);
    thunks.add(new Thunk(callback, getRecordSizeInByte(record)));
    // The future completes when this batch's latch is released by done();
    // recordCount doubles as the record's offset within the batch.
    RecordFuture future = new RecordFuture(latch, recordCount);
    recordCount++;
    return future;
  }

  /** Block the calling thread until {@link #done()} releases this batch's latch. */
  public void await() throws InterruptedException{
    LOG.debug("Batch {} waiting for {} records", this.id, this.recordCount);
    this.latch.await();
    LOG.debug("Batch {} done with {} records", this.id, this.recordCount);
  }
}
| 4,561 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/FineGrainedWatermarkTracker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.Closeable;
import java.io.IOException;
import java.util.Deque;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.Meter;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import javax.annotation.Nonnull;
import javax.annotation.concurrent.NotThreadSafe;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumentable;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;
/**
* A class to handle fine-grain watermarks.
* Thread-safe only if you know what you are doing :)
*
*/
@NotThreadSafe
@Slf4j
public class FineGrainedWatermarkTracker implements Instrumentable, Closeable {
public static final String WATERMARK_TRACKER_SWEEP_INTERVAL_MS = "watermark.tracker.sweepIntervalMillis";
public static final Long WATERMARK_TRACKER_SWEEP_INTERVAL_MS_DEFAULT = 100L; // 100 milliseconds
private static final String WATERMARK_TRACKER_STABILITY_CHECK_INTERVAL_MS = "watermark.tracker.stabilityCheckIntervalMillis";
private static final Long WATERMARK_TRACKER_STABILITY_CHECK_INTERVAL_MS_DEFAULT = 10000L; // 10 seconds
private static final String WATERMARK_TRACKER_LAG_THRESHOLD = "watermark.tracker.lagThreshold";
private static final Long WATERMARK_TRACKER_LAG_THRESHOLD_DEFAULT = 100000L; // 100,000 unacked watermarks
private static final String WATERMARKS_INSERTED_METER = "watermark.tracker.inserted";
private static final String WATERMARKS_SWEPT_METER = "watermark.tracker.swept";
private static final long MILLIS_TO_NANOS = 1000 * 1000;
private final Map<String, Deque<AcknowledgableWatermark>> _watermarksMap;
private final long _sweepIntervalMillis;
private final long _stabilityCheckIntervalMillis;
private final long _watermarkLagThreshold;
private ScheduledExecutorService _executorService;
private final boolean _instrumentationEnabled;
private MetricContext _metricContext;
protected final Closer _closer;
private Meter _watermarksInserted;
private Meter _watermarksSwept;
private final AtomicBoolean _started;
private final AtomicBoolean _abort;
private boolean _autoStart = true;
public FineGrainedWatermarkTracker(Config config) {
_watermarksMap = new HashMap<>();
_sweepIntervalMillis = ConfigUtils.getLong(config, WATERMARK_TRACKER_SWEEP_INTERVAL_MS,
WATERMARK_TRACKER_SWEEP_INTERVAL_MS_DEFAULT);
_stabilityCheckIntervalMillis = ConfigUtils.getLong(config, WATERMARK_TRACKER_STABILITY_CHECK_INTERVAL_MS,
WATERMARK_TRACKER_STABILITY_CHECK_INTERVAL_MS_DEFAULT);
_watermarkLagThreshold = ConfigUtils.getLong(config, WATERMARK_TRACKER_LAG_THRESHOLD,
WATERMARK_TRACKER_LAG_THRESHOLD_DEFAULT);
_instrumentationEnabled = GobblinMetrics.isEnabled(config);
_closer = Closer.create();
_metricContext = _closer.register(Instrumented.getMetricContext(ConfigUtils.configToState(config),
this.getClass()));
regenerateMetrics();
_started = new AtomicBoolean(false);
_abort = new AtomicBoolean(false);
_sweeper = new Runnable() {
@Override
public void run() {
sweep();
}
};
_stabilityChecker = new Runnable() {
@Override
public void run() {
checkStability();
}
};
}
@VisibleForTesting
/**
* Set the tracker's auto start behavior. Used for testing only.
*/
void setAutoStart(boolean autoStart) {
_autoStart = autoStart;
}
/**
* Track a watermark.
* Assumptions: Track is called sequentially from the same thread for watermarks that are
* progressively increasing.
*/
public void track(AcknowledgableWatermark acknowledgableWatermark) {
if (!_started.get() && _autoStart) {
start();
}
maybeAbort();
String source = acknowledgableWatermark.getCheckpointableWatermark().getSource();
Deque<AcknowledgableWatermark> sourceWatermarks = _watermarksMap.get(source);
if (sourceWatermarks == null) {
sourceWatermarks = new ConcurrentLinkedDeque<>();
_watermarksMap.put(source, sourceWatermarks);
}
sourceWatermarks.add(acknowledgableWatermark);
_watermarksInserted.mark();
}
private void maybeAbort() throws RuntimeException {
if (_abort.get()) {
throw new RuntimeException("Aborting Watermark tracking");
}
}
/**
* Check if the memory footprint of the data structure is within bounds
*/
private void checkStability() {
if ((_watermarksInserted.getCount() - _watermarksSwept.getCount()) > _watermarkLagThreshold) {
log.error("Setting abort flag for Watermark tracking because the lag between the "
+ "watermarksInserted: {} and watermarksSwept: {} is greater than the threshold: {}",
_watermarksInserted.getCount(), _watermarksSwept.getCount(), _watermarkLagThreshold);
_abort.set(true);
}
}
public Map<String, CheckpointableWatermark> getCommittableWatermarks() {
Map<String, CheckpointableWatermark> commitableWatermarks = new HashMap<String, CheckpointableWatermark>(_watermarksMap.size());
for (Map.Entry<String, Deque<AcknowledgableWatermark>> entry: _watermarksMap.entrySet()) {
String source = entry.getKey();
Iterable<AcknowledgableWatermark> watermarks = entry.getValue();
AcknowledgableWatermark highestWatermark = null;
for (AcknowledgableWatermark watermark: watermarks) {
if (watermark.isAcked()) {
highestWatermark = watermark;
} else {
// hopefully we've already found the highest contiguous acked watermark
break;
}
}
if (highestWatermark != null) {
commitableWatermarks.put(source, highestWatermark.getCheckpointableWatermark());
}
}
return commitableWatermarks;
}
public Map<String, CheckpointableWatermark> getUnacknowledgedWatermarks() {
Map<String, CheckpointableWatermark> unackedWatermarks = new HashMap<>(_watermarksMap.size());
for (Map.Entry<String, Deque<AcknowledgableWatermark>> entry: _watermarksMap.entrySet()) {
String source = entry.getKey();
Iterable<AcknowledgableWatermark> watermarks = entry.getValue();
AcknowledgableWatermark lowestUnacked = null;
for (AcknowledgableWatermark watermark: watermarks) {
if (!watermark.isAcked()) {
lowestUnacked = watermark;
break;
}
}
if (lowestUnacked != null) {
unackedWatermarks.put(source, lowestUnacked.getCheckpointableWatermark());
}
}
return unackedWatermarks;
}
/**
* Schedule the sweeper and stability checkers
*/
public synchronized void start() {
if (!_started.get()) {
_executorService = new ScheduledThreadPoolExecutor(1,
ExecutorsUtils.newThreadFactory(Optional.of(LoggerFactory.getLogger(FineGrainedWatermarkTracker.class))));
_executorService.scheduleAtFixedRate(_sweeper, 0, _sweepIntervalMillis, TimeUnit.MILLISECONDS);
_executorService.scheduleAtFixedRate(_stabilityChecker, 0, _stabilityCheckIntervalMillis, TimeUnit.MILLISECONDS);
}
_started.set(true);
}
@Override
public void close()
throws IOException {
try {
if (_executorService != null) {
_executorService.shutdown();
}
} finally {
_closer.close();
}
}
/**
* A helper method to garbage collect acknowledged watermarks
* @return number of elements collected
*/
@VisibleForTesting
synchronized int sweep() {
long startTime = System.nanoTime();
int swept = 0;
for (Map.Entry<String, Deque<AcknowledgableWatermark>> entry : _watermarksMap.entrySet()) {
Deque<AcknowledgableWatermark> watermarks = entry.getValue();
/**
* Keep popping acked elements from the front as long as their next element is also acked.
* So: Acked_A -> Acked_B -> Not-Acked_C -> ... becomes
* Acked_B -> Not-Acked_C -> ...
*
* We keep the acked element around because that represents the highest contiguous acked watermark.
*/
boolean continueIteration = true;
while (continueIteration) {
Iterator<AcknowledgableWatermark> iter = watermarks.iterator();
if (!iter.hasNext()) { // null
continueIteration = false;
continue;
}
AcknowledgableWatermark first = iter.next();
if (first.isAcked()) {
if (!iter.hasNext()) { // Acked_A -> null
continueIteration = false;
continue;
}
AcknowledgableWatermark second = iter.next();
if ((second != null) && second.isAcked()) { // Acked_A -> Acked_B -> ...
watermarks.pop();
swept++;
} else { // Acked_A -> Not_Acked_B
continueIteration = false;
}
} else { // Not_Acked_A -> ..
continueIteration = false;
}
}
}
long duration = (System.nanoTime() - startTime)/ MILLIS_TO_NANOS;
log.debug("Swept {} watermarks in {} millis", swept, duration);
_watermarksSwept.mark(swept);
return swept;
}
private final Runnable _sweeper;
private final Runnable _stabilityChecker;
@Override
public void switchMetricContext(List<Tag<?>> tags) {
_metricContext = _closer
.register(Instrumented.newContextFromReferenceContext(_metricContext, tags, Optional.<String>absent()));
regenerateMetrics();
}
@Override
public void switchMetricContext(MetricContext context) {
_metricContext = context;
regenerateMetrics();
}
/** Default with no additional tags */
@Override
public List<Tag<?>> generateTags(State state) {
return Lists.newArrayList();
}
@Nonnull
@Override
public MetricContext getMetricContext() {
return _metricContext;
}
@Override
public boolean isInstrumentationEnabled() {
return _instrumentationEnabled;
}
/**
* Generates metrics for the instrumentation of this class.
*/
protected void regenerateMetrics() {
// Set up the metrics that are enabled regardless of instrumentation
_watermarksInserted = _metricContext.meter(WATERMARKS_INSERTED_METER);
_watermarksSwept = _metricContext.meter(WATERMARKS_SWEPT_METER);
}
}
| 4,562 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/WriteResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
/**
* A class for encapsulating the system-native response and general statistics from a write
*/
public interface WriteResponse<T> {
/**
* Get the raw response returned by the underlying write system
*/
T getRawResponse();
/**
* Get a String representation of the response.
*/
String getStringResponse();
/**
* The number of bytes written as part of this write.
* @return The number of bytes written as part of this write.
* -1 if this value is unknown. 0 if nothing was written.
*/
long bytesWritten();
WriteResponse EMPTY = new WriteResponse() {
private final String _emptyResponse = "EmptyResponse";
@Override
public Object getRawResponse() {
return this._emptyResponse;
}
@Override
public String getStringResponse() {
return this._emptyResponse;
}
@Override
public long bytesWritten() {
return -1;
}
};
}
| 4,563 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/AsyncWriterManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import javax.annotation.Nonnull;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.ack.Ackable;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.exception.NonTransientException;
import org.apache.gobblin.instrumented.Instrumentable;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.MetricNames;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.FinalState;
/**
* A Data Writer to use as a base for writing async writers.
* This Data Writer wraps writers that implement the {@link AsyncDataWriter} interface
* and provides the following features:
* 1. Calculate metrics for: number of records in, attempted, successfully written, failed, latency.
* 2. Wait for a specified amount of time on commit for all pending writes to complete.
* 3. Do not proceed if a certain failure threshold is exceeded.
* 4. Support a fixed number of retries on failure of individual records (TODO: retry strategies)
* 5. Support a max number of outstanding / unacknowledged writes
* 6. TODO: Support ordered / unordered write semantics
*
*
*/
public class AsyncWriterManager<D> implements WatermarkAwareWriter<D>, DataWriter<D>, Instrumentable, Closeable, FinalState {
private static final long MILLIS_TO_NANOS = 1000 * 1000;
public static final long COMMIT_TIMEOUT_MILLIS_DEFAULT = 60000L; // 1 minute
public static final long COMMIT_STEP_WAITTIME_MILLIS_DEFAULT = 500; // 500 ms sleep while waiting for commit
public static final double FAILURE_ALLOWANCE_RATIO_DEFAULT = 0.0;
public static final boolean RETRIES_ENABLED_DEFAULT = true;
public static final int NUM_RETRIES_DEFAULT = 5;
public static final int MIN_RETRY_INTERVAL_MILLIS_DEFAULT = 3;
public static final int MAX_OUTSTANDING_WRITES_DEFAULT = 1000;
private final boolean instrumentationEnabled;
private MetricContext metricContext;
protected final Closer closer;
@VisibleForTesting
Meter recordsAttempted;
@VisibleForTesting
Meter recordsIn;
@VisibleForTesting
Meter recordsSuccess;
@VisibleForTesting
Meter recordsFailed;
@VisibleForTesting
Meter bytesWritten;
@VisibleForTesting
Optional<Timer> dataWriterTimer;
private final long commitTimeoutMillis;
private final long commitStepWaitTimeMillis;
private final double failureAllowanceRatio;
private final AsyncDataWriter asyncDataWriter;
private final int numRetries;
private final int minRetryIntervalMillis;
private final Optional<ScheduledThreadPoolExecutor> retryThreadPool;
private final Logger log;
@VisibleForTesting
final Optional<LinkedBlockingQueue<Attempt>> retryQueue;
private final int maxOutstandingWrites;
private final Semaphore writePermits;
private volatile Throwable cachedWriteException = null;
@Override
public boolean isWatermarkCapable() {
return true;
}
/**
* A class to store attempts at writing a record
**/
@Getter
class Attempt {
private final D record;
private final Ackable ackable;
private int attemptNum;
@Setter
private Throwable prevAttemptFailure; // Any failure
@Setter
private long prevAttemptTimestampNanos;
void incAttempt() {
++this.attemptNum;
}
Attempt(D record, Ackable ackable) {
this.record = record;
this.ackable = ackable;
this.attemptNum = 1;
this.prevAttemptFailure = null;
this.prevAttemptTimestampNanos = -1;
}
}
@Override
public void switchMetricContext(List<Tag<?>> tags) {
this.metricContext = this.closer
.register(Instrumented.newContextFromReferenceContext(this.metricContext, tags, Optional.<String>absent()));
regenerateMetrics();
}
@Override
public void switchMetricContext(MetricContext context) {
this.metricContext = context;
regenerateMetrics();
}
/** Default with no additional tags */
@Override
public List<Tag<?>> generateTags(State state) {
return Lists.newArrayList();
}
@Nonnull
@Override
public MetricContext getMetricContext() {
return this.metricContext;
}
@Override
public boolean isInstrumentationEnabled() {
return this.instrumentationEnabled;
}
/**
* TODO: Figure out what this means for checkpointing.
* Get final state for this object. By default this returns an empty {@link org.apache.gobblin.configuration.State}, but
* concrete subclasses can add information that will be added to the task state.
* @return Empty {@link org.apache.gobblin.configuration.State}.
*/
@Override
public State getFinalState() {
return new State();
}
/**
* Generates metrics for the instrumentation of this class.
*/
protected void regenerateMetrics() {
// Set up the metrics that are enabled regardless of instrumentation
this.recordsIn = this.metricContext.meter(MetricNames.DataWriterMetrics.RECORDS_IN_METER);
this.recordsAttempted = this.metricContext.meter(MetricNames.DataWriterMetrics.RECORDS_ATTEMPTED_METER);
this.recordsSuccess = this.metricContext.meter(MetricNames.DataWriterMetrics.SUCCESSFUL_WRITES_METER);
this.recordsFailed = this.metricContext.meter(MetricNames.DataWriterMetrics.FAILED_WRITES_METER);
this.bytesWritten = this.metricContext.meter(MetricNames.DataWriterMetrics.BYTES_WRITTEN_METER);
if (isInstrumentationEnabled()) {
this.dataWriterTimer = Optional.<Timer>of(this.metricContext.timer(MetricNames.DataWriterMetrics.WRITE_TIMER));
} else {
this.dataWriterTimer = Optional.absent();
}
}
protected AsyncWriterManager(Config config, long commitTimeoutMillis, long commitStepWaitTimeMillis,
double failureAllowanceRatio, boolean retriesEnabled, int numRetries, int minRetryIntervalMillis,
int maxOutstandingWrites, AsyncDataWriter asyncDataWriter, Optional<Logger> loggerOptional) {
Preconditions.checkArgument(commitTimeoutMillis > 0, "Commit timeout must be greater than 0");
Preconditions.checkArgument(commitStepWaitTimeMillis > 0, "Commit step wait time must be greater than 0");
Preconditions.checkArgument(commitStepWaitTimeMillis < commitTimeoutMillis, "Commit step wait time must be less "
+ "than commit timeout");
Preconditions.checkArgument((failureAllowanceRatio <= 1.0 && failureAllowanceRatio >= 0),
"Failure Allowance must be a ratio between 0 and 1");
Preconditions.checkArgument(maxOutstandingWrites > 0, "Max outstanding writes must be greater than 0");
Preconditions.checkNotNull(asyncDataWriter, "Async Data Writer cannot be null");
this.log = loggerOptional.isPresent()? loggerOptional.get() : LoggerFactory.getLogger(AsyncWriterManager.class);
this.closer = Closer.create();
State state = ConfigUtils.configToState(config);
this.instrumentationEnabled = GobblinMetrics.isEnabled(state);
this.metricContext = this.closer.register(Instrumented.getMetricContext(state, asyncDataWriter.getClass()));
regenerateMetrics();
this.commitTimeoutMillis = commitTimeoutMillis;
this.commitStepWaitTimeMillis = commitStepWaitTimeMillis;
this.failureAllowanceRatio = failureAllowanceRatio;
this.minRetryIntervalMillis = minRetryIntervalMillis;
if (retriesEnabled) {
this.numRetries = numRetries;
this.retryQueue = Optional.of(new LinkedBlockingQueue<Attempt>());
this.retryThreadPool = Optional.of(new ScheduledThreadPoolExecutor(1,
ExecutorsUtils.newDaemonThreadFactory(Optional.of(this.log), Optional.of("AsyncWriteManagerRetry-%d"))));
this.retryThreadPool.get().execute(new RetryRunner());
} else {
this.numRetries = 0;
this.retryQueue = Optional.absent();
this.retryThreadPool = Optional.absent();
}
this.maxOutstandingWrites = maxOutstandingWrites;
this.writePermits = new Semaphore(maxOutstandingWrites);
this.asyncDataWriter = asyncDataWriter;
this.closer.register(asyncDataWriter);
}
@Override
public void writeEnvelope(RecordEnvelope<D> recordEnvelope)
throws IOException {
write(recordEnvelope.getRecord(), recordEnvelope);
}
@Override
public void write(final D record)
throws IOException {
write(record, Ackable.NoopAckable);
}
private void write(final D record, Ackable ackable)
throws IOException {
maybeThrow();
int spinNum = 0;
try {
while (!this.writePermits.tryAcquire(100, TimeUnit.MILLISECONDS)) {
++spinNum;
if (spinNum % 50 == 0) {
log.info("Spinning due to pending writes, in = " + this.recordsIn.getCount() +
", success = " + this.recordsSuccess.getCount() + ", failed = " + this.recordsFailed.getCount() +
", maxOutstandingWrites = " + this.maxOutstandingWrites);
}
}
} catch (InterruptedException e) {
Throwables.propagate(e);
}
this.recordsIn.mark();
attemptWrite(new Attempt(record, ackable));
}
/**
* Checks if the current operating metrics would imply that
* we're out of SLA on failures permitted
* @return true if any failure would be fatal
*
* TODO: Add windowed stats to test for x% failures in y time window
*
*/
private boolean isFailureFatal() {
return (this.failureAllowanceRatio == 0.0);
}
private void makeNextWriteThrow(Throwable t) {
log.error("Will make next write throw", t);
this.cachedWriteException = t;
}
private void maybeThrow() {
if (this.cachedWriteException != null) {
throw new NonTransientException("Irrecoverable failure on async write", this.cachedWriteException);
}
}
private void attemptWrite(final Attempt attempt) {
this.recordsAttempted.mark();
attempt.setPrevAttemptTimestampNanos(System.nanoTime());
this.asyncDataWriter.write(attempt.record, new WriteCallback<Object>() {
@Override
public void onSuccess(WriteResponse writeResponse) {
try {
attempt.ackable.ack();
AsyncWriterManager.this.recordsSuccess.mark();
if (writeResponse.bytesWritten() > 0) {
AsyncWriterManager.this.bytesWritten.mark(writeResponse.bytesWritten());
}
if (AsyncWriterManager.this.dataWriterTimer.isPresent()) {
AsyncWriterManager.this.dataWriterTimer.get()
.update(System.nanoTime() - attempt.getPrevAttemptTimestampNanos(), TimeUnit.NANOSECONDS);
}
} finally {
AsyncWriterManager.this.writePermits.release();
}
}
@Override
public void onFailure(Throwable throwable) {
long currTime = System.nanoTime();
if (AsyncWriterManager.this.dataWriterTimer.isPresent()) {
AsyncWriterManager.this.dataWriterTimer.get()
.update(currTime - attempt.getPrevAttemptTimestampNanos(), TimeUnit.NANOSECONDS);
}
if (attempt.attemptNum <= AsyncWriterManager.this.numRetries) { // attempts must == numRetries + 1
log.debug("Attempt {} had failure: {}; re-enqueueing record: {}", attempt.attemptNum, throwable.getMessage(),
attempt.getRecord().toString());
attempt.incAttempt();
attempt.setPrevAttemptFailure(throwable);
AsyncWriterManager.this.retryQueue.get().add(attempt);
} else {
try {
AsyncWriterManager.this.recordsFailed.mark();
log.debug("Failed to write record : {}", attempt.getRecord().toString(), throwable);
// If this failure is fatal, set the writer to throw an exception at this point
if (isFailureFatal()) {
makeNextWriteThrow(throwable);
} else {
// since the failure is not fatal, ack the attempt and move forward
attempt.ackable.ack();
}
} finally {
AsyncWriterManager.this.writePermits.release();
}
}
}
});
}
private class RetryRunner implements Runnable {
private final LinkedBlockingQueue<Attempt> retryQueue;
private final long minRetryIntervalNanos;
public RetryRunner() {
Preconditions
.checkArgument(AsyncWriterManager.this.retryQueue.isPresent(), "RetryQueue must be present for RetryRunner");
this.retryQueue = AsyncWriterManager.this.retryQueue.get();
this.minRetryIntervalNanos =
AsyncWriterManager.this.minRetryIntervalMillis * MILLIS_TO_NANOS; // 3 milliseconds in nanos
}
private void maybeSleep(long lastAttemptTimestampNanos)
throws InterruptedException {
long timeDiff = System.nanoTime() - lastAttemptTimestampNanos;
long timeToSleep = this.minRetryIntervalNanos - timeDiff;
if (timeToSleep > 0) {
Thread.sleep(timeToSleep / MILLIS_TO_NANOS);
}
}
@Override
public void run() {
while (true) {
try {
Attempt attempt = this.retryQueue.take();
if (attempt != null) {
maybeSleep(attempt.getPrevAttemptTimestampNanos());
log.debug("Retry thread will retry record: {}", attempt.getRecord().toString());
attemptWrite(attempt);
}
} catch (InterruptedException e) {
log.info("Retry thread interrupted... will exit");
Throwables.propagate(e);
}
}
}
}
@Override
public void cleanup()
throws IOException {
// legacy api ...
}
@Override
public long recordsWritten() {
return this.recordsSuccess.getCount();
}
@Override
public long bytesWritten()
throws IOException {
return this.bytesWritten.getCount();
}
@Override
public void close()
throws IOException {
log.info("Close called");
this.closer.close();
if (this.retryThreadPool.isPresent()) {
// Shutdown the retry thread pool immediately, no use waiting for in-progress retries
ExecutorsUtils.shutdownExecutorService(this.retryThreadPool.get(), Optional.of(log), 1, TimeUnit.MILLISECONDS);
}
log.info("Successfully done closing");
}
@Override
public void commit()
throws IOException {
/**
* Assuming that commit is called only after all calls to write() have completed.
* So not taking extra precautions to prevent concurrent calls to write from happening.
*
*/
log.info("Commit called, will wait for commitTimeout : {} ms", this.commitTimeoutMillis);
long commitTimeoutNanos = commitTimeoutMillis * MILLIS_TO_NANOS;
long commitStartTime = System.nanoTime();
this.asyncDataWriter.flush();
while (((System.nanoTime() - commitStartTime) < commitTimeoutNanos) && (this.recordsIn.getCount() != (
this.recordsSuccess.getCount() + this.recordsFailed.getCount()))) {
log.debug("Commit waiting... records produced: {}, written: {}, failed: {}", this.recordsIn.getCount(),
this.recordsSuccess.getCount(), this.recordsFailed.getCount());
try {
Thread.sleep(this.commitStepWaitTimeMillis);
} catch (InterruptedException e) {
log.info("Interrupted while waiting for commit to complete");
throw new IOException("Interrupted while waiting for commit to complete", e);
}
}
log.debug("Commit done waiting");
long recordsProducedFinal = this.recordsIn.getCount();
long recordsWrittenFinal = this.recordsSuccess.getCount();
long recordsFailedFinal = this.recordsFailed.getCount();
long unacknowledgedWrites = recordsProducedFinal - recordsWrittenFinal - recordsFailedFinal;
long totalFailures = unacknowledgedWrites + recordsFailedFinal;
if (unacknowledgedWrites > 0) // timeout
{
log.warn("Timeout waiting for all writes to be acknowledged. Missing {} responses out of {}",
unacknowledgedWrites, recordsProducedFinal);
}
if (totalFailures > 0 && recordsProducedFinal > 0) {
log.info("Commit failed to write {} records ({} failed, {} unacknowledged) out of {} produced", totalFailures,
recordsFailedFinal, unacknowledgedWrites, recordsProducedFinal);
double failureRatio = (double) totalFailures / (double) recordsProducedFinal;
if (failureRatio > this.failureAllowanceRatio) {
log.error("Aborting because this is greater than the failureAllowance percentage: {}",
this.failureAllowanceRatio * 100.0);
throw new IOException("Failed to meet failureAllowance SLA", this.cachedWriteException);
} else {
log.warn(
"Committing because the observed failure percentage {} is less than the failureAllowance percentage: {}",
(failureRatio * 100.0), (this.failureAllowanceRatio * 100.0));
}
}
log.info("Successfully committed {} records.", recordsWrittenFinal);
}
/**
* Flush the underlying writer.
*/
@Override
public void flush() throws IOException {
this.asyncDataWriter.flush();
}
public static AsyncWriterManagerBuilder builder() {
return new AsyncWriterManagerBuilder();
}
public static class AsyncWriterManagerBuilder {
private Config config = ConfigFactory.empty();
private long commitTimeoutMillis = COMMIT_TIMEOUT_MILLIS_DEFAULT;
private long commitStepWaitTimeMillis = COMMIT_STEP_WAITTIME_MILLIS_DEFAULT;
private double failureAllowanceRatio = FAILURE_ALLOWANCE_RATIO_DEFAULT;
private boolean retriesEnabled = RETRIES_ENABLED_DEFAULT;
private int numRetries = NUM_RETRIES_DEFAULT;
private int maxOutstandingWrites = MAX_OUTSTANDING_WRITES_DEFAULT;
private AsyncDataWriter asyncDataWriter;
private Optional<Logger> logger = Optional.absent();
public AsyncWriterManagerBuilder config(Config config) {
this.config = config;
return this;
}
public AsyncWriterManagerBuilder commitTimeoutMillis(long commitTimeoutMillis) {
this.commitTimeoutMillis = commitTimeoutMillis;
return this;
}
public AsyncWriterManagerBuilder commitStepWaitTimeInMillis(long commitStepWaitTimeMillis) {
this.commitStepWaitTimeMillis = commitStepWaitTimeMillis;
return this;
}
public AsyncWriterManagerBuilder failureAllowanceRatio(double failureAllowanceRatio) {
Preconditions.checkArgument((failureAllowanceRatio <= 1.0 && failureAllowanceRatio >= 0),
"Failure Allowance must be a ratio between 0 and 1");
this.failureAllowanceRatio = failureAllowanceRatio;
return this;
}
public AsyncWriterManagerBuilder asyncDataWriter(AsyncDataWriter asyncDataWriter) {
this.asyncDataWriter = asyncDataWriter;
return this;
}
public AsyncWriterManagerBuilder retriesEnabled(boolean retriesEnabled) {
this.retriesEnabled = retriesEnabled;
return this;
}
public AsyncWriterManagerBuilder numRetries(int numRetries) {
this.numRetries = numRetries;
return this;
}
public AsyncWriterManagerBuilder maxOutstandingWrites(int maxOutstandingWrites) {
this.maxOutstandingWrites = maxOutstandingWrites;
return this;
}
public AsyncWriterManagerBuilder logger(Optional<Logger> logger) {
this.logger = logger;
return this;
}
public AsyncWriterManager build() {
return new AsyncWriterManager(this.config, this.commitTimeoutMillis, this.commitStepWaitTimeMillis,
this.failureAllowanceRatio, this.retriesEnabled, this.numRetries, MIN_RETRY_INTERVAL_MILLIS_DEFAULT,
// TODO: Make this configurable
this.maxOutstandingWrites, this.asyncDataWriter, this.logger);
}
}
}
| 4,564 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/WatermarkTrackerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import com.google.common.base.Preconditions;
/**
* A Factory for handing out WatermarkTracker instances
*/
public class WatermarkTrackerFactory {
public static class TrackerBehavior {
boolean trackAll = true;
boolean trackLast = false;
boolean ignoreUnacknowledged = false;
boolean validated = false;
private TrackerBehavior() {};
public static TrackerBehavior defaultBehavior() {
return new TrackerBehavior();
}
TrackerBehavior trackAll() {
trackAll = true;
trackLast = false;
return this;
}
TrackerBehavior trackLast() {
trackLast = true;
trackAll = false;
return this;
}
TrackerBehavior ignoreUnacked() {
ignoreUnacknowledged = true;
return this;
}
private void validate() {
Preconditions.checkState(this.trackAll || this.trackLast, "Either trackAll or trackLast must be set");
this.validated = true;
}
TrackerBehavior build() {
validate();
return this;
}
}
public static WatermarkTracker getInstance(TrackerBehavior trackerBehavior) {
Preconditions.checkNotNull(trackerBehavior);
// Check requirements are consistent
if (!trackerBehavior.validated) {
trackerBehavior.validate();
}
if (trackerBehavior.trackLast) {
return new LastWatermarkTracker(trackerBehavior.ignoreUnacknowledged);
}
if (trackerBehavior.trackAll) {
return new MultiWriterWatermarkTracker();
}
throw new AssertionError("Could not find an applicable WatermarkTracker for TrackerBehavior : "
+ trackerBehavior.toString());
}
}
| 4,565 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/AcknowledgableWatermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.gobblin.ack.Ackable;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
/**
 * A Checkpointable Watermark that can be acknowledged. Useful for tracking watermark progress.
 *
 * <p>The watermark carries a countdown-style acknowledgement counter: it starts at one,
 * can be raised via {@link #incrementAck()}, and is considered fully acknowledged once
 * {@link #ack()} has brought it back down to zero.
 */
public class AcknowledgableWatermark implements Comparable<AcknowledgableWatermark>, Ackable {

  // The wrapped watermark; immutable after construction.
  private final CheckpointableWatermark watermark;
  // Outstanding acknowledgements; the watermark is fully acked when this reaches zero.
  private final AtomicInteger pendingAcks;

  public AcknowledgableWatermark(CheckpointableWatermark watermark) {
    this.watermark = watermark;
    this.pendingAcks = new AtomicInteger(1); // default number of acks needed is 1
  }

  @Override
  public void ack() {
    // Going below zero means ack() was called more times than increments allowed.
    if (this.pendingAcks.decrementAndGet() < 0) {
      throw new AssertionError("The acknowledgement counter for this watermark went negative. Please file a bug!");
    }
  }

  /** Raises the number of acknowledgements required before this watermark is considered acked. */
  public AcknowledgableWatermark incrementAck() {
    this.pendingAcks.incrementAndGet();
    return this;
  }

  /** @return true once every required acknowledgement has been received. */
  public boolean isAcked() {
    return this.pendingAcks.get() == 0;
  }

  public CheckpointableWatermark getCheckpointableWatermark() {
    return this.watermark;
  }

  @Override
  public int compareTo(AcknowledgableWatermark o) {
    // Ordering is delegated entirely to the wrapped watermark.
    return this.watermark.compareTo(o.watermark);
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }
    if (other == null || other.getClass() != getClass()) {
      return false;
    }
    return this.watermark.equals(((AcknowledgableWatermark) other).watermark);
  }

  @Override
  public int hashCode() {
    return this.watermark.hashCode();
  }
}
| 4,566 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/RecordFuture.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.ExecutionException;
import org.apache.gobblin.annotation.Alpha;
/**
* A future object generated after a record was inserted into a batch
* We can include more meta data about this record in the future
*/
@Alpha
public final class RecordFuture implements Future<RecordMetadata>{
CountDownLatch latch;
long offset;
public RecordFuture (CountDownLatch latch, long offset) {
this.latch = latch;
this.offset = offset;
}
@Override
public boolean isDone() {
return this.latch.getCount() == 0L;
}
@Override
public boolean isCancelled() {
return false;
}
@Override
public boolean cancel(boolean interrupt) {
return false;
}
@Override
public RecordMetadata get() throws InterruptedException, ExecutionException {
this.latch.await();
return new RecordMetadata(this.offset);
}
@Override
public RecordMetadata get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
boolean occurred = this.latch.await(timeout, unit);
if (!occurred) {
throw new TimeoutException("Timeout after waiting for " + TimeUnit.MILLISECONDS.convert(timeout, unit));
}
return new RecordMetadata(this.offset);
}
} | 4,567 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/GenericWriteResponseWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
/**
 * A {@link WriteResponseMapper} that wraps any raw response object in a
 * {@link GenericWriteResponse}.
 *
 * @param <R> type of the raw response being wrapped
 */
public class GenericWriteResponseWrapper<R> implements WriteResponseMapper<R> {

  @Override
  public WriteResponse wrap(R rawResponse) {
    return new GenericWriteResponse<>(rawResponse);
  }
}
| 4,568 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/WatermarkManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.Closeable;
import java.util.Collections;
import java.util.Map;
import lombok.Getter;
import lombok.ToString;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
/**
 * An interface for WatermarkManagers: classes that can keep track of watermarks and commit them to watermark storage.
 */
public interface WatermarkManager extends Closeable {

  /** Starts watermark management (e.g. kicks off any background commit activity). */
  void start();

  /**
   * Thread-safe holder describing the outcome of the most recent watermark retrieval attempts.
   * Timestamps are epoch millis; 0 means the corresponding event has never occurred.
   */
  @Getter
  @ToString
  class RetrievalStatus {
    private long lastWatermarkRetrievalAttemptTimestampMillis = 0;
    private long lastWatermarkRetrievalSuccessTimestampMillis = 0;
    private long lastWatermarkRetrievalFailureTimestampMillis = 0;
    // Collections.emptyMap() is the type-safe replacement for the raw Collections.EMPTY_MAP constant.
    private Map<String, CheckpointableWatermark> lastRetrievedWatermarks = Collections.emptyMap();
    private Exception lastRetrievalException = null;

    /** Records that a retrieval attempt has started. */
    synchronized void onAttempt() {
      this.lastWatermarkRetrievalAttemptTimestampMillis = System.currentTimeMillis();
    }

    /** Records a successful retrieval together with the watermarks that were retrieved. */
    synchronized void onSuccess(Map<String, CheckpointableWatermark> retrievedWatermarks) {
      this.lastWatermarkRetrievalSuccessTimestampMillis = System.currentTimeMillis();
      this.lastRetrievedWatermarks = retrievedWatermarks;
    }

    /** Records a failed retrieval together with the exception that caused it. */
    synchronized void onFailure(Exception retrievalException) {
      this.lastWatermarkRetrievalFailureTimestampMillis = System.currentTimeMillis();
      this.lastRetrievalException = retrievalException;
    }
  }

  /**
   * Thread-safe holder describing the outcome of the most recent watermark commit attempts.
   * Timestamps are epoch millis; 0 means the corresponding event has never occurred.
   */
  @Getter
  @ToString
  class CommitStatus {
    private long lastWatermarkCommitAttemptTimestampMillis = 0;
    private long lastWatermarkCommitSuccessTimestampMillis = 0;
    private long lastWatermarkCommitFailureTimestampMillis = 0;
    // Collections.emptyMap() is the type-safe replacement for the raw Collections.EMPTY_MAP constant.
    private Map<String, CheckpointableWatermark> lastCommittedWatermarks = Collections.emptyMap();
    private Exception lastCommitException = null;
    private Map<String, CheckpointableWatermark> lastFailedWatermarks = Collections.emptyMap();

    /** Records that a commit attempt has started. */
    synchronized void onAttempt() {
      lastWatermarkCommitAttemptTimestampMillis = System.currentTimeMillis();
    }

    /** Records a successful commit together with the watermarks that were committed. */
    synchronized void onSuccess(Map<String, CheckpointableWatermark> watermarksToCommit) {
      lastWatermarkCommitSuccessTimestampMillis = System.currentTimeMillis();
      lastCommittedWatermarks = watermarksToCommit;
    }

    /** Records a failed commit together with the exception and the watermarks that failed. */
    synchronized void onFailure(Exception commitException, Map<String, CheckpointableWatermark> watermarksToCommit) {
      lastWatermarkCommitFailureTimestampMillis = System.currentTimeMillis();
      lastCommitException = commitException;
      lastFailedWatermarks = watermarksToCommit;
    }
  }

  /** @return status of the most recent commit attempts. */
  CommitStatus getCommitStatus();

  /** @return status of the most recent retrieval attempts. */
  RetrievalStatus getRetrievalStatus();
}
| 4,569 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/LargeMessagePolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
/**
 * Describes how single messages that are larger than a batch message limit should be treated.
 */
public enum LargeMessagePolicy {
  /** Drop (and log) messages that exceed the threshold. */
  DROP,
  /** Attempt to deliver messages that exceed the threshold. */
  ATTEMPT,
  /** Throw an error when this happens. */
  FAIL
}
| 4,570 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/SequentialBasedBatchAccumulator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.LinkedList;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.Future;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.util.concurrent.Futures;
import com.typesafe.config.Config;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Sequential and TTL based accumulator.
 * A producer can add a record to this accumulator. It generates a batch on the first record arrival. All subsequent
 * records are added to the same batch until a batch size limit is reached. {@link BufferedAsyncDataWriter} keeps
 * iterating available batches from this accumulator; all completed batches (full sized) will be popped out one by one
 * but an incomplete batch stays in the deque until its TTL has expired.
 */
public class SequentialBasedBatchAccumulator<D> extends BatchAccumulator<D> {

  private static final LargeMessagePolicy DEFAULT_LARGE_MESSAGE_POLICY = LargeMessagePolicy.FAIL;
  private static final Logger LOG = LoggerFactory.getLogger(SequentialBasedBatchAccumulator.class);

  // Head of the deque is the oldest batch and the first candidate for draining.
  private Deque<BytesBoundedBatch<D>> dq = new LinkedList<>();
  // Batches handed to the writer but not yet fully acknowledged; used by flush().
  private IncompleteRecordBatches incomplete = new IncompleteRecordBatches();
  private final long batchSizeLimit;
  private final long memSizeLimit;
  // Fraction of batchSizeLimit actually used, leaving headroom for per-record overhead.
  private final double tolerance = 0.95;
  private final long expireInMilliSecond;
  private final LargeMessagePolicy largeMessagePolicy;

  // A single lock guards the deque; the two conditions implement a bounded producer/consumer handoff.
  private final ReentrantLock dqLock = new ReentrantLock();
  private final Condition notEmpty = dqLock.newCondition();
  private final Condition notFull = dqLock.newCondition();
  private final long capacity;

  public SequentialBasedBatchAccumulator() {
    this (1024 * 256, 1000, 100);
  }

  public SequentialBasedBatchAccumulator(Properties properties) {
    this(ConfigUtils.propertiesToConfig(properties));
  }

  public SequentialBasedBatchAccumulator(Config config) {
    this(ConfigUtils.getLong(config, Batch.BATCH_SIZE,
            Batch.BATCH_SIZE_DEFAULT),
        ConfigUtils.getLong(config, Batch.BATCH_TTL,
            Batch.BATCH_TTL_DEFAULT),
        ConfigUtils.getLong(config, Batch.BATCH_QUEUE_CAPACITY,
            Batch.BATCH_QUEUE_CAPACITY_DEFAULT));
  }

  public SequentialBasedBatchAccumulator(long batchSizeLimit, long expireInMilliSecond, long capacity) {
    this(batchSizeLimit, expireInMilliSecond, capacity, DEFAULT_LARGE_MESSAGE_POLICY);
  }

  /**
   * @param batchSizeLimit maximum batch size in bytes (before applying the tolerance headroom)
   * @param expireInMilliSecond TTL after which a partially-filled batch becomes drainable
   * @param capacity maximum number of batches allowed to queue up
   * @param largeMessagePolicy what to do with a single record bigger than a whole batch
   */
  public SequentialBasedBatchAccumulator(long batchSizeLimit,
                                         long expireInMilliSecond,
                                         long capacity,
                                         LargeMessagePolicy largeMessagePolicy) {
    this.batchSizeLimit = batchSizeLimit;
    this.expireInMilliSecond = expireInMilliSecond;
    this.capacity = capacity;
    this.memSizeLimit = (long) (this.tolerance * this.batchSizeLimit);
    this.largeMessagePolicy = largeMessagePolicy;
  }

  /** @return the current number of queued batches. */
  public long getNumOfBatches () {
    this.dqLock.lock();
    try {
      return this.dq.size();
    } finally {
      this.dqLock.unlock();
    }
  }

  /**
   * Add a data to internal deque data structure.
   * Appends to the tail batch when it has room; otherwise creates a new batch. Blocks while
   * the batch queue is at capacity.
   *
   * @throws InterruptedException if interrupted while waiting for queue space
   * @throws RuntimeException if a record is too large even for an empty batch and the policy is not DROP
   */
  public final Future<RecordMetadata> enqueue (D record, WriteCallback callback) throws InterruptedException {
    final ReentrantLock lock = this.dqLock;
    lock.lock();
    try {
      BytesBoundedBatch<D> last = dq.peekLast();
      if (last != null) {
        Future<RecordMetadata> future = null;
        try {
          future = last.tryAppend(record, callback, this.largeMessagePolicy);
        } catch (RecordTooLargeException e) {
          // Ok if the record was too large for the current batch; a fresh batch is tried below.
        }
        if (future != null) {
          return future;
        }
      }

      // Create a new batch because previous one has no space
      BytesBoundedBatch<D> batch = new BytesBoundedBatch<>(this.memSizeLimit, this.expireInMilliSecond);
      LOG.debug("Batch " + batch.getId() + " is generated");
      Future<RecordMetadata> future = null;
      try {
        future = batch.tryAppend(record, callback, this.largeMessagePolicy);
      } catch (RecordTooLargeException e) {
        // If a new batch also wasn't able to accomodate the new message
        throw new RuntimeException("Failed due to a message that was too large", e);
      }

      // The future might be null, since the largeMessagePolicy might be set to DROP
      if (future == null) {
        assert largeMessagePolicy.equals(LargeMessagePolicy.DROP);
        LOG.error("Batch " + batch.getId() + " is silently marked as complete, dropping a huge record: "
            + record);
        future = Futures.immediateFuture(new RecordMetadata(0));
        callback.onSuccess(WriteResponse.EMPTY);
        return future;
      }

      // if queue is full, we should not add more
      while (dq.size() >= this.capacity) {
        LOG.debug("Accumulator size {} is greater than capacity {}, waiting", dq.size(), this.capacity);
        this.notFull.await();
      }
      dq.addLast(batch);
      incomplete.add(batch);
      this.notEmpty.signal();
      return future;

    } finally {
      lock.unlock();
    }
  }

  /**
   * A threadsafe helper class to hold RecordBatches that haven't been ack'd yet
   * This is mainly used for flush operation so that all the batches waiting in
   * the incomplete set will be blocked
   */
  private final static class IncompleteRecordBatches {
    private final Set<Batch> incomplete;

    public IncompleteRecordBatches() {
      this.incomplete = new HashSet<>();
    }

    public void add(Batch batch) {
      synchronized (incomplete) {
        this.incomplete.add(batch);
      }
    }

    public void remove(Batch batch) {
      synchronized (incomplete) {
        boolean removed = this.incomplete.remove(batch);
        if (!removed)
          throw new IllegalStateException("Remove from the incomplete set failed. This should be impossible.");
      }
    }

    /** @return a snapshot copy of the incomplete batches. */
    public ArrayList<Batch> all() {
      synchronized (incomplete) {
        return new ArrayList<>(this.incomplete);
      }
    }
  }

  /**
   * If accumulator has been closed, below actions are performed:
   *    1) remove and return the first batch if available.
   *    2) return null if queue is empty.
   * If accumulator has not been closed, below actions are performed:
   *    1) if queue.size == 0, block current thread until more batches are available or accumulator is closed.
   *    2) if queue size == 1, remove and return the first batch if TTL has expired, else return null.
   *    3) if queue size > 1, remove and return the first batch element.
   */
  public Batch<D> getNextAvailableBatch () {
    final ReentrantLock lock = SequentialBasedBatchAccumulator.this.dqLock;
    // Acquire outside the try block: if lock() itself failed, unlock() in finally
    // would otherwise throw IllegalMonitorStateException without holding the lock.
    lock.lock();
    try {
      if (SequentialBasedBatchAccumulator.this.isClosed()) {
        return dq.poll();
      } else {
        while (dq.size() == 0) {
          LOG.debug ("ready to sleep because of queue is empty");
          SequentialBasedBatchAccumulator.this.notEmpty.await();
          if (SequentialBasedBatchAccumulator.this.isClosed()) {
            return dq.poll();
          }
        }

        if (dq.size() > 1) {
          BytesBoundedBatch<D> candidate = dq.poll();
          SequentialBasedBatchAccumulator.this.notFull.signal();
          LOG.debug ("retrieve batch " + candidate.getId());
          return candidate;
        }

        if (dq.size() == 1) {
          if (dq.peekFirst().isTTLExpire()) {
            LOG.debug ("Batch " + dq.peekFirst().getId() + " is expired");
            BytesBoundedBatch<D> candidate = dq.poll();
            SequentialBasedBatchAccumulator.this.notFull.signal();
            return candidate;
          } else {
            return null;
          }
        } else {
          throw new RuntimeException("Should never get to here");
        }
      }
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers can observe the interruption.
      Thread.currentThread().interrupt();
      LOG.error("Wait for next batch is interrupted. " + e.toString());
    } finally {
      lock.unlock();
    }
    return null;
  }

  public void close() {
    super.close();
    this.dqLock.lock();
    try {
      // Wake the consumer so it notices the closed state and drains remaining batches.
      this.notEmpty.signal();
    } finally {
      this.dqLock.unlock();
    }
  }

  /**
   * This will block until all the incomplete batches are acknowledged
   */
  public void flush() {
    try {
      ArrayList<Batch> batches = this.incomplete.all();
      int numOutstandingRecords = 0;
      for (Batch batch: batches) {
        numOutstandingRecords += batch.getRecords().size();
      }
      LOG.debug ("Flush called on {} batches with {} records total", batches.size(), numOutstandingRecords);
      for (Batch batch: batches) {
        batch.await();
      }
    } catch (Exception e) {
      if (e instanceof InterruptedException) {
        // Restore the interrupt status so callers can observe the interruption.
        Thread.currentThread().interrupt();
      }
      // Include the exception so flush failures are diagnosable.
      LOG.error ("Error happened while flushing batches", e);
    }
  }

  /**
   * Once batch is acknowledged, remove it from incomplete list
   */
  public void deallocate (Batch<D> batch) {
    this.incomplete.remove(batch);
  }
}
| 4,571 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/WriteCallback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.gobblin.async.Callback;
/**
 * Callback invoked by asynchronous writers once a write request completes.
 *
 * @param <T> type of the raw write response
 */
public interface WriteCallback<T> extends Callback<WriteResponse<T>> {

  /** A no-op callback that ignores both success and failure outcomes. */
  WriteCallback EMPTY = new WriteCallback<Object>() {
    @Override
    public void onSuccess(WriteResponse result) {
      // intentionally a no-op
    }

    @Override
    public void onFailure(Throwable throwable) {
      // intentionally a no-op
    }
  };
}
| 4,572 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/BufferedAsyncDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import org.apache.gobblin.annotation.Alpha;
/**
 * An async data writer which can achieve the buffering and batching capability.
 * Internally it uses {@link BatchAccumulator} to accumulate input records. The records
 * will be converted to batches according to the accumulator implementation. The {@link RecordProcessor}
 * is responsible to iterate all available batches and write each batch via a {@link BatchAsyncDataWriter}
 *
 * @param <D> data record type
 */
@Alpha
public class BufferedAsyncDataWriter<D> implements AsyncDataWriter<D> {

  private static final Logger LOG = LoggerFactory.getLogger(BufferedAsyncDataWriter.class);

  private RecordProcessor<D> processor;
  private BatchAccumulator<D> accumulator;
  // Single-threaded executor running the RecordProcessor drain loop.
  private ExecutorService service;
  // Volatile flag flipped by close() to stop the drain loop's main phase.
  private volatile boolean running;
  private final long startTime;

  // Maps a RecordMetadata future into the generic WriteResponse shape callers expect.
  private static final WriteResponseMapper<RecordMetadata> WRITE_RESPONSE_WRAPPER =
      new WriteResponseMapper<RecordMetadata>() {

        @Override
        public WriteResponse wrap(final RecordMetadata recordMetadata) {
          return new WriteResponse<RecordMetadata>() {
            @Override
            public RecordMetadata getRawResponse() {
              return recordMetadata;
            }

            @Override
            public String getStringResponse() {
              return recordMetadata.toString();
            }

            @Override
            public long bytesWritten() {
              // Don't know how many bytes were written
              return -1;
            }
          };
        }
      };

  public BufferedAsyncDataWriter (BatchAccumulator<D> accumulator, BatchAsyncDataWriter<D> dataWriter) {
    this.processor = new RecordProcessor<>(accumulator, dataWriter);
    this.accumulator = accumulator;
    this.service = Executors.newFixedThreadPool(1);
    this.running = true;
    this.startTime = System.currentTimeMillis();

    try {
      this.service.execute(this.processor);
      // No further tasks are ever submitted; shutting down here lets close()'s
      // awaitTermination() complete once the processor finishes draining.
      this.service.shutdown();
    } catch (Exception e) {
      // Include the exception so startup failures are diagnosable.
      LOG.error("Cannot start internal thread to consume the data", e);
    }
  }

  /**
   * Drain loop: pulls batches from the accumulator and hands them to the underlying writer,
   * first while running, then once more to flush any remainder after close() is requested.
   */
  private class RecordProcessor<D> implements Runnable, Closeable {
    BatchAccumulator<D> accumulator;
    BatchAsyncDataWriter<D> writer;

    public void close() throws IOException {
      this.writer.close();
    }

    public RecordProcessor (BatchAccumulator<D> accumulator, BatchAsyncDataWriter<D> writer) {
      this.accumulator = accumulator;
      this.writer = writer;
    }

    public void run() {
      LOG.info ("Start iterating accumulator");

      /**
       * A main loop to process available batches
       */
      while (running) {
        Batch<D> batch = this.accumulator.getNextAvailableBatch();
        if (batch != null) {
          this.writer.write(batch, this.createBatchCallback(batch));
        }
      }

      // Wait until all the ongoing appends finished
      accumulator.waitClose();
      LOG.info ("Start to process remaining batches");

      /**
       * A main loop to process remaining batches
       */
      Batch<D> batch;
      while ((batch = this.accumulator.getNextAvailableBatch()) != null) {
        this.writer.write(batch, this.createBatchCallback(batch));
      }

      // Wait until all the batches get acknowledged
      accumulator.flush();
    }

    /**
     * A callback which handles the post-processing logic after a batch has sent out and
     * receives the result
     */
    private WriteCallback createBatchCallback (final Batch<D> batch) {
      return new WriteCallback<Object>() {
        @Override
        public void onSuccess(WriteResponse writeResponse) {
          LOG.debug ("Batch " + batch.getId() + " is on success with size " + batch.getCurrentSizeInByte() + " num of record " + batch.getRecords().size());
          batch.onSuccess(writeResponse);
          batch.done();
          accumulator.deallocate(batch);
        }

        @Override
        public void onFailure(Throwable throwable) {
          LOG.info ("Batch " + batch.getId() + " is on failure");
          batch.onFailure(throwable);
          batch.done();
          accumulator.deallocate(batch);
        }
      };
    }
  }

  /**
   * Asynchronously write a record, execute the callback on success/failure
   */
  public Future<WriteResponse> write(D record, @Nullable WriteCallback callback) {
    try {
      Future<RecordMetadata> future = this.accumulator.append(record, callback);
      return new WriteResponseFuture (future, WRITE_RESPONSE_WRAPPER);
    } catch (InterruptedException e) {
      // Restore the interrupt status before surfacing the failure to the caller.
      Thread.currentThread().interrupt();
      throw new RuntimeException(e);
    }
  }

  /**
   * Flushes all pending writes
   */
  public void flush() throws IOException {
    this.accumulator.flush();
  }

  /**
   * Force to close all the resources and drop all the pending requests
   */
  public void forceClose() {
    LOG.info ("Force to close the buffer data writer (not supported)");
  }

  /**
   * Close all the resources, this will be blocked until all the request are sent and gets acknowledged
   */
  public void close() throws IOException {
    try {
      this.running = false;
      this.accumulator.close();
      if (!this.service.awaitTermination(60, TimeUnit.SECONDS)) {
        forceClose();
      } else {
        LOG.info ("Closed properly: elapsed " + (System.currentTimeMillis() - startTime) + " milliseconds");
      }
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers can observe the interruption.
      Thread.currentThread().interrupt();
      LOG.error ("Interruption happened during close " + e.toString());
    } finally {
      this.processor.close();
    }
  }
}
| 4,573 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/BytesBoundedBatch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.gobblin.annotation.Alpha;
import java.util.LinkedList;
import java.util.List;
/**
 * A batch which internally saves each record in memory with bounded size limit.
 * Also a TTL value is configured, so that an eviction policy can be applied from the upper layer.
 *
 * <p>Record sizes are estimated from {@code toString()} length plus a fixed per-record
 * overhead; this is an approximation, not an exact serialized byte count.
 */
@Alpha
public class BytesBoundedBatch<D> extends Batch<D> {
  private RecordMemory memory;
  private final long creationTimestamp;
  private final long memSizeLimit;
  private final long ttlInMilliSeconds;
  // Fixed per-record bookkeeping overhead added to each size estimate.
  public static final int OVERHEAD_SIZE_IN_BYTES = 15;

  public BytesBoundedBatch(long memSizeLimit, long ttlInMilliSeconds) {
    this.creationTimestamp = System.currentTimeMillis();
    this.memory = new RecordMemory();
    this.memSizeLimit = memSizeLimit;
    this.ttlInMilliSeconds = ttlInMilliSeconds;
  }

  /** @return true once the batch has lived longer than its TTL. */
  public boolean isTTLExpire() {
    return (System.currentTimeMillis() - creationTimestamp) >= ttlInMilliSeconds;
  }

  // Estimated in-memory footprint of a record, including the fixed overhead.
  private long getInternalSize(D record) {
    // Access the static constant directly rather than through 'this'.
    return record.toString().length() + OVERHEAD_SIZE_IN_BYTES;
  }

  /** Holds the accumulated records and tracks their estimated total byte size. */
  public class RecordMemory {
    private List<D> records;
    private long byteSize;

    public RecordMemory () {
      byteSize = 0;
      records = new LinkedList<>();
    }

    void append (D record) {
      byteSize += BytesBoundedBatch.this.getInternalSize(record);
      records.add(record);
    }

    boolean hasRoom (D record, LargeMessagePolicy largeMessagePolicy) {
      if (records.isEmpty() && largeMessagePolicy == LargeMessagePolicy.ATTEMPT) {
        // there is always space for one record, no matter how big :)
        return true;
      }
      long recordLen = BytesBoundedBatch.this.getInternalSize(record);
      return (byteSize + recordLen) <= BytesBoundedBatch.this.memSizeLimit;
    }

    long getByteSize() {
      return byteSize;
    }

    List<D> getRecords() {
      return records;
    }
  }

  public List<D> getRecords() {
    return memory.getRecords();
  }

  public boolean hasRoom (D object, LargeMessagePolicy largeMessagePolicy) {
    return memory.hasRoom(object, largeMessagePolicy);
  }

  public void append (D object) {
    memory.append(object);
  }

  /** @return estimated size of a single record, excluding the fixed overhead. */
  public int getRecordSizeInByte (D record) {
    return record.toString().length();
  }

  public long getCurrentSizeInByte() {
    return memory.getByteSize();
  }
}
| 4,574 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/WatermarkAwareWriterWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import com.google.common.base.Optional;
import org.apache.gobblin.stream.RecordEnvelope;
/**
 * A convenience wrapper class for WatermarkAware writers.
 * Delegates watermark-related calls to an underlying {@link WatermarkAwareWriter}
 * that must be installed via {@link #setWatermarkAwareWriter} before use.
 */
public abstract class WatermarkAwareWriterWrapper<D> extends WriterWrapper<D> implements WatermarkAwareWriter<D> {
  // Delegate writer; absent until setWatermarkAwareWriter is called.
  private Optional<WatermarkAwareWriter> underlying = Optional.absent();

  public final void setWatermarkAwareWriter(WatermarkAwareWriter watermarkAwareWriter) {
    this.underlying = Optional.of(watermarkAwareWriter);
  }

  public final boolean isWatermarkCapable() {
    return this.underlying.get().isWatermarkCapable();
  }

  public void writeEnvelope(final RecordEnvelope<D> recordEnvelope) throws IOException {
    this.underlying.get().writeEnvelope(recordEnvelope);
  }
}
| 4,575 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/writer/partitioner/WriterPartitioner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.partitioner;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
/**
 * Partitions records in the writer phase.
 *
 * <p>Implementations must have a constructor with signature
 * <init>({@link org.apache.gobblin.configuration.State}).
 *
 * @param <D> type of the records being partitioned
 */
public interface WriterPartitioner<D> {

  /**
   * @return the schema that {@link GenericRecord}s returned by {@link #partitionForRecord} will have.
   */
  Schema partitionSchema();

  /**
   * Returns the partition that the input record belongs to. If
   * partitionForRecord(record1).equals(partitionForRecord(record2)), then record1 and record2
   * belong to the same partition.
   *
   * @param record input to compute partition for.
   * @return {@link GenericRecord} representing the partition the record belongs to.
   */
  GenericRecord partitionForRecord(D record);
}
| 4,576 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/compression/CompressionConfigParser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compression;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
* Functions for parsing compression configuration
*/
public class CompressionConfigParser {
  // Key under which the compression type is stored in the returned properties map.
  private static final String COMPRESSION_TYPE_KEY = "type";

  /**
   * Retrieve configuration settings for a given branch.
   *
   * @param taskState Task state
   * @param numBranches # of branches in the state
   * @param branch Branch to retrieve
   * @return Map of properties for compression, or null if no writer codec type is
   *         configured for the given branch (callers treat null as "no compression")
   */
  public static Map<String, Object> getConfigForBranch(State taskState, int numBranches, int branch) {
    String typePropertyName =
        ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_CODEC_TYPE, numBranches, branch);
    String compressionType = taskState.getProp(typePropertyName);
    if (compressionType == null) {
      // No writer codec configured for this branch: signal "no compression" to the caller.
      return null;
    }

    return ImmutableMap.<String, Object>of(COMPRESSION_TYPE_KEY, compressionType);
  }

  /**
   * Return compression type
   *
   * @param properties Compression config settings
   * @return String representing compression type, null if none exists
   */
  public static String getCompressionType(Map<String, Object> properties) {
    return (String) properties.get(COMPRESSION_TYPE_KEY);
  }

  // Utility class; not meant to be instantiated.
  private CompressionConfigParser() {
  }
}
| 4,577 |
0 | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/main/java/org/apache/gobblin/compression/CompressionFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compression;
import java.util.Map;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.codec.GzipCodec;
import org.apache.gobblin.codec.StreamCodec;
/**
* This class has logic to create compression stream codecs based on configuration parameters.
* Note: Interface will likely change to support dynamic registration of compression codecs
*/
@Alpha
public class CompressionFactory {

  /**
   * Builds a {@link StreamCodec} for the compression type named in the given properties.
   *
   * @param properties compression configuration, as produced by
   *                   {@link CompressionConfigParser#getConfigForBranch}
   * @return a new StreamCodec for the configured compression type
   * @throws IllegalArgumentException if no compression type is present in the
   *         properties, or the type is not supported
   */
  public static StreamCodec buildStreamCompressor(Map<String, Object> properties) {
    String type = CompressionConfigParser.getCompressionType(properties);
    if (type == null) {
      // Fail fast with a clear message instead of an opaque NPE from switching on null.
      throw new IllegalArgumentException("No compression type found in compression properties");
    }
    switch (type) {
      case GzipCodec.TAG:
        return new GzipCodec();
      default:
        throw new IllegalArgumentException("Can't build compressor of type " + type);
    }
  }

  private CompressionFactory() {
    // can't instantiate
  }
}
| 4,578 |
0 | Create_ds/gobblin/gobblin-core-base/src/jmh/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core-base/src/jmh/java/org/apache/gobblin/writer/FineGrainedWatermarkTrackerBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Group;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Control;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.source.extractor.DefaultCheckpointableWatermark;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.util.ExecutorsUtils;
@Warmup(iterations = 3)
@Measurement(iterations = 10)
@org.openjdk.jmh.annotations.Fork(value = 3)
@BenchmarkMode(value = Mode.Throughput)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public class FineGrainedWatermarkTrackerBenchmark {

  /**
   * Per-group benchmark state: the tracker under test, a scheduler used to fire
   * delayed acks, a monotonically increasing watermark index, and a Random used
   * to jitter ack delays.
   */
  @State(value = Scope.Group)
  public static class TrackerState {
    private FineGrainedWatermarkTracker _watermarkTracker;
    private ScheduledExecutorService _executorService;
    private long _index;
    private final Random _random = new Random();

    @Setup
    public void setup() throws Exception {
      Properties properties = new Properties();
      Config config = ConfigFactory.parseProperties(properties);
      _watermarkTracker = new FineGrainedWatermarkTracker(config);
      _index = 0;
      // 40 scheduler threads so delayed acks don't queue behind one another.
      _executorService = new ScheduledThreadPoolExecutor(40,
          ExecutorsUtils.newThreadFactory(Optional.of(LoggerFactory.getLogger(FineGrainedWatermarkTrackerBenchmark.class))));
    }

    @TearDown
    public void tearDown() throws IOException {
      _watermarkTracker.close();
      _executorService.shutdown();
    }
  }

  /**
   * Tracks a watermark and acknowledges it immediately on the same thread:
   * the fastest path through the tracker.
   */
  @Benchmark
  @Group("trackImmediate")
  public void trackImmediateAcks(Control control, TrackerState trackerState) throws Exception {
    if (!control.stopMeasurement) {
      AcknowledgableWatermark wmark = new AcknowledgableWatermark(new DefaultCheckpointableWatermark(
          "0", new LongWatermark(trackerState._index)));
      trackerState._watermarkTracker.track(wmark);
      trackerState._index++;
      wmark.ack();
    }
  }

  /**
   * Tracks a watermark, then acknowledges it asynchronously after a random
   * 0-9 ms delay, approximating out-of-order ack arrival.
   */
  @Benchmark
  @Group("trackDelayed")
  public void trackWithDelayedAcks(Control control, TrackerState trackerState) throws Exception {
    if (!control.stopMeasurement) {
      final AcknowledgableWatermark wmark = new AcknowledgableWatermark(new DefaultCheckpointableWatermark(
          "0", new LongWatermark(trackerState._index)));
      trackerState._watermarkTracker.track(wmark);
      trackerState._index++;
      int delay = trackerState._random.nextInt(10);
      trackerState._executorService.schedule(new Runnable() {
        @Override
        public void run() {
          wmark.ack();
        }
      }, delay, TimeUnit.MILLISECONDS);
    }
  }

  /**
   * Schedules a randomly delayed ack WITHOUT tracking the watermark —
   * presumably a baseline isolating scheduling overhead from tracker cost
   * (NOTE(review): confirm the missing track() call is intentional).
   */
  @Benchmark
  @Group("scheduledDelayed")
  public void scheduledDelayedAcks(Control control, TrackerState trackerState) throws Exception {
    if (!control.stopMeasurement) {
      final AcknowledgableWatermark wmark = new AcknowledgableWatermark(new DefaultCheckpointableWatermark(
          "0", new LongWatermark(trackerState._index)));
      trackerState._index++;
      int delay = trackerState._random.nextInt(10);
      trackerState._executorService.schedule(new Runnable() {
        @Override
        public void run() {
          wmark.ack();
        }
      }, delay, TimeUnit.MILLISECONDS);
    }
  }

  /**
   * Same scheduling-only baseline as scheduledDelayedAcks but with a fixed
   * 10 ms delay, removing Random from the measured path.
   */
  @Benchmark
  @Group("scheduledNoRandom")
  public void scheduledNoRandomDelayedAcks(Control control, TrackerState trackerState) throws Exception {
    if (!control.stopMeasurement) {
      final AcknowledgableWatermark wmark = new AcknowledgableWatermark(new DefaultCheckpointableWatermark(
          "0", new LongWatermark(trackerState._index)));
      trackerState._index++;
      int delay = 10;
      trackerState._executorService.schedule(new Runnable() {
        @Override
        public void run() {
          wmark.ack();
        }
      }, delay, TimeUnit.MILLISECONDS);
    }
  }
}
0 | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin/hive/HiveRegistrationUnitComparatorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.State;
public class HiveRegistrationUnitComparatorTest {

  /**
   * Builds a fresh comparator, runs checkExistingIsSuperstate on the two states,
   * and reports the comparator's resulting difference flag.
   */
  private static boolean statesDiffer(State existing, State updated) {
    HiveRegistrationUnitComparator comparator = new HiveRegistrationUnitComparator<>(null, null);
    comparator.checkExistingIsSuperstate(existing, updated);
    return comparator.result;
  }

  @Test
  public void testCheckExistingIsSuperstate()
      throws Exception {
    final String firstKey = "key1";
    final String firstValue = "value1";
    final String secondKey = "key2";
    final String secondValue = "value2";

    State existingState = new State();
    State newState = new State();

    // Both states empty: no difference detected.
    Assert.assertFalse(statesDiffer(existingState, newState));

    // New state carries a key the existing state lacks: difference detected.
    newState.setProp(firstKey, firstValue);
    Assert.assertTrue(statesDiffer(existingState, newState));

    // Same key present on both sides but with mismatched values: still different.
    existingState.setProp(firstKey, secondValue);
    Assert.assertTrue(statesDiffer(existingState, newState));

    // Existing state is a superset of the new state: no difference reported.
    existingState.setProp(firstKey, firstValue);
    existingState.setProp(secondKey, secondValue);
    Assert.assertFalse(statesDiffer(existingState, newState));
  }
}
| 4,580 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin/hive/HiveConfFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import org.apache.hadoop.hive.conf.HiveConf;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import static org.apache.gobblin.hive.HiveMetaStoreClientFactory.HIVE_METASTORE_TOKEN_SIGNATURE;
import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTOREURIS;
public class HiveConfFactoryTest {
@Test
public void testHiveConfFactory() throws Exception {
HiveConf hiveConf = HiveConfFactory.get(Optional.absent(), SharedResourcesBrokerFactory.getImplicitBroker());
HiveConf hiveConf1 = HiveConfFactory.get(Optional.absent(), SharedResourcesBrokerFactory.getImplicitBroker());
Assert.assertEquals(hiveConf, hiveConf1);
// When there's no hcatURI specified, the default hive-site should be loaded.
Assert.assertTrue(hiveConf.getVar(METASTOREURIS).equals("file:///test"));
Assert.assertTrue(hiveConf.get(HIVE_METASTORE_TOKEN_SIGNATURE).equals("file:///test"));
HiveConf hiveConf2 = HiveConfFactory.get(Optional.of("hcat1"), SharedResourcesBrokerFactory.getImplicitBroker());
HiveConf hiveConf3 = HiveConfFactory.get(Optional.of("hcat1"), SharedResourcesBrokerFactory.getImplicitBroker());
Assert.assertEquals(hiveConf2, hiveConf3);
HiveConf hiveConf4 = HiveConfFactory.get(Optional.of("hcat11"), SharedResourcesBrokerFactory.getImplicitBroker());
Assert.assertNotEquals(hiveConf3, hiveConf4);
Assert.assertNotEquals(hiveConf4, hiveConf);
// THe uri should be correctly set.
Assert.assertEquals(hiveConf3.getVar(METASTOREURIS), "hcat1");
Assert.assertEquals(hiveConf3.get(HIVE_METASTORE_TOKEN_SIGNATURE), "hcat1");
Assert.assertEquals(hiveConf4.getVar(METASTOREURIS), "hcat11");
Assert.assertEquals(hiveConf4.get(HIVE_METASTORE_TOKEN_SIGNATURE), "hcat11");
}
} | 4,581 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin/hive/HiveMetaStoreClientFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.thrift.TException;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.apache.gobblin.hive.HiveMetaStoreClientFactory.HIVE_METASTORE_TOKEN_SIGNATURE;
public class HiveMetaStoreClientFactoryTest {

  /**
   * Creates a metastore client via the factory and verifies a database can be
   * dropped, created, and fetched back with the same name, description, and
   * location.
   */
  @Test
  public void testCreate() throws TException {
    HiveConf hiveConf = new HiveConf();
    HiveMetaStoreClientFactory factory = new HiveMetaStoreClientFactory(hiveConf);
    // A hive-site.xml on the test classpath pre-populates these values, so blank
    // them out here for the test to proceed against the local embedded metastore.
    // (Without a local hive-site, they would default to empty strings anyway.)
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "");
    hiveConf.set(HIVE_METASTORE_TOKEN_SIGNATURE, "");
    IMetaStoreClient msc = factory.create();

    String dbName = "test_db";
    String description = "test database";
    String location = "file:/tmp/" + dbName;
    Database db = new Database(dbName, description, location, null);

    // Drop-if-exists first so the test is re-runnable.
    msc.dropDatabase(dbName, true, true);
    msc.createDatabase(db);
    db = msc.getDatabase(dbName);
    Assert.assertEquals(db.getName(), dbName);
    Assert.assertEquals(db.getDescription(), description);
    Assert.assertEquals(db.getLocationUri(), location);
  }
}
| 4,582 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin/hive/HiveTableTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.util.ArrayList;
import java.util.List;
import org.testng.annotations.Test;
import junit.framework.Assert;
import org.apache.gobblin.configuration.State;
public class HiveTableTest {

  /**
   * Exercises one property of each value type carried by HiveRegistrationUnit
   * (long, string, boolean, int, list) and verifies the builder casts each
   * string-typed property to its expected type.
   */
  @Test
  public void testPopulateFieldTypeCasting() throws Exception {
    Long expectedAccessTime = System.currentTimeMillis();

    State tableProps = new State();
    tableProps.setProp(HiveConstants.LAST_ACCESS_TIME, String.valueOf(expectedAccessTime));

    State storageProps = new State();
    storageProps.setProp(HiveConstants.LOCATION, "/tmp");
    storageProps.setProp(HiveConstants.COMPRESSED, "false");
    storageProps.setProp(HiveConstants.NUM_BUCKETS, "1");
    storageProps.setProp(HiveConstants.BUCKET_COLUMNS, "col1, col2");

    HiveTable hiveTable = new HiveTable.Builder()
        .withTableName("tableName")
        .withDbName("dbName")
        .withProps(tableProps)
        .withStorageProps(storageProps)
        .build();

    Assert.assertEquals(hiveTable.getLastAccessTime().get().longValue(), expectedAccessTime.longValue());
    Assert.assertEquals(hiveTable.getLocation().get(), "/tmp");
    Assert.assertEquals(hiveTable.isCompressed.get().booleanValue(), false);
    Assert.assertEquals(hiveTable.getNumBuckets().get().intValue(), 1);

    List<String> expectedBucketColumns = new ArrayList<>();
    expectedBucketColumns.add("col1");
    expectedBucketColumns.add("col2");
    Assert.assertEquals(hiveTable.getBucketColumns().get().get(0), expectedBucketColumns.get(0));
    Assert.assertEquals(hiveTable.getBucketColumns().get().get(1), expectedBucketColumns.get(1));
  }
}
| 4,583 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin/hive/HiveMetastoreClientPoolTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.IOException;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
@Test
public class HiveMetastoreClientPoolTest {

  /**
   * Verifies that "hive.additionalConfig.*" properties are applied only to a
   * pool whose metastore URI matches the configured "targetUri".
   */
  public void testExtraHiveConf()
      throws IOException {
    String additionalHiveConf = "myhive.metastore.sasl.enabled";
    Properties props = new Properties();
    props.setProperty("hive.additionalConfig.targetUri", "test-target");
    props.setProperty("hive.additionalConfig." + additionalHiveConf, "false");

    // Pool for a non-matching URI: the extra conf must NOT be applied.
    HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(props, Optional.of("test"));
    Assert.assertNull(pool.getHiveConf().get(additionalHiveConf));

    // Pool for the matching target URI: the extra conf must be applied.
    pool = HiveMetastoreClientPool.get(props, Optional.of("test-target"));
    // parseBoolean returns a primitive, avoiding Boolean.valueOf's needless boxing.
    Assert.assertFalse(Boolean.parseBoolean(pool.getHiveConf().get(additionalHiveConf)));
  }
}
| 4,584 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin/hive/spec/SimpleHiveSpecTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.spec;
import com.google.common.base.MoreObjects;
import com.google.common.base.Optional;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.spec.activity.DropPartitionActivity;
import org.apache.gobblin.hive.spec.activity.DropTableActivity;
import org.apache.gobblin.hive.spec.predicate.PartitionNotExistPredicate;
import org.apache.gobblin.hive.spec.predicate.TableNotExistPredicate;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import java.util.ArrayList;
@Test(groups = {"gobblin.hive"})
public class SimpleHiveSpecTest {
  // Fixture identifiers shared by all ordered test methods below.
  private final String dbName = "db";
  private final String tableName = "tbl";
  private final String pathString = "tbl";

  // Shared spec mutated across the priority-ordered tests; order matters.
  private SimpleHiveSpec simpleHiveSpec;

  /** Builds the spec under test: a table with no partition at {@link #pathString}. */
  @BeforeClass
  public void setup(){
    HiveTable.Builder tableBuilder = new HiveTable.Builder();
    tableBuilder.withDbName(dbName).withTableName(tableName);
    HiveTable hiveTable = tableBuilder.build();
    SimpleHiveSpec.Builder specBuilder = new SimpleHiveSpec.Builder(new Path(pathString))
        .withPartition(Optional.absent())
        .withTable(hiveTable);
    simpleHiveSpec = specBuilder.build();
  }

  /** Verifies a freshly built spec has the expected table, empty collections, and toString form. */
  @Test(priority=1)
  public void testBuildSimpleSpec() {
    Assert.assertEquals(simpleHiveSpec.getTable().getDbName(), dbName);
    Assert.assertEquals(simpleHiveSpec.getTable().getTableName(), tableName);
    Assert.assertEquals(0, simpleHiveSpec.getPostActivities().size());
    Assert.assertEquals(0, simpleHiveSpec.getPreActivities().size());
    Assert.assertEquals(0, simpleHiveSpec.getPredicates().size());
    Assert.assertEquals(Optional.absent(), simpleHiveSpec.getPartition());
    // Rebuild the expected toString the same way SimpleHiveSpec does internally.
    String actualString = MoreObjects.toStringHelper(simpleHiveSpec).omitNullValues().add("path", pathString)
        .add("db", dbName).add("table", tableName)
        .add("partition", Optional.absent().orNull()).toString();
    Assert.assertEquals(actualString, simpleHiveSpec.toString());
  }

  /** Verifies pre/post activity lists are live and independently mutable (runs after testBuildSimpleSpec). */
  @Test(priority=2)
  public void testActivity(){
    DropPartitionActivity dropPartitionActivity = new DropPartitionActivity(dbName, tableName,
        new ArrayList<>(), new ArrayList<>());
    DropTableActivity dropTableActivity = new DropTableActivity(dbName, tableName);
    simpleHiveSpec.getPreActivities().add(dropPartitionActivity);
    simpleHiveSpec.getPreActivities().add(dropTableActivity);
    simpleHiveSpec.getPostActivities().add(dropPartitionActivity);
    Assert.assertEquals(simpleHiveSpec.getPreActivities().size(), 2);
    Assert.assertEquals(simpleHiveSpec.getPostActivities().size(), 1);
  }

  /** Verifies the predicate list is live and accepts both predicate types (runs last). */
  @Test(priority=3)
  public void testPredicate(){
    TableNotExistPredicate tableNotExistPredicate = new TableNotExistPredicate(dbName, tableName);
    PartitionNotExistPredicate partitionNotExistPredicate = new PartitionNotExistPredicate(dbName, tableName,
        new ArrayList<>(), new ArrayList<>());
    simpleHiveSpec.getPredicates().add(tableNotExistPredicate);
    simpleHiveSpec.getPredicates().add(partitionNotExistPredicate);
    Assert.assertEquals(simpleHiveSpec.getPredicates().size(), 2);
  }
}
| 4,585 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin/hive/avro/HiveAvroSerDeManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.avro;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.hive.HiveTable;
@Test(singleThreaded = true)
public class HiveAvroSerDeManagerTest {
  private static String TEST_DB = "testDB";
  private static String TEST_TABLE = "testTable";

  // Working directory holding the staged Avro file; created fresh per run.
  private Path testBasePath;

  /** Creates a clean working directory and stages the test Avro data file into it. */
  @BeforeClass
  public void setUp() throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    this.testBasePath = new Path("testdir");

    // Remove any leftovers from a previous run (single delete suffices).
    fs.delete(this.testBasePath, true);
    fs.mkdirs(this.testBasePath);

    Files.copy(this.getClass().getResourceAsStream("/test-hive-table/hive-test.avro"),
        Paths.get(this.testBasePath.toString(), "hive-test.avro"), StandardCopyOption.REPLACE_EXISTING);
  }

  /**
   * Test that the schema is written to the schema literal
   */
  @Test
  public void testSchemaLiteral() throws IOException {
    State state = new State();
    HiveAvroSerDeManager manager = new HiveAvroSerDeManager(state);
    HiveRegistrationUnit registrationUnit = (new HiveTable.Builder()).withDbName(TEST_DB).withTableName(TEST_TABLE).build();

    manager.addSerDeProperties(this.testBasePath, registrationUnit);
    Assert.assertTrue(registrationUnit.getSerDeProps().getProp(HiveAvroSerDeManager.SCHEMA_LITERAL).contains("example.avro"));
  }

  /** When the schema exceeds the literal length limit, it must be written to a URL instead. */
  @Test
  public void testSchemaUrl() throws IOException {
    State state = new State();
    state.setProp(HiveAvroSerDeManager.SCHEMA_LITERAL_LENGTH_LIMIT, "10");

    validateSchemaUrl(state, HiveAvroSerDeManager.DEFAULT_SCHEMA_FILE_NAME, false);
  }

  /** Schema URL must be written correctly even if the target file already exists. */
  @Test
  public void testSchemaUrlWithExistingFile() throws IOException {
    State state = new State();
    state.setProp(HiveAvroSerDeManager.SCHEMA_LITERAL_LENGTH_LIMIT, "10");

    validateSchemaUrl(state, HiveAvroSerDeManager.DEFAULT_SCHEMA_FILE_NAME, true);
  }

  /** Schema URL via temp-file mode with a custom schema file name. */
  @Test
  public void testSchemaUrlWithTempFile() throws IOException {
    final String SCHEMA_FILE_NAME = "test_temp.avsc";

    State state = new State();
    state.setProp(HiveAvroSerDeManager.SCHEMA_LITERAL_LENGTH_LIMIT, "10");
    state.setProp(HiveAvroSerDeManager.USE_SCHEMA_TEMP_FILE, "true");
    state.setProp(HiveAvroSerDeManager.SCHEMA_FILE_NAME, SCHEMA_FILE_NAME);

    validateSchemaUrl(state, SCHEMA_FILE_NAME, false);
  }

  /** Temp-file mode must still succeed when a conflicting target file already exists. */
  @Test
  public void testSchemaUrlWithTempFileAndExistingFile() throws IOException {
    final String SCHEMA_FILE_NAME = "test_temp.avsc";

    State state = new State();
    state.setProp(HiveAvroSerDeManager.SCHEMA_LITERAL_LENGTH_LIMIT, "10");
    state.setProp(HiveAvroSerDeManager.USE_SCHEMA_TEMP_FILE, "true");
    state.setProp(HiveAvroSerDeManager.SCHEMA_FILE_NAME, SCHEMA_FILE_NAME);

    validateSchemaUrl(state, SCHEMA_FILE_NAME, true);
  }

  /**
   * Runs addSerDeProperties with the given state and asserts that no schema literal
   * is set, the schema URL points at the expected file, and the written schema
   * matches the reference .avsc resource byte-for-byte.
   *
   * @param state serde manager configuration
   * @param targetSchemaFileName expected schema file name under the test dir
   * @param createConflictingFile whether to pre-create the target file to test overwrite behavior
   */
  private void validateSchemaUrl(State state, String targetSchemaFileName, boolean createConflictingFile) throws IOException {
    HiveAvroSerDeManager manager = new HiveAvroSerDeManager(state);
    HiveRegistrationUnit registrationUnit = (new HiveTable.Builder()).withDbName(TEST_DB).withTableName(TEST_TABLE).build();

    // Clean up existing file
    String targetPathStr = new Path(this.testBasePath, targetSchemaFileName).toString();
    File targetFile = new File(targetPathStr);
    targetFile.delete();

    // create a conflicting file
    if (createConflictingFile) {
      targetFile.createNewFile();
    }

    manager.addSerDeProperties(this.testBasePath, registrationUnit);

    Assert.assertNull(registrationUnit.getSerDeProps().getProp(HiveAvroSerDeManager.SCHEMA_LITERAL));
    String schemaUrl = registrationUnit.getSerDeProps().getProp(HiveAvroSerDeManager.SCHEMA_URL);
    Assert.assertEquals(schemaUrl, targetPathStr);
    Assert.assertTrue(IOUtils.contentEquals(this.getClass().getResourceAsStream("/test-hive-table/hive-test.avsc"),
        new FileInputStream(schemaUrl)));
  }

  /** Removes the working directory after all tests complete. */
  @AfterClass
  public void tearDown() throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    fs.delete(this.testBasePath, true);
  }
}
| 4,586 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/test/java/org/apache/gobblin/hive/orc/HiveOrcSerDeManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.orc;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.apache.gobblin.binary_creation.AvroTestTools;
import org.apache.gobblin.binary_creation.OrcTestTools;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.util.HadoopUtils;
import static org.apache.gobblin.hive.orc.HiveOrcSerDeManager.ENABLED_ORC_TYPE_CHECK;
/**
 * Unit tests for {@link HiveOrcSerDeManager}, covering SerDe property population from ORC files,
 * file-extension and file-name-prefix filtering, and custom SerDe/format configuration.
 */
@Test(singleThreaded = true)
public class HiveOrcSerDeManagerTest {
  // Made final: these are compile-time constants shared by all tests.
  private static final String TEST_DB = "testDB";
  private static final String TEST_TABLE = "testTable";

  private Path testBasePath;
  private Path testRegisterPath;

  @BeforeClass
  public void setUp() throws IOException, SerDeException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    this.testBasePath = new Path("orctestdir");
    this.testRegisterPath = new Path(this.testBasePath, "register");
    fs.delete(this.testBasePath, true);
    fs.mkdirs(this.testRegisterPath);

    // ORC file whose name carries a prefix, so testPrefixFilter can exclude it.
    OrcTestTools orcTestTools = new OrcTestTools();
    orcTestTools.writeJsonResourceRecordsAsBinary("avro_input", fs, this.testBasePath, null);
    HadoopUtils.copyFile(fs, new Path(this.testBasePath, "input"), fs,
        new Path(this.testRegisterPath, "prefix-hive-test.orc"), true, new Configuration());

    // Non-ORC (Avro) file with a ".notOrc" extension, used to exercise extension filtering.
    AvroTestTools avroTestTools = new AvroTestTools();
    avroTestTools.writeJsonResourceRecordsAsBinary("avro_input", fs, this.testBasePath, null);
    HadoopUtils.copyFile(fs, new Path(this.testBasePath, "input.avro"), fs,
        new Path(this.testRegisterPath, "hive-test.notOrc"), true, new Configuration());
  }

  /**
   * Test that the schema is written to the schema literal and attributes required for
   * initializing the ORC SerDe object.
   */
  @Test
  public void testOrcSchemaLiteral() throws IOException {
    State state = new State();
    HiveOrcSerDeManager manager = new HiveOrcSerDeManager(state);
    HiveRegistrationUnit registrationUnit =
        (new HiveTable.Builder()).withDbName(TEST_DB).withTableName(TEST_TABLE).build();

    manager.addSerDeProperties(this.testRegisterPath, registrationUnit);
    // De-duplicated: the expected column checks are identical to examineSchema().
    examineSchema(registrationUnit);
  }

  /**
   * Test that an empty file-extension configuration (",") still finds the ORC file.
   */
  @Test
  public void testEmptyExtension() throws IOException {
    State state = new State();
    state.setProp(ENABLED_ORC_TYPE_CHECK, true);
    state.setProp(HiveOrcSerDeManager.FILE_EXTENSIONS_KEY, ",");
    HiveOrcSerDeManager manager = new HiveOrcSerDeManager(state);
    HiveRegistrationUnit registrationUnit =
        (new HiveTable.Builder()).withDbName(TEST_DB).withTableName(TEST_TABLE).build();

    manager.addSerDeProperties(this.testRegisterPath, registrationUnit);
    examineSchema(registrationUnit);
  }

  /**
   * Test that custom SerDe type, input format and output format settings are propagated to the
   * registration unit.
   */
  @Test
  public void testCustomSerdeConfig() throws IOException {
    State state = new State();
    state.setProp(HiveOrcSerDeManager.SERDE_TYPE_KEY, OrcSerde.class.getName());
    state.setProp(HiveOrcSerDeManager.INPUT_FORMAT_CLASS_KEY, "customInputFormat");
    state.setProp(HiveOrcSerDeManager.OUTPUT_FORMAT_CLASS_KEY, "customOutputFormat");
    HiveOrcSerDeManager manager = new HiveOrcSerDeManager(state);
    HiveRegistrationUnit registrationUnit =
        (new HiveTable.Builder()).withDbName(TEST_DB).withTableName(TEST_TABLE).build();

    manager.addSerDeProperties(this.testRegisterPath, registrationUnit);
    examineSchema(registrationUnit);
    Assert.assertEquals(registrationUnit.getSerDeType().get(), OrcSerde.class.getName());
    Assert.assertEquals(registrationUnit.getInputFormat().get(), "customInputFormat");
    Assert.assertEquals(registrationUnit.getOutputFormat().get(), "customOutputFormat");
  }

  /**
   * Test that an error is raised if no ORC files are found during schema retrieval.
   */
  @Test(expectedExceptions = FileNotFoundException.class, expectedExceptionsMessageRegExp = "No files in Dataset:orctestdir/register found for schema retrieval")
  public void testNoOrcFiles() throws IOException {
    State state = new State();
    state.setProp(ENABLED_ORC_TYPE_CHECK, true);
    // Only the non-ORC file matches this extension, so schema retrieval must fail.
    state.setProp(HiveOrcSerDeManager.FILE_EXTENSIONS_KEY, ".notOrc");
    HiveOrcSerDeManager manager = new HiveOrcSerDeManager(state);
    HiveRegistrationUnit registrationUnit =
        (new HiveTable.Builder()).withDbName(TEST_DB).withTableName(TEST_TABLE).build();

    manager.addSerDeProperties(this.testRegisterPath, registrationUnit);
  }

  /**
   * Test that files matching the ignored prefix are excluded from schema retrieval.
   */
  @Test(expectedExceptions = FileNotFoundException.class, expectedExceptionsMessageRegExp = "No files in Dataset:orctestdir/register found for schema retrieval")
  public void testPrefixFilter() throws IOException {
    State state = new State();
    // The only ORC file in the register dir is named "prefix-hive-test.orc".
    state.setProp(HiveOrcSerDeManager.IGNORED_FILE_PREFIXES_KEY, "prefix-");
    HiveOrcSerDeManager manager = new HiveOrcSerDeManager(state);
    HiveRegistrationUnit registrationUnit =
        (new HiveTable.Builder()).withDbName(TEST_DB).withTableName(TEST_TABLE).build();

    manager.addSerDeProperties(this.testRegisterPath, registrationUnit);
  }

  /** Asserts the registration unit carries the expected column names and types. */
  private void examineSchema(HiveRegistrationUnit registrationUnit) {
    List<String> columns =
        Arrays.asList(registrationUnit.getSerDeProps().getProp(serdeConstants.LIST_COLUMNS).split(","));
    Assert.assertEquals(columns.get(0), "name");
    Assert.assertEquals(columns.get(1), "timestamp");
    List<String> columnTypes =
        Arrays.asList(registrationUnit.getSerDeProps().getProp(serdeConstants.LIST_COLUMN_TYPES).split(","));
    Assert.assertEquals(columnTypes.get(0), "string");
    Assert.assertEquals(columnTypes.get(1), "bigint");
  }

  @AfterClass
  public void tearDown() throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    fs.delete(this.testBasePath, true);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.metastore;
import com.google.common.base.Optional;
import java.io.IOException;
import java.util.Properties;
import org.apache.avro.Schema;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.spec.SimpleHiveSpec;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.util.AvroUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Unit tests for {@link HiveMetaStoreBasedRegister}, focused on schema-update behavior when
 * {@code FETCH_LATEST_SCHEMA} is enabled.
 */
public class HiveMetaStoreBasedRegisterTest {

  /**
   * Exercises {@code updateSchema} across three cases:
   * (1) new schema equals existing schema — schema literal is left as the writer schema;
   * (2) new schema differs and the registry's latest schema also differs from the existing one —
   *     the writer schema wins;
   * (3) the registry's latest schema equals the existing schema — the existing schema wins.
   */
  @Test
  public void testUpdateSchemaMethod() throws IOException {
    final String databaseName = "testdb";
    final String tableName = "testtable";
    State state = new State();
    state.setProp(HiveMetaStoreBasedRegister.FETCH_LATEST_SCHEMA, true);
    state.setProp(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS, MockSchemaRegistry.class.getName());
    HiveMetaStoreBasedRegister register = new HiveMetaStoreBasedRegister(state, Optional.absent());

    Schema writerSchema = new Schema.Parser().parse("{\"type\": \"record\", \"name\": \"TestEvent\","
        + " \"namespace\": \"test.namespace\", \"fields\": [{\"name\":\"testName\"," + " \"type\": \"int\"}]}");
    AvroUtils.setSchemaCreationTime(writerSchema, "111");

    // Build the HiveTable under test and an "existing" table it will be compared against.
    HiveTable.Builder builder = new HiveTable.Builder();
    builder.withDbName(databaseName).withTableName(tableName);
    State serdeProps = new State();
    serdeProps.setProp("avro.schema.literal", writerSchema.toString());
    builder.withSerdeProps(serdeProps);
    HiveTable hiveTable = builder.build();
    HiveTable existingTable = builder.build();
    hiveTable.setInputFormat(AvroContainerInputFormat.class.getName());
    hiveTable.setOutputFormat(AvroContainerOutputFormat.class.getName());
    hiveTable.setSerDeType(AvroSerDe.class.getName());
    existingTable.setInputFormat(AvroContainerInputFormat.class.getName());
    existingTable.setOutputFormat(AvroContainerOutputFormat.class.getName());
    existingTable.setSerDeType(AvroSerDe.class.getName());

    SimpleHiveSpec.Builder specBuilder = new SimpleHiveSpec.Builder(new Path("pathString"))
        .withPartition(Optional.absent())
        .withTable(hiveTable);
    Table table = HiveMetaStoreUtils.getTable(hiveTable);
    SimpleHiveSpec simpleHiveSpec = specBuilder.build();

    // Case 1: new schema equals existing schema — nothing changes.
    register.updateSchema(simpleHiveSpec, table, existingTable);
    Assert.assertEquals(table.getSd().getSerdeInfo().getParameters()
        .get(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()), writerSchema.toString());

    // Case 2: new schema != existing schema, and the registry's latest schema != existing schema.
    // The table schema is set to the writer schema.
    register.schemaRegistry.get().register(writerSchema, "writerSchema");
    Schema existingSchema = new Schema.Parser().parse("{\"type\": \"record\", \"name\": \"TestEvent_1\","
        + " \"namespace\": \"test.namespace\", \"fields\": [{\"name\":\"testName_1\"," + " \"type\": \"double\"}]}");
    AvroUtils.setSchemaCreationTime(existingSchema, "110");
    existingTable.getSerDeProps()
        .setProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(), existingSchema.toString());
    register.updateSchema(simpleHiveSpec, table, existingTable);
    Assert.assertEquals(table.getSd().getSerdeInfo().getParameters()
        .get(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()), writerSchema.toString());

    // Case 3: new schema != existing schema, but the registry's latest schema equals the existing
    // schema. The table schema should be the existing schema.
    register.schemaRegistry.get().register(existingSchema, "existingSchema");
    register.updateSchema(simpleHiveSpec, table, existingTable);
    Assert.assertEquals(table.getSd().getSerdeInfo().getParameters()
        .get(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()), existingSchema.toString());
  }

  /**
   * Minimal in-memory {@link KafkaSchemaRegistry} whose "latest" schema is whatever was last
   * registered via {@link #register(Schema, String)}.
   */
  public static class MockSchemaRegistry extends KafkaSchemaRegistry<String, Schema> {
    // Static so the test can influence the registry instance created reflectively by the
    // class under test; shared across all MockSchemaRegistry instances.
    static Schema latestSchema = Schema.create(Schema.Type.STRING);

    public MockSchemaRegistry(Properties props) {
      super(props);
    }

    @Override
    protected Schema fetchSchemaByKey(String key) throws SchemaRegistryException {
      return null;
    }

    @Override
    public Schema getLatestSchemaByTopic(String topic) throws SchemaRegistryException {
      return latestSchema;
    }

    @Override
    public String register(Schema schema) throws SchemaRegistryException {
      return null;
    }

    @Override
    public String register(Schema schema, String name) throws SchemaRegistryException {
      // Fixed: assign the static field directly instead of through "this" — the field is
      // class-level state, and the instance-qualified write obscured that.
      latestSchema = schema;
      return schema.toString();
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.metastore;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.function.Consumer;
import java.util.function.Function;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveTable;
import static org.apache.gobblin.hive.metastore.HiveMetaStoreUtils.getParameters;
/**
 * Unit tests for {@link HiveMetaStoreUtils}: conversion between {@link HiveTable} and the
 * metastore {@link Table}, and detection of non-optional union-type columns for Avro and ORC
 * serde configurations.
 */
public class HiveMetaStoreUtilsTest {

  /** Avro-backed HiveTable converts to a metastore Table with the schema-derived column. */
  @Test
  public void testGetTableAvro() {
    final String databaseName = "testdb";
    final String tableName = "testtable";
    HiveTable.Builder builder = new HiveTable.Builder();
    builder.withDbName(databaseName).withTableName(tableName);

    State serdeProps = new State();
    serdeProps.setProp("avro.schema.literal", "{\"type\": \"record\", \"name\": \"TestEvent\","
        + " \"namespace\": \"test.namespace\", \"fields\": [{\"name\":\"a\"," + " \"type\": \"int\"}]}");
    builder.withSerdeProps(serdeProps);

    HiveTable hiveTable = builder.build();
    hiveTable.setInputFormat(AvroContainerInputFormat.class.getName());
    hiveTable.setOutputFormat(AvroContainerOutputFormat.class.getName());
    hiveTable.setSerDeType(AvroSerDe.class.getName());

    Table table = HiveMetaStoreUtils.getTable(hiveTable);
    Assert.assertEquals(table.getDbName(), databaseName);
    Assert.assertEquals(table.getTableName(), tableName);

    StorageDescriptor sd = table.getSd();
    Assert.assertEquals(sd.getInputFormat(), AvroContainerInputFormat.class.getName());
    Assert.assertEquals(sd.getOutputFormat(), AvroContainerOutputFormat.class.getName());
    Assert.assertNotNull(sd.getSerdeInfo());
    Assert.assertEquals(sd.getSerdeInfo().getSerializationLib(), AvroSerDe.class.getName());

    // Split compound assertTrue into assertNotNull + assertEquals for clearer failure messages.
    List<FieldSchema> fields = sd.getCols();
    Assert.assertNotNull(fields);
    Assert.assertEquals(fields.size(), 1);
    FieldSchema fieldA = fields.get(0);
    Assert.assertEquals(fieldA.getName(), "a");
    Assert.assertEquals(fieldA.getType(), "int");
  }

  /** ORC-backed HiveTable converts with columns taken from the "columns"/"columns.types" props. */
  @Test
  public void testGetTableOrc() {
    final String databaseName = "db";
    final String tableName = "tbl";
    HiveTable.Builder builder = new HiveTable.Builder();
    builder.withDbName(databaseName).withTableName(tableName);
    HiveTable hiveTable = builder.build();

    // SerDe props are
    State serdeProps = new State();
    serdeProps.setProp("columns", "timestamp,namespace,name,metadata");
    serdeProps.setProp("columns.types", "bigint,string,string,map<string,string>");
    hiveTable.getProps().addAll(serdeProps);

    hiveTable.setInputFormat(OrcInputFormat.class.getName());
    hiveTable.setOutputFormat(OrcOutputFormat.class.getName());
    hiveTable.setSerDeType(OrcSerde.class.getName());

    Table table = HiveMetaStoreUtils.getTable(hiveTable);
    Assert.assertEquals(table.getDbName(), databaseName);
    Assert.assertEquals(table.getTableName(), tableName);

    StorageDescriptor sd = table.getSd();
    Assert.assertEquals(sd.getInputFormat(), OrcInputFormat.class.getName());
    Assert.assertEquals(sd.getOutputFormat(), OrcOutputFormat.class.getName());
    Assert.assertNotNull(sd.getSerdeInfo());
    Assert.assertEquals(sd.getSerdeInfo().getSerializationLib(), OrcSerde.class.getName());

    // verify column name
    List<FieldSchema> fields = sd.getCols();
    Assert.assertNotNull(fields);
    Assert.assertEquals(fields.size(), 4);
    FieldSchema fieldA = fields.get(0);
    Assert.assertEquals(fieldA.getName(), "timestamp");
    Assert.assertEquals(fieldA.getType(), "bigint");
    FieldSchema fieldB = fields.get(1);
    Assert.assertEquals(fieldB.getName(), "namespace");
    Assert.assertEquals(fieldB.getType(), "string");
    FieldSchema fieldC = fields.get(2);
    Assert.assertEquals(fieldC.getName(), "name");
    Assert.assertEquals(fieldC.getType(), "string");
    FieldSchema fieldD = fields.get(3);
    Assert.assertEquals(fieldD.getName(), "metadata");
    Assert.assertEquals(fieldD.getType(), "map<string,string>");
  }

  /** An unparseable Avro schema literal yields a table with zero columns (no exception). */
  @Test
  public void testGetTableAvroInvalidSchema() {
    final String databaseName = "testdb";
    final String tableName = "testtable";
    HiveTable.Builder builder = new HiveTable.Builder();
    builder.withDbName(databaseName).withTableName(tableName);

    State serdeProps = new State();
    serdeProps.setProp("avro.schema.literal", "invalid schema");
    builder.withSerdeProps(serdeProps);

    HiveTable hiveTable = builder.build();
    hiveTable.setInputFormat(AvroContainerInputFormat.class.getName());
    hiveTable.setOutputFormat(AvroContainerOutputFormat.class.getName());
    hiveTable.setSerDeType(AvroSerDe.class.getName());

    Table table = HiveMetaStoreUtils.getTable(hiveTable);
    Assert.assertEquals(table.getDbName(), databaseName);
    Assert.assertEquals(table.getTableName(), tableName);

    StorageDescriptor sd = table.getSd();
    Assert.assertEquals(sd.getInputFormat(), AvroContainerInputFormat.class.getName());
    Assert.assertEquals(sd.getOutputFormat(), AvroContainerOutputFormat.class.getName());
    Assert.assertNotNull(sd.getSerdeInfo());
    Assert.assertEquals(sd.getSerdeInfo().getSerializationLib(), AvroSerDe.class.getName());

    List<FieldSchema> fields = sd.getCols();
    Assert.assertNotNull(fields);
    Assert.assertEquals(fields.size(), 0);
  }

  /** The reflective lookup of determineSchemaOrThrowException must resolve (no NoSuchMethodException). */
  @Test
  public void testInVokeDetermineSchemaOrThrowExceptionMethod() {
    try {
      HiveMetaStoreUtils.inVokeDetermineSchemaOrThrowExceptionMethod(new Properties(), new Configuration());
    } catch (Exception e) {
      // Other exceptions (e.g. from the invoked method itself) are acceptable here; only a
      // failed reflective method lookup would indicate a broken integration.
      Assert.assertFalse(e instanceof NoSuchMethodException);
    }
  }

  /** Round-trip: a metastore Table converts back to a HiveTable with formats and columns intact. */
  @Test
  public void testGetHiveTable() throws Exception {
    final String databaseName = "testdb";
    final String tableName = "testtable";
    final String tableSdLoc = "/tmp/testtable";
    final String partitionName = "partitionName";

    State serdeProps = new State();
    serdeProps.setProp("avro.schema.literal", "{\"type\": \"record\", \"name\": \"TestEvent\","
        + " \"namespace\": \"test.namespace\", \"fields\": [{\"name\":\"testName\"," + " \"type\": \"int\"}]}");

    List<FieldSchema> fieldSchemas = new ArrayList<>();
    fieldSchemas.add(new FieldSchema("testName", "int", "testContent"));
    SerDeInfo si = new SerDeInfo();
    si.setParameters(getParameters(serdeProps));
    si.setName(tableName);
    StorageDescriptor sd = new StorageDescriptor(fieldSchemas, tableSdLoc,
        AvroContainerInputFormat.class.getName(), AvroContainerOutputFormat.class.getName(),
        false, 0, si, null, Lists.<Order>newArrayList(), null);
    sd.setParameters(getParameters(serdeProps));
    Table table = new Table(tableName, databaseName, "testOwner", 0, 0, 0, sd,
        Lists.<FieldSchema>newArrayList(), Maps.<String, String>newHashMap(), "", "", "");
    table.addToPartitionKeys(new FieldSchema(partitionName, "string", "some comment"));

    HiveTable hiveTable = HiveMetaStoreUtils.getHiveTable(table);
    Assert.assertEquals(hiveTable.getDbName(), databaseName);
    Assert.assertEquals(hiveTable.getTableName(), tableName);
    Assert.assertTrue(hiveTable.getInputFormat().isPresent());
    Assert.assertTrue(hiveTable.getOutputFormat().isPresent());
    Assert.assertEquals(hiveTable.getInputFormat().get(), AvroContainerInputFormat.class.getName());
    Assert.assertEquals(hiveTable.getOutputFormat().get(), AvroContainerOutputFormat.class.getName());
    Assert.assertNotNull(hiveTable.getSerDeType());

    List<HiveRegistrationUnit.Column> fields = hiveTable.getColumns();
    Assert.assertNotNull(fields);
    Assert.assertEquals(fields.size(), 1);
    HiveRegistrationUnit.Column fieldA = fields.get(0);
    Assert.assertEquals(fieldA.getName(), "testName");
    Assert.assertEquals(fieldA.getType(), "int");
  }

  /** Avro fields whose union is not the optional ["null", T] form are flagged. */
  @Test
  public void testContainsUnionType_AvroSucceeds() {
    final State serdeProps = new State();
    final String avroSchema = "{\"type\": \"record\", \"name\": \"TestEvent\",\"namespace\": \"test.namespace\", \"fields\": [{\"name\":\"fieldName\", \"type\": %s}]}";
    Consumer<String> assertContainsNonOptionalUnionType = fieldType -> {
      serdeProps.setProp("avro.schema.literal", String.format(avroSchema, fieldType));
      HiveTable hiveTable = createTestHiveTable_Avro(serdeProps);
      Assert.assertEquals(hiveTable.getColumns().size(), 1);
      Assert.assertTrue(HiveMetaStoreUtils.containsNonOptionalUnionTypeColumn(hiveTable));
    };

    assertContainsNonOptionalUnionType.accept("[\"string\", \"int\"]");
    assertContainsNonOptionalUnionType.accept("[\"string\", \"int\", \"null\"]");
    assertContainsNonOptionalUnionType.accept("[{\"type\":\"map\",\"values\":[\"boolean\",\"null\", {\"type\": \"array\", \"items\":\"string\"}]},\"null\"]");
  }

  /** Optional unions and single-branch unions in Avro are not flagged. */
  @Test
  public void testContainsUnionType_AvroFails() {
    final State serdeProps = new State();
    serdeProps.setProp("avro.schema.literal", "{\"type\": \"record\", \"name\": \"TestEvent\",\"namespace\": \"test.namespace\", "
        + "\"fields\": ["
        + "{\"name\":\"someString\", \"type\": \"string\"}, "
        + "{\"name\":\"aNullableInt\", \"type\": [\"null\", \"int\"]},"
        + "{\"name\":\"nonNullableInt\", \"type\": [\"int\"]},"
        + "{\"name\":\"nonArray\", \"type\": [{\"type\": \"array\", \"items\":{\"type\":\"map\",\"values\":\"string\"}}]}"
        + "]}");
    HiveTable hiveTable = createTestHiveTable_Avro(serdeProps);
    Assert.assertEquals(hiveTable.getColumns().size(), 4);
    Assert.assertFalse(HiveMetaStoreUtils.containsNonOptionalUnionTypeColumn(hiveTable));
  }

  /** A missing Avro schema literal results in a RuntimeException. */
  @Test
  public void testContainsUnionType_AvroNoSchemaLiteral() {
    HiveTable table = new HiveTable.Builder().withDbName("db").withTableName("tb").build();
    Assert.assertThrows(RuntimeException.class, () -> HiveMetaStoreUtils.containsNonOptionalUnionTypeColumn(table));
  }

  /** A top-level ORC uniontype with multiple branches is flagged. */
  @Test
  public void testContainsUnionType_OrcUnionType() {
    final State serdeProps = new State();
    serdeProps.setProp("columns", "someInt,someString,someMap,someUT");
    // NOTE: unlike in avro, all values in ORC are nullable, so it's not necessary to test null permutations
    serdeProps.setProp("columns.types", "bigint,string,map<string,string>,uniontype<string,int>");
    HiveTable hiveTable = createTestHiveTable_ORC(serdeProps);
    Assert.assertEquals(hiveTable.getColumns().size(), 4);
    Assert.assertTrue(HiveMetaStoreUtils.containsNonOptionalUnionTypeColumn(hiveTable));
  }

  /** A multi-branch uniontype nested deep inside maps/arrays/structs is still flagged. */
  @Test
  public void testContainsUnionType_OrcNestedValue() {
    final State serdeProps = new State();
    serdeProps.setProp("columns", "nestedNonOptionalUT");
    serdeProps.setProp("columns.types", "map<string,array<struct<i:int,someUT:uniontype<array<string>,struct<i:int>>>>>");
    HiveTable hiveTable = createTestHiveTable_ORC(serdeProps);
    Assert.assertEquals(hiveTable.getColumns().size(), 1);
    Assert.assertTrue(HiveMetaStoreUtils.containsNonOptionalUnionTypeColumn(hiveTable));
  }

  /** Nested single-branch uniontypes are not flagged. */
  @Test
  public void testContainsUnionType_OrcNestedUnionPrimitive() {
    final State serdeProps = new State();
    serdeProps.setProp("columns", "nesteduniontypeint");
    serdeProps.setProp("columns.types", "uniontype<array<map<string,struct<i:int,someUt:uniontype<int>>>>>");
    HiveTable hiveTable = createTestHiveTable_ORC(serdeProps);
    Assert.assertEquals(hiveTable.getColumns().size(), 1);
    Assert.assertFalse(HiveMetaStoreUtils.containsNonOptionalUnionTypeColumn(hiveTable));
  }

  /** Primitive columns and single-branch uniontypes at top level are not flagged. */
  @Test
  public void testContainsUnionType_OrcPrimitive() {
    final State serdeProps = new State();
    serdeProps.setProp("columns", "timestamp,uniontypeint");
    serdeProps.setProp("columns.types", "bigint,uniontype<int>");
    HiveTable hiveTable = createTestHiveTable_ORC(serdeProps);
    Assert.assertEquals(hiveTable.getColumns().size(), 2);
    Assert.assertFalse(HiveMetaStoreUtils.containsNonOptionalUnionTypeColumn(hiveTable));
  }

  /** Builds an ORC-configured test table from the given props. */
  private HiveTable createTestHiveTable_ORC(State props) {
    return createTestHiveTable("testDb", "testTable", props, (hiveTable) -> {
      hiveTable.setInputFormat(OrcInputFormat.class.getName());
      hiveTable.setOutputFormat(OrcOutputFormat.class.getName());
      hiveTable.setSerDeType(OrcSerde.class.getName());
      return null;
    });
  }

  /** Builds an Avro-configured test table from the given props. */
  private HiveTable createTestHiveTable_Avro(State props) {
    return createTestHiveTable("testDB", "testTable", props, (hiveTable) -> {
      hiveTable.setInputFormat(AvroContainerInputFormat.class.getName());
      hiveTable.setOutputFormat(AvroContainerOutputFormat.class.getName());
      hiveTable.setSerDeType(AvroSerDe.class.getName());
      return null;
    });
  }

  private HiveTable createTestHiveTable(String dbName, String tableName, State props, Function<HiveTable, Void> additionalSetup) {
    HiveTable.Builder builder = new HiveTable.Builder();
    HiveTable hiveTable = builder.withDbName(dbName).withTableName(tableName).withProps(props).build();
    additionalSetup.apply(hiveTable);
    // Serialize then deserialize as a way to quickly setup tables for other tests in util class
    Table table = HiveMetaStoreUtils.getTable(hiveTable);
    return HiveMetaStoreUtils.getHiveTable(table);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.policy;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import static org.apache.gobblin.hive.policy.HiveRegistrationPolicyBaseTest.examine;
/** Unit tests for {@link HiveSnapshotRegistrationPolicy}. */
@Test(groups = {"gobblin.hive"})
public class HiveSnapshotRegistrationPolicyTest {
  private Path path;

  @Test
  public void testGetHiveSpecs() throws IOException {
    State state = new State();

    // Case 1: directory containing zero snapshots — no specs are produced.
    this.path = new Path(getClass().getResource("/test-hive-table/snapshot1").toString());
    Collection<HiveSpec> specs = new HiveSnapshotRegistrationPolicy(state).getHiveSpecs(this.path);
    Assert.assertEquals(specs.size(), 0);

    // Case 2: directory containing snapshot sub-directories, but no db/table names configured yet.
    // Bug fix: the original code asserted on the STALE collection from case 1 (a no-op check);
    // the specs must be re-fetched for the new path before asserting.
    // NOTE(review): expected size is 0 because no database/table names are configured — confirm.
    this.path = new Path(getClass().getResource("/test-hive-table/").toString());
    specs = new HiveSnapshotRegistrationPolicy(state).getHiveSpecs(this.path);
    Assert.assertEquals(specs.size(), 0);

    // Case 3: with two databases and three tables configured, the (db x table) cross product of
    // six specs is produced, ordered db1 before db2, tables in declared order.
    state.appendToListProp(HiveRegistrationPolicyBase.HIVE_DATABASE_NAME, "db1");
    state.appendToListProp(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_DATABASE_NAMES, "db2");
    state.appendToListProp(HiveRegistrationPolicyBase.HIVE_TABLE_NAME, "tbl1");
    state.appendToListProp(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_TABLE_NAMES, "tbl2,tbl3");
    specs = new HiveSnapshotRegistrationPolicy(state).getHiveSpecs(this.path);
    Assert.assertEquals(specs.size(), 6);

    Iterator<HiveSpec> iterator = specs.iterator();
    examine(iterator.next(), "db1", "tbl1");
    examine(iterator.next(), "db1", "tbl2");
    examine(iterator.next(), "db1", "tbl3");
    examine(iterator.next(), "db2", "tbl1");
    examine(iterator.next(), "db2", "tbl2");
    examine(iterator.next(), "db2", "tbl3");
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.policy;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.hive.spec.SimpleHiveSpec;
import org.apache.gobblin.util.ConfigUtils;
import static org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase.ADDITIONAL_HIVE_DATABASE_NAMES;
/**
 * Unit test for {@link HiveRegistrationPolicyBase}.
 *
 * @author Ziyang Liu
 */
@Test(groups = {"gobblin.hive"})
public class HiveRegistrationPolicyBaseTest {
  private Path path;

  @Test
  public void testGetHiveSpecs()
      throws IOException {
    State state = new State();
    state.appendToListProp(HiveRegistrationPolicyBase.HIVE_DATABASE_NAME, "db1");
    state.appendToListProp(ADDITIONAL_HIVE_DATABASE_NAMES, "db2");
    state.appendToListProp(HiveRegistrationPolicyBase.HIVE_TABLE_NAME, "tbl1");
    state.appendToListProp(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_TABLE_NAMES, "tbl2,tbl3");
    this.path = new Path(getClass().getResource("/test-hive-table").toString());

    Collection<HiveSpec> specs = new HiveRegistrationPolicyBase(state).getHiveSpecs(this.path);
    // 2 databases x 3 tables = 6 specs, enumerated database-major.
    Assert.assertEquals(specs.size(), 6);

    Iterator<HiveSpec> iterator = specs.iterator();
    examine(iterator.next(), "db1", "tbl1");
    examine(iterator.next(), "db1", "tbl2");
    examine(iterator.next(), "db1", "tbl3");
    examine(iterator.next(), "db2", "tbl1");
    examine(iterator.next(), "db2", "tbl2");
    examine(iterator.next(), "db2", "tbl3");
  }

  /**
   * Tests fetching additional hive databases from the config object. Specifically, verifies that a
   * dataset-level DB config overrides the same configuration set at the job level.
   */
  @Test
  public void testGetDatabasesNames() throws Exception {
    State jobState = new State();
    jobState.setProp(ADDITIONAL_HIVE_DATABASE_NAMES, "db1");

    Properties properties = new Properties();
    properties.setProperty(ADDITIONAL_HIVE_DATABASE_NAMES, "db2");
    Config configObj = ConfigUtils.propertiesToConfig(properties);

    HiveRegistrationPolicyBase policyBase = new HiveRegistrationPolicyBase(jobState);
    // Setting the config object manually: the dataset-level value ("db2") should win over job level.
    policyBase.configForTopic = Optional.fromNullable(configObj);

    // Construct a dummy path; its contents are irrelevant to database-name resolution.
    File dir = Files.createTempDir();
    dir.deleteOnExit();
    Path dummyPath = new Path(dir.getAbsolutePath() + "/random");

    int dbCount = 0;
    for (String dbName : policyBase.getDatabaseNames(dummyPath)) {
      Assert.assertEquals(dbName, "db2");
      dbCount += 1;
    }
    // Exactly one database expected: the dataset-level override replaces the job-level value.
    Assert.assertEquals(dbCount, 1);
  }

  @Test
  public void testGetHiveSpecsWithDBFilter()
      throws IOException {
    State state = new State();
    state.appendToListProp(HiveRegistrationPolicyBase.HIVE_DATABASE_NAME, "db1");
    state.appendToListProp(ADDITIONAL_HIVE_DATABASE_NAMES, "db2");
    state.appendToListProp(HiveRegistrationPolicyBase.HIVE_TABLE_NAME, "tbl1");
    // $PRIMARY_TABLE is substituted with the primary table name ("tbl1") when specs are built.
    state.appendToListProp(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_TABLE_NAMES, "tbl2,tbl3,$PRIMARY_TABLE_col");
    // Per-database table filter: db2 gets its own table list instead of the shared one.
    state.appendToListProp("db2." + HiveRegistrationPolicyBase.HIVE_TABLE_NAME, "$PRIMARY_TABLE_col,tbl4,tbl5");
    this.path = new Path(getClass().getResource("/test-hive-table").toString());

    Collection<HiveSpec> specs = new HiveRegistrationPolicyBase(state).getHiveSpecs(this.path);
    Assert.assertEquals(specs.size(), 7);

    Iterator<HiveSpec> iterator = specs.iterator();
    examine(iterator.next(), "db1", "tbl1");
    examine(iterator.next(), "db1", "tbl2");
    examine(iterator.next(), "db1", "tbl3");
    examine(iterator.next(), "db1", "tbl1_col");
    examine(iterator.next(), "db2", "tbl1_col");
    examine(iterator.next(), "db2", "tbl4");
    examine(iterator.next(), "db2", "tbl5");
  }

  @Test
  public void testTableRegexp()
      throws IOException {
    State state = new State();
    // Group 1 of the regex captures the database name from the path.
    String regexp = ".*test_bucket/(.*)/staging/.*";
    Optional<Pattern> pattern = Optional.of(Pattern.compile(regexp));
    Path path = new Path("s3://test_bucket/topic/staging/2017-10-21/");
    state.appendToListProp(HiveRegistrationPolicyBase.HIVE_DATABASE_REGEX, regexp);
    HiveRegistrationPolicyBase registrationPolicyBase = new HiveRegistrationPolicyBase(state);
    String resultTable = registrationPolicyBase.getDatabaseOrTableName(path,
        HiveRegistrationPolicyBase.HIVE_DATABASE_NAME, HiveRegistrationPolicyBase.HIVE_DATABASE_REGEX, pattern);
    Assert.assertEquals(resultTable, "topic");
  }

  @Test(expectedExceptions = IllegalStateException.class)
  public void testTableRegexpWithoutGroupShouldFail()
      throws IOException {
    State state = new State();
    // No capture group: name extraction must fail with IllegalStateException.
    String regexp = ".*test_bucket/.*/staging/.*";
    Optional<Pattern> pattern = Optional.of(Pattern.compile(regexp));
    Path path = new Path("s3://test_bucket/topic/staging/2017-10-21/");
    state.appendToListProp(HiveRegistrationPolicyBase.HIVE_DATABASE_REGEX, regexp);
    HiveRegistrationPolicyBase registrationPolicyBase = new HiveRegistrationPolicyBase(state);
    String resultTable = registrationPolicyBase.getDatabaseOrTableName(path,
        HiveRegistrationPolicyBase.HIVE_DATABASE_NAME, HiveRegistrationPolicyBase.HIVE_DATABASE_REGEX, pattern);
    Assert.assertEquals(resultTable, "topic");
  }

  @Test(expectedExceptions = IllegalStateException.class)
  public void testTableRegexpWithoutMatchShouldFail()
      throws IOException {
    State state = new State();
    // Regex matches hdfs:// paths only; the s3:// path below must not match.
    String regexp = "^hdfs://(.*)";
    Optional<Pattern> pattern = Optional.of(Pattern.compile(regexp));
    Path path = new Path("s3://test_bucket/topic/staging/2017-10-21/");
    state.appendToListProp(HiveRegistrationPolicyBase.HIVE_DATABASE_REGEX, regexp);
    HiveRegistrationPolicyBase registrationPolicyBase = new HiveRegistrationPolicyBase(state);
    String resultTable = registrationPolicyBase.getDatabaseOrTableName(path,
        HiveRegistrationPolicyBase.HIVE_DATABASE_NAME, HiveRegistrationPolicyBase.HIVE_DATABASE_REGEX, pattern);
    Assert.assertEquals(resultTable, "topic");
  }

  /** Asserts that {@code spec} is a {@link SimpleHiveSpec} targeting the given database and table. */
  static void examine(HiveSpec spec, String dbName, String tableName) {
    Assert.assertEquals(spec.getClass(), SimpleHiveSpec.class);
    Assert.assertEquals(spec.getTable().getDbName(), dbName);
    Assert.assertEquals(spec.getTable().getTableName(), tableName);
  }
}
| 4,591 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/hadoop/hive/ql/io | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/hadoop/hive/ql/io/orc/TypeDescriptionToObjectInspectorUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.io.orc;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.SettableListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.SettableMapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.SettableStructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.orc.OrcProto;
import org.apache.orc.OrcUtils;
import org.apache.orc.TypeDescription;
/**
 * Converts an ORC {@link TypeDescription} schema into a Hive {@link ObjectInspector} tree.
 *
 * <p>
 * This class lives in the {@code org.apache.hadoop.hive.ql.io.orc} package so it can access the
 * package-visible ORC value classes ({@code OrcStruct}, {@code OrcUnion}).
 * </p>
 */
public class TypeDescriptionToObjectInspectorUtil {

  /**
   * Builds an {@link ObjectInspector} matching the root type of the given ORC schema.
   *
   * @param orcSchema the ORC type description to build an inspector for
   * @return an inspector for the schema's root column
   */
  public static ObjectInspector getObjectInspector(TypeDescription orcSchema) {
    // Column 0 is the root type in the flattened type list produced by OrcUtils.getOrcTypes.
    return createObjectInspector(0, OrcUtils.getOrcTypes(orcSchema));
  }

  /** A single struct field: its name, an inspector for its value, and its position in the struct. */
  static class Field implements StructField {
    private final String name;
    private final ObjectInspector inspector;
    // Zero-based position of this field within the enclosing struct; doubles as the field ID.
    private final int offset;

    Field(String name, ObjectInspector inspector, int offset) {
      this.name = name;
      this.inspector = inspector;
      this.offset = offset;
    }

    @Override
    public String getFieldName() {
      return name;
    }

    @Override
    public ObjectInspector getFieldObjectInspector() {
      return inspector;
    }

    @Override
    public int getFieldID() {
      return offset;
    }

    @Override
    public String getFieldComment() {
      // ORC type metadata carries no per-field comments.
      return null;
    }
  }

  /** Inspector for ORC struct columns; reads and writes {@code OrcStruct} values. */
  static class OrcStructInspector extends SettableStructObjectInspector {
    private List<StructField> fields;

    OrcStructInspector(int columnId, List<OrcProto.Type> types) {
      OrcProto.Type type = types.get(columnId);
      int fieldCount = type.getSubtypesCount();
      fields = new ArrayList<>(fieldCount);
      for (int i = 0; i < fieldCount; ++i) {
        int fieldType = type.getSubtypes(i);
        // Recursively build a child inspector for each field's type.
        fields.add(new Field(type.getFieldNames(i),
            createObjectInspector(fieldType, types), i));
      }
    }

    @Override
    public List<StructField> getAllStructFieldRefs() {
      return fields;
    }

    @Override
    public StructField getStructFieldRef(String s) {
      // Hive field lookup is case-insensitive; returns null when the field is absent.
      for (StructField field : fields) {
        if (field.getFieldName().equalsIgnoreCase(s)) {
          return field;
        }
      }
      return null;
    }

    @Override
    public Object getStructFieldData(Object object, StructField field) {
      if (object == null) {
        return null;
      }
      int offset = ((Field) field).offset;
      OrcStruct struct = (OrcStruct) object;
      if (offset >= struct.getNumFields()) {
        // The value was written with fewer fields than the schema declares; treat as absent.
        return null;
      }
      return struct.getFieldValue(offset);
    }

    @Override
    public List<Object> getStructFieldsDataAsList(Object object) {
      if (object == null) {
        return null;
      }
      OrcStruct struct = (OrcStruct) object;
      List<Object> result = new ArrayList<Object>(struct.getNumFields());
      for (int i = 0; i < struct.getNumFields(); i++) {
        result.add(struct.getFieldValue(i));
      }
      return result;
    }

    @Override
    public String getTypeName() {
      // Renders Hive type syntax, e.g. "struct<a:int,b:string>".
      StringBuilder buffer = new StringBuilder();
      buffer.append("struct<");
      for (int i = 0; i < fields.size(); ++i) {
        StructField field = fields.get(i);
        if (i != 0) {
          buffer.append(",");
        }
        buffer.append(field.getFieldName());
        buffer.append(":");
        buffer.append(field.getFieldObjectInspector().getTypeName());
      }
      buffer.append(">");
      return buffer.toString();
    }

    @Override
    public Category getCategory() {
      return Category.STRUCT;
    }

    @Override
    public Object create() {
      // Start with zero fields; setStructFieldData grows the struct on demand.
      return new OrcStruct(0);
    }

    @Override
    public Object setStructFieldData(Object struct, StructField field,
        Object fieldValue) {
      OrcStruct orcStruct = (OrcStruct) struct;
      int offset = ((Field) field).offset;
      // if the offset is bigger than our current number of fields, grow it
      if (orcStruct.getNumFields() <= offset) {
        orcStruct.setNumFields(offset + 1);
      }
      orcStruct.setFieldValue(offset, fieldValue);
      return struct;
    }
  }

  /** Inspector for ORC map columns; values are plain {@link Map} instances. */
  static class OrcMapObjectInspector
      implements MapObjectInspector, SettableMapObjectInspector {
    private ObjectInspector key;
    private ObjectInspector value;

    OrcMapObjectInspector(int columnId, List<OrcProto.Type> types) {
      OrcProto.Type type = types.get(columnId);
      // Subtype 0 is the key type, subtype 1 the value type.
      key = createObjectInspector(type.getSubtypes(0), types);
      value = createObjectInspector(type.getSubtypes(1), types);
    }

    @Override
    public ObjectInspector getMapKeyObjectInspector() {
      return key;
    }

    @Override
    public ObjectInspector getMapValueObjectInspector() {
      return value;
    }

    @Override
    public Object getMapValueElement(Object map, Object key) {
      return ((map == null || key == null) ? null : ((Map) map).get(key));
    }

    @Override
    @SuppressWarnings("unchecked")
    public Map<Object, Object> getMap(Object map) {
      if (map == null) {
        return null;
      }
      return (Map) map;
    }

    @Override
    public int getMapSize(Object map) {
      // Hive's contract: -1 signals a null map, not an empty one.
      if (map == null) {
        return -1;
      }
      return ((Map) map).size();
    }

    @Override
    public String getTypeName() {
      return "map<" + key.getTypeName() + "," + value.getTypeName() + ">";
    }

    @Override
    public Category getCategory() {
      return Category.MAP;
    }

    @Override
    public Object create() {
      // LinkedHashMap preserves insertion order of map entries.
      return new LinkedHashMap<>();
    }

    @Override
    public Object put(Object map, Object key, Object value) {
      ((Map) map).put(key, value);
      return map;
    }

    @Override
    public Object remove(Object map, Object key) {
      ((Map) map).remove(key);
      return map;
    }

    @Override
    public Object clear(Object map) {
      ((Map) map).clear();
      return map;
    }
  }

  /** Inspector for ORC union columns; values are {@code OrcUnion} (tag + payload). */
  static class OrcUnionObjectInspector implements UnionObjectInspector {
    private List<ObjectInspector> children;

    protected OrcUnionObjectInspector() {
      super();
    }

    OrcUnionObjectInspector(int columnId,
        List<OrcProto.Type> types) {
      OrcProto.Type type = types.get(columnId);
      children = new ArrayList<ObjectInspector>(type.getSubtypesCount());
      // One child inspector per union branch, in declaration order.
      for (int i = 0; i < type.getSubtypesCount(); ++i) {
        children.add(createObjectInspector(type.getSubtypes(i),
            types));
      }
    }

    @Override
    public List<ObjectInspector> getObjectInspectors() {
      return children;
    }

    @Override
    public byte getTag(Object obj) {
      return ((OrcUnion) obj).getTag();
    }

    @Override
    public Object getField(Object obj) {
      return ((OrcUnion) obj).getObject();
    }

    @Override
    public String getTypeName() {
      // Renders e.g. "uniontype<int,string>".
      StringBuilder builder = new StringBuilder("uniontype<");
      boolean first = true;
      for (ObjectInspector child : children) {
        if (first) {
          first = false;
        } else {
          builder.append(",");
        }
        builder.append(child.getTypeName());
      }
      builder.append(">");
      return builder.toString();
    }

    @Override
    public Category getCategory() {
      return Category.UNION;
    }
  }

  /** Inspector for ORC list columns; values are plain {@link List} instances. */
  static class OrcListObjectInspector
      implements ListObjectInspector, SettableListObjectInspector {
    private ObjectInspector child;

    OrcListObjectInspector(int columnId, List<OrcProto.Type> types) {
      OrcProto.Type type = types.get(columnId);
      // Subtype 0 is the element type.
      child = createObjectInspector(type.getSubtypes(0), types);
    }

    @Override
    public ObjectInspector getListElementObjectInspector() {
      return child;
    }

    @Override
    public Object getListElement(Object list, int i) {
      // Out-of-range indices and null lists yield null rather than throwing.
      if (list == null || i < 0 || i >= getListLength(list)) {
        return null;
      }
      return ((List) list).get(i);
    }

    @Override
    public int getListLength(Object list) {
      // Hive's contract: -1 signals a null list, not an empty one.
      if (list == null) {
        return -1;
      }
      return ((List) list).size();
    }

    @Override
    @SuppressWarnings("unchecked")
    public List<?> getList(Object list) {
      if (list == null) {
        return null;
      }
      return (List) list;
    }

    @Override
    public String getTypeName() {
      return "array<" + child.getTypeName() + ">";
    }

    @Override
    public Category getCategory() {
      return Category.LIST;
    }

    @Override
    public Object create(int size) {
      // Pre-fill with nulls so all positions up to size are addressable via set().
      ArrayList<Object> result = new ArrayList<Object>(size);
      for (int i = 0; i < size; ++i) {
        result.add(null);
      }
      return result;
    }

    @Override
    public Object set(Object list, int index, Object element) {
      List l = (List) list;
      // Grow the list with nulls until the target index exists.
      for (int i = l.size(); i < index + 1; ++i) {
        l.add(null);
      }
      l.set(index, element);
      return list;
    }

    @Override
    public Object resize(Object list, int newSize) {
      // Only adjusts capacity, not logical size.
      ((ArrayList) list).ensureCapacity(newSize);
      return list;
    }
  }

  /**
   * Builds an {@link ObjectInspector} for the type at {@code columnId} in the flattened ORC type
   * list, recursing into complex types.
   *
   * @param columnId index of the type to inspect within {@code types}
   * @param types the flattened ORC type list (as produced by {@code OrcUtils.getOrcTypes})
   * @return an inspector for the given type
   * @throws UnsupportedOperationException for unknown kinds, or char/varchar without a length
   */
  static ObjectInspector createObjectInspector(int columnId,
      List<OrcProto.Type> types) {
    OrcProto.Type type = types.get(columnId);
    switch (type.getKind()) {
      // Primitives map to Hive's shared writable inspectors.
      case FLOAT:
        return PrimitiveObjectInspectorFactory.writableFloatObjectInspector;
      case DOUBLE:
        return PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
      case BOOLEAN:
        return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
      case BYTE:
        return PrimitiveObjectInspectorFactory.writableByteObjectInspector;
      case SHORT:
        return PrimitiveObjectInspectorFactory.writableShortObjectInspector;
      case INT:
        return PrimitiveObjectInspectorFactory.writableIntObjectInspector;
      case LONG:
        return PrimitiveObjectInspectorFactory.writableLongObjectInspector;
      case BINARY:
        return PrimitiveObjectInspectorFactory.writableBinaryObjectInspector;
      case STRING:
        return PrimitiveObjectInspectorFactory.writableStringObjectInspector;
      case CHAR:
        // char/varchar require an explicit maximum length in the ORC type definition.
        if (!type.hasMaximumLength()) {
          throw new UnsupportedOperationException(
              "Illegal use of char type without length in ORC type definition.");
        }
        return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
            TypeInfoFactory.getCharTypeInfo(type.getMaximumLength()));
      case VARCHAR:
        if (!type.hasMaximumLength()) {
          throw new UnsupportedOperationException(
              "Illegal use of varchar type without length in ORC type definition.");
        }
        return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
            TypeInfoFactory.getVarcharTypeInfo(type.getMaximumLength()));
      case TIMESTAMP:
        return PrimitiveObjectInspectorFactory.writableTimestampObjectInspector;
      case DATE:
        return PrimitiveObjectInspectorFactory.writableDateObjectInspector;
      case DECIMAL:
        // Fall back to Hive's system defaults when precision/scale are unspecified.
        int precision = type.hasPrecision() ? type.getPrecision() : HiveDecimal.SYSTEM_DEFAULT_PRECISION;
        int scale = type.hasScale() ? type.getScale() : HiveDecimal.SYSTEM_DEFAULT_SCALE;
        return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
            TypeInfoFactory.getDecimalTypeInfo(precision, scale));
      // Complex types recurse via the nested inspector classes above.
      case STRUCT:
        return new OrcStructInspector(columnId, types);
      case UNION:
        return new OrcUnionObjectInspector(columnId, types);
      case MAP:
        return new OrcMapObjectInspector(columnId, types);
      case LIST:
        return new OrcListObjectInspector(columnId, types);
      default:
        throw new UnsupportedOperationException("Unknown type " +
            type.getKind());
    }
  }
}
| 4,592 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HivePartition.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.util.List;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.gobblin.annotation.Alpha;
import lombok.Getter;
/**
 * Represents a single Hive partition for registration purposes.
 *
 * <p>
 * Used in {@link org.apache.gobblin.hive.spec.HiveSpec} in place of Hive's
 * {@link org.apache.hadoop.hive.metastore.api.Partition} to limit the dependency on the
 * hive-metastore API (which is unstable and may change in backward incompatible ways).
 * {@link HivePartition} and Hive's {@link org.apache.hadoop.hive.metastore.api.Partition} can be
 * converted to each other via {@link org.apache.gobblin.hive.metastore.HiveMetaStoreUtils}.
 * </p>
 *
 * @author Ziyang Liu
 */
@Getter
@Alpha
public class HivePartition extends HiveRegistrationUnit {

  /** Partition values; immutable once the partition has been built. */
  private final List<String> values;

  private HivePartition(Builder builder) {
    super(builder);
    // Take an immutable snapshot so later mutations of the builder's list cannot leak in.
    this.values = ImmutableList.copyOf(builder.values);
  }

  @Override
  public String toString() {
    return new StringBuilder(super.toString()).append(" Values: ").append(this.values).toString();
  }

  /** Fluent builder for {@link HivePartition}. */
  public static class Builder extends HiveRegistrationUnit.Builder<Builder> {
    private List<String> values = Lists.newArrayList();

    /** Sets the partition values; returns this builder for chaining. */
    public Builder withPartitionValues(List<String> values) {
      this.values = values;
      return this;
    }

    @Override
    public HivePartition build() {
      return new HivePartition(this);
    }
  }
}
| 4,593 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveRegistrationUnitComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import com.google.common.base.Strings;
import java.util.Set;
import org.apache.avro.Schema;
import org.apache.gobblin.util.AvroUtils;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
/**
 * A comparator between an existing {@link HiveRegistrationUnit} and a new {@link HiveRegistrationUnit}. It is
 * used to determine whether the existing {@link HiveRegistrationUnit} should be altered to match the new
 * {@link HiveRegistrationUnit}.
 *
 * <p>
 * Since altering a Hive table/partition is relatively expensive, when registering a new table/partition, if the
 * table/partition exists, it is usually beneficial to check whether the existing table/partition needs to be
 * altered before altering it.
 * </p>
 *
 * <p>
 * This class does <em>not</em> implement {@link java.util.Comparator} and does not conform to the contract of
 * {@link java.util.Comparator}.
 * </p>
 *
 * <p>
 * Sample usage:
 *
 * <pre> {@code
 *   HiveRegistrationUnitComparator<?> comparator = new HiveRegistrationUnitComparator<>(existingTable, newTable);
 *   boolean needToUpdate = comparator.compareInputFormat().compareOutputFormat().compareNumBuckets()
 *       .compareIsCompressed().compareRawLocation().result();
 * }}
 * </pre>
 *
 * Or to compare all fields:
 *
 * <pre> {@code
 *   HiveRegistrationUnitComparator<?> comparator = new HiveRegistrationUnitComparator<>(existingTable, newTable);
 *   boolean needToUpdate = comparator.compareAll().result();
 * }}
 * </pre>
 * </p>
 *
 * @author Ziyang Liu
 */
@Alpha
public class HiveRegistrationUnitComparator<T extends HiveRegistrationUnitComparator<?>> {

  // Parameter key under which a schema's creation time is compared in place of the full schema
  // literal. Declared static final: it is a constant and must never be reassigned.
  private static final String SCHEMA_CREATION_TIME = "schema.creationTime";

  protected final HiveRegistrationUnit existingUnit;
  protected final HiveRegistrationUnit newUnit;

  // Accumulated result: flips to true as soon as any compared attribute differs, and short-circuits
  // all subsequent compareXxx() calls.
  protected boolean result = false;

  public HiveRegistrationUnitComparator(HiveRegistrationUnit existingUnit, HiveRegistrationUnit newUnit) {
    this.existingUnit = existingUnit;
    this.newUnit = newUnit;
  }

  /**
   * Compare the raw locations (without schema and authority).
   *
   * <p>
   * This is useful since existing tables/partitions in the Hive metastore have absolute paths in the location
   * property, but the new table/partition may have a raw path.
   * </p>
   */
  @SuppressWarnings("unchecked")
  public T compareRawLocation() {
    if (!this.result) {
      // NOTE(review): assumes both units have a present location — Optional.get() without a
      // presence check; verify callers guarantee this.
      this.result |= (!new Path(this.existingUnit.getLocation().get()).toUri().getRawPath()
          .equals(new Path(this.newUnit.getLocation().get()).toUri().getRawPath()));
    }
    return (T) this;
  }

  /** Compares the input formats; no-op once a difference has already been found. */
  @SuppressWarnings("unchecked")
  public T compareInputFormat() {
    if (!this.result) {
      compare(this.existingUnit.getInputFormat(), this.newUnit.getInputFormat());
    }
    return (T) this;
  }

  /** Compares the output formats; no-op once a difference has already been found. */
  @SuppressWarnings("unchecked")
  public T compareOutputFormat() {
    if (!this.result) {
      compare(this.existingUnit.getOutputFormat(), this.newUnit.getOutputFormat());
    }
    return (T) this;
  }

  /** Compares the compression flags; no-op once a difference has already been found. */
  @SuppressWarnings("unchecked")
  public T compareIsCompressed() {
    if (!this.result) {
      compare(this.existingUnit.getIsCompressed(), this.newUnit.getIsCompressed());
    }
    return (T) this;
  }

  /** Compares the bucket counts; no-op once a difference has already been found. */
  @SuppressWarnings("unchecked")
  public T compareNumBuckets() {
    if (!this.result) {
      compare(this.existingUnit.getNumBuckets(), this.newUnit.getNumBuckets());
    }
    return (T) this;
  }

  /** Compares the bucket columns; no-op once a difference has already been found. */
  @SuppressWarnings("unchecked")
  public T compareBucketCols() {
    if (!this.result) {
      compare(this.existingUnit.getBucketColumns(), this.newUnit.getBucketColumns());
    }
    return (T) this;
  }

  /** Compares the stored-as-subdirs flags; no-op once a difference has already been found. */
  @SuppressWarnings("unchecked")
  public T compareIsStoredAsSubDirs() {
    if (!this.result) {
      compare(this.existingUnit.getIsStoredAsSubDirs(), this.newUnit.getIsStoredAsSubDirs());
    }
    return (T) this;
  }

  /**
   * Returns a copy of {@code state} in which an avro schema literal, if present, is replaced by its
   * creation-time property so that comparison is insensitive to schema-string encoding differences.
   */
  private State extractSchemaVersion(State state) {
    //FIXME: This is a temp fix for special character in schema string, need to investigate the root
    //cause of why we see different encoding here and have a permanent fix for this
    State newState = new State(state);
    String schemaFromState = state.getProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName());
    if (!Strings.isNullOrEmpty(schemaFromState)) {
      String schemaVersion = AvroUtils.getSchemaCreationTime(new Schema.Parser().parse(schemaFromState));
      if (!Strings.isNullOrEmpty(schemaVersion)) {
        // Swap the full literal for the creation time only when one is actually present.
        newState.removeProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName());
        newState.setProp(SCHEMA_CREATION_TIME, schemaVersion);
      }
    }
    return newState;
  }

  /**
   * Compares the general, storage and serde parameters; the existing unit must contain every entry
   * of the new unit for them to be considered equal.
   */
  @SuppressWarnings("unchecked")
  public T compareParameters() {
    if (!this.result) {
      checkExistingIsSuperstate(this.existingUnit.getProps(), this.newUnit.getProps());
      checkExistingIsSuperstate(this.existingUnit.getStorageProps(), this.newUnit.getStorageProps());
      checkExistingIsSuperstate(extractSchemaVersion(this.existingUnit.getSerDeProps()),
          extractSchemaVersion(this.newUnit.getSerDeProps()));
    }
    return (T) this;
  }

  /**
   * Compare all parameters.
   */
  @SuppressWarnings("unchecked")
  public T compareAll() {
    this.compareInputFormat().compareOutputFormat().compareIsCompressed().compareIsStoredAsSubDirs().compareNumBuckets()
        .compareBucketCols().compareRawLocation().compareParameters();
    return (T) this;
  }

  /**
   * Compare an existing value and a new value, and set {@link #result} accordingly.
   *
   * <p>
   * This method treats the pair as equal if newValue is absent (i.e., the existing value doesn't need
   * to be updated). This is because when adding a table/partition to Hive, Hive automatically sets
   * default values for some of the unspecified parameters. Therefore existingValue being present and
   * newValue being absent doesn't mean the existing value needs to be updated.
   * </p>
   */
  protected <E> void compare(Optional<E> existingValue, Optional<E> newValue) {
    boolean different;
    if (!newValue.isPresent()) {
      different = false;
    } else {
      different = !existingValue.isPresent() || !existingValue.get().equals(newValue.get());
    }
    this.result |= different;
  }

  /**
   * Compare an existing state and a new {@link State} to ensure that the existing {@link State} contains all
   * entries in the new {@link State}, and update {@link #result} accordingly.
   */
  protected void checkExistingIsSuperstate(State existingState, State newState) {
    checkExistingIsSuperset(existingState.getProperties().entrySet(), newState.getProperties().entrySet());
  }

  /**
   * Compare an existing {@link Set} and a new {@link Set} to ensure that the existing {@link Set} contains all
   * entries in the new {@link Set}, and update {@link #result} accordingly.
   */
  protected <E> void checkExistingIsSuperset(Set<E> existingSet, Set<E> newSet) {
    this.result |= !existingSet.containsAll(newSet);
  }

  /**
   * Get the result of comparison. Note that this resets the accumulated result, so the comparator
   * can be reused for another chain of comparisons.
   *
   * @return true if the existing {@link HiveRegistrationUnit} needs to be altered, false otherwise.
   */
  public boolean result() {
    boolean resultCopy = this.result;
    this.result = false;
    return resultCopy;
  }
}
| 4,594 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveTableComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
/**
 * An extension to {@link HiveRegistrationUnitComparator} for {@link HiveTable}s, adding
 * table-specific comparisons (owner and retention).
 *
 * @author Ziyang Liu
 */
public class HiveTableComparator<T extends HiveTableComparator<?>> extends HiveRegistrationUnitComparator<T> {

  // Typed views of the units held by the superclass, so each comparison method does not need to
  // repeat an unchecked downcast.
  private final HiveTable existingTable;
  private final HiveTable newTable;

  public HiveTableComparator(HiveTable existingTable, HiveTable newTable) {
    super(existingTable, newTable);
    this.existingTable = existingTable;
    this.newTable = newTable;
  }

  /** Compares the table owners; no-op once a difference has already been found. */
  @SuppressWarnings("unchecked")
  public T compareOwner() {
    if (!this.result) {
      compare(this.existingTable.getOwner(), this.newTable.getOwner());
    }
    return (T) this;
  }

  /** Compares the table retention settings; no-op once a difference has already been found. */
  @SuppressWarnings("unchecked")
  public T compareRetention() {
    if (!this.result) {
      compare(this.existingTable.getRetention(), this.newTable.getRetention());
    }
    return (T) this;
  }

  /** Runs all generic comparisons plus the table-specific owner and retention comparisons. */
  @SuppressWarnings("unchecked")
  @Override
  public T compareAll() {
    super.compareAll().compareOwner().compareRetention();
    return (T) this;
  }
}
| 4,595 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveMetaStoreClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.IOException;
import org.apache.commons.pool2.BasePooledObjectFactory;
import org.apache.commons.pool2.PooledObject;
import org.apache.commons.pool2.impl.DefaultPooledObject;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaHook;
import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
import org.apache.hadoop.hive.ql.metadata.HiveUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
/**
 * An implementation of {@link BasePooledObjectFactory} for {@link IMetaStoreClient}.
 *
 * <p>The pool calls {@link #create()} when it needs a fresh client and
 * {@link #destroyObject(PooledObject)} when a pooled client is evicted, at which point the
 * underlying metastore connection is closed.</p>
 */
public class HiveMetaStoreClientFactory extends BasePooledObjectFactory<IMetaStoreClient> {

  private static final Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClientFactory.class);

  public static final String HIVE_METASTORE_TOKEN_SIGNATURE = "hive.metastore.token.signature";

  @Getter
  private HiveConf hiveConf;

  /**
   * @param hcatURI optional metastore URI; when absent, the default {@link HiveConf} is used
   */
  public HiveMetaStoreClientFactory(Optional<String> hcatURI) {
    this(getHiveConf(hcatURI));
  }

  private static HiveConf getHiveConf(Optional<String> hcatURI) {
    try {
      // HiveConf instances are obtained through the implicit shared-resources broker so that
      // factories pointing at the same metastore URI reuse a single configuration object.
      return HiveConfFactory.get(hcatURI, SharedResourcesBrokerFactory.getImplicitBroker());
    } catch (IOException nce) {
      throw new RuntimeException("Implicit broker is not correctly configured, failed to fetch a HiveConf object", nce);
    }
  }

  public HiveMetaStoreClientFactory(HiveConf hiveConf) {
    this.hiveConf = hiveConf;
  }

  public HiveMetaStoreClientFactory() {
    this(Optional.<String> absent());
  }

  private IMetaStoreClient createMetaStoreClient() throws MetaException {
    HiveMetaHookLoader hookLoader = new HiveMetaHookLoader() {
      @Override
      public HiveMetaHook getHook(Table tbl) throws MetaException {
        if (tbl == null) {
          return null;
        }
        try {
          HiveStorageHandler storageHandler =
              HiveUtils.getStorageHandler(hiveConf, tbl.getParameters().get(META_TABLE_STORAGE));
          return storageHandler == null ? null : storageHandler.getMetaHook();
        } catch (HiveException e) {
          // Log with the full stack trace here, because the checked MetaException we rethrow
          // does not carry a cause.
          LOG.error("Failed to get storage handler", e);
          throw new MetaException("Failed to get storage handler: " + e);
        }
      }
    };
    // RetryingMetaStoreClient wraps the client in a proxy that retries transient metastore failures.
    return RetryingMetaStoreClient.getProxy(hiveConf, hookLoader, HiveMetaStoreClient.class.getName());
  }

  /** Creates a new {@link IMetaStoreClient}; invoked by the pool when it needs a fresh instance. */
  @Override
  public IMetaStoreClient create() {
    try {
      return createMetaStoreClient();
    } catch (MetaException e) {
      throw new RuntimeException("Unable to create " + IMetaStoreClient.class.getSimpleName(), e);
    }
  }

  @Override
  public PooledObject<IMetaStoreClient> wrap(IMetaStoreClient client) {
    return new DefaultPooledObject<>(client);
  }

  /** Closes the underlying metastore connection when the pooled client is destroyed. */
  @Override
  public void destroyObject(PooledObject<IMetaStoreClient> client) {
    client.getObject().close();
  }
}
| 4,596 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import org.apache.gobblin.annotation.Alpha;
/**
 * A class containing constants used in {@link HiveTable} and {@link HivePartition}.
 *
 * <p>Each constant is a property-key name read and written by the Hive registration code when
 * populating table, partition, storage and SerDe metadata.</p>
 */
@Alpha
public class HiveConstants {

  // Non-instantiable constants holder.
  private HiveConstants() {
  }

  /**
   * Table and partition properties
   */
  public static final String CREATE_TIME = "create.time";
  public static final String LAST_ACCESS_TIME = "last.access.time";
  public static final String SCHEMA_TIMESTAMP = "schema.timestamp";

  /**
   * Table properties
   */
  public static final String OWNER = "owner";
  public static final String TABLE_TYPE = "table.type";
  public static final String RETENTION = "retention";

  /**
   * Storage properties
   */
  public static final String LOCATION = "location";
  // A storage parameter that is managed by Spark for Spark Datasource tables
  public static final String PATH = "path";
  public static final String INPUT_FORMAT = "input.format";
  public static final String OUTPUT_FORMAT = "output.format";
  public static final String COMPRESSED = "compressed";
  public static final String NUM_BUCKETS = "num.buckets";
  public static final String BUCKET_COLUMNS = "bucket.columns";
  public static final String STORED_AS_SUB_DIRS = "stored.as.sub.dirs";

  /**
   * SerDe properties
   */
  public static final String SERDE_TYPE = "serde.type";
}
| 4,597 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.util.List;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.reflect.TypeToken;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import lombok.Getter;
import lombok.Setter;
import lombok.ToString;
/**
 * Representation of a Hive table used for Hive registration.
 *
 * <p>
 * {@link org.apache.gobblin.hive.spec.HiveSpec} uses this class in place of Hive's
 * {@link org.apache.hadoop.hive.metastore.api.Table} so that the dependency on the
 * hive-metastore API (which is unstable and may change incompatibly) stays minimal.
 * Conversion between the two representations is handled by
 * {@link org.apache.gobblin.hive.metastore.HiveMetaStoreUtils}.
 * </p>
 *
 * @author Ziyang Liu
 */
@Getter
@Alpha
@ToString
public class HiveTable extends HiveRegistrationUnit {

  @Setter
  private List<Column> partitionKeys;

  private Optional<String> owner;
  private Optional<String> tableType;
  private Optional<Long> retention;

  private HiveTable(Builder builder) {
    super(builder);
    // Snapshot the builder's list so later builder mutation cannot leak into this table.
    this.partitionKeys = ImmutableList.copyOf(builder.partitionKeys);
  }

  @SuppressWarnings("serial")
  @Override
  protected void populateTablePartitionFields(State state) {
    super.populateTablePartitionFields(state);

    // Table-level attributes are extracted from the state via anonymous TypeTokens,
    // hence the "serial" suppression on this method.
    this.owner = populateField(state, HiveConstants.OWNER, new TypeToken<String>() {});
    this.tableType = populateField(state, HiveConstants.TABLE_TYPE, new TypeToken<String>() {});
    this.retention = populateField(state, HiveConstants.RETENTION, new TypeToken<Long>() {});
  }

  public void setOwner(String owner) {
    this.owner = Optional.of(owner);
  }

  public void setTableType(String tableType) {
    this.tableType = Optional.of(tableType);
  }

  public void setRetention(long retention) {
    this.retention = Optional.of(retention);
  }

  @Override
  protected void updateTablePartitionFields(State state, String key, Object value) {
    super.updateTablePartitionFields(state, key, value);

    // Absorb table-specific keys into their dedicated fields; any key handled here is
    // removed from the state so it is not kept as a generic property as well.
    boolean handledHere = true;
    if (key.equals(HiveConstants.OWNER)) {
      this.owner = Optional.of((String) value);
    } else if (key.equals(HiveConstants.TABLE_TYPE)) {
      this.tableType = Optional.of((String) value);
    } else if (key.equals(HiveConstants.RETENTION)) {
      this.retention = Optional.of((Long) value);
    } else {
      handledHere = false;
    }
    if (handledHere) {
      state.removeProp(key);
    }
  }

  /**
   * Builder for {@link HiveTable}; extends the base registration-unit builder with
   * partition-key support.
   */
  public static class Builder extends HiveRegistrationUnit.Builder<Builder> {

    private List<Column> partitionKeys = Lists.newArrayList();

    public Builder withPartitionKeys(List<Column> partitionKeys) {
      this.partitionKeys = partitionKeys;
      return this;
    }

    @Override
    public HiveTable build() {
      return new HiveTable(this);
    }
  }
}
| 4,598 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveSerDeManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.IOException;
import java.util.Locale;

import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.hadoop.fs.Path;

import com.google.common.base.Enums;
import com.google.common.base.Optional;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.avro.HiveAvroSerDeManager;
import org.apache.gobblin.hive.orc.HiveOrcSerDeManager;
/**
 * This class manages SerDe properties (including schema properties) for Hive registration.
 *
 * @author Ziyang Liu
 */
@Alpha
public abstract class HiveSerDeManager {

  /**
   * Property key selecting the {@link HiveSerDeManager} implementation: either an
   * {@link Implementation} name (e.g. {@code AVRO}, {@code ORC}) or the fully-qualified
   * name of a class implementing {@link HiveSerDeManager}.
   */
  public static final String HIVE_ROW_FORMAT = "hive.row.format";

  protected final State props;

  protected HiveSerDeManager(State props) {
    this.props = props;
  }

  /**
   * Add the appropriate SerDe properties (including schema properties) to the given {@link HiveRegistrationUnit}.
   *
   * @param path The {@link Path} from where the schema should be obtained.
   * @param hiveUnit The {@link HiveRegistrationUnit} where the serde properties should be added to.
   * @throws IOException
   */
  public abstract void addSerDeProperties(Path path, HiveRegistrationUnit hiveUnit) throws IOException;

  /**
   * Add the appropriate SerDe properties (including schema properties) to the target {@link HiveRegistrationUnit}
   * using the SerDe properties from the source {@link HiveRegistrationUnit}.
   *
   * <p>
   * A benefit of doing this is to avoid obtaining the schema multiple times when creating a table and a partition
   * with the same schema, or creating several tables and partitions with the same schema. After the first
   * table/partition is created, one can use the same SerDe properties to create the other tables/partitions.
   * </p>
   */
  public abstract void addSerDeProperties(HiveRegistrationUnit source, HiveRegistrationUnit target) throws IOException;

  /**
   * Update the schema in the existing {@link HiveRegistrationUnit} into the schema in the new
   * {@link HiveRegistrationUnit}.
   */
  public abstract void updateSchema(HiveRegistrationUnit existingUnit, HiveRegistrationUnit newUnit) throws IOException;

  /**
   * Whether two {@link HiveRegistrationUnit} have the same schema.
   */
  public abstract boolean haveSameSchema(HiveRegistrationUnit unit1, HiveRegistrationUnit unit2) throws IOException;

  /** Known {@link HiveSerDeManager} implementations, each mapping to its manager class name. */
  public enum Implementation {
    AVRO(HiveAvroSerDeManager.class.getName()),
    ORC(HiveOrcSerDeManager.class.getName());

    private final String schemaManagerClassName;

    private Implementation(String schemaManagerClassName) {
      this.schemaManagerClassName = schemaManagerClassName;
    }

    @Override
    public String toString() {
      return this.schemaManagerClassName;
    }
  }

  /**
   * Get an instance of {@link HiveSerDeManager}.
   *
   * @param props A {@link State} object. To get a specific implementation of {@link HiveSerDeManager}, specify either
   * one of the values in {@link Implementation} (e.g., AVRO or ORC) or the name of a class that implements
   * {@link HiveSerDeManager} in property {@link #HIVE_ROW_FORMAT}. The {@link State} object is also used to
   * instantiate the {@link HiveSerDeManager}.
   */
  public static HiveSerDeManager get(State props) {
    String type = props.getProp(HIVE_ROW_FORMAT, Implementation.AVRO.name());
    // Upper-case with a locale-independent rule so the enum lookup is not broken by
    // locale-specific casing (e.g. the Turkish dotless 'i').
    Optional<Implementation> implementation = Enums.getIfPresent(Implementation.class, type.toUpperCase(Locale.ROOT));
    try {
      if (implementation.isPresent()) {
        return (HiveSerDeManager) ConstructorUtils.invokeConstructor(Class.forName(implementation.get().toString()),
            props);
      }
      // Not a known Implementation name: treat the value as a fully-qualified class name.
      return (HiveSerDeManager) ConstructorUtils.invokeConstructor(Class.forName(type), props);
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException(
          "Unable to instantiate " + HiveSerDeManager.class.getSimpleName() + " with type " + type, e);
    }
  }
}
| 4,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.