index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveLockFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import com.google.common.util.concurrent.Striped;
import java.io.IOException;
import java.util.Properties;
import java.util.concurrent.locks.Lock;
/**
* A lock factory that provide a get method for a HiveLockImpl for a specific object
*/
/**
 * A lock factory that provides a {@link HiveLockImpl} for a specific named object.
 *
 * Locks are striped by name: equal names always map to the same underlying {@link Lock},
 * while unused locks may be garbage collected (weak references).
 */
public class HiveLockFactory {

  protected Properties properties;

  // Lazily-created weak striped locks; Integer.MAX_VALUE stripes means effectively
  // one distinct lock per distinct name.
  private final Striped<Lock> locks = Striped.lazyWeakLock(Integer.MAX_VALUE);

  public HiveLockFactory(Properties properties) {
    this.properties = properties;
  }

  /**
   * Returns a {@link HiveLockImpl} backed by the striped {@link Lock} associated with {@code name}.
   * Fixed raw-type usage: the return type is now parameterized as {@code HiveLockImpl<Lock>},
   * which is backward compatible with callers using the raw type.
   *
   * @param name the key identifying the lock to obtain
   */
  public HiveLockImpl<Lock> get(String name) {
    return new HiveLockImpl<Lock>(locks.get(name)) {
      @Override
      public void lock() throws IOException {
        this.lock.lock();
      }

      @Override
      public void unlock() throws IOException {
        this.lock.unlock();
      }
    };
  }
}
| 4,600 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveSerDeWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.IOException;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedSerde;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat;
import org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat;
import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
import org.apache.hadoop.hive.serde2.AbstractSerDe;
import org.apache.hadoop.hive.serde2.SerDe;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.mapred.TextInputFormat;
import com.google.common.base.Enums;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.Either;
/**
* A wrapper around {@link SerDe} that bundles input format, output format and file extension with a {@link SerDe},
* and provides additional functionalities.
*
* @author Ziyang Liu
*/
@Alpha
@SuppressWarnings("deprecation")
public class HiveSerDeWrapper {
private static final String SERDE_SERIALIZER_PREFIX = "serde.serializer.";
private static final String SERDE_DESERIALIZER_PREFIX = "serde.deserializer.";
public static final String SERDE_SERIALIZER_TYPE = SERDE_SERIALIZER_PREFIX + "type";
public static final String SERDE_SERIALIZER_INPUT_FORMAT_TYPE = SERDE_SERIALIZER_PREFIX + "input.format.type";
public static final String SERDE_SERIALIZER_OUTPUT_FORMAT_TYPE = SERDE_SERIALIZER_PREFIX + "output.format.type";
public static final String SERDE_DESERIALIZER_TYPE = SERDE_DESERIALIZER_PREFIX + "type";
public static final String SERDE_DESERIALIZER_INPUT_FORMAT_TYPE = SERDE_DESERIALIZER_PREFIX + "input.format.type";
public static final String SERDE_DESERIALIZER_OUTPUT_FORMAT_TYPE = SERDE_DESERIALIZER_PREFIX + "output.format.type";
public enum BuiltInHiveSerDe {
AVRO(AvroSerDe.class.getName(), AvroContainerInputFormat.class.getName(),
AvroContainerOutputFormat.class.getName()),
ORC(OrcSerde.class.getName(), OrcInputFormat.class.getName(), OrcOutputFormat.class.getName()),
PARQUET(ParquetHiveSerDe.class.getName(), MapredParquetInputFormat.class.getName(),
MapredParquetOutputFormat.class.getName()),
TEXTFILE(LazySimpleSerDe.class.getName(), TextInputFormat.class.getName(),
HiveIgnoreKeyTextOutputFormat.class.getName());
private final String serDeClassName;
private final String inputFormatClassName;
private final String outputFormatClassName;
private BuiltInHiveSerDe(String serDeClassName, String inputFormatClassName, String outputFormatClassName) {
this.serDeClassName = serDeClassName;
this.inputFormatClassName = inputFormatClassName;
this.outputFormatClassName = outputFormatClassName;
}
@Override
public String toString() {
return this.serDeClassName;
}
}
private Optional<Either<AbstractSerDe, VectorizedSerde>> serDe = Optional.absent();
private final String serDeClassName;
private final String inputFormatClassName;
private final String outputFormatClassName;
private HiveSerDeWrapper(BuiltInHiveSerDe hiveSerDe) {
this(hiveSerDe.serDeClassName, hiveSerDe.inputFormatClassName, hiveSerDe.outputFormatClassName);
}
private HiveSerDeWrapper(String serDeClassName, String inputFormatClassName, String outputFormatClassName) {
this.serDeClassName = serDeClassName;
this.inputFormatClassName = inputFormatClassName;
this.outputFormatClassName = outputFormatClassName;
}
/**
* Get the {@link SerDe} instance associated with this {@link HiveSerDeWrapper}.
* This method performs lazy initialization.
*/
public Object getSerDe() throws IOException {
if (!this.serDe.isPresent()) {
try {
Object serde = Class.forName(this.serDeClassName).newInstance();
if (serde instanceof OrcSerde) {
this.serDe = Optional.of(Either.right(VectorizedSerde.class.cast(serde)));
} else {
this.serDe = Optional.of(Either.left(AbstractSerDe.class.cast(serde)));
}
} catch (Throwable t) {
throw new IOException("Failed to instantiate SerDe " + this.serDeClassName, t);
}
}
return this.serDe.get().get();
}
/**
* Get the input format class name associated with this {@link HiveSerDeWrapper}.
*/
public String getInputFormatClassName() {
return this.inputFormatClassName;
}
/**
* Get the output format class name associated with this {@link HiveSerDeWrapper}.
*/
public String getOutputFormatClassName() {
return this.outputFormatClassName;
}
/**
* Get an instance of {@link HiveSerDeWrapper}.
*
* @param serDeType The SerDe type. This should be one of the available {@link HiveSerDeWrapper.BuiltInHiveSerDe}s.
*/
public static HiveSerDeWrapper get(String serDeType) {
return get(serDeType, Optional.<String> absent(), Optional.<String> absent());
}
/**
* Get an instance of {@link HiveSerDeWrapper}.
*
* @param serDeType The SerDe type. If serDeType is one of the available {@link HiveSerDeWrapper.BuiltInHiveSerDe},
* the other three parameters are not used. Otherwise, serDeType should be the class name of a {@link SerDe},
* and the other three parameters must be present.
*/
public static HiveSerDeWrapper get(String serDeType, Optional<String> inputFormatClassName,
Optional<String> outputFormatClassName) {
Optional<BuiltInHiveSerDe> hiveSerDe = Enums.getIfPresent(BuiltInHiveSerDe.class, serDeType.toUpperCase());
if (hiveSerDe.isPresent()) {
return new HiveSerDeWrapper(hiveSerDe.get());
}
Preconditions.checkArgument(inputFormatClassName.isPresent(),
"Missing input format class name for SerDe " + serDeType);
Preconditions.checkArgument(outputFormatClassName.isPresent(),
"Missing output format class name for SerDe " + serDeType);
return new HiveSerDeWrapper(serDeType, inputFormatClassName.get(), outputFormatClassName.get());
}
/**
* Get an instance of {@link HiveSerDeWrapper} from a {@link State}.
*
* @param state The state should contain property {@link #SERDE_SERIALIZER_TYPE}, and optionally contain properties
* {@link #SERDE_SERIALIZER_INPUT_FORMAT_TYPE}, {@link #SERDE_SERIALIZER_OUTPUT_FORMAT_TYPE} and
*/
public static HiveSerDeWrapper getSerializer(State state) {
Preconditions.checkArgument(state.contains(SERDE_SERIALIZER_TYPE),
"Missing required property " + SERDE_SERIALIZER_TYPE);
return get(state.getProp(SERDE_SERIALIZER_TYPE),
Optional.fromNullable(state.getProp(SERDE_SERIALIZER_INPUT_FORMAT_TYPE)),
Optional.fromNullable(state.getProp(SERDE_SERIALIZER_OUTPUT_FORMAT_TYPE)));
}
/**
* Get an instance of {@link HiveSerDeWrapper} from a {@link State}.
*
* @param state The state should contain property {@link #SERDE_DESERIALIZER_TYPE}, and optionally contain properties
* {@link #SERDE_DESERIALIZER_INPUT_FORMAT_TYPE}, {@link #SERDE_DESERIALIZER_OUTPUT_FORMAT_TYPE} and
*/
public static HiveSerDeWrapper getDeserializer(State state) {
Preconditions.checkArgument(state.contains(SERDE_DESERIALIZER_TYPE),
"Missing required property " + SERDE_DESERIALIZER_TYPE);
return get(state.getProp(SERDE_DESERIALIZER_TYPE),
Optional.fromNullable(state.getProp(SERDE_DESERIALIZER_INPUT_FORMAT_TYPE)),
Optional.fromNullable(state.getProp(SERDE_DESERIALIZER_OUTPUT_FORMAT_TYPE)));
}
}
| 4,601 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveRegisterUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicy;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase;
import org.apache.gobblin.hive.spec.HiveSpec;
/**
* Utility class for registering data into Hive.
*/
/**
 * Utility class for registering data into Hive.
 */
public class HiveRegisterUtils {

  private HiveRegisterUtils() {
    // Utility class; no instances.
  }

  /**
   * Register the given {@link Path}s.
   *
   * @param paths The {@link Path}s to be registered.
   * @param state A {@link State} used to instantiate the {@link HiveRegister} and the
   * {@link HiveRegistrationPolicy} that register the given {@link Path}s.
   * @throws IOException if registration fails or the register cannot be closed
   */
  public static void register(Iterable<String> paths, State state) throws IOException {
    try (HiveRegister register = HiveRegister.get(state)) {
      HiveRegistrationPolicy registrationPolicy = HiveRegistrationPolicyBase.getPolicy(state);
      for (String pathString : paths) {
        Path path = new Path(pathString);
        for (HiveSpec hiveSpec : registrationPolicy.getHiveSpecs(path)) {
          register.register(hiveSpec);
        }
      }
    }
  }
}
| 4,602 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HivePartitionComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import com.google.common.base.Optional;
/**
* An extension to {@link HiveRegistrationUnitComparator} for {@link HivePartition}s.
*
* @author Ziyang Liu
*/
/**
 * An extension to {@link HiveRegistrationUnitComparator} for {@link HivePartition}s.
 *
 * @author Ziyang Liu
 */
public class HivePartitionComparator<T extends HivePartitionComparator<?>> extends HiveRegistrationUnitComparator<T> {

  public HivePartitionComparator(HivePartition existingPartition, HivePartition newPartition) {
    super(existingPartition, newPartition);
  }

  /**
   * Compares the partition values of the existing and new partitions, unless a
   * difference has already been detected by a previous comparison.
   */
  @SuppressWarnings("unchecked")
  public T compareValues() {
    if (!this.result) {
      HivePartition existing = (HivePartition) this.existingUnit;
      HivePartition updated = (HivePartition) this.newUnit;
      compare(Optional.of(existing.getValues()), Optional.of(updated.getValues()));
    }
    return (T) this;
  }

  /** Runs all base comparisons, then the partition-value comparison. */
  @SuppressWarnings("unchecked")
  @Override
  public T compareAll() {
    super.compareAll().compareValues();
    return (T) this;
  }
}
| 4,603 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveRegisterStep.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.util.Arrays;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.io.IOException;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.hive.spec.HiveSpec;
/**
* {@link CommitStep} to perform a Hive registration.
*/
/**
 * {@link CommitStep} to perform a Hive registration of a {@link HiveSpec}
 * (a table and, optionally, a partition).
 */
@Slf4j
@AllArgsConstructor
public class HiveRegisterStep implements CommitStep {

  public HiveRegisterStep(Optional<String> metastoreURI, HiveSpec hiveSpec, HiveRegProps props) {
    this(metastoreURI, hiveSpec, props, true);
  }

  private final Optional<String> metastoreURI;
  private final HiveSpec hiveSpec;
  private final HiveRegProps props;
  // When true, verify table/partition locations exist on the file system before registering.
  private final boolean verifyBeforeRegistering;

  @Override
  public boolean isCompleted() throws IOException {
    // TODO: this is complicated due to preactivities, postactivities, etc. but unnecessary for now because exactly once
    // is not enabled.
    return false;
  }

  /**
   * Optionally verifies the table and partition locations, then performs the
   * (asynchronous) Hive registration and waits for it to complete.
   *
   * @throws IOException if verification fails, registration fails, or the wait is interrupted
   */
  @Override
  public void execute() throws IOException {
    if (this.verifyBeforeRegistering) {
      if (!this.hiveSpec.getTable().getLocation().isPresent()) {
        throw getException("Table does not have a location parameter.");
      }
      Path tablePath = new Path(this.hiveSpec.getTable().getLocation().get());
      FileSystem fs = this.hiveSpec.getPath().getFileSystem(new Configuration());
      if (!fs.exists(tablePath)) {
        throw getException(String.format("Table location %s does not exist.", tablePath));
      }
      if (this.hiveSpec.getPartition().isPresent()) {
        if (!this.hiveSpec.getPartition().get().getLocation().isPresent()) {
          throw getException("Partition does not have a location parameter.");
        }
        Path partitionPath = new Path(this.hiveSpec.getPartition().get().getLocation().get());
        // Fix: check the partition location itself. Previously this checked
        // this.hiveSpec.getPath(), which did not match the error message below.
        if (!fs.exists(partitionPath)) {
          throw getException(String.format("Partition location %s does not exist.", partitionPath));
        }
      }
    }
    try (HiveRegister hiveRegister = HiveRegister.get(this.props, this.metastoreURI)) {
      log.info("Registering Hive Spec " + this.hiveSpec);
      ListenableFuture<Void> future = hiveRegister.register(this.hiveSpec);
      future.get();
    } catch (InterruptedException | ExecutionException ie) {
      if (ie instanceof InterruptedException) {
        // Restore the interrupt status so callers up the stack can observe it.
        Thread.currentThread().interrupt();
      }
      throw new IOException("Hive registration was interrupted.", ie);
    }
  }

  /** Builds an {@link IOException} whose message includes the Hive spec being registered. */
  private IOException getException(String message) {
    return new IOException(
        String.format("Failed to register Hive Spec %s. %s", this.hiveSpec, message)
    );
  }

  @Override
  public String toString() {
    String table = this.hiveSpec.getTable().getDbName() + "." + this.hiveSpec.getTable().getTableName();
    String partitionInfo = this.hiveSpec.getPartition().isPresent()
        ? " partition " + Arrays.toString(this.hiveSpec.getPartition().get().getValues().toArray()) : "";
    String location = this.hiveSpec.getPartition().isPresent() ? this.hiveSpec.getPartition().get().getLocation().get()
        : this.hiveSpec.getTable().getLocation().get();
    return String.format("Register %s%s with location %s in Hive metastore %s.", table, partitionInfo, location,
        this.metastoreURI.isPresent() ? this.metastoreURI.get() : "default");
  }
}
| 4,604 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/TableDeregisterStep.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import lombok.AllArgsConstructor;
import java.io.IOException;
import org.apache.hadoop.hive.metastore.api.Table;
import com.google.common.base.Optional;
import org.apache.gobblin.commit.CommitStep;
/**
* {@link CommitStep} to deregister a Hive table.
*/
/**
 * {@link CommitStep} to deregister (drop) a Hive table from the metastore.
 */
@AllArgsConstructor
public class TableDeregisterStep implements CommitStep {

  private Table table;
  private final Optional<String> metastoreURI;
  private final HiveRegProps props;

  @Override
  public boolean isCompleted() throws IOException {
    // Completion is not tracked for this step; it always reports not completed.
    return false;
  }

  /**
   * Drops the table (if it exists) using a {@link HiveRegister} bound to the configured metastore.
   */
  @Override
  public void execute() throws IOException {
    try (HiveRegister register = HiveRegister.get(this.props, this.metastoreURI)) {
      register.dropTableIfExists(this.table.getDbName(), this.table.getTableName());
    }
  }

  @Override
  public String toString() {
    String metastore = this.metastoreURI.isPresent() ? this.metastoreURI.get() : "default";
    return String.format("Deregister table %s.%s on Hive metastore %s.", this.table.getDbName(),
        this.table.getTableName(), metastore);
  }
}
| 4,605 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveConfFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.IOException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import com.google.common.base.Optional;
import org.apache.gobblin.broker.EmptyKey;
import org.apache.gobblin.broker.ResourceInstance;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import static org.apache.gobblin.hive.HiveMetaStoreClientFactory.HIVE_METASTORE_TOKEN_SIGNATURE;
/**
* The factory that creates a {@link HiveConf} as shared resource.
* {@link EmptyKey} is fair since {@link HiveConf} seems to be read-only.
*/
/**
 * The factory that creates a {@link HiveConf} as a shared resource.
 * Sharing under a single {@link EmptyKey}-style key is fair since {@link HiveConf}
 * appears to be used read-only here.
 */
public class HiveConfFactory<S extends ScopeType<S>> implements SharedResourceFactory<HiveConf, SharedHiveConfKey, S> {

  static final String FACTORY_NAME = "hiveConfFactory";

  @Override
  public String getName() {
    return FACTORY_NAME;
  }

  /**
   * Creates a {@link HiveConf}; when the key carries a non-default, non-empty URI,
   * the metastore URI and token signature are set on the conf.
   */
  @Override
  public SharedResourceFactoryResponse<HiveConf> createResource(SharedResourcesBroker<S> broker,
      ScopedConfigView<S, SharedHiveConfKey> config)
      throws NotConfiguredException {
    SharedHiveConfKey key = config.getKey();
    HiveConf hiveConf = new HiveConf();
    boolean isDefaultKey = key.hiveConfUri.equals(SharedHiveConfKey.INSTANCE.toConfigurationKey());
    if (!isDefaultKey && StringUtils.isNotEmpty(key.hiveConfUri)) {
      hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, key.hiveConfUri);
      hiveConf.set(HIVE_METASTORE_TOKEN_SIGNATURE, key.hiveConfUri);
    }
    return new ResourceInstance<>(hiveConf);
  }

  /**
   *
   * @param hcatURI User specified hcatURI.
   * @param broker A shared resource broker
   * @return a {@link HiveConf} with the specified hcatURI if any.
   * @throws IOException if the shared resource is not configured
   */
  public static <S extends ScopeType<S>> HiveConf get(Optional<String> hcatURI, SharedResourcesBroker<S> broker)
      throws IOException {
    SharedHiveConfKey confKey = SharedHiveConfKey.INSTANCE;
    if (hcatURI.isPresent() && StringUtils.isNotBlank(hcatURI.get())) {
      confKey = new SharedHiveConfKey(hcatURI.get());
    }
    try {
      return broker.getSharedResource(new HiveConfFactory<>(), confKey);
    } catch (NotConfiguredException nce) {
      throw new IOException(nce);
    }
  }

  @Override
  public S getAutoScope(SharedResourcesBroker<S> broker, ConfigView<S, SharedHiveConfKey> config) {
    return broker.selfScope().getType().rootScope();
  }
}
| 4,606 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveLockImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.IOException;
/**
* A wrapper lock to be used by hive.
* @param <T> The class of the real lock
*/
/**
 * A wrapper lock to be used by Hive registration code.
 *
 * @param <T> the class of the real (underlying) lock
 */
public abstract class HiveLockImpl<T> {

  // The underlying lock delegate, available to subclasses.
  protected T lock;

  public HiveLockImpl(T lock) {
    this.lock = lock;
  }

  /**
   * Acquires the underlying lock.
   * @throws IOException if acquisition fails
   */
  public abstract void lock() throws IOException;

  /**
   * Releases the underlying lock.
   * @throws IOException if release fails
   */
  public abstract void unlock() throws IOException;
}
| 4,607 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/SharedHiveConfKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import lombok.EqualsAndHashCode;
import org.apache.gobblin.broker.iface.SharedResourceKey;
/**
* {@link SharedResourceKey} for {@link org.apache.gobblin.hive.HiveConfFactory}. Contains an identifier for
* a cluster's Hive Metastore URI.
*/
/**
 * {@link SharedResourceKey} for {@link org.apache.gobblin.hive.HiveConfFactory}.
 * Wraps an identifier for a cluster's Hive Metastore URI; the URI itself is the
 * configuration key.
 */
@EqualsAndHashCode
public class SharedHiveConfKey implements SharedResourceKey {

  public final String hiveConfUri;

  public SharedHiveConfKey(String hiveConfUri) {
    this.hiveConfUri = hiveConfUri;
  }

  /**
   * A singleton instance used with an empty hcat URI (the "default" metastore).
   */
  public static final SharedHiveConfKey INSTANCE = new SharedHiveConfKey("");

  @Override
  public String toConfigurationKey() {
    return this.hiveConfUri;
  }
}
| 4,608 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveRegProps.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import org.apache.gobblin.hive.metastore.HiveMetaStoreUtils;
import java.util.List;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister;
/**
* An extension to {@link State} for Hive registration.
*
* @author Ziyang Liu
*/
@Alpha
@Getter
@EqualsAndHashCode(callSuper = true)
public class HiveRegProps extends State {
public static final String HIVE_DB_ROOT_DIR = "hive.db.root.dir";
public static final String HIVE_REGISTER_THREADS = "hive.register.threads";
public static final String HIVE_MAX_WAIT_MILLS_BORROW_CLIENT = "hive.register.maxWaitMills.borrow.client";
public static final long DEFAULT_HIVE_MAX_WAIT_MILLS_BORROW_CLIENT = -1L;
public static final int DEFAULT_HIVE_REGISTER_THREADS = 20;
public static final String HIVE_TABLE_PARTITION_PROPS = "hive.table.partition.props";
public static final String HIVE_STORAGE_PROPS = "hive.storage.props";
public static final String HIVE_SERDE_PROPS = "hive.serde.props";
public static final String HIVE_UPSTREAM_DATA_ATTR_NAMES= "hive.upstream.data.attr.names";
private static final Splitter SPLITTER = Splitter.on(':').trimResults().omitEmptyStrings();
private final State tablePartitionProps;
private final State storageProps;
private final State serdeProps;
private Optional<String> runtimeTableProps;
/**
* @param props A {@link State} object that includes both properties required by {@link HiveMetaStoreBasedRegister} to do
* Hive registration, as well as the Hive properties that will be added to the Hive table when creating the table,
* e.g., orc.compress=SNAPPY
*
* <p>
* The Hive table properties should be a comma-separated list associated with {@link #HIVE_TABLE_PARTITION_PROPS} in the
* given {@link State}.
* </p>
*/
public HiveRegProps(State props) {
super(props);
this.tablePartitionProps = createHiveProps(HIVE_TABLE_PARTITION_PROPS);
if (props.contains(HiveMetaStoreUtils.RUNTIME_PROPS)) {
runtimeTableProps = Optional.of(props.getProp(HiveMetaStoreUtils.RUNTIME_PROPS));
}
else{
runtimeTableProps = Optional.absent();
}
this.storageProps = createHiveProps(HIVE_STORAGE_PROPS);
this.serdeProps = createHiveProps(HIVE_SERDE_PROPS);
}
/**
* @param props Properties required by {@link HiveMetaStoreBasedRegister} to do Hive registration
* @param tableProps Hive properties that will be added to the Hive table when creating the table,
* e.g., orc.compress=SNAPPY
*/
public HiveRegProps(State props, State tableProps, State storageProps, State serdeProps) {
super(props);
this.tablePartitionProps = tableProps;
if (props.contains(HiveMetaStoreUtils.RUNTIME_PROPS)) {
runtimeTableProps = Optional.of(props.getProp(HiveMetaStoreUtils.RUNTIME_PROPS));
}
else{
runtimeTableProps = Optional.absent();
}
this.storageProps = storageProps;
this.serdeProps = serdeProps;
}
/**
* Create a {@link State} object that contains Hive table properties. These properties are obtained from
* {@link #HIVE_TABLE_PARTITION_PROPS}, which is a list of comma-separated properties. Each property is in the form
* of '[key]=[value]'.
*/
private State createHiveProps(String propKey) {
State state = new State();
if (!contains(propKey)) {
return state;
}
for (String propValue : getPropAsList(propKey)) {
List<String> tokens = SPLITTER.splitToList(propValue);
Preconditions.checkState(tokens.size() == 2, propValue + " is not a valid Hive table/partition property");
state.setProp(tokens.get(0), tokens.get(1));
}
return state;
}
/**
* Get Hive database root dir from {@link #HIVE_DB_ROOT_DIR}.
*
* @return {@link Optional#absent()} if {@link #HIVE_DB_ROOT_DIR} is not specified.
*/
public Optional<String> getDbRootDir() {
return Optional.fromNullable(getProp(HIVE_DB_ROOT_DIR));
}
/**
* Get the name of registered HiveTable's upstream data attributes.
* E.g., When data consumed from Kafka is registered into Hive Table, it is expected
* to have Hive Metadata indicating the Kafka topic.
*
* HIVE_UPSTREAM_DATA_ATTR_NAMES is comma separated string, each item representing a upstream data attr.
* E.g. hive.upstream.data.attr.names=topic.name,some.else
*
* @return {@link Optional#absent()} if {@link #HIVE_UPSTREAM_DATA_ATTR_NAMES} is not specified.
*/
public Optional<String> getUpstreamDataAttrName(){
return Optional.fromNullable(getProp(HIVE_UPSTREAM_DATA_ATTR_NAMES));
}
  /**
   * Number of registration threads, taken from {@link #HIVE_REGISTER_THREADS};
   * defaults to {@link #DEFAULT_HIVE_REGISTER_THREADS}.
   */
  public int getNumThreads() {
    return getPropAsInt(HIVE_REGISTER_THREADS, DEFAULT_HIVE_REGISTER_THREADS);
  }
  /**
   * Max wait in milliseconds when borrowing a Hive client from the pool, taken from
   * {@link #HIVE_MAX_WAIT_MILLS_BORROW_CLIENT}; defaults to
   * {@link #DEFAULT_HIVE_MAX_WAIT_MILLS_BORROW_CLIENT}.
   */
  public long getMaxWaitMillisBorrowingClient() {
    return getPropAsLong(HIVE_MAX_WAIT_MILLS_BORROW_CLIENT, DEFAULT_HIVE_MAX_WAIT_MILLS_BORROW_CLIENT);
  }
}
| 4,609 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/PartitionDeregisterStep.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.util.Arrays;
import lombok.AllArgsConstructor;
import java.io.IOException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import com.google.common.base.Optional;
import org.apache.gobblin.commit.CommitStep;
import org.apache.gobblin.hive.metastore.HiveMetaStoreUtils;
/**
 * {@link CommitStep} that drops a single Hive partition from the metastore.
 */
@AllArgsConstructor
public class PartitionDeregisterStep implements CommitStep {

  private Table table;
  private Partition partition;
  private final Optional<String> metastoreURI;
  private final HiveRegProps props;

  /**
   * NOTE(review): always reports false — presumably so the step is never skipped by the
   * commit framework; confirm against the CommitStep contract.
   */
  @Override
  public boolean isCompleted() throws IOException {
    return false;
  }

  /**
   * Drops the partition (if it exists) through a {@link HiveRegister} bound to the configured
   * metastore; the register is closed when done via try-with-resources.
   */
  @Override
  public void execute() throws IOException {
    HiveTable hiveTable = HiveMetaStoreUtils.getHiveTable(this.table);
    try (HiveRegister hiveRegister = HiveRegister.get(this.props, this.metastoreURI)) {
      hiveRegister.dropPartitionIfExists(this.partition.getDbName(), this.partition.getTableName(),
          hiveTable.getPartitionKeys(), this.partition.getValues());
    }
  }

  @Override
  public String toString() {
    String metastoreName = this.metastoreURI.isPresent() ? this.metastoreURI.get() : "default";
    return String.format("Deregister partition %s.%s %s on Hive metastore %s.", this.partition.getDbName(),
        this.partition.getTableName(), Arrays.toString(this.partition.getValues().toArray()), metastoreName);
  }
}
| 4,610 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveRegister.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.Closeable;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveRegistrationUnit.Column;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.hive.spec.HiveSpecWithPostActivities;
import org.apache.gobblin.hive.spec.HiveSpecWithPreActivities;
import org.apache.gobblin.hive.spec.HiveSpecWithPredicates;
import org.apache.gobblin.hive.spec.activity.Activity;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.executors.ScalingThreadPoolExecutor;
/**
 * A class for registering Hive tables and partitions.
 *
 * <p>
 * Registration requests submitted via {@link #register(HiveSpec)} run asynchronously on an
 * internal thread pool; callers should invoke {@link #close()} (or
 * {@link #waitOnFuturesToFinish()}) to ensure all submitted registrations have completed.
 * </p>
 *
 * @author Ziyang Liu
 */
@Slf4j
@Alpha
public abstract class HiveRegister implements Closeable {

  public static final String HIVE_REGISTER_TYPE = "hive.register.type";
  public static final String DEFAULT_HIVE_REGISTER_TYPE =
      "org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister";
  public static final String HIVE_TABLE_COMPARATOR_TYPE = "hive.table.comparator.type";
  public static final String DEFAULT_HIVE_TABLE_COMPARATOR_TYPE = HiveTableComparator.class.getName();
  public static final String HIVE_PARTITION_COMPARATOR_TYPE = "hive.partition.comparator.type";
  public static final String DEFAULT_HIVE_PARTITION_COMPARATOR_TYPE = HivePartitionComparator.class.getName();
  public static final String HIVE_METASTORE_URI_KEY = "hive.metastore.uri";
  public static final String HIVE_REGISTER_CLOSE_TIMEOUT_SECONDS_KEY = "hiveRegister.close.timeout.seconds";

  protected static final String HIVE_DB_EXTENSION = ".db";

  @Getter
  protected final HiveRegProps props;

  protected final Optional<String> hiveDbRootDir;
  protected final ListeningExecutorService executor;
  protected final Map<String, Future<Void>> futures = Maps.newConcurrentMap();
  // Per-future wait bound applied in waitOnFuturesToFinish(); non-positive means wait indefinitely.
  protected final long timeOutSeconds;

  protected HiveRegister(State state) {
    this.props = new HiveRegProps(state);
    this.hiveDbRootDir = this.props.getDbRootDir();
    this.executor = ExecutorsUtils.loggingDecorator(ScalingThreadPoolExecutor
        .newScalingThreadPool(0, this.props.getNumThreads(), TimeUnit.SECONDS.toMillis(10),
            ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of(getClass().getSimpleName()))));
    this.timeOutSeconds = this.props.getPropAsLong(HIVE_REGISTER_CLOSE_TIMEOUT_SECONDS_KEY, -1L);
  }

  /**
   * Register a table or partition given a {@link HiveSpec}. This method is asynchronous and returns immediately.
   * This methods evaluates the {@link Predicate}s and executes the {@link Activity}s specified in the
   * {@link HiveSpec}. The actual registration happens in {@link #registerPath(HiveSpec)}, which subclasses
   * should implement.
   *
   * @return a {@link ListenableFuture} for the process of registering the given {@link HiveSpec}.
   */
  public ListenableFuture<Void> register(final HiveSpec spec) {
    ListenableFuture<Void> future = this.executor.submit(new Callable<Void>() {
      @Override
      public Void call()
          throws Exception {
        try {
          if (spec instanceof HiveSpecWithPredicates && !evaluatePredicates((HiveSpecWithPredicates) spec)) {
            log.info("Skipping " + spec + " since predicates return false");
            return null;
          }

          if (spec instanceof HiveSpecWithPreActivities) {
            for (Activity activity : ((HiveSpecWithPreActivities) spec).getPreActivities()) {
              activity.execute(HiveRegister.this);
            }
          }

          registerPath(spec);

          if (spec instanceof HiveSpecWithPostActivities) {
            for (Activity activity : ((HiveSpecWithPostActivities) spec).getPostActivities()) {
              activity.execute(HiveRegister.this);
            }
          }

          return null;
        } catch (Exception e) {
          log.error("Exception during hive registration", e);
          throw e;
        }
      }
    });
    // Track the future by spec id so close()/waitOnFuturesToFinish() can block on it.
    // NOTE(review): registering the same spec id twice replaces the tracked future.
    this.futures.put(getSpecId(spec), future);
    return future;
  }

  private String getSpecId(HiveSpec spec) {
    Optional<HivePartition> partition = spec.getPartition();
    if (partition.isPresent()) {
      return String.format("%s.%s@%s", spec.getTable().getDbName(), spec.getTable().getTableName(),
          Arrays.toString(partition.get().getValues().toArray()));
    } else {
      return String.format("%s.%s", spec.getTable().getDbName(), spec.getTable().getTableName());
    }
  }

  /** @return false as soon as any predicate of the spec rejects this register. */
  private boolean evaluatePredicates(HiveSpecWithPredicates spec) {
    for (Predicate<HiveRegister> pred : spec.getPredicates()) {
      if (!pred.apply(this)) {
        return false;
      }
    }
    return true;
  }

  /**
   * Register the path specified in the given {@link HiveSpec}.
   *
   * <p>
   * This method should not evaluate {@link Predicate}s or execute {@link Activity}s associated with
   * the {@link HiveSpec}, since these are done in {@link #register(HiveSpec)}.
   * </p>
   */
  protected abstract void registerPath(HiveSpec spec)
      throws IOException;

  /**
   * Create a Hive database if not exists.
   *
   * @param dbName the name of the database to be created.
   * @return true if the db is successfully created; false if the db already exists.
   * @throws IOException
   */
  public abstract boolean createDbIfNotExists(String dbName)
      throws IOException;

  /**
   * Create a Hive table if not exists.
   *
   * @param table a {@link HiveTable} to be created.
   * @return true if the table is successfully created; false if the table already exists.
   * @throws IOException
   */
  public abstract boolean createTableIfNotExists(HiveTable table)
      throws IOException;

  /**
   * Add a Hive partition to a table if not exists.
   *
   * @param table the {@link HiveTable} to which the partition should be added.
   * @param partition a {@link HivePartition} to be added.
   * @return true if the partition is successfully added; false if the partition already exists.
   * @throws IOException
   */
  public abstract boolean addPartitionIfNotExists(HiveTable table, HivePartition partition)
      throws IOException;

  /**
   * Determines whether a Hive table exists.
   *
   * @param dbName the database name
   * @param tableName the table name
   * @return true if the table exists, false otherwise.
   * @throws IOException
   */
  public abstract boolean existsTable(String dbName, String tableName)
      throws IOException;

  /**
   * Determines whether a Hive partition exists.
   *
   * @param dbName the database name
   * @param tableName the table name
   * @param partitionKeys a list of {@link Column}s representing the key of the partition
   * @param partitionValues a list of Strings representing the value of the partition
   * @return true if the partition exists, false otherwise.
   * @throws IOException
   */
  public abstract boolean existsPartition(String dbName, String tableName, List<Column> partitionKeys,
      List<String> partitionValues)
      throws IOException;

  /**
   * Drop a table if exists.
   *
   * @param dbName the database name
   * @param tableName the table name
   * @throws IOException
   */
  public abstract void dropTableIfExists(String dbName, String tableName)
      throws IOException;

  /**
   * Drop a partition if exists.
   *
   * @param dbName the database name
   * @param tableName the table name
   * @param partitionKeys a list of {@link Column}s representing the key of the partition
   * @param partitionValues a list of Strings representing the value of the partition
   * @throws IOException
   */
  public abstract void dropPartitionIfExists(String dbName, String tableName, List<Column> partitionKeys,
      List<String> partitionValues)
      throws IOException;

  /**
   * Get a {@link HiveTable} using the given db name and table name.
   *
   * @param dbName the database name
   * @param tableName the table name
   * @return an {@link Optional} of {@link HiveTable} if the table exists, otherwise {@link Optional#absent()}.
   * @throws IOException
   */
  public abstract Optional<HiveTable> getTable(String dbName, String tableName)
      throws IOException;

  /**
   * Get a {@link HivePartition} using the given db name, table name, partition keys and partition values.
   *
   * @param dbName the database name
   * @param tableName the table name
   * @param partitionKeys a list of {@link Column}s representing the key of the partition
   * @param partitionValues a list of Strings representing the value of the partition
   * @return an {@link Optional} of {@link HivePartition} if the partition exists, otherwise {@link Optional#absent()}.
   * @throws IOException
   */
  public abstract Optional<HivePartition> getPartition(String dbName, String tableName, List<Column> partitionKeys,
      List<String> partitionValues)
      throws IOException;

  /**
   * Alter the given {@link HiveTable}. An Exception should be thrown if the table does not exist.
   *
   * @param table a {@link HiveTable} to which the existing table should be updated.
   * @throws IOException
   */
  public abstract void alterTable(HiveTable table)
      throws IOException;

  /**
   * Alter the given {@link HivePartition}. An Exception should be thrown if the partition does not exist.
   *
   * @param table the {@link HiveTable} to which the partition belongs.
   * @param partition a {@link HivePartition} to which the existing partition should be updated.
   * @throws IOException
   */
  public abstract void alterPartition(HiveTable table, HivePartition partition)
      throws IOException;

  /**
   * Create a table if not exists, or alter a table if exists.
   *
   * @param table a {@link HiveTable} to be created or altered
   * @throws IOException
   */
  public void createOrAlterTable(HiveTable table)
      throws IOException {
    if (!createTableIfNotExists(table)) {
      alterTable(table);
    }
  }

  /**
   * Add a partition to a table if not exists, or alter a partition if exists.
   *
   * @param table the {@link HiveTable} to which the partition belongs.
   * @param partition a {@link HivePartition} to which the existing partition should be updated.
   * @throws IOException
   */
  public void addOrAlterPartition(HiveTable table, HivePartition partition)
      throws IOException {
    if (!addPartitionIfNotExists(table, partition)) {
      alterPartition(table, partition);
    }
  }

  /** Instantiate the configured {@link HiveRegistrationUnitComparator} for tables via reflection. */
  protected HiveRegistrationUnitComparator<?> getTableComparator(HiveTable existingTable, HiveTable newTable) {
    try {
      Class<?> clazz =
          Class.forName(this.props.getProp(HIVE_TABLE_COMPARATOR_TYPE, DEFAULT_HIVE_TABLE_COMPARATOR_TYPE));
      return (HiveRegistrationUnitComparator<?>) ConstructorUtils.invokeConstructor(clazz, existingTable, newTable);
    } catch (ReflectiveOperationException e) {
      log.error("Unable to instantiate Hive table comparator", e);
      throw Throwables.propagate(e);
    }
  }

  public boolean needToUpdateTable(HiveTable existingTable, HiveTable newTable) {
    return getTableComparator(existingTable, newTable).compareAll().result();
  }

  /** Instantiate the configured {@link HiveRegistrationUnitComparator} for partitions via reflection. */
  protected HiveRegistrationUnitComparator<?> getPartitionComparator(HivePartition existingPartition,
      HivePartition newPartition) {
    try {
      Class<?> clazz =
          Class.forName(this.props.getProp(HIVE_PARTITION_COMPARATOR_TYPE, DEFAULT_HIVE_PARTITION_COMPARATOR_TYPE));
      return (HiveRegistrationUnitComparator<?>) ConstructorUtils
          .invokeConstructor(clazz, existingPartition, newPartition);
    } catch (ReflectiveOperationException e) {
      log.error("Unable to instantiate Hive partition comparator", e);
      throw Throwables.propagate(e);
    }
  }

  public boolean needToUpdatePartition(HivePartition existingPartition, HivePartition newPartition) {
    return getPartitionComparator(existingPartition, newPartition).compareAll().result();
  }

  /**
   * Wait till all registration requested submitted via {@link #register(HiveSpec)} to finish.
   *
   * @throws IOException if any registration failed or was interrupted.
   */
  @Override
  public void close()
      throws IOException {
    try {
      waitOnFuturesToFinish();
    } finally {
      ExecutorsUtils.shutdownExecutorService(this.executor, Optional.of(log));
    }
  }

  /**
   * Block until every tracked registration future completes. Each wait is bounded by
   * {@link #HIVE_REGISTER_CLOSE_TIMEOUT_SECONDS_KEY} seconds when that property is positive.
   *
   * @throws IOException if any registration failed, timed out, or the wait was interrupted.
   */
  public void waitOnFuturesToFinish()
      throws IOException {
    for (Map.Entry<String, Future<Void>> entry : this.futures.entrySet()) {
      try {
        if (timeOutSeconds > 0L) {
          entry.getValue().get(timeOutSeconds, TimeUnit.SECONDS);
        } else {
          entry.getValue().get();
        }
      } catch (ExecutionException e) {
        // Unwrap so the actual registration failure surfaces as the cause.
        throw new IOException("Failed to finish registration for " + entry.getKey(), e.getCause());
      } catch (InterruptedException e) {
        // Restore the interrupt status before converting to IOException. Previously all three
        // exception types were chained via e.getCause(), which is null for InterruptedException
        // and TimeoutException, silently dropping the failure context.
        Thread.currentThread().interrupt();
        throw new IOException("Failed to finish registration for " + entry.getKey(), e);
      } catch (TimeoutException e) {
        throw new IOException("Failed to finish registration for " + entry.getKey(), e);
      }
    }
  }

  /**
   * Get an instance of {@link HiveRegister}.
   *
   * @param props A {@link State} object. To get a specific implementation of {@link HiveRegister},
   * specify property {@link #HIVE_REGISTER_TYPE} as the class name. Otherwise, {@link #DEFAULT_HIVE_REGISTER_TYPE}
   * will be returned. This {@link State} object is also used to instantiate the {@link HiveRegister} object.
   */
  public static HiveRegister get(State props) {
    Optional<String> metastoreUri = Optional.fromNullable(props.getProperties().getProperty(HIVE_METASTORE_URI_KEY));
    return get(props, metastoreUri);
  }

  /**
   * Get an instance of {@link HiveRegister}.
   *
   * @param props A {@link State} object. To get a specific implementation of {@link HiveRegister},
   * specify property {@link #HIVE_REGISTER_TYPE} as the class name. Otherwise, {@link #DEFAULT_HIVE_REGISTER_TYPE}
   * will be returned. This {@link State} object is also used to instantiate the {@link HiveRegister} object.
   */
  public static HiveRegister get(State props, Optional<String> metastoreURI) {
    return get(props.getProp(HIVE_REGISTER_TYPE, DEFAULT_HIVE_REGISTER_TYPE), props, metastoreURI);
  }

  /**
   * Get an instance of {@link HiveRegister}.
   *
   * @param hiveRegisterType The name of a class that implements {@link HiveRegister}.
   * @param props A {@link State} object used to instantiate the {@link HiveRegister} object.
   */
  public static HiveRegister get(String hiveRegisterType, State props, Optional<String> metastoreURI) {
    try {
      return (HiveRegister) ConstructorUtils.invokeConstructor(Class.forName(hiveRegisterType), props, metastoreURI);
    } catch (ReflectiveOperationException e) {
      throw Throwables.propagate(e);
    }
  }
}
| 4,611 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveRegistrationUnit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.reflect.TypeToken;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.ToString;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
/**
* A class that represents a Hive table or partition.
*
* @author Ziyang Liu
*/
@Getter
@Alpha
@ToString
public class HiveRegistrationUnit {
  protected final String dbName;
  protected final String tableName;
  // NOTE(review): presumably controls whether the schema is written at registration time —
  // confirm against HiveSerDeManager usage.
  protected final boolean registerSchema;
  protected final List<Column> columns = Lists.newArrayList();
  // Generic parameter bags; well-known keys are extracted into the typed Optional fields below
  // (see populate*Fields / update*Fields).
  protected final State props = new State();
  protected final State storageProps = new State();
  protected final State serDeProps = new State();
  protected final Optional<HiveSerDeManager> serDeManager;
  /**
   * Table or Partition properties
   */
  protected Optional<Long> createTime;
  protected Optional<Long> lastAccessTime;
  /**
   * Storage properties
   */
  protected Optional<String> location;
  protected Optional<String> inputFormat;
  protected Optional<String> outputFormat;
  protected Optional<Boolean> isCompressed;
  protected Optional<Integer> numBuckets;
  protected Optional<List<String>> bucketColumns;
  protected Optional<Boolean> isStoredAsSubDirs;
  /**
   * SerDe properties
   */
  protected Optional<String> serDeType;
  // Package-private: instances are created via the fluent Builder subclasses.
  HiveRegistrationUnit(Builder<?> builder) {
    // db and table names are mandatory.
    Preconditions.checkArgument(!Strings.isNullOrEmpty(builder.dbName));
    Preconditions.checkArgument(!Strings.isNullOrEmpty(builder.tableName));
    this.dbName = builder.dbName;
    this.tableName = builder.tableName;
    this.registerSchema = builder.registerSchema;
    this.columns.addAll(builder.columns);
    this.props.addAll(builder.props);
    this.storageProps.addAll(builder.storageProps);
    this.serDeProps.addAll(builder.serDeProps);
    this.serDeManager = builder.serDeManager;
    // Extract well-known keys out of the generic property bags into typed fields;
    // populateField removes each recognized key from its State as it goes.
    populateTablePartitionFields(this.props);
    populateStorageFields(this.storageProps);
    populateSerDeFields(this.serDeProps);
  }
  // Pull create/last-access times out of the generic props into typed Optionals;
  // recognized keys are removed from the given State by populateField.
  @SuppressWarnings("serial")
  protected void populateTablePartitionFields(State state) {
    this.createTime = populateField(state, HiveConstants.CREATE_TIME, new TypeToken<Long>() {});
    this.lastAccessTime = populateField(state, HiveConstants.LAST_ACCESS_TIME, new TypeToken<Long>() {});
  }
  // Pull storage-descriptor settings (location, formats, bucketing) out of the generic
  // storage props into typed Optionals; recognized keys are removed by populateField.
  @SuppressWarnings({ "serial" })
  protected void populateStorageFields(State state) {
    this.location = populateField(state, HiveConstants.LOCATION, new TypeToken<String>() {});
    this.inputFormat = populateField(state, HiveConstants.INPUT_FORMAT, new TypeToken<String>() {});
    this.outputFormat = populateField(state, HiveConstants.OUTPUT_FORMAT, new TypeToken<String>() {});
    this.isCompressed = populateField(state, HiveConstants.COMPRESSED, new TypeToken<Boolean>() {});
    this.numBuckets = populateField(state, HiveConstants.NUM_BUCKETS, new TypeToken<Integer>() {});
    this.bucketColumns = populateField(state, HiveConstants.BUCKET_COLUMNS, new TypeToken<List<String>>() {});
    this.isStoredAsSubDirs = populateField(state, HiveConstants.STORED_AS_SUB_DIRS, new TypeToken<Boolean>() {});
  }
  // Pull the serde type out of the generic serde props; the key is removed by populateField.
  @SuppressWarnings("serial")
  protected void populateSerDeFields(State state) {
    this.serDeType = populateField(state, HiveConstants.SERDE_TYPE, new TypeToken<String>() {});
  }
  /**
   * Extract the value of {@code key} from {@code state} as the type described by {@code token},
   * removing the key from {@code state} when present.
   *
   * @return the extracted value, or {@link Optional#absent()} if {@code state} has no such key.
   */
  @SuppressWarnings({ "serial", "unchecked" })
  protected static <T> Optional<T> populateField(State state, String key, TypeToken<T> token) {
    if (state.contains(key)) {
      Optional<T> fieldValue;
      // Dispatch on the requested type; the final else falls back to the raw String value.
      if (new TypeToken<Boolean>(){}.isSupertypeOf(token)) {
        fieldValue = (Optional<T>) Optional.of(state.getPropAsBoolean(key));
      } else if (new TypeToken<Integer>(){}.isSupertypeOf(token)) {
        fieldValue = (Optional<T>) Optional.of(state.getPropAsInt(key));
      } else if (new TypeToken<Long>(){}.isSupertypeOf(token)) {
        fieldValue = (Optional<T>) Optional.of(state.getPropAsLong(key));
      } else if (new TypeToken<List<String>>(){}.isSupertypeOf(token)) {
        fieldValue = (Optional<T>) Optional.of(state.getPropAsList(key));
      } else {
        fieldValue = (Optional<T>) Optional.of(state.getProp(key));
      }
      // Recognized keys are consumed: they live in typed fields, not in the State.
      state.removeProp(key);
      return fieldValue;
    }
    return Optional.<T> absent();
  }
/**
* Set the columns for a table or partition.
*
* <p>
* Columns does not need to be set for a table if the table's serde already provides the schema,
* such as Avro tables. Columns does not need to be set for a partition if they are the same as
* the table's columns.
* </p>
* @param columns
*/
public void setColumns(List<Column> columns) {
this.columns.clear();
this.columns.addAll(columns);
}
/**
* Set a table/partition parameter.
*
* <p>
* When using {@link org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister}, since it internally use
* {@link org.apache.hadoop.hive.metastore.api.Table} and {@link org.apache.hadoop.hive.metastore.api.Partition}
* which distinguishes between table/partition parameters, storage descriptor parameters, and serde parameters,
* one may need to distinguish them when constructing a {@link HiveRegistrationUnit} by using
* {@link #setProp(String, Object)}, {@link #setStorageProp(String, Object)} and
* {@link #setSerDeProp(String, Object)}. When using query-based Hive registration, they do not need to be
* distinguished since all parameters will be passed via TBLPROPERTIES.
* </p>
*/
  public void setProp(String key, Object value) {
    this.props.setProp(key, value);
    // If key is a well-known table/partition field (e.g. create time), this moves the value
    // into the corresponding typed field and removes the key from props again.
    updateTablePartitionFields(this.props, key, value);
  }
/**
* Set a storage parameter for a table/partition.
*
* <p>
* When using {@link org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister}, since it internally use
* {@link org.apache.hadoop.hive.metastore.api.Table} and {@link org.apache.hadoop.hive.metastore.api.Partition}
* which distinguishes between table/partition parameters, storage descriptor parameters, and serde parameters,
* one may need to distinguish them when constructing a {@link HiveRegistrationUnit} by using
* {@link #setProp(String, Object)}, {@link #setStorageProp(String, Object)} and
* {@link #setSerDeProp(String, Object)}. When using query-based Hive registration, they do not need to be
* distinguished since all parameters will be passed via TBLPROPERTIES.
* </p>
*/
  public void setStorageProp(String key, Object value) {
    this.storageProps.setProp(key, value);
    // If key is a well-known storage field (location, formats, bucketing, ...), this moves the
    // value into the corresponding typed field and removes the key from storageProps again.
    updateStorageFields(this.storageProps, key, value);
  }
/**
* Set a serde parameter for a table/partition.
*
* <p>
* When using {@link org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister}, since it internally use
* {@link org.apache.hadoop.hive.metastore.api.Table} and {@link org.apache.hadoop.hive.metastore.api.Partition}
* which distinguishes between table/partition parameters, storage descriptor parameters, and serde parameters,
* one may need to distinguish them when constructing a {@link HiveRegistrationUnit} by using
* {@link #setProp(String, Object)}, {@link #setStorageProp(String, Object)} and
* {@link #setSerDeProp(String, Object)}. When using query-based Hive registration, they do not need to be
* distinguished since all parameters will be passed via TBLPROPERTIES.
* </p>
*/
  public void setSerDeProp(String key, Object value) {
    this.serDeProps.setProp(key, value);
    // If key is the well-known serde-type field, this moves the value into the typed field
    // and removes the key from serDeProps again.
    updateSerDeFields(this.serDeProps, key, value);
  }
/**
* Set table/partition parameters.
*
* <p>
* When using {@link org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister}, since it internally use
* {@link org.apache.hadoop.hive.metastore.api.Table} and {@link org.apache.hadoop.hive.metastore.api.Partition}
* which distinguishes between table/partition parameters, storage descriptor parameters, and serde parameters,
* one may need to distinguish them when constructing a {@link HiveRegistrationUnit} by using
* {@link #setProps(State)}, {@link #setStorageProps(State)} and
* {@link #setSerDeProps(State)}. When using query-based Hive registration, they do not need to be
* distinguished since all parameters will be passed via TBLPROPERTIES.
* </p>
*/
  public void setProps(State props) {
    // Route through setProp so well-known keys are extracted into typed fields.
    for (String propKey : props.getPropertyNames()) {
      setProp(propKey, props.getProp(propKey));
    }
  }
/**
* Set storage parameters for a table/partition.
*
* <p>
* When using {@link org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister}, since it internally use
* {@link org.apache.hadoop.hive.metastore.api.Table} and {@link org.apache.hadoop.hive.metastore.api.Partition}
* which distinguishes between table/partition parameters, storage descriptor parameters, and serde parameters,
* one may need to distinguish them when constructing a {@link HiveRegistrationUnit} by using
* {@link #setProps(State)}, {@link #setStorageProps(State)} and
* {@link #setSerDeProps(State)}. When using query-based Hive registration, they do not need to be
* distinguished since all parameters will be passed via TBLPROPERTIES.
* </p>
*/
  public void setStorageProps(State storageProps) {
    // Route through setStorageProp so well-known keys are extracted into typed fields.
    for (String propKey : storageProps.getPropertyNames()) {
      setStorageProp(propKey, storageProps.getProp(propKey));
    }
  }
/**
* Set serde parameters for a table/partition.
*
* <p>
* When using {@link org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister}, since it internally use
* {@link org.apache.hadoop.hive.metastore.api.Table} and {@link org.apache.hadoop.hive.metastore.api.Partition}
* which distinguishes between table/partition parameters, storage descriptor parameters, and serde parameters,
* one may need to distinguish them when constructing a {@link HiveRegistrationUnit} by using
* {@link #setProps(State)}, {@link #setStorageProps(State)} and
* {@link #setSerDeProps(State)}. When using query-based Hive registration, they do not need to be
* distinguished since all parameters will be passed via TBLPROPERTIES.
* </p>
*/
  public void setSerDeProps(State serdeProps) {
    // Route through setSerDeProp so well-known keys are extracted into typed fields.
    for (String propKey : serdeProps.getPropertyNames()) {
      setSerDeProp(propKey, serdeProps.getProp(propKey));
    }
  }
protected void updateTablePartitionFields(State state, String key, Object value) {
boolean isExistingField = true;
switch (key) {
case HiveConstants.CREATE_TIME:
this.createTime = Optional.of((Long) value);
break;
case HiveConstants.LAST_ACCESS_TIME:
this.createTime = Optional.of((Long) value);
break;
default:
isExistingField = false;
}
if (isExistingField) {
state.removeProp(key);
}
}
  /**
   * Mirror a just-set storage property into its typed field. Recognized keys are removed from
   * {@code state} afterwards so they are not also emitted as generic parameters.
   */
  protected void updateStorageFields(State state, String key, Object value) {
    boolean isExistingField = true;
    switch (key) {
      case HiveConstants.LOCATION:
        this.location = Optional.of((String) value);
        break;
      case HiveConstants.INPUT_FORMAT:
        this.inputFormat = Optional.of((String) value);
        break;
      case HiveConstants.OUTPUT_FORMAT:
        this.outputFormat = Optional.of((String) value);
        break;
      case HiveConstants.COMPRESSED:
        this.isCompressed = Optional.of((Boolean) value);
        break;
      case HiveConstants.NUM_BUCKETS:
        this.numBuckets = Optional.of((Integer) value);
        break;
      case HiveConstants.BUCKET_COLUMNS:
        // Bucket columns arrive as a comma-separated string; split into a trimmed list.
        this.bucketColumns = Optional.of(Splitter.on(',').omitEmptyStrings().trimResults().splitToList((String) value));
        break;
      case HiveConstants.STORED_AS_SUB_DIRS:
        this.isStoredAsSubDirs = Optional.of((Boolean) value);
        break;
      default:
        isExistingField = false;
    }
    if (isExistingField) {
      state.removeProp(key);
    }
  }
  /**
   * Mirror a just-set serde property into its typed field. Recognized keys are removed from
   * {@code state} afterwards so they are not also emitted as generic parameters.
   */
  protected void updateSerDeFields(State state, String key, Object value) {
    boolean isExistingField = true;
    switch (key) {
      case HiveConstants.SERDE_TYPE:
        this.serDeType = Optional.of((String) value);
        break;
      default:
        isExistingField = false;
    }
    if (isExistingField) {
      state.removeProp(key);
    }
  }
/**
* Set serde properties for a table/partition using the table/partition's {@link HiveSerDeManager}.
*
* <p>
* Requires that the {@link HiveSerDeManager} of the table/partition must be specified in
* {@link Builder#withSerdeManaager(HiveSerDeManager)}, and the table/partition's location must be specified
* either in {@link #setLocation(String)} or via {@link HiveConstants#LOCATION}.
* </p>
*/
  public void setSerDeProps(Path path) throws IOException {
    // Throws IllegalStateException via Optional.get() if no HiveSerDeManager was supplied
    // to the Builder (see class javadoc requirement above).
    this.serDeManager.get().addSerDeProperties(path, this);
  }
/**
* Set serde properties for a table/partition using another table/partition's serde properties.
*
* <p>
* A benefit of doing this is to avoid obtaining the schema multiple times when creating a table and a partition
* with the same schema, or creating several tables and partitions with the same schema. After the first
* table/partition is created, one can use the same SerDe properties to create the other tables/partitions.
* </p>
*/
  public void setSerDeProps(HiveRegistrationUnit other) throws IOException {
    // Copies serde properties from an already-configured unit, avoiding a second schema fetch.
    // Throws IllegalStateException via Optional.get() if no HiveSerDeManager was supplied.
    this.serDeManager.get().addSerDeProperties(other, this);
  }
  // Setters for the typed table/partition, storage and serde fields. Each wraps its value in
  // an Optional to mark the field as explicitly set.
  public void setCreateTime(long createTime) {
    this.createTime = Optional.of(createTime);
  }
  public void setLastAccessTime(long lastAccessTime) {
    this.lastAccessTime = Optional.of(lastAccessTime);
  }
  public void setLocation(String location) {
    this.location = Optional.of(location);
  }
  public void setInputFormat(String inputFormat) {
    this.inputFormat = Optional.of(inputFormat);
  }
  public void setOutputFormat(String outputFormat) {
    this.outputFormat = Optional.of(outputFormat);
  }
  public void setCompressed(boolean isCompressed) {
    this.isCompressed = Optional.of(isCompressed);
  }
  public void setNumBuckets(int numBuckets) {
    this.numBuckets = Optional.of(numBuckets);
  }
  public void setBucketColumns(List<String> bucketColumns) {
    // Defensive immutable copy so later mutation of the caller's list cannot leak in.
    this.bucketColumns = Optional.<List<String>> of(ImmutableList.<String> copyOf(bucketColumns));
  }
  public void setStoredAsSubDirs(boolean isStoredAsSubDirs) {
    this.isStoredAsSubDirs = Optional.of(isStoredAsSubDirs);
  }
  public void setSerDeType(String serDeType) {
    this.serDeType = Optional.of(serDeType);
  }
static abstract class Builder<T extends Builder<?>> {
private String dbName;
private String tableName;
private boolean registerSchema = true;
private List<Column> columns = Lists.newArrayList();
private State props = new State();
private State storageProps = new State();
private State serDeProps = new State();
private Optional<HiveSerDeManager> serDeManager = Optional.absent();
@SuppressWarnings("unchecked")
public T withDbName(String dbName) {
this.dbName = dbName;
return (T) this;
}
@SuppressWarnings("unchecked")
public T withTableName(String tableName) {
this.tableName = tableName;
return (T) this;
}
public T withRegisterSchema(boolean registerSchema) {
this.registerSchema = registerSchema;
return (T) this;
}
@SuppressWarnings("unchecked")
public T withColumns(List<Column> columns) {
this.columns = columns;
return (T) this;
}
@SuppressWarnings("unchecked")
public T withProps(State props) {
this.props = props;
return (T) this;
}
@SuppressWarnings("unchecked")
public T withStorageProps(State storageProps) {
this.storageProps = storageProps;
return (T) this;
}
@SuppressWarnings("unchecked")
public T withSerdeProps(State serDeProps) {
this.serDeProps = serDeProps;
return (T) this;
}
@SuppressWarnings("unchecked")
public T withSerdeManaager(HiveSerDeManager serDeManager) {
this.serDeManager = Optional.of(serDeManager);
return (T) this;
}
public abstract HiveRegistrationUnit build();
}
  /**
   * An immutable (name, type, comment) triple describing a single Hive column.
   * The all-args constructor and getters are generated by Lombok.
   */
  @AllArgsConstructor
  @Getter
  public static class Column {
    private final String name;
    // Hive type string for the column (as supplied by the caller; not validated here).
    private final String type;
    private final String comment;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.pool2.impl.GenericObjectPool;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.apache.gobblin.util.PropertiesUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import com.google.common.base.Optional;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.common.collect.Maps;
import com.google.common.io.Closer;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.AutoReturnableObject;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
 * A pool of {@link IMetaStoreClient}s for querying the Hive metastore.
 *
 * <p>Instances are normally obtained through the static {@link #get(Properties, Optional)}
 * factory, which caches one pool per metastore URI so pools are shared across the process.
 * Cached pools are closed automatically when evicted from the cache.</p>
 */
@Slf4j
public class HiveMetastoreClientPool {

  private final GenericObjectPool<IMetaStoreClient> pool;
  private final HiveMetaStoreClientFactory factory;
  @Getter
  private final HiveConf hiveConf;
  @Getter
  private final HiveRegProps hiveRegProps;

  private static final long DEFAULT_POOL_CACHE_TTL_MINUTES = 30;
  public static final String POOL_CACHE_TTL_MINUTES_KEY = "hive.metaStorePoolCache.ttl";
  public static final String POOL_EVICTION_POLICY_CLASS_NAME = "pool.eviction.policy.class.name";
  public static final String DEFAULT_POOL_EVICTION_POLICY_CLASS_NAME = "org.apache.commons.pool2.impl.DefaultEvictionPolicy";
  public static final String POOL_MIN_EVICTABLE_IDLE_TIME_MILLIS = "pool.min.evictable.idle.time.millis";
  /**
   * To provide additional or override configuration of a certain hive metastore,
   * <p> firstly, set {@code hive.additionalConfig.targetUri=<the target hive metastore uri>}
   * <p> Then all configurations with {@value #POOL_HIVE_ADDITIONAL_CONFIG_PREFIX} prefix will be extracted
   * out of the job configurations and applied on top. For example, if there is a job configuration
   * {@code hive.additionalConfig.hive.metastore.sasl.enabled=false},
   * {@code hive.metastore.sasl.enabled=false} will be extracted and applied
   */
  public static final String POOL_HIVE_ADDITIONAL_CONFIG_PREFIX = "hive.additionalConfig.";
  public static final String POOL_HIVE_ADDITIONAL_CONFIG_TARGET = POOL_HIVE_ADDITIONAL_CONFIG_PREFIX + "targetUri";
  public static final long DEFAULT_POOL_MIN_EVICTABLE_IDLE_TIME_MILLIS = 600000L;
  // NOTE(review): the key literally contains a space ("between eviction"); almost certainly a
  // typo for "pool.time.between.eviction.millis", but it is kept as-is because existing
  // deployments may already configure the typo'd key.
  public static final String POOL_TIME_BETWEEN_EVICTION_MILLIS = "pool.time.between eviction.millis";
  public static final long DEFAULT_POOL_TIME_BETWEEN_EVICTION_MILLIS = 60000L;

  // Lazily initialized under the class lock in get(); volatile so the fast-path read in get()
  // (which skips the lock once initialized) safely observes the fully-constructed cache.
  private static volatile Cache<Optional<String>, HiveMetastoreClientPool> poolCache = null;

  /**
   * Creates the cache mapping a metastore URI to its shared {@link HiveMetastoreClientPool}.
   * Entries expire after the configured TTL; evicted pools are closed by the removal listener.
   */
  private static Cache<Optional<String>, HiveMetastoreClientPool> createPoolCache(final Properties properties) {
    long duration = properties.containsKey(POOL_CACHE_TTL_MINUTES_KEY)
        ? Long.parseLong(properties.getProperty(POOL_CACHE_TTL_MINUTES_KEY)) : DEFAULT_POOL_CACHE_TTL_MINUTES;
    return CacheBuilder.newBuilder()
        .expireAfterAccess(duration, TimeUnit.MINUTES)
        .removalListener(new RemovalListener<Optional<String>, HiveMetastoreClientPool>() {
          @Override
          public void onRemoval(RemovalNotification<Optional<String>, HiveMetastoreClientPool> notification) {
            if (notification.getValue() != null) {
              notification.getValue().close();
            }
          }
        }).build();
  }

  /**
   * Get a {@link HiveMetastoreClientPool} for the requested metastore URI. Useful for using the same pools across
   * different classes in the code base. Note that if a pool already exists for that metastore, the max number of
   * objects available will be unchanged, and it might be lower than requested by this method.
   *
   * @param properties {@link Properties} used to generate the pool.
   * @param metastoreURI URI of the Hive metastore. If absent, use default metastore.
   * @return a {@link HiveMetastoreClientPool}.
   * @throws IOException if the pool cannot be created.
   */
  public static HiveMetastoreClientPool get(final Properties properties, final Optional<String> metastoreURI)
      throws IOException {
    // Double-checked lazy init: avoids taking the class lock on every call once the cache
    // exists. Safe because poolCache is volatile.
    if (poolCache == null) {
      synchronized (HiveMetastoreClientPool.class) {
        if (poolCache == null) {
          poolCache = createPoolCache(properties);
        }
      }
    }
    try {
      return poolCache.get(metastoreURI, new Callable<HiveMetastoreClientPool>() {
        @Override
        public HiveMetastoreClientPool call() throws Exception {
          return new HiveMetastoreClientPool(properties, metastoreURI);
        }
      });
    } catch (ExecutionException ee) {
      throw new IOException("Failed to get " + HiveMetastoreClientPool.class.getSimpleName(), ee.getCause());
    }
  }

  /**
   * Constructor for {@link HiveMetastoreClientPool}.
   *
   * <p>By default the pool uses the configured eviction policy; a pooled client is evicted when:
   * <ul>
   *   <li>the object has been idle longer than
   *   {@link GenericObjectPool#getMinEvictableIdleTimeMillis()}</li>
   *   <li>there are more than {@link GenericObjectPool#getMinIdle()} idle objects in
   *   the pool and the object has been idle for longer than
   *   {@link GenericObjectPool#getSoftMinEvictableIdleTimeMillis()}</li>
   * </ul>
   *
   * @deprecated It is recommended to use the static {@link #get} method instead. Use this
   *             constructor only if different pool configurations are required.
   */
  @Deprecated
  public HiveMetastoreClientPool(Properties properties, Optional<String> metastoreURI) {
    this.hiveRegProps = new HiveRegProps(new State(properties));
    GenericObjectPoolConfig config = new GenericObjectPoolConfig();
    config.setMaxTotal(this.hiveRegProps.getNumThreads());
    config.setMaxIdle(this.hiveRegProps.getNumThreads());
    config.setMaxWaitMillis(this.hiveRegProps.getMaxWaitMillisBorrowingClient());

    String extraConfigTarget = properties.getProperty(POOL_HIVE_ADDITIONAL_CONFIG_TARGET, "");
    this.factory = new HiveMetaStoreClientFactory(metastoreURI);
    // Apply "hive.additionalConfig.*" overrides, but only when this pool targets the URI named
    // by hive.additionalConfig.targetUri.
    if (metastoreURI.isPresent() && StringUtils.isNotEmpty(extraConfigTarget)
        && metastoreURI.get().equals(extraConfigTarget)) {
      log.info("Setting additional hive config for metastore {}", extraConfigTarget);
      properties.forEach((key, value) -> {
        String configKey = key.toString();
        if (configKey.startsWith(POOL_HIVE_ADDITIONAL_CONFIG_PREFIX) && !configKey.equals(
            POOL_HIVE_ADDITIONAL_CONFIG_TARGET)) {
          log.info("Setting additional hive config {}={}", configKey.substring(POOL_HIVE_ADDITIONAL_CONFIG_PREFIX.length()),
              value.toString());
          this.factory.getHiveConf().set(configKey.substring(POOL_HIVE_ADDITIONAL_CONFIG_PREFIX.length()), value.toString());
        }
      });
    }
    this.pool = new GenericObjectPool<>(this.factory, config);
    // Configure the eviction policy for idle clients in the pool.
    this.pool.setEvictionPolicyClassName(properties.getProperty(POOL_EVICTION_POLICY_CLASS_NAME, DEFAULT_POOL_EVICTION_POLICY_CLASS_NAME));
    this.pool.setMinEvictableIdleTimeMillis(PropertiesUtils.getPropAsLong(properties, POOL_MIN_EVICTABLE_IDLE_TIME_MILLIS, DEFAULT_POOL_MIN_EVICTABLE_IDLE_TIME_MILLIS));
    this.pool.setTimeBetweenEvictionRunsMillis(PropertiesUtils.getPropAsLong(properties, POOL_TIME_BETWEEN_EVICTION_MILLIS, DEFAULT_POOL_TIME_BETWEEN_EVICTION_MILLIS));
    this.hiveConf = this.factory.getHiveConf();
  }

  /** Closes the underlying client pool. */
  public void close() {
    this.pool.close();
  }

  /**
   * @return an auto returnable wrapper around a {@link IMetaStoreClient}.
   * @throws IOException if a client cannot be borrowed from the pool.
   * Note: if you must acquire multiple clients, please use {@link #safeGetClients} instead, as this call may deadlock.
   */
  public AutoReturnableObject<IMetaStoreClient> getClient() throws IOException {
    return new AutoReturnableObject<>(this.pool);
  }

  /**
   * A bundle of named {@link IMetaStoreClient}s, borrowed from their pools on construction and
   * returned when this object is closed.
   */
  public static class MultiClient implements AutoCloseable {

    private final Map<String, AutoReturnableObject<IMetaStoreClient>> clients;
    private final Closer closer;

    private MultiClient(Map<String, HiveMetastoreClientPool> namedPools) throws IOException {
      this.clients = Maps.newHashMap();
      this.closer = Closer.create();
      // Count how many clients each distinct pool must supply, then verify capacity up front so
      // we fail fast instead of blocking while already holding clients from other pools.
      Map<HiveMetastoreClientPool, Integer> requiredClientsPerPool = Maps.newHashMap();
      for (Map.Entry<String, HiveMetastoreClientPool> entry : namedPools.entrySet()) {
        requiredClientsPerPool.merge(entry.getValue(), 1, Integer::sum);
      }
      for (Map.Entry<HiveMetastoreClientPool, Integer> entry : requiredClientsPerPool.entrySet()) {
        if (entry.getKey().pool.getMaxTotal() < entry.getValue()) {
          throw new IOException(
              String.format("Not enough clients available in the pool. Required %d, max available %d.",
                  entry.getValue(), entry.getKey().pool.getMaxTotal()));
        }
      }
      for (Map.Entry<String, HiveMetastoreClientPool> entry : namedPools.entrySet()) {
        this.clients.put(entry.getKey(), this.closer.register(entry.getValue().getClient()));
      }
    }

    /**
     * Get the {@link IMetaStoreClient} with the provided name.
     * @throws IOException if no client was registered under that name.
     */
    public IMetaStoreClient getClient(String name) throws IOException {
      if (!this.clients.containsKey(name)) {
        throw new IOException("There is no client with name " + name);
      }
      return this.clients.get(name).get();
    }

    @Override
    public void close() throws IOException {
      this.closer.close();
    }
  }

  /**
   * A method to get multiple {@link IMetaStoreClient}s while preventing deadlocks.
   * @param namedPools A map from String to {@link HiveMetastoreClientPool}.
   * @return a {@link MultiClient} with a {@link IMetaStoreClient} for each entry in the input map. The client can
   * be retrieved by its name in the input map.
   * @throws IOException if any pool cannot satisfy its share of clients.
   */
  public static synchronized MultiClient safeGetClients(Map<String, HiveMetastoreClientPool> namedPools)
      throws IOException {
    return new MultiClient(namedPools);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.IOException;
import java.util.Properties;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister;
import org.apache.gobblin.util.AutoCloseableLock;
/**
 * A striped lock class for Hive databases or tables. To get a lock, use {@link #getDbLock},
 * {@link #getTableLock} or {@link #getPartitionLock}, which return an
 * {@link AutoCloseableHiveLock} that is already locked.
 *
 * <p>
 * Obtaining a table lock does <em>not</em> lock the database, which permits concurrent operations
 * on different tables in the same database. Similarly, obtaining a partition lock does not lock
 * the table or the database.
 * </p>
 */
public class HiveLock {

  // Configuration key selecting the HiveLockFactory implementation, and its default.
  // Declared final: these are constants and were previously mutable static fields.
  private static final String HIVE_LOCK_TYPE = HiveMetaStoreBasedRegister.HIVE_REGISTER_METRICS_PREFIX + "lock.type";
  private static final String HIVE_LOCK_TYPE_DEFAULT = "org.apache.gobblin.hive.HiveLockFactory";

  private static final Joiner JOINER = Joiner.on('/').skipNulls();

  private final Properties properties;
  private final HiveLockFactory locks;

  public HiveLock(Properties properties) throws IOException {
    this.properties = properties;
    try {
      // The factory class is configurable; instantiate it reflectively, passing the properties
      // to its (Properties) constructor.
      this.locks = (HiveLockFactory) ConstructorUtils.invokeConstructor(
          Class.forName(properties.getProperty(HIVE_LOCK_TYPE, HIVE_LOCK_TYPE_DEFAULT)), properties);
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

  /** Returns an already-held lock scoped to the given database. */
  public AutoCloseableHiveLock getDbLock(String dbName) throws IOException {
    Preconditions.checkArgument(!Strings.isNullOrEmpty(dbName));
    return new AutoCloseableHiveLock(this.locks.get(dbName));
  }

  /** Returns an already-held lock scoped to the given table; does not lock the database. */
  public AutoCloseableHiveLock getTableLock(String dbName, String tableName) throws IOException {
    Preconditions.checkArgument(!Strings.isNullOrEmpty(dbName));
    Preconditions.checkArgument(!Strings.isNullOrEmpty(tableName));
    return new AutoCloseableHiveLock(this.locks.get(JOINER.join(dbName, tableName)));
  }

  /** Returns an already-held lock scoped to the given partition; does not lock the table or database. */
  public AutoCloseableHiveLock getPartitionLock(String dbName, String tableName, Iterable<String> partitionValues)
      throws IOException {
    Preconditions.checkArgument(!Strings.isNullOrEmpty(dbName));
    Preconditions.checkArgument(!Strings.isNullOrEmpty(tableName));
    Preconditions.checkArgument(partitionValues.iterator().hasNext());
    return new AutoCloseableHiveLock(this.locks.get(JOINER.join(dbName, tableName, JOINER.join(partitionValues))));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive;
import java.io.IOException;
/**
 * An auto-closeable Hive lock: wraps a {@link HiveLockImpl}, acquires it on construction and
 * releases it on {@link #close()}, making it usable with try-with-resources.
 */
public class AutoCloseableHiveLock implements AutoCloseable {

  // Wildcard instead of the previous raw type: this class never touches the wrapped lock
  // object itself, so any HiveLockImpl parameterization is acceptable.
  private final HiveLockImpl<?> lock;

  /**
   * Wraps and immediately acquires {@code lock}.
   *
   * @throws IOException if acquiring the lock fails
   */
  public AutoCloseableHiveLock(HiveLockImpl<?> lock) throws IOException {
    this.lock = lock;
    this.lock.lock();
  }

  /**
   * Releases the wrapped lock.
   *
   * @throws IOException if releasing the lock fails
   */
  @Override
  public void close() throws IOException {
    this.lock.unlock();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.spec;
import java.util.Collection;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.hive.spec.activity.Activity;
/**
 * Extends {@link HiveSpec} with follow-up work: {@link Activity}s to run once the Hive
 * registration has completed.
 */
@Alpha
public interface HiveSpecWithPostActivities extends HiveSpec {

  /**
   * @return the {@link Activity}s to execute after the registration is complete
   */
  Collection<Activity> getPostActivities();
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.spec;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.HiveTable;
/**
 * A specification describing a single Hive registration: which {@link Path} to register, and
 * the table (and, optionally, partition) it should be registered under.
 */
@Alpha
public interface HiveSpec {

  /**
   * @return the {@link Path} to be registered in Hive
   */
  Path getPath();

  /**
   * @return the {@link HiveTable} that the {@link Path} from {@link #getPath()} should be
   *         registered to
   */
  HiveTable getTable();

  /**
   * @return the {@link HivePartition} that the {@link Path} from {@link #getPath()} should be
   *         registered to; {@link Optional#absent()} indicates the {@link Path} should be
   *         registered as a Hive table rather than as a Hive partition
   */
  Optional<HivePartition> getPartition();
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.spec;
import java.util.Collection;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.hive.spec.activity.Activity;
/**
 * Extends {@link HiveSpec} with preparatory work: {@link Activity}s to run before the Hive
 * registration is performed.
 */
@Alpha
public interface HiveSpecWithPreActivities extends HiveSpec {

  /**
   * @return the {@link Activity}s to execute prior to the Hive registration
   */
  Collection<Activity> getPreActivities();
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.spec;
import java.util.Collection;
import com.google.common.base.Predicate;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.hive.HiveRegister;
/**
 * Extends {@link HiveSpec} with guard conditions: {@link Predicate}s evaluated against the
 * {@link HiveRegister} before registering. If any predicate returns false, the Hive
 * registration is skipped.
 */
@Alpha
public interface HiveSpecWithPredicates extends HiveSpec {

  /**
   * @return the {@link Predicate}s guarding this registration; if any of them returns false,
   *         the Hive registration is skipped
   */
  Collection<Predicate<HiveRegister>> getPredicates();
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.spec;
import com.google.common.base.MoreObjects;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.Lists;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.spec.activity.Activity;
import lombok.Builder;
import lombok.Getter;
import lombok.Singular;
import java.util.Collection;
/**
 * A base implementation of {@link HiveSpec}, carrying the target {@link Path}, the destination
 * {@link HiveTable}, an optional {@link HivePartition}, plus pre/post {@link Activity}s and
 * registration {@link Predicate}s.
 *
 * <p>Instances are built via the nested {@link Builder}. Lombok's {@code @Builder} (with
 * {@code builderClassName = "Builder"}) is expected to merge generated builder members into the
 * hand-written {@code Builder} class below.</p>
 */
@Getter
@Alpha
@Builder(builderClassName = "Builder")
public class SimpleHiveSpec
    implements HiveSpec, HiveSpecWithPreActivities, HiveSpecWithPostActivities, HiveSpecWithPredicates {

  protected final Path path;
  protected final HiveTable table;
  protected final Optional<HivePartition> partition;

  // @Singular makes Lombok generate singular add-style builder methods for these collections.
  @Singular
  @Getter
  protected final Collection<Activity> preActivities;
  @Singular
  @Getter
  protected final Collection<Activity> postActivities;
  @Singular
  @Getter
  protected final Collection<Predicate<HiveRegister>> predicates;

  protected SimpleHiveSpec(Builder<?> builder) {
    this.path = builder.path;
    this.table = builder.table;
    // A builder on which no partition was ever set leaves the field null; normalize to absent.
    this.partition = builder.partition != null ? builder.partition : Optional.<HivePartition> absent();
    this.preActivities = builder.preActivities;
    this.postActivities = builder.postActivities;
    this.predicates = builder.predicates;
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this).omitNullValues().add("path", this.path.toString())
        .add("db", this.table.getDbName()).add("table", this.table.getTableName())
        .add("partition", this.partition.orNull()).toString();
  }

  /**
   * Builder for {@link SimpleHiveSpec}. The {@code preActivities}, {@code postActivities} and
   * {@code predicates} fields referenced below are not declared here: they are presumably
   * injected by Lombok's {@code @Builder}/{@code @Singular} processing at compile time —
   * verify before restructuring this class.
   */
  public static class Builder<T extends Builder<?>> {
    @Getter
    private Path path;
    @Getter
    private HiveTable table;
    @Getter
    private Optional<HivePartition> partition;

    private Builder() {
      this.path = null;
    }

    public Builder(Path path) {
      this.path = path;
      this.preActivities = Lists.newArrayList();
      this.postActivities = Lists.newArrayList();
      this.predicates = Lists.newArrayList();
    }

    @SuppressWarnings("unchecked")
    public T withTable(HiveTable table) {
      this.table = table;
      return (T) this;
    }

    @SuppressWarnings("unchecked")
    public T withPartition(Optional<HivePartition> partition) {
      this.partition = partition;
      return (T) this;
    }

    // path and table are mandatory; partition and the activity/predicate lists are optional.
    public SimpleHiveSpec build() {
      Preconditions.checkNotNull(this.path);
      Preconditions.checkNotNull(this.table);
      return new SimpleHiveSpec(this);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.spec.predicate;
import java.io.IOException;
import com.google.common.base.Predicate;
import com.google.common.base.Throwables;
import org.apache.gobblin.hive.HiveRegister;
import lombok.AllArgsConstructor;
/**
 * A {@link Predicate} that returns true iff the given table does not exist in the metastore
 * reachable through the supplied {@link HiveRegister}.
 *
 * @author Ziyang Liu
 */
@AllArgsConstructor
public class TableNotExistPredicate implements Predicate<HiveRegister> {

  protected final String dbName;
  protected final String tableName;

  @Override
  public boolean apply(HiveRegister register) {
    try {
      return !register.existsTable(this.dbName, this.tableName);
    } catch (IOException e) {
      // Guava's Throwables.propagate(e) is deprecated; for a checked IOException it simply
      // wrapped in a RuntimeException, which is now done explicitly (same type for callers).
      throw new RuntimeException(e);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.spec.predicate;
import java.io.IOException;
import java.util.List;
import com.google.common.base.Predicate;
import com.google.common.base.Throwables;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.HiveRegistrationUnit.Column;
import lombok.AllArgsConstructor;
/**
 * A {@link Predicate} that returns true iff the given Hive partition does not exist in the
 * metastore reachable through the supplied {@link HiveRegister}.
 *
 * @author Ziyang Liu
 */
@AllArgsConstructor
public class PartitionNotExistPredicate implements Predicate<HiveRegister> {

  protected final String dbName;
  protected final String tableName;
  protected final List<Column> partitionKeys;
  protected final List<String> partitionValues;

  @Override
  public boolean apply(HiveRegister register) {
    try {
      return !register.existsPartition(this.dbName, this.tableName, this.partitionKeys, this.partitionValues);
    } catch (IOException e) {
      // Guava's Throwables.propagate(e) is deprecated; wrapping the checked IOException in a
      // RuntimeException reproduces its behavior explicitly (same type for callers).
      throw new RuntimeException(e);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.spec.activity;
import java.io.IOException;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.hive.HiveRegister;
/**
 * Execute a task for a given input.
 *
 * <p>This is a single-abstract-method interface and may be implemented with a lambda.
 */
@Alpha
@FunctionalInterface
public interface Activity {
  /**
   * Execute a task given a {@link HiveRegister}.
   *
   * @param register the Hive register to operate against
   * @return true if the execution is successful; false otherwise.
   * @throws IOException if the underlying Hive operation fails
   */
  boolean execute(HiveRegister register) throws IOException;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.spec.activity;
import java.io.IOException;
import org.apache.gobblin.hive.HiveRegister;
import lombok.AllArgsConstructor;
/**
 * An {@link Activity} that drops a Hive table, if present, through a {@link HiveRegister}.
 *
 * @author Ziyang Liu
 */
public class DropTableActivity implements Activity {
  /** Database holding the table to drop. */
  protected final String dbName;
  /** Name of the table to drop. */
  protected final String tableName;

  public DropTableActivity(String dbName, String tableName) {
    this.dbName = dbName;
    this.tableName = tableName;
  }

  /**
   * Drops {@code dbName.tableName} if it exists.
   *
   * @return always true once the drop call completes
   */
  @Override
  public boolean execute(HiveRegister register) throws IOException {
    register.dropTableIfExists(this.dbName, this.tableName);
    return true;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.spec.activity;
import java.io.IOException;
import java.util.List;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.HiveRegistrationUnit.Column;
import lombok.AllArgsConstructor;
/**
 * An {@link Activity} that drops a single Hive partition, if present, through a {@link HiveRegister}.
 *
 * @author Ziyang Liu
 */
public class DropPartitionActivity implements Activity {
  /** Database holding the partitioned table. */
  protected final String dbName;
  /** Name of the partitioned table. */
  protected final String tableName;
  /** Partition key columns of the table. */
  protected final List<Column> partitionKeys;
  /** Values identifying the partition to drop. */
  protected final List<String> partitionValues;

  public DropPartitionActivity(String dbName, String tableName, List<Column> partitionKeys,
      List<String> partitionValues) {
    this.dbName = dbName;
    this.tableName = tableName;
    this.partitionKeys = partitionKeys;
    this.partitionValues = partitionValues;
  }

  /**
   * Drops the partition identified by {@link #partitionKeys}/{@link #partitionValues} if it exists.
   *
   * @return always true once the drop call completes
   */
  @Override
  public boolean execute(HiveRegister register) throws IOException {
    register.dropPartitionIfExists(this.dbName, this.tableName, this.partitionKeys, this.partitionValues);
    return true;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.writer;
/**
 * Constant holder for metric namespaces, event names and event-metadata keys used by the
 * metadata writers ({@code HiveMetadataWriter}, Iceberg writer). Pure utility class: final,
 * non-instantiable.
 */
public final class MetadataWriterKeys {
  // Metric namespace and event names.
  public static final String METRICS_NAMESPACE_ICEBERG_WRITER = "IcebergWriter";
  public static final String ICEBERG_COMMIT_EVENT_NAME = "IcebergMetadataCommitEvent";
  public static final String HIVE_COMMIT_EVENT_NAME = "HiveMetadataCommitEvent";
  public static final String METADATA_WRITER_FAILURE_EVENT = "MetadataWriterFailureEvent";
  // Iceberg snapshot / lag metadata keys.
  public static final String LAG_KEY_NAME = "endToEndLag";
  public static final String SNAPSHOT_KEY_NAME = "currentSnapshotId";
  public static final String MANIFEST_LOCATION = "currentManifestLocation";
  public static final String SNAPSHOT_INFORMATION_KEY_NAME = "currentSnapshotDetailedInformation";
  public static final String ICEBERG_TABLE_KEY_NAME = "icebergTableName";
  public static final String ICEBERG_DATABASE_KEY_NAME = "icebergDatabaseName";
  // GMCE (GobblinMetadataChangeEvent) source topic / watermark keys.
  public static final String GMCE_TOPIC_NAME = "gmceTopicName";
  public static final String GMCE_TOPIC_PARTITION = "gmceTopicPartition";
  public static final String GMCE_HIGH_WATERMARK = "gmceHighWatermark";
  public static final String GMCE_LOW_WATERMARK = "gmceLowWatermark";
  // Dataset / table identification keys.
  public static final String DATASET_HDFS_PATH = "datasetHdfsPath";
  public static final String PARTITION_HDFS_PATH = "partitionHdfsPath";
  public static final String DATABASE_NAME_KEY = "databaseName";
  public static final String TABLE_NAME_KEY = "tableName";
  public static final String HIVE_DATABASE_NAME_KEY = "hiveDatabaseName";
  public static final String HIVE_TABLE_NAME_KEY = "hiveTableName";
  public static final String CLUSTER_IDENTIFIER_KEY_NAME = "clusterIdentifier";
  // Failure-event keys.
  public static final String EXCEPTION_MESSAGE_KEY_NAME = "exceptionMessage";
  public static final String FAILED_WRITERS_KEY = "failedWriters";
  public static final String OPERATION_TYPE_KEY = "operationType";
  // Partition-level detail keys.
  public static final String PARTITION_VALUES_KEY = "partitionValues";
  public static final String FAILED_TO_ADD_PARTITION_VALUES_KEY = "failedToAddPartitionValues";
  public static final String FAILED_TO_DROP_PARTITION_VALUES_KEY = "failedToDropPartitionValues";
  public static final String PARTITION_KEYS = "partitionKeys";
  public static final String HIVE_PARTITION_OPERATION_KEY = "hivePartitionOperation";
  public static final String HIVE_EVENT_GMCE_TOPIC_NAME = "kafkaTopic";

  /** Non-instantiable: constants only. */
  private MetadataWriterKeys() {
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.writer;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.specific.SpecificData;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Throwables;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.ListenableFuture;
import javax.annotation.Nullable;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.hive.WhitelistBlacklist;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.avro.HiveAvroSerDeManager;
import org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister;
import org.apache.gobblin.hive.metastore.HiveMetaStoreUtils;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.metadata.GobblinMetadataChangeEvent;
import org.apache.gobblin.metadata.OperationType;
import org.apache.gobblin.metadata.SchemaSource;
import org.apache.gobblin.metrics.GobblinMetricsRegistry;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.ClustersNames;
/**
 * This writer is used to register the hiveSpec into hive metaStore
 * For add_files operation, this writer will use cache to determine whether the partition is registered already or need to be altered
 * and then register the partition if needed
 * For rewrite_files operation, this writer will directly register the new hive spec and try to de-register the old hive spec if oldFilePrefixes is set
 * For drop_files operation, this writer will de-register the hive partition only if oldFilePrefixes is set in the GMCE
 *
 * Added warning suppression for all references of {@link Cache}.
 */
@Slf4j
@SuppressWarnings("UnstableApiUsage")
public class HiveMetadataWriter implements MetadataWriter {
  // Config keys selecting which db.table names this writer registers.
  private static final String HIVE_REGISTRATION_WHITELIST = "hive.registration.whitelist";
  private static final String HIVE_REGISTRATION_BLACKLIST = "hive.registration.blacklist";
  // Config keys selecting tables that should always pick up the existing table schema from Hive.
  private static final String HIVE_USE_LATEST_SCHEMA_ALLOWLIST = "hive.use.latest.schema.allowlist";
  private static final String HIVE_USE_LATEST_SCHEMA_DENYLIST = "hive.use.latest.schema.denylist";
  // Timeout (seconds) applied when waiting on an individual registration future.
  private static final String HIVE_REGISTRATION_TIMEOUT_IN_SECONDS = "hive.registration.timeout.seconds";
  private static final long DEFAULT_HIVE_REGISTRATION_TIMEOUT_IN_SECONDS = 60;
  // Joins dbName and tableName into the "db.table" key used by all per-table maps below.
  private final Joiner tableNameJoiner = Joiner.on('.');
  private final Closer closer = Closer.create();
  @VisibleForTesting
  @Setter(AccessLevel.PACKAGE)
  protected HiveRegister hiveRegister;
  private final WhitelistBlacklist whitelistBlacklist;
  // Always use the latest table Schema for tables in #useLatestTableSchemaWhiteListBlackList
  // unless a newer writer schema arrives
  private final WhitelistBlacklist useExistingTableSchemaAllowDenyList;
  @Getter
  private final KafkaSchemaRegistry schemaRegistry;
  /* Mapping from tableIdentifier to the in-flight registration futures, key'ed by partition values. */
  private final HashMap<String, HashMap<List<String>, ListenableFuture<Void>>> currentExecutionMap;
  /* Mapping from tableIdentifier to a cache, key'ed by timestamp and value is not in use. */
  private final HashMap<String, Cache<String, String>> schemaCreationTimeMap;
  /* Mapping from tableIdentifier to a cache, key'ed by a list of partitions with value as HiveSpec object. */
  private final HashMap<String, Cache<List<String>, HiveSpec>> specMaps;
  // Used to store the relationship between table and the gmce topicPartition
  private final HashMap<String, String> tableTopicPartitionMap;
  /* Mapping from tableIdentifier to latest schema observed. */
  private final HashMap<String, String> latestSchemaMap;
  private final long timeOutSeconds;
  protected State state;
  protected EventSubmitter eventSubmitter;
  /** Kind of change applied to a Hive partition, reported in commit events. */
  public enum HivePartitionOperation {
    ADD_OR_MODIFY, DROP
  }
  /**
   * Creates the writer, building the underlying {@link HiveRegister}, the Kafka schema registry
   * client and the metric/event context from the given job {@link State}.
   *
   * @param state job state carrying the hive registration configuration
   * @throws IOException if the underlying register or metric context cannot be created
   */
  public HiveMetadataWriter(State state) throws IOException {
    this.state = state;
    this.whitelistBlacklist = new WhitelistBlacklist(state.getProp(HIVE_REGISTRATION_WHITELIST, ""),
        state.getProp(HIVE_REGISTRATION_BLACKLIST, ""));
    this.schemaRegistry = KafkaSchemaRegistry.get(state.getProperties());
    this.currentExecutionMap = new HashMap<>();
    this.schemaCreationTimeMap = new HashMap<>();
    this.specMaps = new HashMap<>();
    this.latestSchemaMap = new HashMap<>();
    this.useExistingTableSchemaAllowDenyList = new WhitelistBlacklist(state.getProp(HIVE_USE_LATEST_SCHEMA_ALLOWLIST, ""),
        state.getProp(HIVE_USE_LATEST_SCHEMA_DENYLIST, ""));
    this.tableTopicPartitionMap = new HashMap<>();
    this.timeOutSeconds =
        state.getPropAsLong(HIVE_REGISTRATION_TIMEOUT_IN_SECONDS, DEFAULT_HIVE_REGISTRATION_TIMEOUT_IN_SECONDS);
    // Propagate our timeout to the register's close timeout unless the job already set one.
    if (!state.contains(HiveRegister.HIVE_REGISTER_CLOSE_TIMEOUT_SECONDS_KEY)) {
      state.setProp(HiveRegister.HIVE_REGISTER_CLOSE_TIMEOUT_SECONDS_KEY, timeOutSeconds);
    }
    this.hiveRegister = this.closer.register(HiveRegister.get(state));
    List<Tag<?>> tags = Lists.newArrayList();
    String clusterIdentifier = ClustersNames.getInstance().getClusterName();
    tags.add(new Tag<>(MetadataWriterKeys.CLUSTER_IDENTIFIER_KEY_NAME, clusterIdentifier));
    MetricContext metricContext = closer.register(GobblinMetricsRegistry.getInstance().getMetricContext(state, HiveMetadataWriter.class, tags));
    this.eventSubmitter = new EventSubmitter.Builder(metricContext, HiveMetadataWriter.class.getCanonicalName()).build();
  }
  /**
   * Blocks until every pending registration for {@code dbName.tableName} has completed, emits a
   * commit event for each registered partition found in the spec cache, then clears the
   * pending-execution map for that table. An {@code AlreadyExistsException} root cause is logged
   * and ignored; a timeout surfaces as a {@link RuntimeException} so the container can fail/retry.
   */
  @Override
  public void flush(String dbName, String tableName) throws IOException {
    String tableKey = tableNameJoiner.join(dbName, tableName);
    if(this.currentExecutionMap.containsKey(tableKey)) {
      log.info("start to flush table: " + tableKey);
      HashMap<List<String>, ListenableFuture<Void>> executionMap = this.currentExecutionMap.get(tableKey);
      //iterator all execution to get the result to make sure they all succeeded
      for (HashMap.Entry<List<String>, ListenableFuture<Void>> execution : executionMap.entrySet()) {
        try (Timer.Context context = new Timer().time()) {
          execution.getValue().get(timeOutSeconds, TimeUnit.SECONDS);
          log.info("Time taken to add partition to table {} is {} ms", tableKey, TimeUnit.NANOSECONDS.toMillis(context.stop()));
        } catch (TimeoutException e) {
          // Since TimeoutException should always be a transient issue, throw RuntimeException which will fail/retry container
          throw new RuntimeException("Timeout waiting for result of registration for table " + tableKey, e);
        } catch (InterruptedException | ExecutionException e) {
          if (Throwables.getRootCause(e) instanceof AlreadyExistsException) {
            log.warn("Caught AlreadyExistsException for db {}, table {}, ignoring", dbName, tableName);
          } else {
            // Report every partition of this table as failed-to-add so callers get full context.
            Set<String> partitions = executionMap.keySet().stream().flatMap(List::stream).collect(Collectors.toSet());
            throw new HiveMetadataWriterWithPartitionInfoException(partitions, Collections.emptySet(), e);
          }
        }
        Cache<List<String>, HiveSpec> cache = specMaps.get(tableKey);
        if (cache != null) {
          HiveSpec hiveSpec = cache.getIfPresent(execution.getKey());
          if (hiveSpec != null) {
            try (Timer.Context context = new Timer().time()) {
              eventSubmitter.submit(buildCommitEvent(dbName, tableName, execution.getKey(), hiveSpec,
                  HivePartitionOperation.ADD_OR_MODIFY));
              log.info("Time taken to submit event for table {} is {} ms", tableKey, TimeUnit.NANOSECONDS.toMillis(context.stop()));
            }
          }
        }
      }
      executionMap.clear();
      log.info("finish flushing table: " + tableKey);
    }
  }
  /**
   * Discards all cached state for the given table: pending executions, observed schema creation
   * times, the latest-schema entry and the registered-spec cache.
   */
  @Override
  public void reset(String dbName, String tableName) throws IOException {
    String tableKey = tableNameJoiner.join(dbName, tableName);
    this.currentExecutionMap.remove(tableKey);
    this.schemaCreationTimeMap.remove(tableKey);
    this.latestSchemaMap.remove(tableKey);
    this.specMaps.remove(tableKey);
  }
  /**
   * Dispatches the GMCE to {@link #addFiles}/{@link #deleteFiles} according to its operation type.
   * For anything other than drop_files the Hive table is created first (skipping the event if
   * creation fails) and the latest existing schema is pulled in when needed. Also records the
   * GMCE topic partition for later event reporting.
   */
  public void write(GobblinMetadataChangeEvent gmce, Map<String, Collection<HiveSpec>> newSpecsMap,
      Map<String, Collection<HiveSpec>> oldSpecsMap, HiveSpec tableSpec, String gmceTopicPartition) throws IOException {
    String dbName = tableSpec.getTable().getDbName();
    String tableName = tableSpec.getTable().getTableName();
    String tableKey = tableNameJoiner.join(dbName, tableName);
    OperationType opType = gmce.getOperationType();
    String topicName = getTopicName(gmce);
    if (opType != OperationType.drop_files) {
      if (!createTable(tableSpec, tableKey)) {
        return;
      }
      updateLatestSchemaMapWithExistingSchema(dbName, tableName, tableKey);
    }
    tableTopicPartitionMap.put(tableKey, gmceTopicPartition);
    switch (opType) {
      case add_files: {
        addFiles(gmce, newSpecsMap, dbName, tableName, topicName);
        break;
      }
      case drop_files: {
        deleteFiles(gmce, oldSpecsMap, dbName, tableName);
        break;
      }
      case rewrite_files: {
        //de-register old partitions
        deleteFiles(gmce, oldSpecsMap, dbName, tableName);
        //register new partitions
        addFiles(gmce, newSpecsMap, dbName, tableName, topicName);
        break;
      }
      default: {
        log.error("unsupported operation {}", opType);
        return;
      }
    }
  }
  /**
   * Helper function to gracefully handle errors when creating a hive table. i.e. IOExceptions when creating the table
   * are swallowed and logged to error
   * @param tableSpec
   * @param tableKey table key used to check if table is in spec cache
   * @return if the table the table was created. If the table existed beforehand, it still returns true.
   */
  private boolean createTable(HiveSpec tableSpec, String tableKey) {
    try {
      // no-op if it's in spec cache (spec cache contains tablekey for all db / tables created since last flush)
      if (inHiveSpecCache(tableKey)) {
        return true;
      }
      this.hiveRegister.createTableIfNotExists(tableSpec.getTable());
      return true;
    } catch (IOException e) {
      log.error("Failed to create table. Skipping this event", e);
      return false;
    }
  }
  /**
   * Derives the Kafka topic name from the GMCE's topic-partition offset range, or returns null
   * if no offset range is present.
   */
  @Nullable
  protected String getTopicName(GobblinMetadataChangeEvent gmce) {
    //Calculate the topic name from gmce, fall back to topic.name in hive spec which can also be null
    //todo: make topicName fall back to topic.name in hive spec so that we can also get schema for re-write operation
    String topicName = null;
    if (gmce.getTopicPartitionOffsetsRange() != null && !gmce.getTopicPartitionOffsetsRange().isEmpty()) {
      // In case the topic name is not the table name or the topic name contains '-'
      String topicPartitionString = gmce.getTopicPartitionOffsetsRange().keySet().iterator().next();
      topicName = parseTopicNameFromOffsetRangeKey(topicPartitionString);
    }
    return topicName;
  }
  /**
   * Extracts the topic name from an offset-range key: the substring between the last '.' and the
   * last '-' (the trailing '-N' is the partition number).
   */
  public static String parseTopicNameFromOffsetRangeKey(String offsetRangeKey) {
    int startOfTopicName = offsetRangeKey.lastIndexOf('.') + 1;
    return offsetRangeKey.substring(startOfTopicName, offsetRangeKey.lastIndexOf('-'));
  }
  /**
   * We care about if a table key is in the spec cache because it means that we have already created this table before
   * since the last flush. Therefore, we can use this method to check whether we need to create a table
   * @param tableKey
   * @return
   */
  private boolean inHiveSpecCache(String tableKey) {
    return specMaps.containsKey(tableKey) && specMaps.get(tableKey).size() > 0;
  }
  // Instance-level convenience wrapper over the static helper below, bound to this writer's state.
  private void updateLatestSchemaMapWithExistingSchema(String dbName, String tableName, String tableKey) throws IOException {
    updateLatestSchemaMapWithExistingSchema(dbName, tableName, tableKey, useExistingTableSchemaAllowDenyList, hiveRegister, latestSchemaMap);
  }
  /**
   * Helper method for updating the schema map with the latest existing schema from Hive if necessary
   *
   * @param dbName Hive DB name
   * @param tableName Hive table name
   * @param tableKey table key for the latest schema map
   * @param useExistingTableSchemaAllowDenyList list of topics that should always use the latest existing schema in hive
   * @param hiveRegister hive register for getting table info
   * @param latestSchemaMap map containing the latest schema for all topics since last flush
   * @return true if latest schema map was updated with the existing schema in Hive
   * @throws IOException
   */
  @VisibleForTesting
  protected static boolean updateLatestSchemaMapWithExistingSchema(String dbName, String tableName, String tableKey,
      WhitelistBlacklist useExistingTableSchemaAllowDenyList, HiveRegister hiveRegister,
      HashMap<String, String> latestSchemaMap) throws IOException{
    //ToDo: after making sure all spec has topic.name set, we should use topicName as key for schema
    boolean alwaysUseExistingSchema = useExistingTableSchemaAllowDenyList.acceptTable(dbName, tableName);
    if (!alwaysUseExistingSchema && latestSchemaMap.containsKey(tableKey)) {
      return false;
    }
    HiveTable table = hiveRegister.getTable(dbName, tableName).get();
    String latestSchema = table.getSerDeProps().getProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName());
    if (latestSchema == null) {
      throw new IllegalStateException(String.format("The %s in the table %s.%s is null. This implies the DB is "
          + "misconfigured and was not correctly created through Gobblin, since all Gobblin managed tables should "
          + "have %s", HiveAvroSerDeManager.SCHEMA_LITERAL, dbName, tableName, HiveAvroSerDeManager.SCHEMA_LITERAL));
    }
    latestSchemaMap.put(tableKey, latestSchema);
    return true;
  }
  /**
   * De-registers every partition in {@code oldSpecsMap} belonging to {@code dbName.tableName}.
   * No-op unless the GMCE sets oldFilePrefixes, since a hive partition refers to a whole directory.
   */
  public void deleteFiles(GobblinMetadataChangeEvent gmce, Map<String, Collection<HiveSpec>> oldSpecsMap, String dbName,
      String tableName) throws IOException {
    if (gmce.getOldFilePrefixes() == null || gmce.getOldFilePrefixes().isEmpty()) {
      //We only de-register partition when old file prefixes is set, since hive partition refer to a whole directory
      return;
    }
    for (Collection<HiveSpec> specs : oldSpecsMap.values()) {
      for (HiveSpec spec : specs) {
        if (spec.getTable().getDbName().equals(dbName) && spec.getTable().getTableName().equals(tableName)) {
          if (spec.getPartition().isPresent()) {
            deRegisterPartitionHelper(dbName, tableName, spec);
          }
        }
      }
    }
    //TODO: De-register table if table location does not exist (Configurable)
  }
  /**
   * Drops the partition described by {@code spec} (if it exists) and emits a DROP commit event.
   */
  protected void deRegisterPartitionHelper(String dbName, String tableName, HiveSpec spec) throws IOException {
    hiveRegister.dropPartitionIfExists(dbName, tableName, spec.getTable().getPartitionKeys(),
        spec.getPartition().get().getValues());
    eventSubmitter.submit(buildCommitEvent(dbName, tableName, spec.getPartition().get().getValues(), spec, HivePartitionOperation.DROP));
  }
  /**
   * Registers every spec in {@code newSpecsMap} targeting {@code dbName.tableName}. Uses the
   * per-table spec cache to skip partitions that are already registered and unchanged; otherwise
   * kicks off an async registration via {@link #registerSpec}.
   */
  public void addFiles(GobblinMetadataChangeEvent gmce, Map<String, Collection<HiveSpec>> newSpecsMap, String dbName,
      String tableName, String topicName) throws IOException {
    String tableKey = tableNameJoiner.join(dbName, tableName);
    for (Collection<HiveSpec> specs : newSpecsMap.values()) {
      for (HiveSpec spec : specs) {
        if (spec.getTable().getDbName().equals(dbName) && spec.getTable().getTableName().equals(tableName)) {
          List<String> partitionValue =
              spec.getPartition().isPresent() ? spec.getPartition().get().getValues() : Lists.newArrayList();
          Cache<List<String>, HiveSpec> hiveSpecCache = specMaps.computeIfAbsent(tableKey,
              s -> CacheBuilder.newBuilder()
                  .expireAfterAccess(state.getPropAsInt(MetadataWriter.CACHE_EXPIRING_TIME,
                      MetadataWriter.DEFAULT_CACHE_EXPIRING_TIME), TimeUnit.HOURS)
                  .build());
          HiveSpec existedSpec = hiveSpecCache.getIfPresent(partitionValue);
          schemaUpdateHelper(gmce, spec, topicName, tableKey);
          if (existedSpec != null) {
            //if existedSpec is not null, it means we already registered this partition, so check whether we need to update the table/partition
            if ((this.hiveRegister.needToUpdateTable(existedSpec.getTable(), spec.getTable())) || (
                spec.getPartition().isPresent() && this.hiveRegister.needToUpdatePartition(
                    existedSpec.getPartition().get(), spec.getPartition().get()))) {
              registerSpec(dbName, tableName, partitionValue, spec, hiveSpecCache);
            }
          } else {
            registerSpec(dbName, tableName, partitionValue, spec, hiveSpecCache);
          }
        }
      }
    }
  }
  /**
   * Submits an async registration for {@code spec} and caches it. If a previous registration for
   * the same partition values is still pending, waits for it first (emitting its commit event) so
   * registrations for one partition stay ordered.
   */
  private void registerSpec(String dbName, String tableName, List<String> partitionValue, HiveSpec spec,
      Cache<List<String>, HiveSpec> hiveSpecCache) {
    String tableKey = tableNameJoiner.join(dbName, tableName);
    HashMap<List<String>, ListenableFuture<Void>> executionMap =
        this.currentExecutionMap.computeIfAbsent(tableKey, s -> new HashMap<>());
    if (executionMap.containsKey(partitionValue)) {
      try {
        executionMap.get(partitionValue).get(timeOutSeconds, TimeUnit.SECONDS);
        eventSubmitter.submit(buildCommitEvent(dbName, tableName, partitionValue, spec, HivePartitionOperation.ADD_OR_MODIFY));
      } catch (InterruptedException | ExecutionException | TimeoutException e) {
        log.error("Error when getting the result of registration for table " + tableKey);
        throw new RuntimeException(e);
      }
    }
    executionMap.put(partitionValue, this.hiveRegister.register(spec));
    hiveSpecCache.put(partitionValue, spec);
  }
  /**
   * Decides which avro schema literal the spec should carry, based on the GMCE's schema source:
   * EVENT updates the latest-schema map unconditionally; SCHEMAREGISTRY updates it only when the
   * event's schema matches the registry's latest; NONE with a schema-source DB copies the schema
   * from that DB's table. Finally the resolved schema is forced onto the spec's serde props.
   */
  private void schemaUpdateHelper(GobblinMetadataChangeEvent gmce, HiveSpec spec, String topicName, String tableKey)
      throws IOException {
    if (gmce.getSchemaSource() != SchemaSource.NONE) {
      String newSchemaString =
          spec.getTable().getSerDeProps().getProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName());
      if (newSchemaString != null) {
        Schema newSchema = new Schema.Parser().parse(newSchemaString);
        String newSchemaCreationTime = AvroUtils.getSchemaCreationTime(newSchema);
        Cache<String, String> existedSchemaCreationTimes = schemaCreationTimeMap.computeIfAbsent(tableKey,
            s -> CacheBuilder.newBuilder()
                .expireAfterAccess(
                    state.getPropAsInt(MetadataWriter.CACHE_EXPIRING_TIME, MetadataWriter.DEFAULT_CACHE_EXPIRING_TIME),
                    TimeUnit.HOURS)
                .build());
        if (gmce.getSchemaSource() == SchemaSource.EVENT) {
          // Schema source is Event, update schema anyway
          String schemaToUpdate = overrideSchemaLiteral(spec, newSchemaString, newSchemaCreationTime, gmce.getPartitionColumns());
          latestSchemaMap.put(tableKey, schemaToUpdate);
          // Clear the schema versions cache so next time if we see schema source is schemaRegistry, we will contact schemaRegistry and update
          existedSchemaCreationTimes.cleanUp();
        } else if (gmce.getSchemaSource() == SchemaSource.SCHEMAREGISTRY && newSchemaCreationTime != null
            && existedSchemaCreationTimes.getIfPresent(newSchemaCreationTime) == null) {
          // We haven't seen this schema before, so we query schemaRegistry to get latest schema
          if (StringUtils.isNoneEmpty(topicName)) {
            Schema latestSchema = (Schema) this.schemaRegistry.getLatestSchemaByTopic(topicName);
            String latestCreationTime = AvroUtils.getSchemaCreationTime(latestSchema);
            if (latestCreationTime.equals(newSchemaCreationTime)) {
              String schemaToUpdate = overrideSchemaLiteral(spec, newSchemaString, newSchemaCreationTime, gmce.getPartitionColumns());
              //new schema is the latest schema, we update our record
              latestSchemaMap.put(tableKey, schemaToUpdate);
            }
            existedSchemaCreationTimes.put(newSchemaCreationTime, "");
          }
        }
      }
    } else if (gmce.getRegistrationProperties().containsKey(HiveMetaStoreBasedRegister.SCHEMA_SOURCE_DB)
        && !gmce.getRegistrationProperties().get(HiveMetaStoreBasedRegister.SCHEMA_SOURCE_DB).equals(spec.getTable().getDbName())) {
      // If schema source is NONE and schema source db is set, we will directly update the schema to source db schema
      String schemaSourceDb = gmce.getRegistrationProperties().get(HiveMetaStoreBasedRegister.SCHEMA_SOURCE_DB);
      try {
        String sourceSchema = fetchSchemaFromTable(schemaSourceDb, spec.getTable().getTableName());
        if (sourceSchema != null){
          spec.getTable()
              .getSerDeProps()
              .setProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(), sourceSchema);
          HiveMetaStoreUtils.updateColumnsInfoIfNeeded(spec);
        }
      } catch (IOException e) {
        log.warn(String.format("Cannot get schema from table %s.%s", schemaSourceDb, spec.getTable().getTableName()), e);
      }
      return;
    }
    //Force to set the schema even there is no schema literal defined in the spec
    String latestSchema = latestSchemaMap.get(tableKey);
    if (latestSchema != null) {
      spec.getTable().getSerDeProps()
          .setProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(), latestSchema);
      HiveMetaStoreUtils.updateColumnsInfoIfNeeded(spec);
    }
  }
  /**
   * Method that overrides schema literal in implementation class
   * @param spec HiveSpec
   * @param latestSchema returns passed schema as is
   * @param schemaCreationTime updates schema with creation time
   * @param partitionNames
   * @return schema literal
   */
  protected String overrideSchemaLiteral(HiveSpec spec, String latestSchema, String schemaCreationTime,
      List<String> partitionNames) {
    return latestSchema;
  }
  /**
   * Returns the cached latest schema for {@code dbName.tableName} if present, otherwise the
   * schema literal read from the Hive table's serde props (null if the table does not exist).
   */
  private String fetchSchemaFromTable(String dbName, String tableName) throws IOException {
    String tableKey = tableNameJoiner.join(dbName, tableName);
    if (latestSchemaMap.containsKey(tableKey)) {
      return latestSchemaMap.get(tableKey);
    }
    Optional<HiveTable> table = hiveRegister.getTable(dbName, tableName);
    return table.isPresent()? table.get().getSerDeProps().getProp(
        AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()) : null;
  }
  /**
   * Deep-copies the envelope's record into a {@link GobblinMetadataChangeEvent} and delegates to
   * {@link #write} when the target table passes the whitelist/blacklist; IOExceptions are wrapped
   * with the affected partition values for richer failure reporting.
   */
  @Override
  public void writeEnvelope(RecordEnvelope<GenericRecord> recordEnvelope, Map<String, Collection<HiveSpec>> newSpecsMap,
      Map<String, Collection<HiveSpec>> oldSpecsMap, HiveSpec tableSpec) throws IOException {
    GenericRecord genericRecord = recordEnvelope.getRecord();
    GobblinMetadataChangeEvent gmce =
        (GobblinMetadataChangeEvent) SpecificData.get().deepCopy(genericRecord.getSchema(), genericRecord);
    if (whitelistBlacklist.acceptTable(tableSpec.getTable().getDbName(), tableSpec.getTable().getTableName())) {
      try {
        write(gmce, newSpecsMap, oldSpecsMap, tableSpec, recordEnvelope.getWatermark().getSource());
      } catch (IOException e) {
        throw new HiveMetadataWriterWithPartitionInfoException(getPartitionValues(newSpecsMap), getPartitionValues(oldSpecsMap), e);
      }
    } else {
      log.debug(String.format("Skip table %s.%s since it's not selected", tableSpec.getTable().getDbName(),
          tableSpec.getTable().getTableName()));
    }
  }
  /**
   * Extract a unique list of partition values as strings from a map of HiveSpecs.
   */
  public Set<String> getPartitionValues(Map<String, Collection<HiveSpec>> specMap) {
    Set<HiveSpec> hiveSpecs = specMap.values().stream().flatMap(Collection::stream).collect(Collectors.toSet());
    Set<List<String>> partitionValueLists = hiveSpecs.stream().filter(spec -> spec.getPartition().isPresent())
        .map(spec -> spec.getPartition().get().getValues()).collect(Collectors.toSet());
    return partitionValueLists.stream().flatMap(List::stream).collect(Collectors.toSet());
  }
  /**
   * Builds the Gobblin tracking event describing a partition ADD_OR_MODIFY/DROP commit, including
   * partition keys/values, HDFS path and (when known) the GMCE source topic.
   */
  protected GobblinEventBuilder buildCommitEvent(String dbName, String tableName, List<String> partitionValues, HiveSpec hiveSpec,
      HivePartitionOperation operation) {
    GobblinEventBuilder gobblinTrackingEvent = new GobblinEventBuilder(MetadataWriterKeys.HIVE_COMMIT_EVENT_NAME);
    gobblinTrackingEvent.addMetadata(MetadataWriterKeys.HIVE_DATABASE_NAME_KEY, dbName);
    gobblinTrackingEvent.addMetadata(MetadataWriterKeys.HIVE_TABLE_NAME_KEY, tableName);
    gobblinTrackingEvent.addMetadata(MetadataWriterKeys.PARTITION_KEYS, Joiner.on(',').join(hiveSpec.getTable().getPartitionKeys().stream()
        .map(HiveRegistrationUnit.Column::getName).collect(Collectors.toList())));
    gobblinTrackingEvent.addMetadata(MetadataWriterKeys.PARTITION_VALUES_KEY, Joiner.on(',').join(partitionValues));
    gobblinTrackingEvent.addMetadata(MetadataWriterKeys.HIVE_PARTITION_OPERATION_KEY, operation.name());
    gobblinTrackingEvent.addMetadata(MetadataWriterKeys.PARTITION_HDFS_PATH, hiveSpec.getPath().toString());
    String gmceTopicPartition = tableTopicPartitionMap.get(tableNameJoiner.join(dbName, tableName));
    if (gmceTopicPartition != null) {
      // topicPartition is "<topic>-<partition>"; keep only the topic portion.
      gobblinTrackingEvent.addMetadata(MetadataWriterKeys.HIVE_EVENT_GMCE_TOPIC_NAME, gmceTopicPartition.split("-")[0]);
    }
    return gobblinTrackingEvent;
  }
  /** Closes the underlying {@link HiveRegister} and metric context via the registered closer. */
  @Override
  public void close() throws IOException {
    this.closer.close();
  }
}
| 4,627 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/writer/MetadataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.writer;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.metadata.GobblinMetadataChangeEvent;
import org.apache.gobblin.stream.RecordEnvelope;
/**
 * Interface for writers that compute and accumulate metadata derived from
 * {@link GobblinMetadataChangeEvent}s and register it to a metadata store.
 *
 * <p>The contract of {@link #flush(String, String)} and {@link #reset(String, String)} implies
 * implementations may buffer per-table state between {@link #writeEnvelope} calls and persist it
 * on flush.</p>
 */
public interface MetadataWriter extends Closeable {
  // Config key controlling how long (in hours) cached per-table metadata is retained.
  String CACHE_EXPIRING_TIME = "GMCEWriter.cache.expiring.time.hours";
  int DEFAULT_CACHE_EXPIRING_TIME = 1;

  /**
   * Register the metadata of a specific table to the metadata store. This is a blocking method:
   * once it returns, as long as the underlying metadata storage is transactional (e.g. MySQL as
   * for HMS), the metadata registration has gone through and been persisted.
   *
   * @param dbName The db name of the metadata-registration target.
   * @param tableName The table name of the metadata-registration target.
   * @throws IOException if persisting the metadata fails
   */
  void flush(String dbName, String tableName) throws IOException;

  /**
   * If something goes wrong, clean up the in-memory state for the given table inside the writer
   * so that registration for this table can continue without affecting correctness.
   *
   * @param dbName The db name of the metadata-registration target.
   * @param tableName The table name of the metadata-registration target.
   * @throws IOException if cleaning up the state fails
   */
  void reset(String dbName, String tableName) throws IOException;

  /**
   * Compute and cache the metadata from the GMCE.
   *
   * @param recordEnvelope Envelope containing the {@link GobblinMetadataChangeEvent}.
   * @param newSpecsMap The container (as a map) for new specs.
   * @param oldSpecsMap The container (as a map) for old specs.
   * @param tableSpec A sample table spec representing one instance among all paths' {@link HiveSpec}s.
   * @throws IOException if computing or caching the metadata fails
   */
  void writeEnvelope(RecordEnvelope<GenericRecord> recordEnvelope, Map<String, Collection<HiveSpec>> newSpecsMap,
      Map<String, Collection<HiveSpec>> oldSpecsMap, HiveSpec tableSpec) throws IOException;
}
| 4,628 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/writer/HiveMetadataWriterWithPartitionInfoException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.writer;
import java.io.IOException;
import java.util.Set;
/**
 * An {@link IOException} raised during Hive metadata registration that carries the partition
 * values whose add/drop operations were in flight, so callers can report or retry them.
 */
public class HiveMetadataWriterWithPartitionInfoException extends IOException {
  /** Values of the partitions whose registration (add) was being attempted. */
  public Set<String> addedPartitionValues;
  /** Values of the partitions whose de-registration (drop) was being attempted. */
  public Set<String> droppedPartitionValues;

  HiveMetadataWriterWithPartitionInfoException(Set<String> added, Set<String> dropped, Exception cause) {
    super(cause);
    this.addedPartitionValues = added;
    this.droppedPartitionValues = dropped;
  }
}
| 4,629 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/avro/HiveAvroSerDeManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.avro;
import java.io.IOException;
import java.net.URI;
import org.apache.avro.Schema;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.codahale.metrics.Timer;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.hive.HiveSerDeManager;
import org.apache.gobblin.hive.HiveSerDeWrapper;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.HadoopUtils;
/**
 * A {@link HiveSerDeManager} for registering Avro tables and partitions.
 *
 * <p>
 * Populates a {@link HiveRegistrationUnit} with the Avro SerDe, the Avro container input/output
 * formats, and the Avro schema. The schema is attached either as a literal property or as a
 * schema file on the file system, depending on configuration and the schema's serialized size.
 * </p>
 *
 * @author Ziyang Liu
 */
@Slf4j
@Alpha
public class HiveAvroSerDeManager extends HiveSerDeManager {

  public static final String SCHEMA_LITERAL = "avro.schema.literal";
  public static final String SCHEMA_URL = "avro.schema.url";
  public static final String USE_SCHEMA_FILE = "use.schema.file";
  public static final boolean DEFAULT_USE_SCHEMA_FILE = false;
  public static final String SCHEMA_FILE_NAME = "schema.file.name";
  public static final String DEFAULT_SCHEMA_FILE_NAME = "_schema.avsc";
  public static final String SCHEMA_TEMP_FILE_NAME = "schema.temp.file.name";
  public static final String DEFAULT_SCHEMA_TEMP_FILE_NAME = "_schema_temp.avsc";
  public static final String USE_SCHEMA_TEMP_FILE = "use.schema.temp.file";
  public static final boolean DEFAULT_USE_SCHEMA_TEMP_FILE = false;
  public static final String SCHEMA_LITERAL_LENGTH_LIMIT = "schema.literal.length.limit";
  public static final int DEFAULT_SCHEMA_LITERAL_LENGTH_LIMIT = 4000;

  public static final String HIVE_SPEC_SCHEMA_READING_TIMER = "hiveAvroSerdeManager.schemaReadTimer";
  public static final String HIVE_SPEC_SCHEMA_WRITING_TIMER = "hiveAvroSerdeManager.schemaWriteTimer";

  protected final FileSystem fs;
  protected final boolean useSchemaFile;
  protected final String schemaFileName;
  protected final boolean useSchemaTempFile;
  protected final String schemaTempFileName;
  protected final int schemaLiteralLengthLimit;
  protected final HiveSerDeWrapper serDeWrapper = HiveSerDeWrapper.get("AVRO");
  private final MetricContext metricContext;

  public HiveAvroSerDeManager(State props) throws IOException {
    super(props);
    // Use the explicitly configured file system URI when given; otherwise fall back to the
    // default file system derived from the Hadoop configuration.
    if (props.contains(HiveRegistrationPolicyBase.HIVE_FS_URI)) {
      this.fs = FileSystem.get(URI.create(props.getProp(HiveRegistrationPolicyBase.HIVE_FS_URI)), HadoopUtils.getConfFromState(props));
    } else {
      this.fs = FileSystem.get(HadoopUtils.getConfFromState(props));
    }
    this.useSchemaFile = props.getPropAsBoolean(USE_SCHEMA_FILE, DEFAULT_USE_SCHEMA_FILE);
    this.useSchemaTempFile = props.getPropAsBoolean(USE_SCHEMA_TEMP_FILE, DEFAULT_USE_SCHEMA_TEMP_FILE);
    this.schemaFileName = props.getProp(SCHEMA_FILE_NAME, DEFAULT_SCHEMA_FILE_NAME);
    this.schemaTempFileName = props.getProp(SCHEMA_TEMP_FILE_NAME, DEFAULT_SCHEMA_TEMP_FILE_NAME);
    this.schemaLiteralLengthLimit =
        props.getPropAsInt(SCHEMA_LITERAL_LENGTH_LIMIT, DEFAULT_SCHEMA_LITERAL_LENGTH_LIMIT);
    this.metricContext = Instrumented.getMetricContext(props, HiveAvroSerDeManager.class);
  }

  /**
   * Add an Avro {@link Schema} to the given {@link HiveRegistrationUnit}.
   *
   * <p>
   * If {@link #USE_SCHEMA_FILE} is true, the schema will be added via {@link #SCHEMA_URL} pointing to
   * the schema file named {@link #SCHEMA_FILE_NAME}.
   * </p>
   *
   * <p>
   * If {@link #USE_SCHEMA_FILE} is false, the schema will be obtained by {@link #getDirectorySchema(Path)}.
   * If the length of the schema is less than {@link #SCHEMA_LITERAL_LENGTH_LIMIT}, it will be added via
   * {@link #SCHEMA_LITERAL}. Otherwise, the schema will be written to {@link #SCHEMA_FILE_NAME} and added
   * via {@link #SCHEMA_URL}.
   * </p>
   */
  @Override
  public void addSerDeProperties(Path path, HiveRegistrationUnit hiveUnit) throws IOException {
    Preconditions.checkArgument(this.fs.getFileStatus(path).isDirectory(), path + " is not a directory.");

    Schema schema;
    try (Timer.Context context = metricContext.timer(HIVE_SPEC_SCHEMA_READING_TIMER).time()) {
      schema = getDirectorySchema(path);
    }
    // No Avro data found under the path: leave the unit untouched.
    if (schema == null) {
      return;
    }
    hiveUnit.setSerDeType(this.serDeWrapper.getSerDe().getClass().getName());
    hiveUnit.setInputFormat(this.serDeWrapper.getInputFormatClassName());
    hiveUnit.setOutputFormat(this.serDeWrapper.getOutputFormatClassName());
    addSchemaPropertiesIfRequired(path, hiveUnit, schema);
  }

  /**
   * Copy the SerDe type, input/output formats and schema properties (literal or URL) from one
   * {@link HiveRegistrationUnit} to another, for each attribute that is present on the source.
   */
  @Override
  public void addSerDeProperties(HiveRegistrationUnit source, HiveRegistrationUnit target) throws IOException {
    if (source.getSerDeType().isPresent()) {
      target.setSerDeType(source.getSerDeType().get());
    }
    if (source.getInputFormat().isPresent()) {
      target.setInputFormat(source.getInputFormat().get());
    }
    if (source.getOutputFormat().isPresent()) {
      target.setOutputFormat(source.getOutputFormat().get());
    }
    if (source.getSerDeProps().contains(SCHEMA_LITERAL)) {
      target.setSerDeProp(SCHEMA_LITERAL, source.getSerDeProps().getProp(SCHEMA_LITERAL));
    }
    if (source.getSerDeProps().contains(SCHEMA_URL)) {
      target.setSerDeProp(SCHEMA_URL, source.getSerDeProps().getProp(SCHEMA_URL));
    }
  }

  /**
   * Attach the schema to the unit, honoring the unit's registerSchema flag: either point at the
   * schema file directly ({@link #USE_SCHEMA_FILE}) or decide between literal and URL by size.
   */
  private void addSchemaPropertiesIfRequired(Path path, HiveRegistrationUnit hiveUnit, Schema schema) throws IOException {
    if (hiveUnit.isRegisterSchema()) {
      Path schemaFile = new Path(path, this.schemaFileName);
      if (this.useSchemaFile) {
        hiveUnit.setSerDeProp(SCHEMA_URL, schemaFile.toString());
      } else {
        try (Timer.Context context = metricContext.timer(HIVE_SPEC_SCHEMA_WRITING_TIMER).time()) {
          addSchemaFromAvroFile(schema, schemaFile, hiveUnit);
        }
      }
    }
  }

  /**
   * Get schema for a directory using {@link AvroUtils#getDirectorySchema(Path, FileSystem, boolean)}.
   */
  protected Schema getDirectorySchema(Path directory) throws IOException {
    return AvroUtils.getDirectorySchema(directory, this.fs, true);
  }

  /**
   * Add a {@link Schema} obtained from an Avro data file to the given {@link HiveRegistrationUnit}.
   *
   * <p>
   * If the length of the schema is less than {@link #SCHEMA_LITERAL_LENGTH_LIMIT}, it will be added via
   * {@link #SCHEMA_LITERAL}. Otherwise, the schema will be written to {@link #SCHEMA_FILE_NAME} and added
   * via {@link #SCHEMA_URL}.
   * </p>
   */
  protected void addSchemaFromAvroFile(Schema schema, Path schemaFile, HiveRegistrationUnit hiveUnit)
      throws IOException {
    Preconditions.checkNotNull(schema);

    String schemaStr = schema.toString();

    if (schemaStr.length() <= this.schemaLiteralLengthLimit) {
      // Reuse the already-serialized schema string instead of serializing the schema a second time.
      hiveUnit.setSerDeProp(SCHEMA_LITERAL, schemaStr);
    } else {
      Path schemaTempFile = null;
      if (useSchemaTempFile) {
        schemaTempFile = new Path(schemaFile.getParent(), this.schemaTempFileName);
      }
      AvroUtils.writeSchemaToFile(schema, schemaFile, schemaTempFile, this.fs, true);
      log.info("Using schema file " + schemaFile.toString());
      hiveUnit.setSerDeProp(SCHEMA_URL, schemaFile.toString());
    }
  }

  /**
   * Overwrite the existing unit's schema (literal or URL) with the new unit's schema; the new unit
   * must carry at least one of the two schema properties.
   */
  @Override
  public void updateSchema(HiveRegistrationUnit existingUnit, HiveRegistrationUnit newUnit) throws IOException {
    Preconditions.checkArgument(
        newUnit.getSerDeProps().contains(SCHEMA_LITERAL) || newUnit.getSerDeProps().contains(SCHEMA_URL));

    if (newUnit.getSerDeProps().contains(SCHEMA_LITERAL)) {
      existingUnit.setSerDeProp(SCHEMA_LITERAL, newUnit.getSerDeProps().getProp(SCHEMA_LITERAL));
    } else {
      existingUnit.setSerDeProp(SCHEMA_URL, newUnit.getSerDeProps().getProp(SCHEMA_URL));
    }
  }

  /**
   * Two units have the same schema when both carry an equal schema literal, or both carry an equal
   * schema URL; units carrying neither (or mismatched kinds) are considered different.
   */
  @Override
  public boolean haveSameSchema(HiveRegistrationUnit unit1, HiveRegistrationUnit unit2) {
    if (unit1.getSerDeProps().contains(HiveAvroSerDeManager.SCHEMA_LITERAL)
        && unit2.getSerDeProps().contains(HiveAvroSerDeManager.SCHEMA_LITERAL)) {
      return unit1.getSerDeProps().getProp(HiveAvroSerDeManager.SCHEMA_LITERAL)
          .equals(unit2.getSerDeProps().getProp(HiveAvroSerDeManager.SCHEMA_LITERAL));
    } else if (unit1.getSerDeProps().contains(HiveAvroSerDeManager.SCHEMA_URL)
        && unit2.getSerDeProps().contains(HiveAvroSerDeManager.SCHEMA_URL)) {
      return unit1.getSerDeProps().getProp(HiveAvroSerDeManager.SCHEMA_URL)
          .equals(unit2.getSerDeProps().getProp(HiveAvroSerDeManager.SCHEMA_URL));
    }
    return false;
  }
}
| 4,630 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/orc/HiveOrcSerDeManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.orc;
import com.google.common.base.Strings;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.avro.Schema;
import org.apache.gobblin.util.orc.AvroOrcSchemaConverter;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hive.ql.io.orc.OrcFile;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.TypeDescriptionToObjectInspectorUtil;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import com.codahale.metrics.Timer;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.hive.HiveSerDeManager;
import org.apache.gobblin.hive.HiveSerDeWrapper;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.orc.TypeDescription;
/**
 * A derived class of {@link org.apache.gobblin.hive.HiveSerDeManager} that is mainly responsible for adding schema
 * information into {@link HiveRegistrationUnit#serDeProps}, based on the format of the data.
 *
 * <p>The schema is taken from a configured Avro schema literal when available, otherwise from the
 * most recently modified ORC file under the registered path.</p>
 */
@Slf4j
public class HiveOrcSerDeManager extends HiveSerDeManager {

  // Extensions of files containing ORC data
  public static final String FILE_EXTENSIONS_KEY = "hiveOrcSerdeManager.fileExtensions";
  public static final String DEFAULT_FILE_EXTENSIONS = ".orc";

  // Files with these prefixes are ignored when finding the latest schema
  public static final String IGNORED_FILE_PREFIXES_KEY = "hiveOrcSerdeManager.ignoredPrefixes";
  public static final String DEFAULT_IGNORED_FILE_PREFIXES = "_,.";

  // The serde type
  public static final String SERDE_TYPE_KEY = "hiveOrcSerdeManager.serdeType";
  public static final String DEFAULT_SERDE_TYPE = "ORC";

  public static final String INPUT_FORMAT_CLASS_KEY = "hiveOrcSerdeManager.inputFormatClass";
  public static final String DEFAULT_INPUT_FORMAT_CLASS = OrcInputFormat.class.getName();
  public static final String OUTPUT_FORMAT_CLASS_KEY = "hiveOrcSerdeManager.outputFormatClass";
  public static final String DEFAULT_OUTPUT_FORMAT_CLASS = OrcOutputFormat.class.getName();
  public static final String HIVE_SPEC_SCHEMA_READING_TIMER = "hiveOrcSerdeManager.schemaReadTimer";
  public static final String ENABLED_ORC_TYPE_CHECK = "hiveOrcSerdeManager.enableFormatCheck";
  public static final boolean DEFAULT_ENABLED_ORC_TYPE_CHECK = false;

  // How many trailing bytes to read when probing a file's ORC footer.
  private static final int EXPECTED_FOOTER_SIZE = 16 * 1024;
  private static final String ORC_FORMAT = "ORC";
  private static final ByteBuffer MAGIC_BUFFER = ByteBuffer.wrap(ORC_FORMAT.getBytes(Charsets.UTF_8));

  private final FileSystem fs;
  private final HiveSerDeWrapper serDeWrapper;
  private final List<String> fileExtensions;
  private final List<String> ignoredFilePrefixes;
  private final boolean checkOrcFormat;
  private final MetricContext metricContext;

  public HiveOrcSerDeManager(State props)
      throws IOException {
    super(props);
    this.fs = FileSystem.get(HadoopUtils.getConfFromState(props));

    List<String> extensions = props.getPropAsList(FILE_EXTENSIONS_KEY, DEFAULT_FILE_EXTENSIONS);
    // An empty extension list means "accept every file name".
    this.fileExtensions = extensions.isEmpty() ? ImmutableList.of("") : extensions;

    this.ignoredFilePrefixes = props.getPropAsList(IGNORED_FILE_PREFIXES_KEY, DEFAULT_IGNORED_FILE_PREFIXES);
    this.checkOrcFormat = props.getPropAsBoolean(ENABLED_ORC_TYPE_CHECK, DEFAULT_ENABLED_ORC_TYPE_CHECK);
    this.metricContext = Instrumented.getMetricContext(props, HiveOrcSerDeManager.class);
    this.serDeWrapper = HiveSerDeWrapper.get(props.getProp(SERDE_TYPE_KEY, DEFAULT_SERDE_TYPE),
        Optional.of(props.getProp(INPUT_FORMAT_CLASS_KEY, DEFAULT_INPUT_FORMAT_CLASS)),
        Optional.of(props.getProp(OUTPUT_FORMAT_CLASS_KEY, DEFAULT_OUTPUT_FORMAT_CLASS)));
  }

  @Override
  //Using LIST_COLUMNS and LIST_COLUMN_TYPES to compare schema
  public boolean haveSameSchema(HiveRegistrationUnit unit1, HiveRegistrationUnit unit2)
      throws IOException {
    if (unit1.getSerDeProps().contains(serdeConstants.LIST_COLUMNS) && unit2.getSerDeProps().contains(serdeConstants.LIST_COLUMNS)
        && unit1.getSerDeProps().contains(serdeConstants.LIST_COLUMN_TYPES) && unit2.getSerDeProps().contains(serdeConstants.LIST_COLUMN_TYPES)) {
      return unit1.getSerDeProps().getProp(serdeConstants.LIST_COLUMNS).equals(unit2.getSerDeProps().getProp(serdeConstants.LIST_COLUMNS))
          && unit1.getSerDeProps().getProp(serdeConstants.LIST_COLUMN_TYPES).equals(unit2.getSerDeProps().getProp(serdeConstants.LIST_COLUMN_TYPES));
    } else {
      return false;
    }
  }

  /**
   * Add ORC SerDe attributes into HiveUnit
   *
   * @param path data path whose schema should be registered
   * @param hiveUnit registration unit to populate
   * @throws IOException if the schema cannot be determined
   */
  @Override
  public void addSerDeProperties(Path path, HiveRegistrationUnit hiveUnit)
      throws IOException {
    hiveUnit.setSerDeType(this.serDeWrapper.getSerDe().getClass().getName());
    hiveUnit.setInputFormat(this.serDeWrapper.getInputFormatClassName());
    hiveUnit.setOutputFormat(this.serDeWrapper.getOutputFormatClassName());
    addSchemaProperties(path, hiveUnit);
  }

  /**
   * Copy SerDe type and input/output formats from one unit to another, for each attribute
   * that is present on the source.
   */
  @Override
  public void addSerDeProperties(HiveRegistrationUnit source, HiveRegistrationUnit target)
      throws IOException {
    if (source.getSerDeType().isPresent()) {
      target.setSerDeType(source.getSerDeType().get());
    }
    if (source.getInputFormat().isPresent()) {
      target.setInputFormat(source.getInputFormat().get());
    }
    if (source.getOutputFormat().isPresent()) {
      target.setOutputFormat(source.getOutputFormat().get());
    }
  }

  /**
   * Overwrite the existing unit's column names/types with the new unit's; the new unit must carry
   * both {@code columns} and {@code columns.types} SerDe properties.
   */
  @Override
  public void updateSchema(HiveRegistrationUnit existingUnit, HiveRegistrationUnit newUnit)
      throws IOException {
    Preconditions.checkArgument(
        newUnit.getSerDeProps().contains(serdeConstants.LIST_COLUMNS));
    Preconditions.checkArgument(
        newUnit.getSerDeProps().contains(serdeConstants.LIST_COLUMN_TYPES));

    existingUnit.setSerDeProp(serdeConstants.LIST_COLUMNS, newUnit.getSerDeProps().getProp(serdeConstants.LIST_COLUMNS));
    existingUnit.setSerDeProp(serdeConstants.LIST_COLUMN_TYPES, newUnit.getSerDeProps().getProp(serdeConstants.LIST_COLUMN_TYPES));
  }

  /**
   * Get the schema as a TypeInfo object
   * @param path path that contains the ORC files
   * @param fs {@link FileSystem}
   * @return {@link TypeInfo} with the schema information
   * @throws IOException if no eligible file is found or the file cannot be read
   */
  public TypeInfo getSchemaFromLatestFile(Path path, FileSystem fs)
      throws IOException {
    if (fs.isDirectory(path)) {
      List<FileStatus> files = Arrays.asList(fs.listStatus(path, new PathFilter() {
        @Override
        public boolean accept(Path path) {
          try {
            // Keep files that are not hidden/temp (by prefix), have an accepted extension,
            // and (optionally) pass the expensive ORC magic-byte check.
            return ignoredFilePrefixes.stream().noneMatch(e -> path.getName().startsWith(e))
                && fileExtensions.stream().anyMatch(e -> path.getName().endsWith(e))
                && (!checkOrcFormat || isORC(path, fs));
          } catch (IOException e) {
            log.error("Error checking file for schema retrieval", e);
            return false;
          }
        }
      }));
      if (files.size() > 0) {
        Collections.sort((files), FileListUtils.LATEST_MOD_TIME_ORDER);
      } else {
        throw new FileNotFoundException("No files in Dataset:" + path + " found for schema retrieval");
      }
      // Recurse with the newest file; the else-branch below reads its schema.
      return getSchemaFromLatestFile(files.get(0).getPath(), fs);
    } else {
      return TypeInfoUtils.getTypeInfoFromObjectInspector(OrcFile.createReader(fs, path).getObjectInspector());
    }
  }

  /**
   * Determine if a file is ORC format.
   * Steal ideas & code from presto/OrcReader under Apache License 2.0.
   *
   * Note: This operation is pretty expensive when it comes to checking magicBytes for each file while listing,
   * as itself require getFileStatus and open the file. In normal cases, consider disable it if the confidence level
   * of format consistency is high enough.
   */
  private static boolean isORC(Path file, FileSystem fs)
      throws IOException {
    // try-with-resources guarantees the stream is closed on every exit path (the original
    // implementation leaked it, including on the early "file too small" return).
    try (FSDataInputStream inputStream = fs.open(file)) {
      long size = fs.getFileStatus(file).getLen();
      byte[] buffer = new byte[Math.toIntExact(Math.min(size, EXPECTED_FOOTER_SIZE))];
      if (size < buffer.length) {
        return false;
      }
      inputStream.readFully(size - buffer.length, buffer);

      // get length of PostScript - last byte of the file
      int postScriptSize = buffer[buffer.length - 1] & 0xff;
      int magicLen = MAGIC_BUFFER.remaining();
      if (postScriptSize < magicLen + 1 || postScriptSize >= buffer.length) {
        return false;
      }

      if (!MAGIC_BUFFER.equals(ByteBuffer.wrap(buffer, buffer.length - 1 - magicLen, magicLen))) {
        // Old versions of ORC (0.11) wrote the magic to the head of the file
        byte[] headerMagic = new byte[magicLen];
        inputStream.readFully(0, headerMagic);

        // if it isn't there, this isn't an ORC file
        if (!MAGIC_BUFFER.equals(ByteBuffer.wrap(headerMagic))) {
          return false;
        }
      }
      return true;
    } catch (Exception e) {
      // Preserve the original exception as the cause instead of discarding it.
      throw new RuntimeException("Error occurred when checking the type of file:" + file, e);
    }
  }

  private void addSchemaProperties(Path path, HiveRegistrationUnit hiveUnit)
      throws IOException {
    try (Timer.Context context = metricContext.timer(HIVE_SPEC_SCHEMA_READING_TIMER).time()) {
      addSchemaPropertiesHelper(path, hiveUnit);
    }
  }

  /**
   * Extensible if there's other source-of-truth for fetching schema instead of interacting with HDFS.
   *
   * For purpose of initializing {@link org.apache.hadoop.hive.ql.io.orc.OrcSerde} object, it will require:
   * org.apache.hadoop.hive.serde.serdeConstants#LIST_COLUMNS and
   * org.apache.hadoop.hive.serde.serdeConstants#LIST_COLUMN_TYPES
   */
  protected void addSchemaPropertiesHelper(Path path, HiveRegistrationUnit hiveUnit) throws IOException {
    TypeInfo schema;
    String schemaString = hiveUnit.getSerDeProps()
        .getProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(),
            props.getProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()));
    if (!Strings.isNullOrEmpty(schemaString)) {
      // Prefer the configured Avro schema literal, converted to the equivalent ORC schema.
      Schema avroSchema = new Schema.Parser().parse(schemaString);
      TypeDescription orcSchema = AvroOrcSchemaConverter.getOrcSchema(avroSchema);
      schema = TypeInfoUtils.getTypeInfoFromObjectInspector(
          TypeDescriptionToObjectInspectorUtil.getObjectInspector(orcSchema));
    } else {
      Preconditions.checkArgument(this.fs.getFileStatus(path).isDirectory(), path + " is not a directory.");
      schema = getSchemaFromLatestFile(path, this.fs);
    }
    if (schema instanceof StructTypeInfo) {
      StructTypeInfo structTypeInfo = (StructTypeInfo) schema;
      hiveUnit.setSerDeProp(serdeConstants.LIST_COLUMNS, Joiner.on(",").join(structTypeInfo.getAllStructFieldNames()));
      hiveUnit.setSerDeProp(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(",")
          .join(structTypeInfo.getAllStructFieldTypeInfos()
              .stream()
              .map(TypeInfo::getTypeName)
              .collect(Collectors.toList())));
    } else {
      // Hive always uses a struct with a field for each of the top-level columns as the root object type.
      // So for here we assume to-be-registered ORC files follow this pattern.
      throw new IllegalStateException("A valid ORC schema should be an instance of struct");
    }
  }
}
| 4,631 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/metastore/HiveMetaStoreUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.metastore;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Stream;
import org.apache.avro.Schema;
import org.apache.avro.SchemaParseException;
import org.apache.commons.lang.reflect.MethodUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.SerDeUtils;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.orc.TypeDescription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.primitives.Ints;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveConfFactory;
import org.apache.gobblin.hive.HiveConstants;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.hive.HiveRegistrationUnit.Column;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.SharedHiveConfKey;
import org.apache.gobblin.hive.avro.HiveAvroSerDeManager;
import org.apache.gobblin.hive.spec.HiveSpec;
/**
* A utility class for converting Hive's {@link Table} and {@link Partition} objects into Gobblin's
* {@link HiveTable} and {@link HivePartition} objects, and vice versa.
*
* @author Ziyang Liu
*/
@Alpha
public class HiveMetaStoreUtils {
private static final Logger LOG = LoggerFactory.getLogger(HiveMetaStoreUtils.class);
private static final TableType DEFAULT_TABLE_TYPE = TableType.EXTERNAL_TABLE;
private static final Splitter LIST_SPLITTER_COMMA = Splitter.on(",").trimResults().omitEmptyStrings();
private static final Splitter LIST_SPLITTER_COLON = Splitter.on(":").trimResults().omitEmptyStrings();
private static final String EXTERNAL = "EXTERNAL";
public static final String RUNTIME_PROPS = "runtime.props";
  /** Private constructor: all members are static, so this utility class is never instantiated. */
  private HiveMetaStoreUtils() {
  }
/**
* Convert a {@link HiveTable} into a {@link Table}.
*/
public static Table getTable(HiveTable hiveTable) {
State props = hiveTable.getProps();
Table table = new Table();
table.setDbName(hiveTable.getDbName());
table.setTableName(hiveTable.getTableName());
table.setParameters(getParameters(props));
if (hiveTable.getCreateTime().isPresent()) {
table.setCreateTime(Ints.checkedCast(hiveTable.getCreateTime().get()));
}
if (hiveTable.getLastAccessTime().isPresent()) {
table.setLastAccessTime(Ints.checkedCast(hiveTable.getLastAccessTime().get()));
}
if (hiveTable.getOwner().isPresent()) {
table.setOwner(hiveTable.getOwner().get());
}
if (hiveTable.getRetention().isPresent()) {
table.setRetention(Ints.checkedCast(hiveTable.getRetention().get()));
}
if (hiveTable.getTableType().isPresent()) {
table.setTableType(hiveTable.getTableType().get());
} else {
table.setTableType(DEFAULT_TABLE_TYPE.toString());
}
if (table.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
table.getParameters().put(EXTERNAL, Boolean.TRUE.toString().toUpperCase());
}
table.setPartitionKeys(getFieldSchemas(hiveTable.getPartitionKeys()));
table.setSd(getStorageDescriptor(hiveTable));
return table;
}
/**
* Convert a {@link Table} into a {@link HiveTable}.
*/
public static HiveTable getHiveTable(Table table) {
State tableProps = getTableProps(table);
State storageProps = getStorageProps(table.getSd());
State serDeProps = getSerDeProps(table.getSd().getSerdeInfo());
HiveTable hiveTable = new HiveTable.Builder().withDbName(table.getDbName()).withTableName(table.getTableName())
.withPartitionKeys(getColumns(table.getPartitionKeys())).withProps(tableProps).withStorageProps(storageProps)
.withSerdeProps(serDeProps).build();
if (table.getCreateTime() > 0) {
hiveTable.setCreateTime(table.getCreateTime());
}
if (table.getSd().getCols() != null) {
hiveTable.setColumns(getColumns(table.getSd().getCols()));
}
if (table.getSd().getBucketCols() != null) {
hiveTable.setBucketColumns(table.getSd().getBucketCols());
}
return hiveTable;
}
/**
* Hive does not use '-' or '.' in the table name, so they are replaced with '_'
* @param topic
* @return
*/
public static String getHiveTableName(String topic) {
return topic.replaceAll("[-.]", "_");
}
/**
* Convert a {@link HivePartition} into a {@link Partition}.
*/
public static Partition getPartition(HivePartition hivePartition) {
State props = hivePartition.getProps();
Partition partition = new Partition();
partition.setDbName(hivePartition.getDbName());
partition.setTableName(hivePartition.getTableName());
partition.setValues(hivePartition.getValues());
partition.setParameters(getParameters(props));
if (hivePartition.getCreateTime().isPresent()) {
partition.setCreateTime(Ints.checkedCast(hivePartition.getCreateTime().get()));
} else if (props.contains(HiveConstants.CREATE_TIME)) {
partition.setCreateTime(props.getPropAsInt(HiveConstants.CREATE_TIME));
}
if (props.contains(HiveConstants.LAST_ACCESS_TIME)) {
partition.setLastAccessTime(props.getPropAsInt(HiveConstants.LAST_ACCESS_TIME));
}
partition.setSd(getStorageDescriptor(hivePartition));
return partition;
}
/**
* Convert a {@link Partition} into a {@link HivePartition}.
*/
public static HivePartition getHivePartition(Partition partition) {
State partitionProps = getPartitionProps(partition);
State storageProps = getStorageProps(partition.getSd());
State serDeProps = getSerDeProps(partition.getSd().getSerdeInfo());
HivePartition hivePartition =
new HivePartition.Builder().withDbName(partition.getDbName()).withTableName(partition.getTableName())
.withPartitionValues(partition.getValues()).withProps(partitionProps).withStorageProps(storageProps)
.withSerdeProps(serDeProps).build();
if (partition.getCreateTime() > 0) {
hivePartition.setCreateTime(partition.getCreateTime());
}
if (partition.getSd().getCols() != null) {
hivePartition.setColumns(getColumns(partition.getSd().getCols()));
}
if (partition.getSd().getBucketCols() != null) {
hivePartition.setBucketColumns(partition.getSd().getBucketCols());
}
return hivePartition;
}
public static Map<String, String> getParameters(State props) {
Map<String, String> parameters = Maps.newHashMap();
if (props.contains(RUNTIME_PROPS)) {
String runtimePropsString = props.getProp(RUNTIME_PROPS);
for (String propValue : LIST_SPLITTER_COMMA.splitToList(runtimePropsString)) {
List<String> tokens = LIST_SPLITTER_COLON.splitToList(propValue);
Preconditions.checkState(tokens.size() == 2,
propValue + " is not a valid Hive table/partition property");
parameters.put(tokens.get(0), tokens.get(1));
}
}
for (String propKey : props.getPropertyNames()) {
if (!propKey.equals(RUNTIME_PROPS)) {
parameters.put(propKey, props.getProp(propKey));
}
}
return parameters;
}
public static boolean isNonAvroFormat(HiveRegistrationUnit unit) {
return unit.getInputFormat().isPresent() && !unit.getInputFormat().get().equals(AvroContainerInputFormat.class.getName());
}
public static StorageDescriptor getStorageDescriptor(HiveRegistrationUnit unit) {
State props = unit.getStorageProps();
StorageDescriptor sd = new StorageDescriptor();
sd.setParameters(getParameters(props));
//Treat AVRO and other formats differently. Details can be found in GOBBLIN-877
if (unit.isRegisterSchema() || isNonAvroFormat(unit)) {
sd.setCols(getFieldSchemas(unit));
}
if (unit.getLocation().isPresent()) {
sd.setLocation(unit.getLocation().get());
}
if (unit.getInputFormat().isPresent()) {
sd.setInputFormat(unit.getInputFormat().get());
}
if (unit.getOutputFormat().isPresent()) {
sd.setOutputFormat(unit.getOutputFormat().get());
}
if (unit.getIsCompressed().isPresent()) {
sd.setCompressed(unit.getIsCompressed().get());
}
if (unit.getNumBuckets().isPresent()) {
sd.setNumBuckets(unit.getNumBuckets().get());
}
if (unit.getBucketColumns().isPresent()) {
sd.setBucketCols(unit.getBucketColumns().get());
}
if (unit.getIsStoredAsSubDirs().isPresent()) {
sd.setStoredAsSubDirectories(unit.getIsStoredAsSubDirs().get());
}
sd.setSerdeInfo(getSerDeInfo(unit));
return sd;
}
public static SerDeInfo getSerDeInfo(HiveRegistrationUnit unit) {
State props = unit.getSerDeProps();
SerDeInfo si = new SerDeInfo();
si.setParameters(getParameters(props));
si.setName(unit.getTableName());
if (unit.getSerDeType().isPresent()) {
si.setSerializationLib(unit.getSerDeType().get());
}
return si;
}
public static boolean containsNonOptionalUnionTypeColumn(Table t) {
return containsNonOptionalUnionTypeColumn(getHiveTable(t));
}
/**
* Util for detecting if a hive table has a non-optional union (aka complex unions) column types. A non optional
* union is defined as a uniontype with n >= 2 non-null subtypes
*
* @param hiveTable Hive table with either avro.schema.literal set or is an ORC table
* @return if hive table contains non-optional uniontype columns
*/
public static boolean containsNonOptionalUnionTypeColumn(HiveTable hiveTable) {
if (hiveTable.getProps().contains(HiveAvroSerDeManager.SCHEMA_LITERAL)) {
Schema.Parser parser = new Schema.Parser();
Schema schema = parser.parse(hiveTable.getProps().getProp(HiveAvroSerDeManager.SCHEMA_LITERAL));
return isNonOptionalUnion(schema);
}
if (isNonAvroFormat(hiveTable)) {
return hiveTable.getColumns().stream()
.map(HiveRegistrationUnit.Column::getType)
.filter(type -> type.contains("uniontype"))
.map(type -> TypeDescription.fromString(type))
.anyMatch(type -> isNonOptionalUnion(type));
}
throw new RuntimeException("Avro based Hive tables without \"" + HiveAvroSerDeManager.SCHEMA_LITERAL +"\" are not supported. "
+ "hiveTable=" + hiveTable.getDbName() + "." + hiveTable.getTableName());
}
/**
* Detects if an Avro schema contains a non-optional union. A non optional (aka complex)
* union is defined as a uniontype with n >= 2 non-null subtypes
* @param schema Avro Schema
* @return if schema contains non optional union
*/
public static boolean isNonOptionalUnion(Schema schema) {
switch (schema.getType()) {
case UNION:
Stream<Schema.Type> nonNullSubTypes = schema.getTypes().stream()
.map(Schema::getType).filter(t -> !t.equals(Schema.Type.NULL));
if (nonNullSubTypes.count() >= 2) {
return true;
}
return schema.getTypes().stream().anyMatch(s -> isNonOptionalUnion(s));
case MAP: // key is a string and doesn't need to be checked
return isNonOptionalUnion(schema.getValueType());
case ARRAY:
return isNonOptionalUnion(schema.getElementType());
case RECORD:
return schema.getFields().stream().map(Schema.Field::schema).anyMatch(s -> isNonOptionalUnion(s));
default:
return false;
}
}
/**
* Detects if an ORC column data type contains a non-optional union. A non optional (aka complex)
* union is defined as a UNION with n >= 2 non-null subtypes
* @param description ORC type description
* @return if the ORC data type contains a non optional union type
*/
public static boolean isNonOptionalUnion(TypeDescription description) {
switch (description.getCategory()) {
case UNION:
if (description.getChildren().size() >= 2) {
return true;
}
case MAP:
case LIST:
case STRUCT:
return description.getChildren()
.stream().anyMatch(st -> isNonOptionalUnion(st));
default:
return false;
}
}
public static State getTableProps(Table table) {
State tableProps = new State();
for (Map.Entry<String, String> entry : table.getParameters().entrySet()) {
tableProps.setProp(entry.getKey(), entry.getValue());
}
if (table.isSetCreateTime()) {
tableProps.setProp(HiveConstants.CREATE_TIME, table.getCreateTime());
}
if (table.isSetLastAccessTime()) {
tableProps.setProp(HiveConstants.LAST_ACCESS_TIME, table.getCreateTime());
}
if (table.isSetOwner()) {
tableProps.setProp(HiveConstants.OWNER, table.getOwner());
}
if (table.isSetTableType()) {
tableProps.setProp(HiveConstants.TABLE_TYPE, table.getTableType());
}
if (table.isSetRetention()) {
tableProps.setProp(HiveConstants.RETENTION, table.getRetention());
}
return tableProps;
}
private static State getPartitionProps(Partition partition) {
State partitionProps = new State();
for (Map.Entry<String, String> entry : partition.getParameters().entrySet()) {
partitionProps.setProp(entry.getKey(), entry.getValue());
}
if (partition.isSetCreateTime()) {
partitionProps.setProp(HiveConstants.CREATE_TIME, partition.getCreateTime());
}
if (partition.isSetLastAccessTime()) {
partitionProps.setProp(HiveConstants.LAST_ACCESS_TIME, partition.getCreateTime());
}
return partitionProps;
}
private static State getStorageProps(StorageDescriptor sd) {
State storageProps = new State();
for (Map.Entry<String, String> entry : sd.getParameters().entrySet()) {
storageProps.setProp(entry.getKey(), entry.getValue());
}
if (sd.isSetLocation()) {
storageProps.setProp(HiveConstants.LOCATION, sd.getLocation());
}
if (sd.isSetInputFormat()) {
storageProps.setProp(HiveConstants.INPUT_FORMAT, sd.getInputFormat());
}
if (sd.isSetOutputFormat()) {
storageProps.setProp(HiveConstants.OUTPUT_FORMAT, sd.getOutputFormat());
}
if (sd.isSetCompressed()) {
storageProps.setProp(HiveConstants.COMPRESSED, sd.isCompressed());
}
if (sd.isSetNumBuckets()) {
storageProps.setProp(HiveConstants.NUM_BUCKETS, sd.getNumBuckets());
}
if (sd.isSetBucketCols()) {
for (String bucketColumn : sd.getBucketCols()) {
storageProps.appendToListProp(HiveConstants.BUCKET_COLUMNS, bucketColumn);
}
}
if (sd.isSetStoredAsSubDirectories()) {
storageProps.setProp(HiveConstants.STORED_AS_SUB_DIRS, sd.isStoredAsSubDirectories());
}
return storageProps;
}
private static State getSerDeProps(SerDeInfo si) {
State serDeProps = new State();
for (Map.Entry<String, String> entry : si.getParameters().entrySet()) {
serDeProps.setProp(entry.getKey(), entry.getValue());
}
if (si.isSetSerializationLib()) {
serDeProps.setProp(HiveConstants.SERDE_TYPE, si.getSerializationLib());
}
return serDeProps;
}
private static List<Column> getColumns(List<FieldSchema> fieldSchemas) {
List<Column> columns = Lists.newArrayListWithCapacity(fieldSchemas.size());
for (FieldSchema fieldSchema : fieldSchemas) {
columns.add(new Column(fieldSchema.getName(), fieldSchema.getType(), fieldSchema.getComment()));
}
return columns;
}
private static List<FieldSchema> getFieldSchemas(List<Column> columns) {
List<FieldSchema> fieldSchemas = Lists.newArrayListWithCapacity(columns.size());
for (Column column : columns) {
fieldSchemas.add(new FieldSchema(column.getName(), column.getType(), column.getComment()));
}
return fieldSchemas;
}
/**
* First tries getting the {@code FieldSchema}s from the {@code HiveRegistrationUnit}'s columns, if set.
* Else, gets the {@code FieldSchema}s from the deserializer.
*/
private static List<FieldSchema> getFieldSchemas(HiveRegistrationUnit unit) {
List<Column> columns = unit.getColumns();
List<FieldSchema> fieldSchemas = new ArrayList<>();
if (columns != null && columns.size() > 0) {
fieldSchemas = getFieldSchemas(columns);
} else {
Deserializer deserializer = getDeserializer(unit);
if (deserializer != null) {
try {
fieldSchemas = MetaStoreUtils.getFieldsFromDeserializer(unit.getTableName(), deserializer);
} catch (SerDeException | MetaException e) {
LOG.warn("Encountered exception while getting fields from deserializer.", e);
}
}
}
return fieldSchemas;
}
/**
* Returns a Deserializer from HiveRegistrationUnit if present and successfully initialized. Else returns null.
*/
private static Deserializer getDeserializer(HiveRegistrationUnit unit) {
Optional<String> serdeClass = unit.getSerDeType();
if (!serdeClass.isPresent()) {
return null;
}
String serde = serdeClass.get();
HiveConf hiveConf;
Deserializer deserializer;
try {
hiveConf = SharedResourcesBrokerFactory
.getImplicitBroker().getSharedResource(new HiveConfFactory<>(), SharedHiveConfKey.INSTANCE);
deserializer =
ReflectionUtils.newInstance(hiveConf.getClassByName(serde).asSubclass(Deserializer.class), hiveConf);
} catch (ClassNotFoundException e) {
LOG.warn("Serde class " + serde + " not found!", e);
return null;
} catch (NotConfiguredException nce) {
LOG.error("Implicit broker is not configured properly", nce);
return null;
}
Properties props = new Properties();
props.putAll(unit.getProps().getProperties());
props.putAll(unit.getStorageProps().getProperties());
props.putAll(unit.getSerDeProps().getProperties());
try {
SerDeUtils.initializeSerDe(deserializer, hiveConf, props, null);
// Temporary check that's needed until Gobblin is upgraded to Hive 1.1.0+, which includes the improved error
// handling in AvroSerDe added in HIVE-7868.
if (deserializer instanceof AvroSerDe) {
try {
inVokeDetermineSchemaOrThrowExceptionMethod(props, new Configuration());
} catch (SchemaParseException | InvocationTargetException | NoSuchMethodException | IllegalAccessException e) {
LOG.warn("Failed to initialize AvroSerDe.");
throw new SerDeException(e);
}
}
} catch (SerDeException e) {
LOG.warn("Failed to initialize serde " + serde + " with properties " + props + " for table " + unit.getDbName() +
"." + unit.getTableName());
return null;
}
return deserializer;
}
public static void updateColumnsInfoIfNeeded(HiveSpec spec) throws IOException {
HiveTable table = spec.getTable();
if (table.getSerDeProps().contains(serdeConstants.LIST_COLUMNS)) {
if (table.getSerDeManager().isPresent()) {
String path = spec.getPartition().isPresent() && spec.getPartition().get().getLocation().isPresent() ? spec.getPartition()
.get()
.getLocation()
.get() : spec.getTable().getLocation().get();
table.getSerDeManager().get().addSerDeProperties(new Path(path), table);
}
}
}
@VisibleForTesting
protected static void inVokeDetermineSchemaOrThrowExceptionMethod(Properties props, Configuration conf)
throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
String methodName = "determineSchemaOrThrowException";
Method method = MethodUtils.getAccessibleMethod(AvroSerdeUtils.class, methodName, Properties.class);
boolean withConf = false;
if (method == null) {
method = MethodUtils
.getAccessibleMethod(AvroSerdeUtils.class, methodName, new Class[]{Configuration.class, Properties.class});
withConf = true;
}
Preconditions.checkNotNull(method, "Cannot find matching " + methodName);
if (!withConf) {
MethodUtils.invokeStaticMethod(AvroSerdeUtils.class, methodName, props);
} else {
MethodUtils.invokeStaticMethod(AvroSerdeUtils.class, methodName, new Object[]{conf, props});
}
}
}
| 4,632 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/metastore/HiveMetaStoreBasedRegister.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.metastore;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import org.apache.avro.Schema;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import org.apache.thrift.TException;
import org.joda.time.DateTime;
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.primitives.Ints;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.AutoCloseableHiveLock;
import org.apache.gobblin.hive.HiveLock;
import org.apache.gobblin.hive.HiveMetaStoreClientFactory;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.HiveRegProps;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.HiveRegistrationUnit.Column;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.GobblinMetricsRegistry;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSource;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.gobblin.util.AvroUtils;
/**
* An implementation of {@link HiveRegister} that uses {@link IMetaStoreClient} for Hive registration.
*
* <p>
* An instance of this class is constructed with a {@link State} object or obtained via
* {@link HiveRegister#get(State)}. Property {@link HiveRegProps#HIVE_DB_ROOT_DIR} is required for registering
* a table or a partition if the database does not exist.
* </p>
*
* <p>
* The {@link #register(HiveSpec)} method is asynchronous and returns immediately. Registration is performed in a
* thread pool whose size is controlled by {@link HiveRegProps#HIVE_REGISTER_THREADS}.
* </p>
*
* @author Ziyang Liu
*/
@Slf4j
@Alpha
public class HiveMetaStoreBasedRegister extends HiveRegister {
  // Prefix shared by every metric and config key declared by this register.
  public static final String HIVE_REGISTER_METRICS_PREFIX = "hiveRegister.";
  // Timer metric names; each wraps a single metastore RPC (or a small group of them).
  public static final String ADD_PARTITION_TIMER = HIVE_REGISTER_METRICS_PREFIX + "addPartitionTimerTimer";
  // Config key: db name whose same-named table supplies the schema (see alterTableIfNeeded).
  public static final String SCHEMA_SOURCE_DB = HIVE_REGISTER_METRICS_PREFIX + "schema.source.dbName";
  public static final String GET_HIVE_PARTITION = HIVE_REGISTER_METRICS_PREFIX + "getPartitionTimer";
  public static final String ALTER_PARTITION = HIVE_REGISTER_METRICS_PREFIX + "alterPartitionTimer";
  public static final String TABLE_EXISTS = HIVE_REGISTER_METRICS_PREFIX + "tableExistsTimer";
  public static final String ALTER_TABLE = HIVE_REGISTER_METRICS_PREFIX + "alterTableTimer";
  public static final String GET_HIVE_DATABASE = HIVE_REGISTER_METRICS_PREFIX + "getDatabaseTimer";
  public static final String CREATE_HIVE_DATABASE = HIVE_REGISTER_METRICS_PREFIX + "createDatabaseTimer";
  public static final String CREATE_HIVE_TABLE = HIVE_REGISTER_METRICS_PREFIX + "createTableTimer";
  public static final String GET_HIVE_TABLE = HIVE_REGISTER_METRICS_PREFIX + "getTableTimer";
  public static final String GET_SCHEMA_SOURCE_HIVE_TABLE = HIVE_REGISTER_METRICS_PREFIX + "getSchemaSourceTableTimer";
  public static final String GET_AND_SET_LATEST_SCHEMA = HIVE_REGISTER_METRICS_PREFIX + "getAndSetLatestSchemaTimer";
  public static final String DROP_TABLE = HIVE_REGISTER_METRICS_PREFIX + "dropTableTimer";
  public static final String PATH_REGISTER_TIMER = HIVE_REGISTER_METRICS_PREFIX + "pathRegisterTimer";
  // Config key: when true, skip diffing an existing partition (see skipDiffComputation below).
  public static final String SKIP_PARTITION_DIFF_COMPUTATION = HIVE_REGISTER_METRICS_PREFIX + "skip.partition.diff.computation";
  // Config key: when true, reconcile table schemas against the latest one in the schema registry.
  public static final String FETCH_LATEST_SCHEMA = HIVE_REGISTER_METRICS_PREFIX + "fetchLatestSchemaFromSchemaRegistry";
  //A config which when enabled checks for the existence of a partition in Hive before adding the partition.
  // This is done to minimize the add_partition calls sent to Hive.
  public static final String REGISTER_PARTITION_WITH_PULL_MODE = HIVE_REGISTER_METRICS_PREFIX + "registerPartitionWithPullMode";
  /**
   * To reduce lock aquisition and RPC to metaStoreClient, we cache the result of query regarding to
   * the existence of databases and tables in {@link #tableAndDbExistenceCache},
   * so that for databases/tables existed in cache, a RPC for query the existence can be saved.
   *
   * We make this optimization configurable by setting {@link #OPTIMIZED_CHECK_ENABLED} to be true.
   */
  public static final String OPTIMIZED_CHECK_ENABLED = "hiveRegister.cacheDbTableExistence";
  private final HiveMetastoreClientPool clientPool;
  // In-process locks keyed by db/table name, guarding create races between threads of this register.
  private final HiveLock locks;
  private final EventSubmitter eventSubmitter;
  private final MetricContext metricContext;
  private final boolean shouldUpdateLatestSchema;
  private final boolean registerPartitionWithPullMode;
  /**
   * Local cache that contains records for both databases and tables.
   * To distinguish tables with the same name but in different databases,
   * use the <databaseName>:<tableName> as the key.
   *
   * The value(true/false) in cache doesn't really matter, the existence of entry in cache guarantee the table is existed on hive.
   * The value in k-v pair in cache indicates:
   * when the first time a table/database is loaded into the cache, whether they existed on the remote hiveMetaStore side.
   */
  CacheLoader<String, Boolean> cacheLoader = new CacheLoader<String, Boolean>() {
    @Override
    public Boolean load(String key) throws Exception {
      // Default loader: an uncached key is recorded as existing; callers normally supply
      // their own Callable (see createDbIfNotExists) which performs the real existence check.
      return true;
    }
  };
  Cache<String, Boolean> tableAndDbExistenceCache = CacheBuilder.newBuilder().build(cacheLoader);
  private final boolean optimizedChecks;
  private final State state;
  //If this is true, after we know the partition is existing, we will skip the partition in stead of getting the existing
  // partition and computing the diff to see if it needs to be updated. Use this only when you can make sure the metadata
  //for a partition is immutable
  private final boolean skipDiffComputation;
  // Registry used to fetch the latest topic schema; only present when FETCH_LATEST_SCHEMA is enabled.
  @VisibleForTesting
  protected Optional<KafkaSchemaRegistry> schemaRegistry = Optional.absent();
  // Kafka topic backing the table; only set when FETCH_LATEST_SCHEMA is enabled.
  private String topicName = "";
public HiveMetaStoreBasedRegister(State state, Optional<String> metastoreURI) throws IOException {
super(state);
this.state = state;
this.locks = new HiveLock(state.getProperties());
this.optimizedChecks = state.getPropAsBoolean(OPTIMIZED_CHECK_ENABLED, true);
this.skipDiffComputation = state.getPropAsBoolean(SKIP_PARTITION_DIFF_COMPUTATION, false);
this.shouldUpdateLatestSchema = state.getPropAsBoolean(FETCH_LATEST_SCHEMA, false);
this.registerPartitionWithPullMode = state.getPropAsBoolean(REGISTER_PARTITION_WITH_PULL_MODE, false);
if(this.shouldUpdateLatestSchema) {
this.schemaRegistry = Optional.of(KafkaSchemaRegistry.get(state.getProperties()));
topicName = state.getProp(KafkaSource.TOPIC_NAME);
}
GenericObjectPoolConfig config = new GenericObjectPoolConfig();
config.setMaxTotal(this.props.getNumThreads());
config.setMaxIdle(this.props.getNumThreads());
this.clientPool = HiveMetastoreClientPool.get(this.props.getProperties(), metastoreURI);
this.metricContext =
GobblinMetricsRegistry.getInstance().getMetricContext(state, HiveMetaStoreBasedRegister.class, GobblinMetrics.getCustomTagsFromState(state));
this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, "org.apache.gobblin.hive.HiveMetaStoreBasedRegister").build();
}
@Override
protected void registerPath(HiveSpec spec) throws IOException {
try (Timer.Context context = this.metricContext.timer(PATH_REGISTER_TIMER).time();
AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
Table table = HiveMetaStoreUtils.getTable(spec.getTable());
// Abort the rest of operations if a view is seen.
if (table.getTableType().equals(TableType.VIRTUAL_VIEW.name())) {
String msg = "Cannot register paths against a view on Hive for:" + spec.getPath()
+ " on table:" + spec.getTable().toString();
log.info(msg);
HiveMetaStoreEventHelper.submitFailedPathRegistration(eventSubmitter, spec,
new UnsupportedOperationException(msg));
}
createDbIfNotExists(client.get(), table.getDbName());
createOrAlterTable(client.get(), table, spec);
Optional<HivePartition> partition = spec.getPartition();
if (partition.isPresent()) {
addOrAlterPartition(client.get(), table, partition.get());
}
HiveMetaStoreEventHelper.submitSuccessfulPathRegistration(eventSubmitter, spec);
} catch (TException e) {
HiveMetaStoreEventHelper.submitFailedPathRegistration(eventSubmitter, spec, e);
throw new IOException(e);
}
}
  /**
   * This method is used to update the table schema to the latest schema.
   * It fetches the creation time of the latest schema from the schema registry and compares that
   * with the creation time of the writer's schema. If they are the same, the table schema is
   * updated to the writer's schema; otherwise the table schema is kept the same as the schema of
   * the existing table.
   * Note: If there is no schema specified in the table spec, the schema is directly updated to
   * the existing table schema.
   * Note: We cannot treat the creation time as a version number of the schema, since the schema
   * registry allows "out of order registration" of schemas; this means the chronologically latest
   * is NOT what the registry considers latest.
   *
   * @param spec the spec being registered; its SCHEMA_LITERAL SerDe prop may be overwritten here
   * @param table the Thrift table being built; its storage descriptor is refreshed when the schema changes
   * @param existingTable the schema-source table currently in the metastore
   * @throws IOException on schema-registry or column-update failures
   */
  @VisibleForTesting
  protected void updateSchema(HiveSpec spec, Table table, HiveTable existingTable) throws IOException{
    // Only applicable when a registry is configured and the existing table carries an Avro schema literal.
    if (this.schemaRegistry.isPresent() && existingTable.getSerDeProps().getProp(
        AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()) != null) {
      try (Timer.Context context = this.metricContext.timer(GET_AND_SET_LATEST_SCHEMA).time()) {
        Schema existingTableSchema = new Schema.Parser().parse(existingTable.getSerDeProps().getProp(
            AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()));
        String existingSchemaCreationTime = AvroUtils.getSchemaCreationTime(existingTableSchema);
        // If no schema set for the table spec, we fall back to existing schema.
        if (spec.getTable().getSerDeProps().getProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()) == null) {
          spec.getTable()
              .getSerDeProps()
              .setProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(), existingTableSchema);
          HiveMetaStoreUtils.updateColumnsInfoIfNeeded(spec);
          table.setSd(HiveMetaStoreUtils.getStorageDescriptor(spec.getTable()));
          return;
        }
        Schema writerSchema = new Schema.Parser().parse((
            spec.getTable().getSerDeProps().getProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName())));
        String writerSchemaCreationTime = AvroUtils.getSchemaCreationTime(writerSchema);
        if(existingSchemaCreationTime != null && !existingSchemaCreationTime.equals(writerSchemaCreationTime)) {
          // Writer schema differs from the existing schema; consult the registry to decide which wins.
          Schema latestSchema = (Schema) this.schemaRegistry.get().getLatestSchemaByTopic(topicName);
          String latestSchemaCreationTime = AvroUtils.getSchemaCreationTime(latestSchema);
          if (latestSchemaCreationTime != null && latestSchemaCreationTime.equals(existingSchemaCreationTime)) {
            // The existing table schema is still the registry's latest; keep it and refresh the storage descriptor.
            spec.getTable()
                .getSerDeProps()
                .setProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(), existingTableSchema);
            HiveMetaStoreUtils.updateColumnsInfoIfNeeded(spec);
            table.setSd(HiveMetaStoreUtils.getStorageDescriptor(spec.getTable()));
          }
        }
      } catch ( IOException e) {
        log.error(String.format("Error when updating latest schema for topic %s", topicName));
        throw new IOException(e);
      }
    }
  }
  /**
   * Creates the table via RPC if it does not yet exist on the Hive side.
   *
   * @return true if this call created the table; false if the table already existed
   *         (including losing a create race to another registrar).
   */
  private boolean ensureHiveTableExistenceBeforeAlternation(String tableName, String dbName, IMetaStoreClient client,
      Table table) throws TException, IOException{
    // Hold the per-table lock so concurrent registrars in this process do not race on create.
    try (AutoCloseableHiveLock lock = this.locks.getTableLock(dbName, tableName)) {
      try {
        if (!existsTable(dbName, tableName, client)) {
          try (Timer.Context context = this.metricContext.timer(CREATE_HIVE_TABLE).time()) {
            // Refresh the create time to "now" before sending the table to the metastore.
            client.createTable(getTableWithCreateTimeNow(table));
            log.info(String.format("Created Hive table %s in db %s", tableName, dbName));
            return true;
          }
        }
      } catch (AlreadyExistsException ignore) {
        // Another process created the table between our check and the create RPC; treat as existing.
      } catch (TException e) {
        log.error(
            String.format("Unable to create Hive table %s in db %s: " + e.getMessage(), tableName, dbName), e);
        throw e;
      }
      log.info("Table {} already exists in db {}.", tableName, dbName);
      // When the logic up to here it means table already existed in db. Return false.
      return false;
    }
  }
  /**
   * Fetches the existing table and issues an alter_table RPC only when the desired table differs
   * from it. When {@link #SCHEMA_SOURCE_DB} is configured, the schema is taken from the same-named
   * table in that database; when {@link #FETCH_LATEST_SCHEMA} is enabled, the schema is first
   * reconciled via {@link #updateSchema}.
   */
  private void alterTableIfNeeded (String tableName, String dbName, IMetaStoreClient client,
      Table table, HiveSpec spec) throws TException, IOException {
    try {
      HiveTable existingTable;
      try (Timer.Context context = this.metricContext.timer(GET_HIVE_TABLE).time()) {
        existingTable = HiveMetaStoreUtils.getHiveTable(client.getTable(dbName, tableName));
      }
      HiveTable schemaSourceTable = existingTable;
      if (state.contains(SCHEMA_SOURCE_DB)) {
        try (Timer.Context context = this.metricContext.timer(GET_SCHEMA_SOURCE_HIVE_TABLE).time()) {
          // We assume the schema source table has the same table name as the origin table, so only the db name can be configured
          schemaSourceTable = HiveMetaStoreUtils.getHiveTable(client.getTable(state.getProp(SCHEMA_SOURCE_DB, dbName),
              tableName));
        }
      }
      if(shouldUpdateLatestSchema) {
        updateSchema(spec, table, schemaSourceTable);
      }
      if (needToUpdateTable(existingTable, HiveMetaStoreUtils.getHiveTable(table))) {
        try (Timer.Context context = this.metricContext.timer(ALTER_TABLE).time()) {
          // Merge existing table props into the new table so out-of-band props are not dropped.
          client.alter_table(dbName, tableName, getNewTblByMergingExistingTblProps(table, existingTable));
        }
        log.info(String.format("updated Hive table %s in db %s", tableName, dbName));
      }
    } catch (TException e2) {
      log.error(
          String.format("Unable to alter Hive table %s in db %s: " + e2.getMessage(), tableName, dbName),
          e2);
      throw e2;
    }
  }
  /**
   * Creates the Hive database via RPC if it does not already exist on the Hive side.
   *
   * @param hiveDbName the hive database to be checked for existence
   * @return true if this call created the database; false if it already existed.
   */
  private boolean ensureHiveDbExistence(String hiveDbName, IMetaStoreClient client) throws IOException{
    // Per-db lock prevents concurrent create attempts from threads of this process.
    try (AutoCloseableHiveLock lock = this.locks.getDbLock(hiveDbName)) {
      Database db = new Database();
      db.setName(hiveDbName);
      try {
        try (Timer.Context context = this.metricContext.timer(GET_HIVE_DATABASE).time()) {
          client.getDatabase(db.getName());
        }
        return false;
      } catch (NoSuchObjectException nsoe) {
        // Database is missing; proceed with create below.
      } catch (TException te) {
        throw new IOException(te);
      }
      // The db location is derived from the configured root dir, so that property is mandatory here.
      Preconditions.checkState(this.hiveDbRootDir.isPresent(),
          "Missing required property " + HiveRegProps.HIVE_DB_ROOT_DIR);
      db.setLocationUri(new Path(this.hiveDbRootDir.get(), hiveDbName + HIVE_DB_EXTENSION).toString());
      try {
        try (Timer.Context context = this.metricContext.timer(CREATE_HIVE_DATABASE).time()) {
          client.createDatabase(db);
        }
        log.info("Created database " + hiveDbName);
        HiveMetaStoreEventHelper.submitSuccessfulDBCreation(this.eventSubmitter, hiveDbName);
        return true;
      } catch (AlreadyExistsException e) {
        // Lost a create race to another process; the database exists, which is all that matters.
        return false;
      } catch (TException e) {
        HiveMetaStoreEventHelper.submitFailedDBCreation(this.eventSubmitter, hiveDbName, e);
        throw new IOException("Unable to create Hive database " + hiveDbName, e);
      }
    }
  }
/**
* @return true if the db is successfully created.
* false if the db already exists.
* @throws IOException
*/
private boolean createDbIfNotExists(IMetaStoreClient client, String dbName) throws IOException {
boolean retVal;
if (this.optimizedChecks) {
try {
retVal = this.tableAndDbExistenceCache.get(dbName, new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return ensureHiveDbExistence(dbName, client);
}
});
} catch (ExecutionException ee) {
throw new IOException("Database existence checking throwing execution exception.", ee);
}
return retVal;
} else {
return this.ensureHiveDbExistence(dbName, client);
}
}
  /**
   * Creates the Hive database {@code dbName} unless it already exists, borrowing a metastore
   * client from the pool for the duration of the call.
   *
   * @deprecated Use {@link #createDbIfNotExists(IMetaStoreClient, String)} directly.
   */
  @Deprecated
  public boolean createDbIfNotExists(String dbName) throws IOException {
    try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
      return createDbIfNotExists(client.get(), dbName);
    }
  }
  /**
   * Creates {@code table} unless a table of the same name already exists, borrowing a metastore
   * client from the pool.
   *
   * @deprecated Please use {@link #createOrAlterTable(IMetaStoreClient, Table, HiveSpec)} instead.
   */
  @Deprecated
  @Override
  public boolean createTableIfNotExists(HiveTable table) throws IOException {
    try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
      return createTableIfNotExists(client.get(), HiveMetaStoreUtils.getTable(table), table);
    }
  }
  /**
   * Adds {@code partition} to {@code table} unless it already exists. The check-then-add sequence
   * is guarded by a table-level lock to avoid races between concurrent registrars in this process.
   *
   * @return true if the partition was added, false if it already existed
   * @deprecated Use {@link #addOrAlterPartition} instead.
   */
  @Deprecated
  @Override
  public boolean addPartitionIfNotExists(HiveTable table, HivePartition partition) throws IOException {
    try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient();
        AutoCloseableHiveLock lock = this.locks.getTableLock(table.getDbName(), table.getTableName())) {
      try {
        try (Timer.Context context = this.metricContext.timer(GET_HIVE_PARTITION).time()) {
          client.get().getPartition(table.getDbName(), table.getTableName(), partition.getValues());
        }
        // getPartition succeeded, so the partition already exists; nothing to add.
        return false;
      } catch (NoSuchObjectException e) {
        // Absent: add it, stamping a create time if the partition does not carry one.
        try (Timer.Context context = this.metricContext.timer(ADD_PARTITION_TIMER).time()) {
          client.get().add_partition(getPartitionWithCreateTimeNow(HiveMetaStoreUtils.getPartition(partition)));
        }
        HiveMetaStoreEventHelper.submitSuccessfulPartitionAdd(this.eventSubmitter, table, partition);
        return true;
      }
    } catch (TException e) {
      HiveMetaStoreEventHelper.submitFailedPartitionAdd(this.eventSubmitter, table, partition, e);
      throw new IOException(String.format("Unable to add partition %s in table %s in db %s", partition.getValues(),
          table.getTableName(), table.getDbName()), e);
    }
  }
  /**
   * Creates {@code table} with the provided client unless it already exists, under a table-level
   * lock. A create time of "now" is stamped when the table definition carries none.
   * (Javadoc moved before the annotation so the javadoc tool attaches it to the method.)
   *
   * @return true if the table was created, false if it already existed
   * @deprecated Please use {@link #createOrAlterTable(IMetaStoreClient, Table, HiveSpec)} instead.
   */
  @Deprecated
  private boolean createTableIfNotExists(IMetaStoreClient client, Table table, HiveTable hiveTable) throws IOException {
    String dbName = table.getDbName();
    String tableName = table.getTableName();
    try (AutoCloseableHiveLock lock = this.locks.getTableLock(dbName, tableName)) {
      boolean tableExists;
      try (Timer.Context context = this.metricContext.timer(TABLE_EXISTS).time()) {
        tableExists = client.tableExists(table.getDbName(), table.getTableName());
      }
      if (tableExists) {
        return false;
      }
      try (Timer.Context context = this.metricContext.timer(CREATE_HIVE_TABLE).time()) {
        client.createTable(getTableWithCreateTimeNow(table));
      }
      log.info(String.format("Created Hive table %s in db %s", tableName, dbName));
      HiveMetaStoreEventHelper.submitSuccessfulTableCreation(this.eventSubmitter, hiveTable);
      return true;
    } catch (TException e) {
      HiveMetaStoreEventHelper.submitFailedTableCreation(eventSubmitter, hiveTable, e);
      throw new IOException(String.format("Error in creating or altering Hive table %s in db %s", table.getTableName(),
          table.getDbName()), e);
    }
  }
private void createOrAlterTable(IMetaStoreClient client, Table table, HiveSpec spec) throws TException, IOException {
String dbName = table.getDbName();
String tableName = table.getTableName();
this.ensureHiveTableExistenceBeforeAlternation(tableName, dbName, client, table);
alterTableIfNeeded(tableName, dbName, client, table, spec);
}
public boolean existsTable(String dbName, String tableName, IMetaStoreClient client) throws IOException {
Boolean tableExits = this.tableAndDbExistenceCache.getIfPresent(dbName + ":" + tableName );
if (this.optimizedChecks && tableExits != null && tableExits) {
return true;
}
try {
boolean exists;
try (Timer.Context context = this.metricContext.timer(TABLE_EXISTS).time()) {
exists = client.tableExists(dbName, tableName);
}
this.tableAndDbExistenceCache.put(dbName + ":" + tableName, exists);
return exists;
} catch (TException e) {
throw new IOException(String.format("Unable to check existence of table %s in db %s", tableName, dbName), e);
}
}
  /**
   * Checks whether table {@code tableName} exists in {@code dbName}, borrowing a metastore
   * client from the pool.
   */
  @Override
  public boolean existsTable(String dbName, String tableName) throws IOException {
    try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
      return existsTable(dbName, tableName, client.get());
    }
  }
  /**
   * Checks whether the partition identified by {@code partitionValues} exists in the given table.
   * Note: {@code partitionKeys} is not consulted by this implementation; existence is determined
   * solely by a getPartition call on the values.
   */
  @Override
  public boolean existsPartition(String dbName, String tableName, List<Column> partitionKeys,
      List<String> partitionValues) throws IOException {
    try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
      try (Timer.Context context = this.metricContext.timer(GET_HIVE_PARTITION).time()) {
        client.get().getPartition(dbName, tableName, partitionValues);
      }
      return true;
    } catch (NoSuchObjectException e) {
      // getPartition signals absence by throwing rather than returning null.
      return false;
    } catch (TException e) {
      throw new IOException(String.format("Unable to check existence of partition %s in table %s in db %s",
          partitionValues, tableName, dbName), e);
    }
  }
@Override
public void dropTableIfExists(String dbName, String tableName) throws IOException {
try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
boolean tableExists;
try (Timer.Context context = this.metricContext.timer(TABLE_EXISTS).time()) {
tableExists = client.get().tableExists(dbName, tableName);
}
if (tableExists) {
try (Timer.Context context = this.metricContext.timer(DROP_TABLE).time()) {
client.get().dropTable(dbName, tableName, false, false);
}
String metastoreURI = this.clientPool.getHiveConf().get(HiveMetaStoreClientFactory.HIVE_METASTORE_TOKEN_SIGNATURE, "null");
HiveMetaStoreEventHelper.submitSuccessfulTableDrop(eventSubmitter, dbName, tableName, metastoreURI);
log.info("Dropped table " + tableName + " in db " + dbName);
}
} catch (TException e) {
HiveMetaStoreEventHelper.submitFailedTableDrop(eventSubmitter, dbName, tableName, e);
throw new IOException(String.format("Unable to deregister table %s in db %s", tableName, dbName), e);
}
}
@Override
public void dropPartitionIfExists(String dbName, String tableName, List<Column> partitionKeys,
List<String> partitionValues) throws IOException {
try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
if (client.get().getPartition(dbName, tableName, partitionValues) == null) {
// Partition does not exist. Nothing to do
return;
}
try (Timer.Context context = this.metricContext.timer(DROP_TABLE).time()) {
client.get().dropPartition(dbName, tableName, partitionValues, false);
}
String metastoreURI = this.clientPool.getHiveConf().get(HiveMetaStoreClientFactory.HIVE_METASTORE_TOKEN_SIGNATURE, "null");
HiveMetaStoreEventHelper.submitSuccessfulPartitionDrop(eventSubmitter, dbName, tableName, partitionValues, metastoreURI);
log.info("Dropped partition " + partitionValues + " in table " + tableName + " in db " + dbName);
} catch (NoSuchObjectException e) {
// Partition does not exist. Nothing to do
} catch (TException e) {
HiveMetaStoreEventHelper.submitFailedPartitionDrop(eventSubmitter, dbName, tableName, partitionValues, e);
throw new IOException(String.format("Unable to check existence of Hive partition %s in table %s in db %s",
partitionValues, tableName, dbName), e);
}
}
@Override
public void addOrAlterPartition(HiveTable table, HivePartition partition) throws IOException {
try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
addOrAlterPartition(client.get(), HiveMetaStoreUtils.getTable(table), partition);
} catch (TException te) {
throw new IOException(
String.format("Failed to add/alter partition %s.%s@%s", table.getDbName(), table.getTableName(), partition.getValues()),
te);
}
}
private void addOrAlterPartitionWithPushMode(IMetaStoreClient client, Table table, HivePartition partition)
throws TException, IOException {
Partition nativePartition = HiveMetaStoreUtils.getPartition(partition);
Preconditions.checkArgument(table.getPartitionKeysSize() == nativePartition.getValues().size(),
String.format("Partition key size is %s but partition value size is %s", table.getPartitionKeys().size(),
nativePartition.getValues().size()));
try (AutoCloseableHiveLock lock =
this.locks.getPartitionLock(table.getDbName(), table.getTableName(), nativePartition.getValues())) {
try {
try (Timer.Context context = this.metricContext.timer(ADD_PARTITION_TIMER).time()) {
client.add_partition(getPartitionWithCreateTimeNow(nativePartition));
}
log.info(String.format("Added partition %s to table %s with location %s", stringifyPartition(nativePartition),
table.getTableName(), nativePartition.getSd().getLocation()));
} catch (AlreadyExistsException e) {
try {
if (this.skipDiffComputation) {
onPartitionExistWithoutComputingDiff(table, nativePartition, e);
} else {
onPartitionExist(client, table, partition, nativePartition, null);
}
} catch (Throwable e2) {
log.error(String.format(
"Unable to add or alter partition %s in table %s with location %s: " + e2.getMessage(),
stringifyPartitionVerbose(nativePartition), table.getTableName(), nativePartition.getSd().getLocation()), e2);
throw e2;
}
}
}
}
private void addOrAlterPartition(IMetaStoreClient client, Table table, HivePartition partition)
throws TException, IOException {
if(!registerPartitionWithPullMode) {
addOrAlterPartitionWithPushMode(client, table, partition);
} else {
addOrAlterPartitionWithPullMode(client, table, partition);
}
}
private void addOrAlterPartitionWithPullMode(IMetaStoreClient client, Table table, HivePartition partition)
throws TException, IOException {
Partition nativePartition = HiveMetaStoreUtils.getPartition(partition);
Preconditions.checkArgument(table.getPartitionKeysSize() == nativePartition.getValues().size(),
String.format("Partition key size is %s but partition value size is %s", table.getPartitionKeys().size(),
nativePartition.getValues().size()));
try (AutoCloseableHiveLock lock =
this.locks.getPartitionLock(table.getDbName(), table.getTableName(), nativePartition.getValues())) {
Partition existedPartition;
try {
try (Timer.Context context = this.metricContext.timer(GET_HIVE_PARTITION).time()) {
existedPartition = client.getPartition(table.getDbName(), table.getTableName(), nativePartition.getValues());
if (this.skipDiffComputation) {
onPartitionExistWithoutComputingDiff(table, nativePartition, null);
} else {
onPartitionExist(client, table, partition, nativePartition, existedPartition);
}
}
} catch (NoSuchObjectException e) {
try (Timer.Context context = this.metricContext.timer(ADD_PARTITION_TIMER).time()) {
client.add_partition(getPartitionWithCreateTimeNow(nativePartition));
}
catch (Throwable e2) {
log.error(String.format(
"Unable to add or alter partition %s in table %s with location %s: " + e2.getMessage(),
stringifyPartitionVerbose(nativePartition), table.getTableName(), nativePartition.getSd().getLocation()), e2);
throw e2;
}
log.info(String.format("Added partition %s to table %s with location %s", stringifyPartition(nativePartition),
table.getTableName(), nativePartition.getSd().getLocation()));
}
}
}
private void onPartitionExist(IMetaStoreClient client, Table table, HivePartition partition, Partition nativePartition, Partition existedPartition) throws TException {
HivePartition existingPartition;
if(existedPartition == null) {
try (Timer.Context context = this.metricContext.timer(GET_HIVE_PARTITION).time()) {
existingPartition = HiveMetaStoreUtils.getHivePartition(
client.getPartition(table.getDbName(), table.getTableName(), nativePartition.getValues()));
}
} else {
existingPartition = HiveMetaStoreUtils.getHivePartition(existedPartition);
}
if (needToUpdatePartition(existingPartition, partition)) {
log.info(String.format("Partition update required. ExistingPartition %s, newPartition %s",
stringifyPartition(existingPartition), stringifyPartition(partition)));
Partition newPartition = getPartitionWithCreateTime(nativePartition, existingPartition);
log.info(String.format("Altering partition %s", newPartition));
try (Timer.Context context = this.metricContext.timer(ALTER_PARTITION).time()) {
client.alter_partition(table.getDbName(), table.getTableName(), newPartition);
}
log.info(String.format("Updated partition %s in table %s with location %s", stringifyPartition(newPartition),
table.getTableName(), nativePartition.getSd().getLocation()));
} else {
log.debug(String.format("Partition %s in table %s with location %s already exists and no need to update",
stringifyPartition(nativePartition), table.getTableName(), nativePartition.getSd().getLocation()));
}
}
private void onPartitionExistWithoutComputingDiff(Table table, Partition nativePartition, TException e) throws TException {
if(e == null) {
return;
}
if (e instanceof AlreadyExistsException) {
log.debug(String.format("Partition %s in table %s with location %s already exists and no need to update",
stringifyPartition(nativePartition), table.getTableName(), nativePartition.getSd().getLocation()));
}
else {
throw e;
}
}
private static String stringifyPartition(Partition partition) {
if (log.isDebugEnabled()) {
return stringifyPartitionVerbose(partition);
}
return Arrays.toString(partition.getValues().toArray());
}
private static String stringifyPartition(HivePartition partition) {
return partition.toString();
}
private static String stringifyPartitionVerbose(Partition partition) {
return partition.toString();
}
@Override
public Optional<HiveTable> getTable(String dbName, String tableName) throws IOException {
try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
Table hiveTable;
try (Timer.Context context = this.metricContext.timer(GET_HIVE_TABLE).time()) {
hiveTable = client.get().getTable(dbName, tableName);
}
return Optional.of(HiveMetaStoreUtils.getHiveTable(hiveTable));
} catch (NoSuchObjectException e) {
return Optional.<HiveTable> absent();
} catch (TException e) {
throw new IOException("Unable to get table " + tableName + " in db " + dbName, e);
}
}
@Override
public Optional<HivePartition> getPartition(String dbName, String tableName, List<Column> partitionKeys,
List<String> partitionValues) throws IOException {
try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
Partition hivePartition;
try (Timer.Context context = this.metricContext.timer(GET_HIVE_PARTITION).time()) {
hivePartition = client.get().getPartition(dbName, tableName, partitionValues);
}
return Optional.of(HiveMetaStoreUtils.getHivePartition(hivePartition));
} catch (NoSuchObjectException e) {
return Optional.<HivePartition> absent();
} catch (TException e) {
throw new IOException(
"Unable to get partition " + partitionValues + " from table " + tableName + " in db " + dbName, e);
}
}
@Override
public void alterTable(HiveTable table) throws IOException {
try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
Table existingTable;
//During alter table we need to persist the existing property of iceberg in HMS
try (Timer.Context context = this.metricContext.timer(GET_HIVE_TABLE).time()) {
existingTable = client.get().getTable(table.getDbName(), table.getTableName());
} catch (Exception e){
throw new IOException("Cannot get table " + table.getTableName() + " in db " + table.getDbName(), e);
}
try (Timer.Context context = this.metricContext.timer(ALTER_TABLE).time()) {
table.getProps().addAllIfNotExist(HiveMetaStoreUtils.getTableProps(existingTable));
client.get().alter_table(table.getDbName(), table.getTableName(),
getTableWithCreateTimeNow(HiveMetaStoreUtils.getTable(table)));
}
HiveMetaStoreEventHelper.submitSuccessfulTableAlter(eventSubmitter, table);
} catch (TException e) {
HiveMetaStoreEventHelper.submitFailedTableAlter(eventSubmitter, table, e);
throw new IOException("Unable to alter table " + table.getTableName() + " in db " + table.getDbName(), e);
}
}
@Override
public void alterPartition(HiveTable table, HivePartition partition) throws IOException {
try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
try (Timer.Context context = this.metricContext.timer(ALTER_PARTITION).time()) {
client.get().alter_partition(table.getDbName(), table.getTableName(),
getPartitionWithCreateTimeNow(HiveMetaStoreUtils.getPartition(partition)));
}
HiveMetaStoreEventHelper.submitSuccessfulPartitionAlter(eventSubmitter, table, partition);
} catch (TException e) {
HiveMetaStoreEventHelper.submitFailedPartitionAlter(eventSubmitter, table, partition, e);
throw new IOException(String.format("Unable to alter partition %s in table %s in db %s", partition.getValues(),
table.getTableName(), table.getDbName()), e);
}
}
private Partition getPartitionWithCreateTimeNow(Partition partition) {
return getPartitionWithCreateTime(partition, Ints.checkedCast(DateTime.now().getMillis() / 1000));
}
private Partition getPartitionWithCreateTime(Partition partition, HivePartition referencePartition) {
return getPartitionWithCreateTime(partition,
Ints.checkedCast(referencePartition.getCreateTime().or(DateTime.now().getMillis() / 1000)));
}
/**
* Sets create time if not already set.
*/
private Partition getPartitionWithCreateTime(Partition partition, int createTime) {
if (partition.isSetCreateTime() && partition.getCreateTime() > 0) {
return partition;
}
Partition actualPartition = partition.deepCopy();
actualPartition.setCreateTime(createTime);
return actualPartition;
}
private Table getTableWithCreateTimeNow(Table table) {
return gettableWithCreateTime(table, Ints.checkedCast(DateTime.now().getMillis() / 1000));
}
private Table getTableWithCreateTime(Table table, HiveTable referenceTable) {
return gettableWithCreateTime(table,
Ints.checkedCast(referenceTable.getCreateTime().or(DateTime.now().getMillis() / 1000)));
}
/**
* Sets create time if not already set.
*/
private Table gettableWithCreateTime(Table table, int createTime) {
if (table.isSetCreateTime() && table.getCreateTime() > 0) {
return table;
}
Table actualtable = table.deepCopy();
actualtable.setCreateTime(createTime);
return actualtable;
}
/**
* Used to merge properties from existingTable to newTable.
* e.g. New table will inherit creation time from existing table.
*
* This method is extensible for customized logic in merging table properties.
* @param newTable
* @param existingTable
*/
protected Table getNewTblByMergingExistingTblProps(Table newTable, HiveTable existingTable) {
Table table = getTableWithCreateTime(newTable, existingTable);
// Get existing parameters
Map<String, String> allParameters = HiveMetaStoreUtils.getParameters(existingTable.getProps());
// Apply new parameters
allParameters.putAll(table.getParameters());
table.setParameters(allParameters);
return table;
}
}
| 4,633 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/metastore/HiveMetaStoreEventHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.metastore;
import java.util.List;
import java.util.Map;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.metrics.event.EventSubmitter;
/**
* Helper class to populate hive registration event in state.
*/
public class HiveMetaStoreEventHelper {
public static final String SUCCESS_POSTFIX = "Succeed";
public static final String FAILED_POSTFIX = "Failed";
public static final String DB_NAME = "DBName";
public static final String TABLE_NAME = "TableName";
public static final String PARTITIONS = "Partitions";
public static final String ERROR_MESSAGE = "ErrorMessage";
public static final String PATH_REGISTRATION = "PathRegistration";
public static final String DB_CREATION = "DBCreation";
public static final String TABLE_CREATION = "TableCreation";
public static final String TABLE_DROP = "TableDrop";
public static final String TABLE_ALTER = "TableAlter";
public static final String PARTITION_CREATION = "PartitionCreation";
public static final String PARTITION_DROP = "PartitionDrop";
public static final String PARTITION_ALTER = "PartitionAlter";
// Path Registration
protected static void submitSuccessfulPathRegistration(EventSubmitter eventSubmitter, HiveSpec spec) {
eventSubmitter.submit(PATH_REGISTRATION + SUCCESS_POSTFIX,
getAdditionalMetadata(spec, Optional.<Exception> absent()));
}
protected static void submitFailedPathRegistration(EventSubmitter eventSubmitter, HiveSpec spec, Exception error) {
eventSubmitter.submit(PATH_REGISTRATION + FAILED_POSTFIX,
getAdditionalMetadata(spec, Optional.<Exception> of(error)));
}
private static Map<String, String> getAdditionalMetadata(HiveSpec spec,
Optional<Exception> error) {
ImmutableMap.Builder<String, String> builder =
ImmutableMap.<String, String> builder().put(DB_NAME, spec.getTable().getDbName())
.put(TABLE_NAME, spec.getTable().getTableName()).put("Path", spec.getPath().toString());
if(spec.getPartition().isPresent()){
builder.put(PARTITIONS, spec.getPartition().get().toString());
}
if (error.isPresent()) {
builder.put(ERROR_MESSAGE, error.get().getMessage());
}
return builder.build();
}
private static Map<String, String> getAdditionalMetadata(HiveTable table,
Optional<HivePartition> partition, Optional<Exception> error) {
ImmutableMap.Builder<String, String> builder =
ImmutableMap.<String, String> builder().put(DB_NAME, table.getDbName()).put(TABLE_NAME, table.getTableName());
if (table.getLocation().isPresent()) {
builder.put("Location", table.getLocation().get());
}
if (partition.isPresent()) {
builder.put("Partition", partition.get().toString());
}
if (error.isPresent()) {
builder.put(ERROR_MESSAGE, error.get().getMessage());
}
return builder.build();
}
// DB Creation
protected static void submitSuccessfulDBCreation(EventSubmitter eventSubmitter, String dbName) {
eventSubmitter.submit(DB_CREATION+SUCCESS_POSTFIX, ImmutableMap.of(DB_NAME, dbName));
}
protected static void submitFailedDBCreation(EventSubmitter eventSubmitter, String dbName, Exception error) {
eventSubmitter.submit(DB_CREATION+FAILED_POSTFIX,
ImmutableMap.<String, String> builder().put(DB_NAME, dbName).put(ERROR_MESSAGE, error.getMessage()).build());
}
// Table Creation
protected static void submitSuccessfulTableCreation(EventSubmitter eventSubmitter, HiveTable table) {
eventSubmitter.submit(TABLE_CREATION+SUCCESS_POSTFIX, getAdditionalMetadata(table,
Optional.<HivePartition> absent(), Optional.<Exception> absent()));
}
protected static void submitFailedTableCreation(EventSubmitter eventSubmitter, HiveTable table, Exception error) {
eventSubmitter.submit(TABLE_CREATION+FAILED_POSTFIX, getAdditionalMetadata(table,
Optional.<HivePartition> absent(), Optional.<Exception> of(error)));
}
// Add partition
protected static void submitSuccessfulPartitionAdd(EventSubmitter eventSubmitter, HiveTable table,
HivePartition partition) {
eventSubmitter.submit(PARTITION_CREATION+SUCCESS_POSTFIX, getAdditionalMetadata(table,
Optional.<HivePartition> of(partition), Optional.<Exception> absent()));
}
protected static void submitFailedPartitionAdd(EventSubmitter eventSubmitter, HiveTable table,
HivePartition partition, Exception error) {
eventSubmitter.submit(PARTITION_CREATION+FAILED_POSTFIX, getAdditionalMetadata(table,
Optional.<HivePartition> of(partition), Optional.<Exception> of(error)));
}
// Drop Table
protected static void submitSuccessfulTableDrop(EventSubmitter eventSubmitter, String dbName,
String tableName, String metastoreURI) {
eventSubmitter.submit(TABLE_DROP+SUCCESS_POSTFIX,
ImmutableMap.<String, String> builder().put(DB_NAME, dbName).put(TABLE_NAME, tableName).
put("metastoreURI", metastoreURI).build());
}
protected static void submitFailedTableDrop(EventSubmitter eventSubmitter, String dbName, String tableName,
Exception e) {
eventSubmitter.submit(TABLE_DROP+FAILED_POSTFIX, ImmutableMap.<String, String> builder().put(DB_NAME, dbName)
.put(TABLE_NAME, tableName).put(ERROR_MESSAGE, e.getMessage()).build());
}
// Drop partition
protected static void submitSuccessfulPartitionDrop(EventSubmitter eventSubmitter, String dbName, String tableName,
List<String> partitionValues, String metastoreURI) {
eventSubmitter.submit(PARTITION_DROP+SUCCESS_POSTFIX, ImmutableMap.<String, String> builder().put(DB_NAME, dbName)
.put(TABLE_NAME, tableName).put("PartitionValues", partitionValues.toString())
.put("metastoreURI", metastoreURI).build());
}
protected static void submitFailedPartitionDrop(EventSubmitter eventSubmitter, String dbName, String tableName,
List<String> partitionValues, Exception error) {
eventSubmitter.submit(PARTITION_DROP+FAILED_POSTFIX,
ImmutableMap.<String, String> builder().put(DB_NAME, dbName).put(TABLE_NAME, tableName)
.put("PartitionValues", partitionValues.toString()).put(ERROR_MESSAGE, error.getMessage()).build());
}
// Alter Table
protected static void submitSuccessfulTableAlter(EventSubmitter eventSubmitter, HiveTable table) {
eventSubmitter.submit(TABLE_ALTER+SUCCESS_POSTFIX, getAdditionalMetadata(table,
Optional.<HivePartition> absent(), Optional.<Exception> absent()));
}
protected static void submitFailedTableAlter(EventSubmitter eventSubmitter, HiveTable table, Exception error) {
eventSubmitter.submit(TABLE_ALTER+FAILED_POSTFIX, getAdditionalMetadata(table,
Optional.<HivePartition> absent(), Optional.<Exception> of(error)));
}
// Alter partition
protected static void submitSuccessfulPartitionAlter(EventSubmitter eventSubmitter, HiveTable table,
HivePartition partition) {
eventSubmitter.submit(PARTITION_ALTER+SUCCESS_POSTFIX, getAdditionalMetadata(table,
Optional.<HivePartition> of(partition), Optional.<Exception> absent()));
}
protected static void submitFailedPartitionAlter(EventSubmitter eventSubmitter, HiveTable table,
HivePartition partition, Exception error) {
eventSubmitter.submit(PARTITION_ALTER+FAILED_POSTFIX, getAdditionalMetadata(table,
Optional.<HivePartition> of(partition), Optional.<Exception> of(error)));
}
}
| 4,634 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/policy/HiveRegistrationPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.policy;
import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.hive.spec.HiveSpec;
/**
* An interface for generating a {@link HiveSpec} for a {@link Path}.
*
* @author Ziyang Liu
*/
@Alpha
public interface HiveRegistrationPolicy {
public static final String MAPREDUCE_JOB_INPUT_PATH_EMPTY_KEY = "mapreduce.job.input.path.empty";
/**
* Get a collection of {@link HiveSpec}s for a {@link Path}, which can be used by {@link org.apache.gobblin.hive.HiveRegister}
* to register the given {@link Path}.
*/
public Collection<HiveSpec> getHiveSpecs(Path path) throws IOException;
}
| 4,635 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/policy/HiveSnapshotRegistrationPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.policy;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import com.google.common.base.Optional;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.hive.spec.SimpleHiveSpec;
/**
* A {@link org.apache.gobblin.hive.policy.HiveRegistrationPolicy} for registering snapshots.
*
* @author Ziyang Liu
*/
@Alpha
public class HiveSnapshotRegistrationPolicy extends HiveRegistrationPolicyBase {

  public static final String SNAPSHOT_PATH_PATTERN = "snapshot.path.pattern";

  // Optional regex that a snapshot directory's full path must match to be eligible.
  protected final Optional<Pattern> snapshotPathPattern;

  protected HiveSnapshotRegistrationPolicy(State props) throws IOException {
    super(props);
    this.snapshotPathPattern = props.contains(SNAPSHOT_PATH_PATTERN)
        ? Optional.of(Pattern.compile(props.getProp(SNAPSHOT_PATH_PATTERN))) : Optional.<Pattern> absent();
  }

  /**
   * @param path The root directory of snapshots. This directory may contain zero or more snapshots.
   */
  @Override
  public Collection<HiveSpec> getHiveSpecs(Path path) throws IOException {
    List<HiveTable> tables = getTables(path);
    if (tables.isEmpty()) {
      return ImmutableList.<HiveSpec> of();
    }
    Collection<HiveSpec> specs = Lists.newArrayList();
    for (HiveTable table : tables) {
      specs.add(new SimpleHiveSpec.Builder<>(path).withTable(table).withPartition(getPartition(path, table)).build());
    }
    return specs;
  }

  /**
   * Get {@link HiveTable}s using the latest snapshot (returned by {@link #getLatestSnapshot(Path)}).
   */
  @Override
  protected List<HiveTable> getTables(Path path) throws IOException {
    Path latestSnapshot = getLatestSnapshot(path);
    if (latestSnapshot == null) {
      return ImmutableList.<HiveTable> of();
    }
    return super.getTables(latestSnapshot);
  }

  /**
   * Get the latest snapshot in the given {@link Path}.
   *
   * <p>
   * The latest snapshot is the sub-directory of the input {@link Path} that has the largest folder
   * name alphabetically. If property {@link #SNAPSHOT_PATH_PATTERN} is set, only those sub-directories
   * whose full path matches the given pattern are considered.
   * </p>
   *
   * @return the latest snapshot directory, or {@code null} if no eligible sub-directory exists.
   */
  protected Path getLatestSnapshot(Path path) throws IOException {
    FileStatus[] statuses = this.fs.listStatus(path, new PathFilter() {
      @Override
      public boolean accept(Path p) {
        try {
          // Only directories can be snapshots.
          if (!HiveSnapshotRegistrationPolicy.this.fs.isDirectory(p)) {
            return false;
          }
        } catch (IOException e) {
          // PathFilter.accept cannot declare a checked exception; rethrow unchecked.
          throw Throwables.propagate(e);
        }
        return !HiveSnapshotRegistrationPolicy.this.snapshotPathPattern.isPresent()
            || HiveSnapshotRegistrationPolicy.this.snapshotPathPattern.get().matcher(p.toString()).matches();
      }
    });
    if (statuses.length == 0) {
      return null;
    }
    // Sort by directory name in reverse alphabetical order so the first entry is the latest snapshot.
    Arrays.sort(statuses, Comparator.comparing((FileStatus s) -> s.getPath().getName()).reversed());
    return statuses[0].getPath();
  }
}
| 4,636 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive | Create_ds/gobblin/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/policy/HiveRegistrationPolicyBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.hive.policy;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.config.client.api.VersionStabilityPolicy;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.HiveRegProps;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.HiveSerDeManager;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.metastore.HiveMetaStoreUtils;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.hive.spec.SimpleHiveSpec;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.source.extractor.extract.kafka.ConfigStoreUtils;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSource;
import org.apache.gobblin.util.ConfigUtils;
/**
* A base implementation of {@link HiveRegistrationPolicy}. It obtains database name from
* property {@link #HIVE_DATABASE_NAME} or {@link #HIVE_DATABASE_REGEX} (group 1), obtains
* table name from property {@link #HIVE_TABLE_NAME} and {@link #HIVE_TABLE_REGEX} (group 1),
* and builds a {@link SimpleHiveSpec}.
*
* @author Ziyang Liu
*/
@Alpha
public class HiveRegistrationPolicyBase implements HiveRegistrationPolicy {
  // Property keys for deriving Hive database name(s) from a dataset path.
  public static final String HIVE_DATABASE_NAME = "hive.database.name";
  public static final String ADDITIONAL_HIVE_DATABASE_NAMES = "additional.hive.database.names";
  public static final String HIVE_DATABASE_REGEX = "hive.database.regex";
  public static final String HIVE_DATABASE_NAME_PREFIX = "hive.database.name.prefix";
  public static final String HIVE_DATABASE_NAME_SUFFIX = "hive.database.name.suffix";
  // Property keys for deriving Hive table name(s) from a dataset path.
  public static final String HIVE_TABLE_NAME = "hive.table.name";
  public static final String ADDITIONAL_HIVE_TABLE_NAMES = "additional.hive.table.names";
  public static final String HIVE_TABLE_REGEX = "hive.table.regex";
  public static final String HIVE_TABLE_NAME_PREFIX = "hive.table.name.prefix";
  public static final String HIVE_TABLE_NAME_SUFFIX = "hive.table.name.suffix";
  public static final String HIVE_SANITIZE_INVALID_NAMES = "hive.sanitize.invalid.names";
  public static final String HIVE_FS_URI = "hive.registration.fs.uri";
  // {@value PRIMARY_TABLE_TOKEN} if present in {@value ADDITIONAL_HIVE_TABLE_NAMES} or dbPrefix.{@value HIVE_TABLE_NAME}
  // .. will be replaced by the table name determined via {@link #getTableName(Path)}
  public static final String PRIMARY_TABLE_TOKEN = "$PRIMARY_TABLE";
  // Shared config-store client; static because creation is expensive and the client is reusable.
  protected static final ConfigClient configClient =
      org.apache.gobblin.config.client.ConfigClient.createConfigClient(VersionStabilityPolicy.WEAK_LOCAL_STABILITY);
  // Topic-specific config fetched from the config store; absent unless KafkaSource.TOPIC_NAME is set.
  protected Optional<Config> configForTopic = Optional.<Config>absent();
  /**
   * A valid db or table name should start with an alphanumeric character, and contains only
   * alphanumeric characters and '_'.
   */
  private static final Pattern VALID_DB_TABLE_NAME_PATTERN_1 = Pattern.compile("[a-z0-9][a-z0-9_]*");
  /**
   * A valid db or table name should contain at least one letter or '_' (i.e., should not be numbers only).
   */
  private static final Pattern VALID_DB_TABLE_NAME_PATTERN_2 = Pattern.compile(".*[a-z_].*");
  public static final String CONFIG_FOR_TOPIC_TIMER = "configForTopicTimer";
  protected final HiveRegProps props;
  // Filesystem used for qualifying table locations; set from HIVE_FS_URI when present.
  protected final FileSystem fs;
  // Whether invalid db/table names may be rewritten by sanitizeName() instead of failing.
  protected final boolean sanitizeNameAllowed;
  // Regex alternatives to the explicit name properties; group 1 of a match is the name.
  protected final Optional<Pattern> dbNamePattern;
  protected final Optional<Pattern> tableNamePattern;
  protected final String dbNamePrefix;
  protected final String dbNameSuffix;
  protected final String tableNamePrefix;
  protected final String tableNameSuffix;
  // When true, skip SerDe setup since there is no input data to infer it from.
  protected final boolean emptyInputPathFlag;
  protected final MetricContext metricContext;
  public HiveRegistrationPolicyBase(State props) throws IOException {
    Preconditions.checkNotNull(props);
    this.props = new HiveRegProps(props);
    if (props.contains(HiveRegistrationPolicyBase.HIVE_FS_URI)) {
      this.fs = FileSystem.get(URI.create(props.getProp(HiveRegistrationPolicyBase.HIVE_FS_URI)), new Configuration());
    } else {
      this.fs = FileSystem.get(new Configuration());
    }
    this.sanitizeNameAllowed = props.getPropAsBoolean(HIVE_SANITIZE_INVALID_NAMES, true);
    this.dbNamePattern = props.contains(HIVE_DATABASE_REGEX)
        ? Optional.of(Pattern.compile(props.getProp(HIVE_DATABASE_REGEX))) : Optional.<Pattern> absent();
    this.tableNamePattern = props.contains(HIVE_TABLE_REGEX)
        ? Optional.of(Pattern.compile(props.getProp(HIVE_TABLE_REGEX))) : Optional.<Pattern> absent();
    this.dbNamePrefix = props.getProp(HIVE_DATABASE_NAME_PREFIX, StringUtils.EMPTY);
    this.dbNameSuffix = props.getProp(HIVE_DATABASE_NAME_SUFFIX, StringUtils.EMPTY);
    this.tableNamePrefix = props.getProp(HIVE_TABLE_NAME_PREFIX, StringUtils.EMPTY);
    this.tableNameSuffix = props.getProp(HIVE_TABLE_NAME_SUFFIX, StringUtils.EMPTY);
    this.emptyInputPathFlag = props.getPropAsBoolean(MAPREDUCE_JOB_INPUT_PATH_EMPTY_KEY, false);
    this.metricContext = Instrumented.getMetricContext(props, HiveRegister.class);
    // Get Topic-specific config object doesn't require any runtime-set properties in prop object, safe to initialize
    // in constructor.
    if (this.props.getProperties().containsKey(KafkaSource.TOPIC_NAME)) {
      Timer.Context context = this.metricContext.timer(CONFIG_FOR_TOPIC_TIMER).time();
      configForTopic =
          ConfigStoreUtils.getConfigForTopic(this.props.getProperties(), KafkaSource.TOPIC_NAME, this.configClient);
      context.close();
    }
  }
  /**
   * This method first tries to obtain the database name from {@link #HIVE_DATABASE_NAME}.
   * If this property is not specified, it then tries to obtain the database name using
   * the first group of {@link #HIVE_DATABASE_REGEX}.
   *
   */
  protected Optional<String> getDatabaseName(Path path) {
    if (!this.props.contains(HIVE_DATABASE_NAME) && !this.props.contains(HIVE_DATABASE_REGEX)) {
      return Optional.<String> absent();
    }
    return Optional.<String> of(
        this.dbNamePrefix + getDatabaseOrTableName(path, HIVE_DATABASE_NAME, HIVE_DATABASE_REGEX, this.dbNamePattern)
            + this.dbNameSuffix);
  }
  /**
   * Obtain Hive database names. The returned {@link Iterable} contains the database name returned by
   * {@link #getDatabaseName(Path)} (if present) plus additional database names specified in
   * {@link #ADDITIONAL_HIVE_DATABASE_NAMES}.
   * Note that the dataset-specific configuration will overwrite job configuration for the value of
   * {@link #ADDITIONAL_HIVE_DATABASE_NAMES}
   *
   */
  protected Iterable<String> getDatabaseNames(Path path) {
    List<String> databaseNames = Lists.newArrayList();
    Optional<String> databaseName;
    if ((databaseName = getDatabaseName(path)).isPresent()) {
      databaseNames.add(databaseName.get());
    }
    // Config-store (dataset-specific) values take precedence over the job-level property.
    if (configForTopic.isPresent() && configForTopic.get().hasPath(ADDITIONAL_HIVE_DATABASE_NAMES)) {
      databaseNames.addAll(ConfigUtils.getStringList(configForTopic.get(), ADDITIONAL_HIVE_DATABASE_NAMES).stream()
          .map(x -> this.dbNamePrefix + x + this.dbNameSuffix).collect(Collectors.toList()));
    } else if (!Strings.isNullOrEmpty(this.props.getProp(ADDITIONAL_HIVE_DATABASE_NAMES))) {
      for (String additionalDbName : this.props.getPropAsList(ADDITIONAL_HIVE_DATABASE_NAMES)) {
        databaseNames.add(this.dbNamePrefix + additionalDbName + this.dbNameSuffix);
      }
    }
    Preconditions.checkState(!databaseNames.isEmpty(), "Hive database name not specified");
    return databaseNames;
  }
  /**
   * This method first tries to obtain the table name from {@link #HIVE_TABLE_NAME}.
   * If this property is not specified, it then tries to obtain the table name using
   * the first group of {@link #HIVE_TABLE_REGEX}.
   */
  protected Optional<String> getTableName(Path path) {
    if (!this.props.contains(HIVE_TABLE_NAME) && !this.props.contains(HIVE_TABLE_REGEX)) {
      return Optional.<String> absent();
    }
    return Optional.<String> of(
        this.tableNamePrefix + getDatabaseOrTableName(path, HIVE_TABLE_NAME, HIVE_TABLE_REGEX, this.tableNamePattern)
            + this.tableNameSuffix);
  }
  /***
   * Obtain Hive table names.
   *
   * The returned {@link Iterable} contains:
   * 1. Table name returned by {@link #getTableName(Path)}
   * 2. Table names specified by <code>additional.hive.table.names</code>
   *
   * In table names above, the {@value PRIMARY_TABLE_TOKEN} if present is also replaced by the
   * table name obtained via {@link #getTableName(Path)}.
   *
   * @param path Path for the table on filesystem.
   * @return Table names to register.
   */
  protected Iterable<String> getTableNames(Path path) {
    List<String> tableNames = getTableNames(Optional.<String>absent(), path);
    Preconditions.checkState(!tableNames.isEmpty(), "Hive table name not specified");
    return tableNames;
  }
  /***
   * Obtain Hive table names filtered by <code>dbPrefix</code> (if present).
   *
   * The returned {@link List} contains:
   * A. If <code>dbPrefix</code> is absent:
   *    1. Table name returned by {@link #getTableName(Path)}
   *    2. Table names specified by <code>additional.hive.table.names</code>
   * B. If dbPrefix is present:
   *    1. Table names specified by <code>dbPrefix.hive.table.names</code>
   *
   * In table names above, the {@value PRIMARY_TABLE_TOKEN} if present is also replaced by the
   * table name obtained via {@link #getTableName(Path)}.
   *
   * @param dbPrefix Prefix to the property <code>additional.table.names</code>, to obtain table names only
   *                 for the specified db. Eg. If <code>dbPrefix</code> is db, then
   *                 <code>db.hive.table.names</code> is the resolved property name.
   * @param path Path for the table on filesystem.
   * @return Table names to register.
   */
  protected List<String> getTableNames(Optional<String> dbPrefix, Path path) {
    List<String> tableNames = Lists.newArrayList();
    Optional<String> primaryTableName;
    // The primary table name only applies when no db-specific filter is requested.
    if ((primaryTableName = getTableName(path)).isPresent() && !dbPrefix.isPresent()) {
      tableNames.add(primaryTableName.get());
    }
    String additionalNamesProp;
    if (dbPrefix.isPresent()) {
      additionalNamesProp = String.format("%s.%s", dbPrefix.get(), HIVE_TABLE_NAME);
    } else {
      additionalNamesProp = ADDITIONAL_HIVE_TABLE_NAMES;
    }
    // Searching additional table name from ConfigStore-returned object.
    if (primaryTableName.isPresent() && configForTopic.isPresent() && configForTopic.get().hasPath(additionalNamesProp)) {
      for (String additionalTableName : ConfigUtils.getStringList(configForTopic.get(), additionalNamesProp)) {
        String resolvedTableName =
            StringUtils.replace(additionalTableName, PRIMARY_TABLE_TOKEN, primaryTableName.get());
        tableNames.add(this.tableNamePrefix + resolvedTableName + this.tableNameSuffix);
      }
    } else if (!Strings.isNullOrEmpty(this.props.getProp(additionalNamesProp))) {
      for (String additionalTableName : this.props.getPropAsList(additionalNamesProp)) {
        String resolvedTableName =
            primaryTableName.isPresent() ? StringUtils.replace(additionalTableName, PRIMARY_TABLE_TOKEN,
                primaryTableName.get()) : additionalTableName;
        tableNames.add(this.tableNamePrefix + resolvedTableName + this.tableNameSuffix);
      }
    }
    return tableNames;
  }
  /**
   * Resolve a db or table name either from the explicit property {@code nameKey}, or, failing that,
   * from group 1 of the regex {@code pattern} applied to the full path string.
   *
   * @throws IllegalStateException if neither source yields a name.
   */
  protected String getDatabaseOrTableName(Path path, String nameKey, String regexKey, Optional<Pattern> pattern) {
    String name;
    if (this.props.contains(nameKey)) {
      name = this.props.getProp(nameKey);
    } else if (pattern.isPresent()) {
      Matcher matcher = pattern.get().matcher(path.toString());
      if (matcher.matches() && matcher.groupCount() >= 1) {
        name = matcher.group(1);
      } else {
        throw new IllegalStateException("No group match found for regexKey " + regexKey+" with regexp "+ pattern.get().toString() +" on path "+path);
      }
    } else {
      throw new IllegalStateException("Missing required property " + nameKey + " or " + regexKey);
    }
    return sanitizeAndValidateName(name);
  }
  /**
   * Lower-case the name, optionally sanitize it, and fail if it is still not a valid Hive identifier.
   */
  protected String sanitizeAndValidateName(String name) {
    // Hive table and database names are case insensitive. Database names are lowercased in the metastore.
    name = name.toLowerCase();
    if (this.sanitizeNameAllowed && !isNameValid(name)) {
      name = sanitizeName(name);
    }
    if (isNameValid(name)) {
      return name;
    }
    throw new IllegalStateException(name + " is not a valid Hive database or table name");
  }
  /**
   * A base implementation for creating {@link HiveTable}s given a {@link Path}.
   *
   * <p>
   * This method returns a list of {@link HiveTable}s that contains one table per db name
   * (returned by {@link #getDatabaseNames(Path)}) and table name (returned by {@link #getTableNames(Path)}.
   * </p>
   *
   * @param path a {@link Path} used to create the {@link HiveTable}.
   * @return a list of {@link HiveTable}s for the given {@link Path}.
   * @throws IOException
   */
  protected List<HiveTable> getTables(Path path) throws IOException {
    List<HiveTable> tables = Lists.newArrayList();
    for (String databaseName : getDatabaseNames(path)) {
      // Get tables to register ONLY for this Hive database (specified via prefix filter in properties)
      boolean foundTablesViaDbFilter = false;
      for (String tableName : getTableNames(Optional.of(databaseName), path)) {
        tables.add(getTable(path, databaseName, tableName));
        foundTablesViaDbFilter = true;
      }
      // If no tables found via db filter, get tables to register in all Hive databases and add them for this database
      if (!foundTablesViaDbFilter) {
        for (String tableName : getTableNames(path)) {
          tables.add(getTable(path, databaseName, tableName));
        }
      }
    }
    return tables;
  }
  /**
   * A base implementation for creating a non bucketed, external {@link HiveTable} for a {@link Path}.
   *
   * @param path a {@link Path} used to create the {@link HiveTable}.
   * @param dbName the database name for the created {@link HiveTable}.
   * @param tableName the table name for the created {@link HiveTable}.
   * @return a {@link HiveTable}s for the given {@link Path}.
   * @throws IOException
   */
  protected HiveTable getTable(Path path, String dbName, String tableName) throws IOException {
    HiveTable.Builder tableBuilder = new HiveTable.Builder().withDbName(dbName).withTableName(tableName);
    // With no input data there is nothing to derive SerDe information from.
    if (!this.emptyInputPathFlag) {
      tableBuilder = tableBuilder.withSerdeManaager(HiveSerDeManager.get(this.props));
    }
    HiveTable table = tableBuilder.build();
    table.setLocation(this.fs.makeQualified(getTableLocation(path)).toString());
    if (!this.emptyInputPathFlag) {
      table.setSerDeProps(path);
    }
    // Setting table-level props.
    table.setProps(getRuntimePropsEnrichedTblProps());
    table.setStorageProps(this.props.getStorageProps());
    table.setSerDeProps(this.props.getSerdeProps());
    // -1 buckets and an empty bucket-column list mark the table as non-bucketed.
    table.setNumBuckets(-1);
    table.setBucketColumns(Lists.<String> newArrayList());
    table.setTableType(TableType.EXTERNAL_TABLE.toString());
    return table;
  }
  /**
   * Enrich the table-level properties with properties carried over from ingestion runtime.
   * Extend this class to add more runtime properties if required.
   */
  protected State getRuntimePropsEnrichedTblProps() {
    State tableProps = new State(this.props.getTablePartitionProps());
    if (this.props.getRuntimeTableProps().isPresent()){
      tableProps.setProp(HiveMetaStoreUtils.RUNTIME_PROPS, this.props.getRuntimeTableProps().get());
    }
    return tableProps;
  }
  // Base policy registers unpartitioned tables; subclasses override to supply a partition.
  protected Optional<HivePartition> getPartition(Path path, HiveTable table) throws IOException {
    return Optional.<HivePartition> absent();
  }
  // By default the table location is the dataset path itself; subclasses may remap it.
  protected Path getTableLocation(Path path) {
    return path;
  }
  /**
   * Determine whether a database or table name is valid.
   *
   * A name is valid if and only if: it starts with an alphanumeric character, contains only alphanumeric characters
   * and '_', and is NOT composed of numbers only.
   */
  protected static boolean isNameValid(String name) {
    Preconditions.checkNotNull(name);
    name = name.toLowerCase();
    return VALID_DB_TABLE_NAME_PATTERN_1.matcher(name).matches()
        && VALID_DB_TABLE_NAME_PATTERN_2.matcher(name).matches();
  }
  /**
   * Attempt to sanitize an invalid database or table name by replacing characters that are not alphanumeric
   * or '_' with '_'.
   */
  protected static String sanitizeName(String name) {
    return name.replaceAll("[^a-zA-Z0-9_]", "_");
  }
  // Builds one SimpleHiveSpec per (database, table) combination derived for the path.
  @Override
  public Collection<HiveSpec> getHiveSpecs(Path path) throws IOException {
    List<HiveSpec> specs = Lists.newArrayList();
    for (HiveTable table : getTables(path)) {
      specs.add(new SimpleHiveSpec.Builder<>(path).withTable(table).withPartition(getPartition(path, table)).build());
    }
    return specs;
  }
  /**
   * Get a {@link HiveRegistrationPolicy} from a {@link State} object.
   *
   * @param props A {@link State} object that contains property, {@link #HIVE_REGISTRATION_POLICY},
   * which is the class name of the desired policy. This policy class must have a constructor that
   * takes a {@link State} object.
   */
  public static HiveRegistrationPolicy getPolicy(State props) {
    Preconditions.checkArgument(props.contains(ConfigurationKeys.HIVE_REGISTRATION_POLICY));
    String policyType = props.getProp(ConfigurationKeys.HIVE_REGISTRATION_POLICY);
    try {
      return (HiveRegistrationPolicy) ConstructorUtils.invokeConstructor(Class.forName(policyType), props);
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException(
          "Unable to instantiate " + HiveRegistrationPolicy.class.getSimpleName() + " with type " + policyType, e);
    }
  }
} | 4,637 |
0 | Create_ds/gobblin/gobblin-hive-registration/src/main/java/gobblin | Create_ds/gobblin/gobblin-hive-registration/src/main/java/gobblin/hive/HiveRegProps.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.hive;
import gobblin.configuration.State;
/***
* Shim layer for org.apache.gobblin.hive.HiveRegProps
*/
public class HiveRegProps extends org.apache.gobblin.hive.HiveRegProps {
  /** Delegates to {@link org.apache.gobblin.hive.HiveRegProps#HiveRegProps(State)}. */
  public HiveRegProps(State props) {
    super(props);
  }
  /**
   * Delegates to the four-argument parent constructor, keeping table, storage and SerDe
   * properties in separate {@link State} objects.
   */
  public HiveRegProps(State props, State tableProps, State storageProps, State serdeProps) {
    super(props, tableProps, storageProps, serdeProps);
  }
}
| 4,638 |
0 | Create_ds/gobblin/gobblin-rest-service/gobblin-rest-client/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-rest-service/gobblin-rest-client/src/main/java/org/apache/gobblin/rest/JobExecutionInfoClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.rest;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
import com.google.common.collect.Sets;
import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import com.linkedin.r2.RemoteInvocationException;
import com.linkedin.r2.transport.common.Client;
import com.linkedin.r2.transport.common.bridge.client.TransportClientAdapter;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.BatchGetKVRequest;
import com.linkedin.restli.client.ErrorHandlingBehavior;
import com.linkedin.restli.client.GetRequest;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.RestClient;
import com.linkedin.restli.client.response.BatchKVResponse;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
/**
* A Rest.li client to work with the Rest.li service for job execution queries.
*
* @author Yinan Li
*/
public class JobExecutionInfoClient implements Closeable {

  private final HttpClientFactory httpClientFactory;
  protected final RestClient restClient;

  /**
   * @param serverUri base URI of the job execution info Rest.li server, e.g. {@code http://host:port/}.
   */
  public JobExecutionInfoClient(String serverUri) {
    this.httpClientFactory = new HttpClientFactory();
    Client r2Client =
        new TransportClientAdapter(this.httpClientFactory.getClient(Collections.<String, String>emptyMap()));
    this.restClient = new RestClient(r2Client, serverUri);
  }

  /**
   * Get a {@link org.apache.gobblin.rest.JobExecutionQueryResult} for a {@link org.apache.gobblin.rest.JobExecutionQuery}.
   *
   * @param query a {@link org.apache.gobblin.rest.JobExecutionQuery}
   * @return a {@link org.apache.gobblin.rest.JobExecutionQueryResult}
   * @throws RemoteInvocationException if the remote call fails
   */
  public JobExecutionQueryResult get(JobExecutionQuery query)
      throws RemoteInvocationException {
    GetRequest<JobExecutionQueryResult> getRequest = new JobExecutionsBuilders().get()
        .id(new ComplexResourceKey<>(query, new EmptyRecord())).build();
    Response<JobExecutionQueryResult> response =
        this.restClient.sendRequest(getRequest, ErrorHandlingBehavior.TREAT_SERVER_ERROR_AS_SUCCESS).getResponse();
    return response.getEntity();
  }

  /**
   * Get a collection of {@link JobExecutionQueryResult}s for a collection of {@link JobExecutionQuery}s.
   *
   * <p>
   * The order of {@link JobExecutionQueryResult}s may not match the order of {@link JobExecutionQuery}s.
   * </p>
   *
   * @param queries a collection of {@link JobExecutionQuery}s
   * @return a collection of {@link JobExecutionQueryResult}s
   * @throws RemoteInvocationException if the remote call fails
   */
  public Collection<JobExecutionQueryResult> batchGet(Collection<JobExecutionQuery> queries)
      throws RemoteInvocationException {
    Set<ComplexResourceKey<JobExecutionQuery, EmptyRecord>> ids = Sets.newHashSet();
    for (JobExecutionQuery query : queries) {
      ids.add(new ComplexResourceKey<>(query, new EmptyRecord()));
    }
    BatchGetKVRequest<ComplexResourceKey<JobExecutionQuery, EmptyRecord>, JobExecutionQueryResult>
        batchGetRequest = new JobExecutionsBuilders().batchGet().ids(ids).buildKV();
    BatchKVResponse<ComplexResourceKey<JobExecutionQuery, EmptyRecord>, JobExecutionQueryResult>
        response = this.restClient.sendRequest(batchGetRequest,
                                               ErrorHandlingBehavior.TREAT_SERVER_ERROR_AS_SUCCESS)
            .getResponseEntity();
    return response.getResults().values();
  }

  @Override
  public void close()
      throws IOException {
    // Asynchronous shutdown: the callbacks are intentionally not awaited.
    this.restClient.shutdown(new FutureCallback<None>());
    this.httpClientFactory.shutdown(new FutureCallback<None>());
  }
}
| 4,639 |
0 | Create_ds/gobblin/gobblin-rest-service/gobblin-rest-server/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-rest-service/gobblin-rest-server/src/test/java/org/apache/gobblin/rest/JobExecutionInfoServerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.rest;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.linkedin.data.template.StringMap;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.JobHistoryStore;
import org.apache.gobblin.metastore.MetaStoreModule;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
import org.apache.gobblin.util.PortUtils;
/**
* Unit tests for {@link org.apache.gobblin.rest.JobExecutionInfoServer}.
*
* <p>
* This test case uses {@link org.apache.gobblin.rest.JobExecutionInfoClient} to talk to the
* {@link org.apache.gobblin.rest.JobExecutionInfoServer}, which runs the Rest.li resource
* {@link org.apache.gobblin.rest.JobExecutionInfoResource}. So this test case is also testing
* those classes.
* </p>
*
* @author Yinan Li
*/
@Test(groups = { "gobblin.rest" })
public class JobExecutionInfoServerTest {
private ITestMetastoreDatabase testMetastoreDatabase;
private JobHistoryStore jobHistoryStore;
private JobExecutionInfoClient client;
private JobExecutionInfoServer server;
private JobExecutionInfo expected1;
private JobExecutionInfo expected2;
  @BeforeClass
  public void setUp() throws Exception {
    // Spin up an embedded metastore database and point the job history store at it.
    testMetastoreDatabase = TestMetastoreDatabaseFactory.get();
    Properties properties = new Properties();
    properties.setProperty(ConfigurationKeys.JOB_HISTORY_STORE_URL_KEY, testMetastoreDatabase.getJdbcUrl());
    // Pick a free port so concurrent test runs do not collide.
    int randomPort = new PortUtils.ServerSocketPortLocator().random();
    properties.setProperty(ConfigurationKeys.REST_SERVER_PORT_KEY, Integer.toString(randomPort));
    Injector injector = Guice.createInjector(new MetaStoreModule(properties));
    this.jobHistoryStore = injector.getInstance(JobHistoryStore.class);
    this.client = new JobExecutionInfoClient(String.format("http://%s:%s/", "localhost", randomPort));
    this.server = new JobExecutionInfoServer(properties);
    this.server.startUp();
    // Seed the store with two job executions that the query tests below expect to find.
    this.expected1 = createJobExecutionInfo(1);
    this.expected2 = createJobExecutionInfo(2);
    this.jobHistoryStore.put(this.expected1);
    this.jobHistoryStore.put(this.expected2);
  }
@Test
public void testGet() throws Exception {
JobExecutionQuery queryByJobId = new JobExecutionQuery();
queryByJobId.setIdType(QueryIdTypeEnum.JOB_ID);
queryByJobId.setId(JobExecutionQuery.Id.create(this.expected1.getJobId()));
JobExecutionQueryResult result = this.client.get(queryByJobId);
JobExecutionInfoArray jobExecutionInfos = result.getJobExecutions();
Assert.assertEquals(jobExecutionInfos.size(), 1);
JobExecutionInfo actual = jobExecutionInfos.get(0);
assertJobExecution(actual, this.expected1);
}
@Test
public void testBadGet() throws Exception {
JobExecutionQuery queryByJobId = new JobExecutionQuery();
queryByJobId.setIdType(QueryIdTypeEnum.JOB_ID);
queryByJobId.setId(JobExecutionQuery.Id.create(this.expected1.getJobId() + "1"));
JobExecutionQueryResult result = this.client.get(queryByJobId);
Assert.assertTrue(result.getJobExecutions().isEmpty());
}
  @Test
  public void testBatchGet() throws Exception {
    JobExecutionQuery queryByJobId1 = new JobExecutionQuery();
    queryByJobId1.setIdType(QueryIdTypeEnum.JOB_ID);
    queryByJobId1.setId(JobExecutionQuery.Id.create(this.expected1.getJobId()));
    JobExecutionQuery queryByJobId2 = new JobExecutionQuery();
    queryByJobId2.setIdType(QueryIdTypeEnum.JOB_ID);
    queryByJobId2.setId(JobExecutionQuery.Id.create(this.expected2.getJobId()));
    List<JobExecutionQuery> queries = Lists.newArrayList(queryByJobId1, queryByJobId2);
    List<JobExecutionQueryResult> result = Lists.newArrayList(this.client.batchGet(queries));
    Assert.assertEquals(result.size(), 2);
    Assert.assertEquals(result.get(0).getJobExecutions().size(), 1);
    Assert.assertEquals(result.get(1).getJobExecutions().size(), 1);
    JobExecutionInfo actual1 = result.get(0).getJobExecutions().get(0);
    JobExecutionInfo actual2 = result.get(1).getJobExecutions().get(0);
    // batchGet does not guarantee result ordering, so match results to expectations by job name.
    if (actual1.getJobName().equals(this.expected1.getJobName())) {
      assertJobExecution(actual1, this.expected1);
      assertJobExecution(actual2, this.expected2);
    } else {
      assertJobExecution(actual1, this.expected2);
      assertJobExecution(actual2, this.expected1);
    }
  }
@AfterClass(alwaysRun = true)
public void tearDown() throws Exception {
  // Shut each resource down independently: previously a failing close() aborted
  // the method, leaking every resource after it (in particular the test
  // metastore database). The first failure is rethrown after all cleanup ran.
  Exception firstFailure = null;
  try {
    if (this.client != null) {
      this.client.close();
    }
  } catch (Exception e) {
    firstFailure = e;
  }
  try {
    if (this.server != null) {
      this.server.shutDown();
    }
  } catch (Exception e) {
    if (firstFailure == null) {
      firstFailure = e;
    }
  }
  try {
    if (this.jobHistoryStore != null) {
      this.jobHistoryStore.close();
    }
  } catch (Exception e) {
    if (firstFailure == null) {
      firstFailure = e;
    }
  }
  try {
    if (this.testMetastoreDatabase != null) {
      this.testMetastoreDatabase.close();
    }
  } catch (Exception e) {
    if (firstFailure == null) {
      firstFailure = e;
    }
  }
  if (firstFailure != null) {
    throw firstFailure;
  }
}
/**
 * Creates a fully-populated {@link JobExecutionInfo} with two pending task
 * executions, for use as test input.
 *
 * @param index suffix used to make the job name unique across invocations
 * @return a job execution record in the PENDING state with 2 launched tasks
 */
private static JobExecutionInfo createJobExecutionInfo(int index) {
  JobExecutionInfo jobExecutionInfo = new JobExecutionInfo();
  jobExecutionInfo.setJobName("TestJob" + index);
  jobExecutionInfo.setJobId(jobExecutionInfo.getJobName() + "_" + System.currentTimeMillis());
  jobExecutionInfo.setStartTime(System.currentTimeMillis());
  jobExecutionInfo.setState(JobStateEnum.PENDING);
  jobExecutionInfo.setLaunchedTasks(2);
  jobExecutionInfo.setCompletedTasks(0);

  MetricArray jobMetrics = new MetricArray();
  jobMetrics.add(createMetric("JOB", "jm1"));
  jobExecutionInfo.setMetrics(jobMetrics);

  Map<String, String> jobProperties = Maps.newHashMap();
  jobProperties.put("k", "v");
  jobExecutionInfo.setJobProperties(new StringMap(jobProperties));

  TaskExecutionInfoArray taskExecutionInfos = new TaskExecutionInfoArray();
  taskExecutionInfos.add(createTaskExecutionInfo(jobExecutionInfo.getJobId(), 1));
  taskExecutionInfos.add(createTaskExecutionInfo(jobExecutionInfo.getJobId(), 2));
  jobExecutionInfo.setTaskExecutions(taskExecutionInfos);
  return jobExecutionInfo;
}

/**
 * Creates a single PENDING task execution; {@code taskIndex} is 1-based and
 * drives the task ID suffix ({@code taskIndex - 1}), the high watermark
 * ({@code taskIndex * 1000}), the table name, the metric name, and the
 * task property key/value.
 */
private static TaskExecutionInfo createTaskExecutionInfo(String jobId, int taskIndex) {
  TaskExecutionInfo taskExecutionInfo = new TaskExecutionInfo();
  taskExecutionInfo.setJobId(jobId);
  taskExecutionInfo.setTaskId(jobId + "_" + (taskIndex - 1));
  taskExecutionInfo.setStartTime(System.currentTimeMillis());
  taskExecutionInfo.setState(TaskStateEnum.PENDING);
  taskExecutionInfo.setLowWatermark(0L);
  taskExecutionInfo.setHighWatermark(taskIndex * 1000L);

  Table table = new Table();
  table.setNamespace("Test");
  table.setName("Test" + taskIndex);
  table.setType(TableTypeEnum.SNAPSHOT_ONLY);
  taskExecutionInfo.setTable(table);

  MetricArray taskMetrics = new MetricArray();
  taskMetrics.add(createMetric("TASK", "tm" + taskIndex));
  taskExecutionInfo.setMetrics(taskMetrics);

  Map<String, String> taskProperties = Maps.newHashMap();
  taskProperties.put("k" + taskIndex, "v" + taskIndex);
  taskExecutionInfo.setTaskProperties(new StringMap(taskProperties));
  return taskExecutionInfo;
}

/** Creates a COUNTER metric with value "100" in the given group. */
private static Metric createMetric(String group, String name) {
  Metric metric = new Metric();
  metric.setGroup(group);
  metric.setName(name);
  metric.setType(MetricTypeEnum.COUNTER);
  metric.setValue("100");
  return metric;
}
/**
 * Verifies that {@code actual} matches {@code expected} field by field,
 * recursing into each task execution and metric.
 */
private static void assertJobExecution(JobExecutionInfo actual, JobExecutionInfo expected) {
  Assert.assertEquals(actual.getJobName(), expected.getJobName());
  Assert.assertEquals(actual.getJobId(), expected.getJobId());
  // An execution that never recorded a duration is reported by the store as -1.
  if (!expected.hasDuration()) {
    Assert.assertEquals(actual.getDuration().longValue(), -1L);
  } else {
    Assert.assertEquals(actual.getDuration(), expected.getDuration());
  }
  Assert.assertEquals(actual.getState(), expected.getState());
  Assert.assertEquals(actual.getLaunchedTasks(), expected.getLaunchedTasks());
  Assert.assertEquals(actual.getCompletedTasks(), expected.getCompletedTasks());

  MetricArray actualMetrics = actual.getMetrics();
  MetricArray expectedMetrics = expected.getMetrics();
  Assert.assertEquals(actualMetrics, expectedMetrics);
  for (int i = 0; i < actualMetrics.size(); i++) {
    assertMetric(actualMetrics.get(i), expectedMetrics.get(i));
  }

  Assert.assertEquals(actual.getJobProperties(), expected.getJobProperties());

  TaskExecutionInfoArray actualTasks = actual.getTaskExecutions();
  TaskExecutionInfoArray expectedTasks = expected.getTaskExecutions();
  Assert.assertEquals(actualTasks.size(), expectedTasks.size());
  for (int i = 0; i < actualTasks.size(); i++) {
    assertTaskExecution(actualTasks.get(i), expectedTasks.get(i));
  }
}
/** Field-by-field comparison of a task execution, including its metrics. */
private static void assertTaskExecution(TaskExecutionInfo actual, TaskExecutionInfo expected) {
  Assert.assertEquals(actual.getJobId(), expected.getJobId());
  Assert.assertEquals(actual.getTaskId(), expected.getTaskId());
  // A task that never recorded a duration is reported by the store as -1.
  if (!expected.hasDuration()) {
    Assert.assertEquals(actual.getDuration().longValue(), -1L);
  } else {
    Assert.assertEquals(actual.getDuration(), expected.getDuration());
  }
  Assert.assertEquals(actual.getState(), expected.getState());
  Assert.assertEquals(actual.getLowWatermark(), expected.getLowWatermark());
  Assert.assertEquals(actual.getHighWatermark(), expected.getHighWatermark());
  Assert.assertEquals(actual.getTable(), expected.getTable());

  MetricArray actualMetrics = actual.getMetrics();
  MetricArray expectedMetrics = expected.getMetrics();
  Assert.assertEquals(actualMetrics, expectedMetrics);
  for (int i = 0; i < actualMetrics.size(); i++) {
    assertMetric(actualMetrics.get(i), expectedMetrics.get(i));
  }

  Assert.assertEquals(actual.getTaskProperties(), expected.getTaskProperties());
}
/** Asserts that two metrics agree on group, name, type, and value. */
private static void assertMetric(Metric actual, Metric expected) {
  Assert.assertEquals(actual.getGroup(), expected.getGroup());
  Assert.assertEquals(actual.getName(), expected.getName());
  Assert.assertEquals(actual.getType(), expected.getType());
  Assert.assertEquals(actual.getValue(), expected.getValue());
}
}
| 4,640 |
0 | Create_ds/gobblin/gobblin-rest-service/gobblin-rest-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-rest-service/gobblin-rest-server/src/main/java/org/apache/gobblin/rest/JobExecutionInfoServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.rest;
import java.net.URI;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.AbstractIdleService;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.linkedin.r2.filter.compression.EncodingType;
import com.linkedin.r2.filter.compression.ServerCompressionFilter;
import com.linkedin.r2.filter.FilterChain;
import com.linkedin.r2.filter.FilterChains;
import com.linkedin.r2.transport.common.bridge.server.TransportDispatcher;
import com.linkedin.r2.transport.http.server.HttpNettyServerFactory;
import com.linkedin.r2.transport.http.server.HttpServer;
import com.linkedin.restli.docgen.DefaultDocumentationRequestHandler;
import com.linkedin.restli.server.DelegatingTransportDispatcher;
import com.linkedin.restli.server.RestLiConfig;
import com.linkedin.restli.server.RestLiServer;
import com.linkedin.restli.server.mock.InjectMockResourceFactory;
import com.linkedin.restli.server.mock.SimpleBeanProvider;
import com.linkedin.restli.server.resources.ResourceFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.JobHistoryStore;
import org.apache.gobblin.metastore.MetaStoreModule;
/**
 * A server running the Rest.li resource for job execution queries.
 *
 * @author Yinan Li
 */
public class JobExecutionInfoServer extends AbstractIdleService {

  private static final Logger LOGGER = LoggerFactory.getLogger(JobExecutionInfoServer.class);

  private final URI serverUri;
  private final URI serverAdvertisedUri;
  private final int port;
  private final Properties properties;
  // Present only while the server is running. Initialized to absent so that
  // shutDown() invoked before (or after a failed) startUp() does not NPE on
  // a null Optional reference.
  private volatile Optional<HttpServer> httpServer = Optional.absent();

  public JobExecutionInfoServer(Properties properties) {
    this.properties = properties;
    this.port = getPort(properties);
    this.serverUri = getServiceUri(getHost(properties), this.port);
    this.serverAdvertisedUri = getAdvertisedUri(properties);
  }

  @Override
  protected void startUp()
      throws Exception {
    // Server configuration
    RestLiConfig config = new RestLiConfig();
    config.addResourcePackageNames(JobExecutionInfoResource.class.getPackage().getName());
    config.setServerNodeUri(this.serverUri);
    config.setDocumentationRequestHandler(new DefaultDocumentationRequestHandler());

    // Handle dependency injection
    Injector injector = Guice.createInjector(new MetaStoreModule(this.properties));
    JobHistoryStore jobHistoryStore = injector.getInstance(JobHistoryStore.class);
    SimpleBeanProvider beanProvider = new SimpleBeanProvider();
    beanProvider.add("jobHistoryStore", jobHistoryStore);
    // Use InjectMockResourceFactory to keep this Spring free
    ResourceFactory factory = new InjectMockResourceFactory(beanProvider);

    // Create and start the HTTP server, advertising SNAPPY and GZIP response compression
    TransportDispatcher dispatcher = new DelegatingTransportDispatcher(new RestLiServer(config, factory));
    String acceptedFilters = EncodingType.SNAPPY.getHttpName() + "," + EncodingType.GZIP.getHttpName();
    FilterChain filterChain = FilterChains.createRestChain(new ServerCompressionFilter(acceptedFilters));
    this.httpServer = Optional.of(new HttpNettyServerFactory(filterChain).createServer(this.port, dispatcher));
    LOGGER.info("Starting the job execution information server");
    this.httpServer.get().start();
  }

  @Override
  protected void shutDown()
      throws Exception {
    if (this.httpServer.isPresent()) {
      LOGGER.info("Stopping the job execution information server");
      this.httpServer.get().stop();
    }
  }

  /** Returns the URI clients should use to reach this server; may differ from the bind URI. */
  public URI getAdvertisedServerUri() {
    return this.serverAdvertisedUri;
  }

  private static URI getServiceUri(String host, int port) {
    return URI.create(String.format("http://%s:%d", host, port));
  }

  private static int getPort(Properties properties) {
    return Integer.parseInt(properties.getProperty(
        ConfigurationKeys.REST_SERVER_PORT_KEY,
        ConfigurationKeys.DEFAULT_REST_SERVER_PORT));
  }

  private static String getHost(Properties properties) {
    return properties.getProperty(
        ConfigurationKeys.REST_SERVER_HOST_KEY,
        ConfigurationKeys.DEFAULT_REST_SERVER_HOST);
  }

  private static URI getAdvertisedUri(Properties properties) {
    // Falls back to the bind URI when no advertised URI is configured.
    return URI.create(properties.getProperty(
        ConfigurationKeys.REST_SERVER_ADVERTISED_URI_KEY,
        getServiceUri(getHost(properties), getPort(properties)).toString()));
  }
}
| 4,641 |
0 | Create_ds/gobblin/gobblin-rest-service/gobblin-rest-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-rest-service/gobblin-rest-server/src/main/java/org/apache/gobblin/rest/JobExecutionInfoResource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.rest;
import com.linkedin.restli.server.ResourceContext;
import java.util.Map;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Named;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Maps;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.server.annotations.RestLiCollection;
import com.linkedin.restli.server.resources.ComplexKeyResourceTemplate;
import org.apache.gobblin.metastore.JobHistoryStore;
/**
 * A Rest.li resource for serving queries of Gobblin job executions.
 *
 * @author Yinan Li
 */
@RestLiCollection(name = "jobExecutions", namespace = "org.apache.gobblin.rest")
public class JobExecutionInfoResource extends ComplexKeyResourceTemplate<JobExecutionQuery, EmptyRecord, JobExecutionQueryResult> {

  private static final Logger LOGGER = LoggerFactory.getLogger(JobExecutionInfoResource.class);

  @Inject
  @Named("jobHistoryStore")
  private JobHistoryStore jobHistoryStore;

  /**
   * Executes a single job execution query against the history store.
   *
   * @param key complex key carrying the {@link JobExecutionQuery}
   * @return the query result, or {@code null} if the query failed
   */
  @Override
  public JobExecutionQueryResult get(ComplexResourceKey<JobExecutionQuery, EmptyRecord> key) {
    JobExecutionQuery query = key.getKey();

    JobExecutionInfoArray jobExecutionInfos = new JobExecutionInfoArray();
    try {
      for (JobExecutionInfo jobExecutionInfo : this.jobHistoryStore.get(query)) {
        jobExecutionInfos.add(jobExecutionInfo);
      }
    } catch (Throwable t) {
      LOGGER
          .error(String.format("Failed to execute query [id = %s, type = %s]", query.getId(), query.getIdType().name()),
              t);
      return null;
    }

    JobExecutionQueryResult result = new JobExecutionQueryResult();
    result.setJobExecutions(jobExecutionInfos);

    // Allow cross-origin browser clients to read the response.
    ResourceContext rc = this.getContext();
    rc.setResponseHeader("Access-Control-Allow-Origin", "*");
    this.setContext(rc);
    return result;
  }

  /**
   * Executes a batch of job execution queries. Queries that fail are omitted
   * from the result map rather than failing the whole batch.
   */
  @Override
  public Map<ComplexResourceKey<JobExecutionQuery, EmptyRecord>, JobExecutionQueryResult> batchGet(
      Set<ComplexResourceKey<JobExecutionQuery, EmptyRecord>> keys) {
    Map<ComplexResourceKey<JobExecutionQuery, EmptyRecord>, JobExecutionQueryResult> results = Maps.newHashMap();
    for (ComplexResourceKey<JobExecutionQuery, EmptyRecord> key : keys) {
      JobExecutionQueryResult result = get(key);
      if (result != null) {
        // Reuse the result already computed above; the previous code called
        // get(key) a second time here, re-executing every store query.
        results.put(key, result);
      }
    }

    ResourceContext rc = this.getContext();
    rc.setResponseHeader("Access-Control-Allow-Origin", "*");
    this.setContext(rc);
    return results;
  }
}
| 4,642 |
0 | Create_ds/gobblin/gobblin-runtime-hadoop/src/test/java/org/apache/gobblin/runtime/instance/plugin | Create_ds/gobblin/gobblin-runtime-hadoop/src/test/java/org/apache/gobblin/runtime/instance/plugin/hadoop/TestHadoopKerberosKeytabAuthenticationPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.instance.plugin.hadoop;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.Assert.ThrowingRunnable;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.runtime.api.GobblinInstanceDriver;
import org.apache.gobblin.runtime.std.DefaultConfigurableImpl;
import com.google.common.collect.ImmutableMap;
/**
 * Unit tests for {@link HadoopKerberosKeytabAuthenticationPlugin}
 */
public class TestHadoopKerberosKeytabAuthenticationPlugin {

  /** Plugin creation from a driver whose sys config carries both required keys. */
  @Test
  public void testConstructor() {
    final Config testConfig = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
        .put("hadoop-inject.hadoop.security.authentication", "simple")
        .put("gobblin.instance.hadoop.loginUser", "foo")
        .put("gobblin.instance.hadoop.loginUserKeytabFile", "/tmp/bar")
        .build());

    GobblinInstanceDriver instance = Mockito.mock(GobblinInstanceDriver.class);
    Mockito.when(instance.getSysConfig()).thenReturn(DefaultConfigurableImpl.createFromConfig(testConfig));
    HadoopKerberosKeytabAuthenticationPlugin plugin = (HadoopKerberosKeytabAuthenticationPlugin)
        (new HadoopKerberosKeytabAuthenticationPlugin.ConfigBasedFactory()).createPlugin(instance);

    Assert.assertEquals(plugin.getLoginUser(), "foo");
    Assert.assertEquals(plugin.getLoginUserKeytabFile(), "/tmp/bar");
    Assert.assertEquals(plugin.getHadoopConf().get("hadoop.security.authentication"), "simple");
  }

  /** Plugin creation directly from a Config, bypassing the driver. */
  @Test
  public void testConfigConstructor() {
    final Config testConfig = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
        .put("hadoop-inject.hadoop.security.authentication", "simple")
        .put("gobblin.instance.hadoop.loginUser", "foo")
        .put("gobblin.instance.hadoop.loginUserKeytabFile", "/tmp/bar")
        .build());

    HadoopKerberosKeytabAuthenticationPlugin plugin = (HadoopKerberosKeytabAuthenticationPlugin)
        (new HadoopKerberosKeytabAuthenticationPlugin.ConfigBasedFactory()).createPlugin(testConfig);

    Assert.assertEquals(plugin.getLoginUser(), "foo");
    Assert.assertEquals(plugin.getLoginUserKeytabFile(), "/tmp/bar");
    Assert.assertEquals(plugin.getHadoopConf().get("hadoop.security.authentication"), "simple");
  }

  /** Each required key, when absent (misspelled here), must fail plugin creation. */
  @Test
  public void testMissingOptions() {
    // Missing gobblin.instance.hadoop.loginUser.
    final Config testConfig1 = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
        .put("hadoop-inject.hadoop.security.authentication", "simple")
        .put("hadoop.loginUser", "foo")
        .put("gobblin.instance.hadoop.loginUserKeytabFile", "/tmp/bar")
        .build());

    final GobblinInstanceDriver instance1 = Mockito.mock(GobblinInstanceDriver.class);
    Mockito.when(instance1.getSysConfig()).thenReturn(DefaultConfigurableImpl.createFromConfig(testConfig1));
    Assert.assertThrows(new ThrowingRunnable() {
      @Override public void run() throws Throwable {
        (new HadoopKerberosKeytabAuthenticationPlugin.ConfigBasedFactory()).createPlugin(instance1);
      }
    });

    // Missing gobblin.instance.hadoop.loginUserKeytabFile.
    final Config testConfig2 = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
        .put("hadoop-inject.hadoop.security.authentication", "simple")
        .put("gobblin.instance.hadoop.loginUser", "foo")
        .put("hadoop.loginUserKeytabFile", "/tmp/bar")
        .build());

    final GobblinInstanceDriver instance2 = Mockito.mock(GobblinInstanceDriver.class);
    // Fix: stub instance2 (previously instance1 was re-stubbed here, leaving
    // instance2.getSysConfig() returning null so the assertion passed via an
    // NPE instead of the intended missing-key failure).
    Mockito.when(instance2.getSysConfig()).thenReturn(DefaultConfigurableImpl.createFromConfig(testConfig2));
    Assert.assertThrows(new ThrowingRunnable() {
      @Override public void run() throws Throwable {
        (new HadoopKerberosKeytabAuthenticationPlugin.ConfigBasedFactory()).createPlugin(instance2);
      }
    });
  }
}
| 4,643 |
0 | Create_ds/gobblin/gobblin-runtime-hadoop/src/main/java/org/apache/gobblin/runtime/instance/plugin | Create_ds/gobblin/gobblin-runtime-hadoop/src/main/java/org/apache/gobblin/runtime/instance/plugin/hadoop/HadoopKerberosKeytabAuthenticationPlugin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.instance.plugin.hadoop;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.runtime.api.GobblinInstanceDriver;
import org.apache.gobblin.runtime.api.GobblinInstancePlugin;
import org.apache.gobblin.runtime.api.GobblinInstancePluginFactory;
import org.apache.gobblin.runtime.instance.hadoop.HadoopConfigLoader;
import org.apache.gobblin.runtime.instance.plugin.BaseIdlePluginImpl;
import org.apache.gobblin.runtime.plugins.PluginStaticKeys;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
 * Loads a Kerberos keytab file for Hadoop authentication.
 */
@Slf4j
public class HadoopKerberosKeytabAuthenticationPlugin extends BaseIdlePluginImpl {

  /**
   * A {@link GobblinInstancePluginFactory} that instantiates {@link HadoopKerberosKeytabAuthenticationPlugin} inferring
   * credentials from sys config. Sys config must contain the keys {@link PluginStaticKeys#LOGIN_USER_FULL_KEY} and
   * {@link PluginStaticKeys#LOGIN_USER_KEYTAB_FILE_FULL_KEY}.
   */
  @Alias(PluginStaticKeys.HADOOP_LOGIN_FROM_KEYTAB_ALIAS)
  public static class ConfigBasedFactory implements GobblinInstancePluginFactory {

    @Override
    public GobblinInstancePlugin createPlugin(GobblinInstanceDriver instance) {
      return createPlugin(instance.getSysConfig().getConfig());
    }

    public GobblinInstancePlugin createPlugin(Config sysConfig) {
      return new HadoopKerberosKeytabAuthenticationPlugin(sysConfig,
          getRequiredString(sysConfig, PluginStaticKeys.LOGIN_USER_FULL_KEY),
          getRequiredString(sysConfig, PluginStaticKeys.LOGIN_USER_KEYTAB_FILE_FULL_KEY));
    }

    /** Reads a mandatory config value, failing fast with the offending key name. */
    private static String getRequiredString(Config sysConfig, String key) {
      if (!sysConfig.hasPath(key)) {
        throw new RuntimeException("Missing required sys config: " + key);
      }
      return sysConfig.getString(key);
    }
  }

  /**
   * A {@link GobblinInstancePluginFactory} that instantiates {@link HadoopKerberosKeytabAuthenticationPlugin} with
   * credentials specified at construction time.
   */
  @RequiredArgsConstructor
  public static class CredentialsBasedFactory implements GobblinInstancePluginFactory {
    private final String _loginUser;
    private final String _loginUserKeytabFile;

    @Override
    public GobblinInstancePlugin createPlugin(GobblinInstanceDriver instance) {
      return new HadoopKerberosKeytabAuthenticationPlugin(instance.getSysConfig().getConfig(), _loginUser,
          _loginUserKeytabFile);
    }
  }

  private final String _loginUser;
  private final String _loginUserKeytabFile;
  private final Configuration _hadoopConf;

  private HadoopKerberosKeytabAuthenticationPlugin(Config sysConfig, String loginUser, String loginUserKeytabFile) {
    super(null);
    _loginUser = loginUser;
    _loginUserKeytabFile = loginUserKeytabFile;
    _hadoopConf = new HadoopConfigLoader(sysConfig).getConf();
  }

  /**
   * Applies the Hadoop configuration and, when Kerberos security is enabled in
   * it, logs the configured user in from the keytab file.
   */
  @Override
  protected void startUp() throws Exception {
    try {
      UserGroupInformation.setConfiguration(_hadoopConf);
      if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation.loginUserFromKeytab(_loginUser, _loginUserKeytabFile);
      }
    } catch (Throwable t) {
      log.error("Failed to start up HadoopKerberosKeytabAuthenticationPlugin", t);
      throw t;
    }
  }

  @Override
  protected void shutDown() throws Exception {
    log.info("Plugin shutdown: " + this);
  }

  public String getLoginUser() {
    return _loginUser;
  }

  public String getLoginUserKeytabFile() {
    return _loginUserKeytabFile;
  }

  public Configuration getHadoopConf() {
    return _hadoopConf;
  }
}
| 4,644 |
0 | Create_ds/gobblin/gobblin-salesforce/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/test/java/org/apache/gobblin/salesforce/SalesforceSourceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import com.google.gson.Gson;
import com.typesafe.config.ConfigFactory;
import java.util.HashMap;
import java.util.List;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.source.extractor.extract.QueryBasedSource;
import org.apache.gobblin.source.extractor.partition.Partition;
import org.apache.gobblin.source.extractor.partition.Partitioner;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
public class SalesforceSourceTest {
@Test
void testSourceLineageInfo() {
  SourceState sourceState = new SourceState();
  sourceState.setProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY, "salesforce");
  sourceState.setProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY, "snapshot_append");
  sourceState.setProp(Partitioner.HAS_USER_SPECIFIED_PARTITIONS, true);
  sourceState.setProp(Partitioner.USER_SPECIFIED_PARTITIONS, "20140213000000,20170407152123");
  sourceState.setProp(ConfigurationKeys.SOURCE_QUERYBASED_EXTRACT_TYPE, "SNAPSHOT");
  QueryBasedSource.SourceEntity sourceEntity = QueryBasedSource.SourceEntity.fromSourceEntityName("contacts");
  SalesforceSource source = new SalesforceSource(new LineageInfo(ConfigFactory.empty()));

  List<WorkUnit> workUnits = source.generateWorkUnits(sourceEntity, sourceState, 20140213000000L);
  Assert.assertEquals(workUnits.size(), 1);

  String expected = "{\"object-type\":\"org.apache.gobblin.dataset.DatasetDescriptor\","
      + "\"object-data\":{\"platform\":\"salesforce\",\"metadata\":{},\"name\":\"contacts\"}}";
  // Fix: TestNG's convention is assertEquals(actual, expected); the arguments
  // were previously swapped, producing a misleading message on failure.
  Assert.assertEquals(workUnits.get(0).getProp("gobblin.event.lineage.source"), expected);
  Assert.assertEquals(workUnits.get(0).getProp("gobblin.event.lineage.name"), "contacts");
}
@Test
void testGenerateSpecifiedPartitionFromSinglePointHistogram() {
  // With a single histogram bucket the generated partition list collapses to
  // just [lowWatermark, expectedHighWatermark].
  Histogram histogram = new Histogram();
  histogram.add(new HistogramGroup("2014-02-13-00:00:00", 10));

  long lowWatermark = 20140213000000L;
  long expectedHighWatermark = 20170407152123L;
  String actualPartitions =
      new SalesforceSource().generateSpecifiedPartitions(histogram, 1, 2, lowWatermark, expectedHighWatermark);
  Assert.assertEquals(actualPartitions, "20140213000000,20170407152123");
}
@Test
void testGenerateSpecifiedPartition() {
  // The canned HISTOGRAM should be split into at most 5 partitions bounded by
  // the low and expected-high watermarks.
  long lowWatermark = 20140213000000L;
  long expectedHighWatermark = 20170407152123L;
  String actualPartitions =
      new SalesforceSource().generateSpecifiedPartitions(getHistogram(), 1, 5, lowWatermark, expectedHighWatermark);
  Assert.assertEquals(actualPartitions,
      "20140213000000,20170224000000,20170228000000,20170301000000,20170407000000,20170407152123");
}
@DataProvider
private Object[][] provideGenerateWorkUnitsHelperForSinglePartitionAndEarlyStopTestData() {
  // Rows are {earlyStopRecordCount, expectedHighWtm}: the larger the early-stop
  // record limit, the further the expected high watermark advances.
  return new Object[][] {
      {1000L, 20140508000000L},
      {10000L, 20150119000000L},
      {100000L, 20170214000000L},
      {1000000L, 20170301000000L}
  };
}
@Test(dataProvider = "provideGenerateWorkUnitsHelperForSinglePartitionAndEarlyStopTestData")
void testGenerateWorkUnitsHelperForSinglePartitionAndEarlyStop(long earlyStopRecordCount, long expectedHighWtm) {
  // Verifies that with dynamic partitioning/probing and early stop enabled, a
  // single-partition run caps its expected high watermark once the histogram's
  // cumulative record count reaches earlyStopRecordCount.
  QueryBasedSource.SourceEntity sourceEntity = QueryBasedSource.SourceEntity.fromSourceEntityName("contacts");
  SourceState state = getDefaultSourceState();
  state.setProp(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS, 1);
  state.setProp(SalesforceSource.ENABLE_DYNAMIC_PARTITIONING, true);
  state.setProp(ConfigurationKeys.SOURCE_EARLY_STOP_ENABLED, true);
  state.setProp(SalesforceSource.ENABLE_DYNAMIC_PROBING, true);
  state.setProp(SalesforceSource.EARLY_STOP_TOTAL_RECORDS_LIMIT, earlyStopRecordCount);
  long previousWtm = 20140213000000L;
  // Stub the histogram service so no Salesforce call is made; the canned
  // HISTOGRAM drives the early-stop computation deterministically. The stub's
  // arguments must match exactly what generateWorkUnitsHelper passes.
  SalesforceHistogramService salesforceHistogramService = mock(SalesforceHistogramService.class);
  String deltaFieldKey = state.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
  Partition partition = new Partitioner(state).getGlobalPartition(previousWtm);
  when(salesforceHistogramService.getHistogram(sourceEntity.getSourceEntityName(), deltaFieldKey, state, partition))
      .thenReturn(getHistogram());
  List<WorkUnit> actualWorkUnits = new SalesforceSource(salesforceHistogramService).generateWorkUnitsHelper(sourceEntity, state, previousWtm);
  Assert.assertEquals(actualWorkUnits.size(), 1);
  // The expected high watermark is stored as JSON; Gson deserializes the
  // numeric "value" field into a Double, hence the double comparison.
  double actualHighWtm = (double) new Gson().fromJson(actualWorkUnits.get(0).getExpectedHighWatermark(), HashMap.class).get("value");
  Assert.assertEquals(actualHighWtm, Double.parseDouble(String.valueOf(expectedHighWtm)));
}
/** Builds the baseline source state shared by the work-unit generation tests. */
private SourceState getDefaultSourceState() {
  SourceState state = new SourceState();
  state.setProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY, "salesforce");
  state.setProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY, "snapshot_append");
  state.setProp(ConfigurationKeys.SOURCE_QUERYBASED_EXTRACT_TYPE, "SNAPSHOT");
  state.setProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY, "LastModifiedDate");
  return state;
}
/** Parses the canned HISTOGRAM constant ("timestamp::count" entries, comma-separated). */
private Histogram getHistogram() {
  Histogram histogram = new Histogram();
  for (String entry : HISTOGRAM.split(", ")) {
    int separator = entry.indexOf("::");
    String timestamp = entry.substring(0, separator);
    int count = Integer.parseInt(entry.substring(separator + 2));
    histogram.add(new HistogramGroup(timestamp, count));
  }
  return histogram;
}
static final String HISTOGRAM = "2014-02-13-00:00:00::3, 2014-04-15-00:00:00::1, 2014-05-06-00:00:00::624, 2014-05-07-00:00:00::1497, 2014-05-08-00:00:00::10, 2014-05-18-00:00:00::3, 2014-05-19-00:00:00::2, 2014-05-20-00:00:00::1, 2014-05-21-00:00:00::8, 2014-05-26-00:00:00::2, 2014-05-28-00:00:00::1, 2014-05-31-00:00:00::1, 2014-06-02-00:00:00::1, 2014-06-03-00:00:00::1, 2014-06-04-00:00:00::1, 2014-06-10-00:00:00::2, 2014-06-12-00:00:00::1, 2014-06-23-00:00:00::1, 2014-06-24-00:00:00::1, 2014-06-26-00:00:00::32, 2014-06-27-00:00:00::40, 2014-06-30-00:00:00::2, 2014-07-01-00:00:00::2, 2014-07-02-00:00:00::1, 2014-07-07-00:00:00::1, 2014-07-08-00:00:00::2, 2014-07-09-00:00:00::2, 2014-07-10-00:00:00::3, 2014-07-11-00:00:00::5, 2014-07-14-00:00:00::1, 2014-07-15-00:00:00::2, 2014-07-16-00:00:00::8, 2014-07-17-00:00:00::5, 2014-07-18-00:00:00::2, 2014-07-21-00:00:00::1, 2014-07-22-00:00:00::3, 2014-07-23-00:00:00::3, 2014-07-24-00:00:00::1, 2014-07-25-00:00:00::1, 2014-07-26-00:00:00::103, 2014-07-28-00:00:00::1, 2014-07-29-00:00:00::1, 2014-07-30-00:00:00::3, 2014-08-01-00:00:00::1, 2014-08-06-00:00:00::3, 2014-08-18-00:00:00::3, 2014-08-19-00:00:00::2, 2014-08-21-00:00:00::1, 2014-08-25-00:00:00::1, 2014-08-26-00:00:00::1, 2014-08-27-00:00:00::2, 2014-08-28-00:00:00::2, 2014-08-29-00:00:00::1, 2014-09-03-00:00:00::2, 2014-09-04-00:00:00::1, 2014-09-05-00:00:00::9, 2014-09-09-00:00:00::1, 2014-09-10-00:00:00::1, 2014-09-11-00:00:00::4, 2014-09-12-00:00:00::1, 2014-09-15-00:00:00::1, 2014-09-16-00:00:00::7, 2014-09-18-00:00:00::3, 2014-09-19-00:00:00::2, 2014-09-21-00:00:00::1, 2014-09-24-00:00:00::3, 2014-09-26-00:00:00::1, 2014-09-29-00:00:00::3, 2014-10-03-00:00:00::1, 2014-10-06-00:00:00::1, 2014-10-07-00:00:00::1, 2014-10-09-00:00:00::1, 2014-10-16-00:00:00::1, 2014-10-23-00:00:00::1, 2014-10-24-00:00:00::1, 2014-10-28-00:00:00::1, 2014-11-03-00:00:00::1, 2014-11-05-00:00:00::2, 2014-11-07-00:00:00::1, 2014-11-10-00:00:00::5, 2014-11-12-00:00:00::1, 
2014-11-13-00:00:00::4, 2014-11-18-00:00:00::1, 2014-11-24-00:00:00::1, 2014-11-25-00:00:00::1, 2014-11-26-00:00:00::2, 2014-11-27-00:00:00::2, 2014-11-28-00:00:00::1, 2014-12-01-00:00:00::2, 2014-12-02-00:00:00::3, 2014-12-03-00:00:00::5, 2014-12-04-00:00:00::1, 2014-12-08-00:00:00::1, 2014-12-09-00:00:00::3, 2014-12-12-00:00:00::3, 2014-12-14-00:00:00::1, 2014-12-15-00:00:00::4, 2014-12-16-00:00:00::1, 2014-12-17-00:00:00::2, 2014-12-19-00:00:00::2, 2014-12-22-00:00:00::1, 2014-12-23-00:00:00::3, 2014-12-24-00:00:00::1, 2014-12-30-00:00:00::1, 2014-12-31-00:00:00::1, 2015-01-02-00:00:00::1, 2015-01-04-00:00:00::1, 2015-01-05-00:00:00::1, 2015-01-06-00:00:00::1, 2015-01-07-00:00:00::1, 2015-01-08-00:00:00::2, 2015-01-09-00:00:00::2, 2015-01-14-00:00:00::1, 2015-01-15-00:00:00::2, 2015-01-16-00:00:00::15685, 2015-01-19-00:00:00::1, 2015-01-21-00:00:00::1, 2015-01-22-00:00:00::1, 2015-01-27-00:00:00::2, 2015-01-28-00:00:00::4, 2015-01-29-00:00:00::1, 2015-01-30-00:00:00::1, 2015-02-01-00:00:00::1, 2015-02-02-00:00:00::4, 2015-02-05-00:00:00::11, 2015-02-06-00:00:00::3, 2015-02-09-00:00:00::2, 2015-02-10-00:00:00::1, 2015-02-11-00:00:00::2, 2015-02-12-00:00:00::15, 2015-02-13-00:00:00::4, 2015-02-14-00:00:00::3, 2015-02-15-00:00:00::2, 2015-02-16-00:00:00::2, 2015-02-18-00:00:00::8, 2015-02-19-00:00:00::2, 2015-02-20-00:00:00::4, 2015-02-21-00:00:00::1, 2015-02-22-00:00:00::4, 2015-02-23-00:00:00::1, 2015-02-24-00:00:00::4, 2015-02-25-00:00:00::1, 2015-02-26-00:00:00::3, 2015-02-27-00:00:00::3, 2015-03-02-00:00:00::5, 2015-03-03-00:00:00::5, 2015-03-04-00:00:00::21, 2015-03-05-00:00:00::2, 2015-03-06-00:00:00::5, 2015-03-07-00:00:00::5, 2015-03-09-00:00:00::9, 2015-03-10-00:00:00::2050, 2015-03-11-00:00:00::13, 2015-03-12-00:00:00::3035, 2015-03-13-00:00:00::1, 2015-03-16-00:00:00::10, 2015-03-17-00:00:00::2, 2015-03-18-00:00:00::1, 2015-03-19-00:00:00::10, 2015-03-20-00:00:00::4, 2015-03-23-00:00:00::281, 2015-03-24-00:00:00::60, 2015-03-25-00:00:00::30, 
2015-03-26-00:00:00::7, 2015-03-27-00:00:00::10, 2015-03-29-00:00:00::1, 2015-03-30-00:00:00::1, 2015-03-31-00:00:00::3, 2015-04-01-00:00:00::5, 2015-04-02-00:00:00::2, 2015-04-03-00:00:00::4, 2015-04-05-00:00:00::3, 2015-04-06-00:00:00::5, 2015-04-07-00:00:00::16, 2015-04-08-00:00:00::6, 2015-04-09-00:00:00::8, 2015-04-10-00:00:00::2, 2015-04-11-00:00:00::1, 2015-04-12-00:00:00::1, 2015-04-13-00:00:00::2, 2015-04-14-00:00:00::1, 2015-04-15-00:00:00::3, 2015-04-16-00:00:00::3, 2015-04-17-00:00:00::1, 2015-04-18-00:00:00::2, 2015-04-20-00:00:00::10, 2015-04-21-00:00:00::2, 2015-04-22-00:00:00::20, 2015-04-23-00:00:00::49, 2015-04-24-00:00:00::1, 2015-04-25-00:00:00::3, 2015-04-27-00:00:00::8, 2015-04-28-00:00:00::115, 2015-04-29-00:00:00::120, 2015-04-30-00:00:00::397, 2015-05-01-00:00:00::4, 2015-05-04-00:00:00::605, 2015-05-05-00:00:00::3, 2015-05-06-00:00:00::7, 2015-05-07-00:00:00::14, 2015-05-08-00:00:00::1, 2015-05-09-00:00:00::2, 2015-05-11-00:00:00::5, 2015-05-12-00:00:00::5, 2015-05-13-00:00:00::2, 2015-05-14-00:00:00::3, 2015-05-15-00:00:00::2, 2015-05-18-00:00:00::2, 2015-05-19-00:00:00::3, 2015-05-21-00:00:00::4, 2015-05-22-00:00:00::4, 2015-05-25-00:00:00::1, 2015-05-26-00:00:00::2, 2015-05-27-00:00:00::2, 2015-05-28-00:00:00::3, 2015-05-29-00:00:00::20, 2015-05-30-00:00:00::1, 2015-06-01-00:00:00::1, 2015-06-02-00:00:00::11, 2015-06-03-00:00:00::2, 2015-06-04-00:00:00::2, 2015-06-08-00:00:00::3, 2015-06-09-00:00:00::9, 2015-06-10-00:00:00::3, 2015-06-11-00:00:00::3, 2015-06-13-00:00:00::2, 2015-06-15-00:00:00::11, 2015-06-16-00:00:00::7, 2015-06-17-00:00:00::2, 2015-06-18-00:00:00::1, 2015-06-19-00:00:00::1, 2015-06-22-00:00:00::4, 2015-06-23-00:00:00::1, 2015-06-24-00:00:00::12, 2015-06-25-00:00:00::13, 2015-06-26-00:00:00::9, 2015-06-28-00:00:00::1, 2015-06-29-00:00:00::655, 2015-06-30-00:00:00::5, 2015-07-01-00:00:00::30, 2015-07-02-00:00:00::7, 2015-07-03-00:00:00::3, 2015-07-04-00:00:00::1, 2015-07-05-00:00:00::1, 2015-07-06-00:00:00::10, 
2015-07-07-00:00:00::18, 2015-07-08-00:00:00::2, 2015-07-09-00:00:00::17, 2015-07-10-00:00:00::5, 2015-07-11-00:00:00::1, 2015-07-13-00:00:00::6, 2015-07-14-00:00:00::9, 2015-07-15-00:00:00::3, 2015-07-16-00:00:00::21, 2015-07-17-00:00:00::3, 2015-07-18-00:00:00::1, 2015-07-20-00:00:00::92, 2015-07-22-00:00:00::4, 2015-07-23-00:00:00::5, 2015-07-24-00:00:00::5, 2015-07-25-00:00:00::1, 2015-07-27-00:00:00::7, 2015-07-28-00:00:00::19, 2015-07-30-00:00:00::10, 2015-07-31-00:00:00::6, 2015-08-01-00:00:00::1, 2015-08-03-00:00:00::1, 2015-08-04-00:00:00::4, 2015-08-05-00:00:00::17, 2015-08-06-00:00:00::4, 2015-08-07-00:00:00::3, 2015-08-08-00:00:00::2, 2015-08-10-00:00:00::6, 2015-08-11-00:00:00::2, 2015-08-12-00:00:00::4, 2015-08-13-00:00:00::2, 2015-08-14-00:00:00::1, 2015-08-16-00:00:00::3, 2015-08-17-00:00:00::4, 2015-08-18-00:00:00::2, 2015-08-19-00:00:00::4, 2015-08-20-00:00:00::216, 2015-08-21-00:00:00::6, 2015-08-24-00:00:00::5, 2015-08-25-00:00:00::4, 2015-08-26-00:00:00::384, 2015-08-27-00:00:00::3, 2015-08-28-00:00:00::2, 2015-08-31-00:00:00::5, 2015-09-01-00:00:00::7, 2015-09-02-00:00:00::2, 2015-09-03-00:00:00::9, 2015-09-06-00:00:00::1, 2015-09-07-00:00:00::2, 2015-09-08-00:00:00::25, 2015-09-09-00:00:00::4, 2015-09-10-00:00:00::5, 2015-09-11-00:00:00::3, 2015-09-12-00:00:00::1, 2015-09-13-00:00:00::1, 2015-09-14-00:00:00::4, 2015-09-15-00:00:00::45, 2015-09-16-00:00:00::2, 2015-09-17-00:00:00::2, 2015-09-18-00:00:00::2, 2015-09-21-00:00:00::205, 2015-09-22-00:00:00::88, 2015-09-23-00:00:00::23, 2015-09-24-00:00:00::37, 2015-09-25-00:00:00::7, 2015-09-28-00:00:00::5, 2015-09-29-00:00:00::35, 2015-09-30-00:00:00::24, 2015-10-01-00:00:00::16, 2015-10-02-00:00:00::31, 2015-10-04-00:00:00::1, 2015-10-05-00:00:00::5, 2015-10-06-00:00:00::1, 2015-10-07-00:00:00::10, 2015-10-08-00:00:00::9, 2015-10-09-00:00:00::8, 2015-10-10-00:00:00::219, 2015-10-11-00:00:00::77, 2015-10-12-00:00:00::191, 2015-10-13-00:00:00::9, 2015-10-14-00:00:00::23, 2015-10-15-00:00:00::103, 
2015-10-16-00:00:00::52, 2015-10-17-00:00:00::1, 2015-10-18-00:00:00::5, 2015-10-19-00:00:00::4, 2015-10-20-00:00:00::822, 2015-10-21-00:00:00::34, 2015-10-22-00:00:00::41, 2015-10-23-00:00:00::2045, 2015-10-24-00:00:00::1, 2015-10-25-00:00:00::2, 2015-10-26-00:00:00::7, 2015-10-27-00:00:00::19, 2015-10-28-00:00:00::17, 2015-10-29-00:00:00::14, 2015-10-30-00:00:00::12, 2015-11-02-00:00:00::4, 2015-11-03-00:00:00::7, 2015-11-04-00:00:00::11, 2015-11-05-00:00:00::6, 2015-11-06-00:00:00::8, 2015-11-07-00:00:00::2, 2015-11-08-00:00:00::1, 2015-11-09-00:00:00::10, 2015-11-10-00:00:00::10, 2015-11-11-00:00:00::10, 2015-11-12-00:00:00::5, 2015-11-13-00:00:00::6, 2015-11-15-00:00:00::3, 2015-11-16-00:00:00::8, 2015-11-17-00:00:00::4, 2015-11-18-00:00:00::8, 2015-11-19-00:00:00::458, 2015-11-20-00:00:00::390, 2015-11-22-00:00:00::1, 2015-11-23-00:00:00::12, 2015-11-24-00:00:00::13, 2015-11-25-00:00:00::542, 2015-11-26-00:00:00::2, 2015-11-27-00:00:00::1, 2015-11-30-00:00:00::4, 2015-12-01-00:00:00::3, 2015-12-02-00:00:00::4, 2015-12-03-00:00:00::8, 2015-12-04-00:00:00::6, 2015-12-05-00:00:00::1, 2015-12-06-00:00:00::1, 2015-12-07-00:00:00::3, 2015-12-08-00:00:00::18, 2015-12-09-00:00:00::3, 2015-12-10-00:00:00::22, 2015-12-11-00:00:00::4, 2015-12-14-00:00:00::13, 2015-12-15-00:00:00::8, 2015-12-16-00:00:00::8, 2015-12-17-00:00:00::5, 2015-12-18-00:00:00::9, 2015-12-20-00:00:00::2, 2015-12-21-00:00:00::7, 2015-12-22-00:00:00::4, 2015-12-23-00:00:00::15, 2015-12-24-00:00:00::2, 2015-12-28-00:00:00::2, 2015-12-31-00:00:00::1, 2016-01-01-00:00:00::10, 2016-01-02-00:00:00::2, 2016-01-03-00:00:00::1, 2016-01-04-00:00:00::15, 2016-01-05-00:00:00::9, 2016-01-06-00:00:00::19, 2016-01-07-00:00:00::30, 2016-01-08-00:00:00::9711, 2016-01-09-00:00:00::9, 2016-01-10-00:00:00::9, 2016-01-11-00:00:00::20, 2016-01-12-00:00:00::14, 2016-01-13-00:00:00::3084, 2016-01-14-00:00:00::17, 2016-01-15-00:00:00::9, 2016-01-16-00:00:00::1, 2016-01-17-00:00:00::1, 2016-01-18-00:00:00::2, 
2016-01-19-00:00:00::9, 2016-01-20-00:00:00::12, 2016-01-21-00:00:00::15, 2016-01-22-00:00:00::9, 2016-01-23-00:00:00::6, 2016-01-24-00:00:00::2, 2016-01-25-00:00:00::3, 2016-01-26-00:00:00::1, 2016-01-27-00:00:00::3, 2016-01-28-00:00:00::7, 2016-01-29-00:00:00::4, 2016-01-30-00:00:00::7, 2016-01-31-00:00:00::3, 2016-02-01-00:00:00::17, 2016-02-02-00:00:00::28, 2016-02-03-00:00:00::20, 2016-02-04-00:00:00::20, 2016-02-05-00:00:00::40, 2016-02-06-00:00:00::3, 2016-02-07-00:00:00::9, 2016-02-08-00:00:00::28, 2016-02-09-00:00:00::33, 2016-02-10-00:00:00::100, 2016-02-11-00:00:00::52, 2016-02-12-00:00:00::103, 2016-02-13-00:00:00::5, 2016-02-14-00:00:00::5, 2016-02-15-00:00:00::7, 2016-02-16-00:00:00::23, 2016-02-17-00:00:00::210, 2016-02-18-00:00:00::29, 2016-02-19-00:00:00::24, 2016-02-20-00:00:00::5, 2016-02-21-00:00:00::29, 2016-02-22-00:00:00::26, 2016-02-23-00:00:00::12, 2016-02-24-00:00:00::19, 2016-02-25-00:00:00::46, 2016-02-26-00:00:00::23, 2016-02-27-00:00:00::7, 2016-02-28-00:00:00::1, 2016-02-29-00:00:00::45, 2016-03-01-00:00:00::48, 2016-03-02-00:00:00::49, 2016-03-03-00:00:00::34, 2016-03-04-00:00:00::61, 2016-03-05-00:00:00::5, 2016-03-06-00:00:00::15, 2016-03-07-00:00:00::22, 2016-03-08-00:00:00::3649, 2016-03-09-00:00:00::49, 2016-03-10-00:00:00::39, 2016-03-11-00:00:00::67, 2016-03-12-00:00:00::3, 2016-03-13-00:00:00::1, 2016-03-14-00:00:00::31, 2016-03-15-00:00:00::31, 2016-03-16-00:00:00::37, 2016-03-17-00:00:00::43, 2016-03-18-00:00:00::47, 2016-03-19-00:00:00::5, 2016-03-20-00:00:00::14, 2016-03-21-00:00:00::47, 2016-03-22-00:00:00::31, 2016-03-23-00:00:00::41, 2016-03-24-00:00:00::38, 2016-03-25-00:00:00::36, 2016-03-26-00:00:00::5, 2016-03-27-00:00:00::3, 2016-03-28-00:00:00::42, 2016-03-29-00:00:00::43, 2016-03-30-00:00:00::68, 2016-03-31-00:00:00::35, 2016-04-01-00:00:00::26, 2016-04-02-00:00:00::1, 2016-04-03-00:00:00::2, 2016-04-04-00:00:00::66, 2016-04-05-00:00:00::35, 2016-04-06-00:00:00::26, 2016-04-07-00:00:00::25, 
2016-04-08-00:00:00::31, 2016-04-09-00:00:00::2, 2016-04-10-00:00:00::1, 2016-04-11-00:00:00::27, 2016-04-12-00:00:00::35, 2016-04-13-00:00:00::5, 2016-04-14-00:00:00::185, 2016-04-15-00:00:00::121, 2016-04-16-00:00:00::83, 2016-04-18-00:00:00::9, 2016-04-19-00:00:00::1, 2016-04-20-00:00:00::53, 2016-04-21-00:00:00::1, 2016-04-22-00:00:00::95, 2016-04-23-00:00:00::126, 2016-04-24-00:00:00::3, 2016-04-25-00:00:00::1, 2016-04-26-00:00:00::202, 2016-04-27-00:00:00::58, 2016-04-28-00:00:00::64, 2016-04-29-00:00:00::54, 2016-04-30-00:00:00::139, 2016-05-01-00:00:00::2, 2016-05-02-00:00:00::4, 2016-05-03-00:00:00::46, 2016-05-04-00:00:00::108, 2016-05-05-00:00:00::34, 2016-05-06-00:00:00::24, 2016-05-07-00:00:00::7, 2016-05-08-00:00:00::3, 2016-05-09-00:00:00::44, 2016-05-10-00:00:00::28, 2016-05-11-00:00:00::44, 2016-05-12-00:00:00::58, 2016-05-13-00:00:00::40, 2016-05-14-00:00:00::2, 2016-05-15-00:00:00::1, 2016-05-16-00:00:00::29, 2016-05-17-00:00:00::16, 2016-05-18-00:00:00::31, 2016-05-19-00:00:00::58, 2016-05-20-00:00:00::59, 2016-05-21-00:00:00::14, 2016-05-22-00:00:00::16, 2016-05-23-00:00:00::68, 2016-05-24-00:00:00::19, 2016-05-25-00:00:00::55, 2016-05-26-00:00:00::78, 2016-05-27-00:00:00::64, 2016-05-28-00:00:00::76, 2016-05-29-00:00:00::2, 2016-05-30-00:00:00::15, 2016-05-31-00:00:00::24, 2016-06-01-00:00:00::11, 2016-06-02-00:00:00::31, 2016-06-03-00:00:00::39, 2016-06-04-00:00:00::2, 2016-06-06-00:00:00::783, 2016-06-07-00:00:00::14, 2016-06-08-00:00:00::51, 2016-06-09-00:00:00::25, 2016-06-10-00:00:00::14, 2016-06-12-00:00:00::2, 2016-06-13-00:00:00::57, 2016-06-14-00:00:00::20, 2016-06-15-00:00:00::36, 2016-06-16-00:00:00::15, 2016-06-17-00:00:00::49, 2016-06-18-00:00:00::3, 2016-06-20-00:00:00::38, 2016-06-21-00:00:00::45, 2016-06-22-00:00:00::5215, 2016-06-23-00:00:00::4977, 2016-06-24-00:00:00::50, 2016-06-25-00:00:00::2, 2016-06-26-00:00:00::2, 2016-06-27-00:00:00::27, 2016-06-28-00:00:00::1102, 2016-06-29-00:00:00::59, 2016-06-30-00:00:00::38, 
2016-07-01-00:00:00::34, 2016-07-02-00:00:00::1, 2016-07-03-00:00:00::3, 2016-07-04-00:00:00::69, 2016-07-05-00:00:00::26, 2016-07-06-00:00:00::5, 2016-07-07-00:00:00::5, 2016-07-08-00:00:00::26, 2016-07-09-00:00:00::58, 2016-07-10-00:00:00::3, 2016-07-11-00:00:00::13, 2016-07-12-00:00:00::1, 2016-07-13-00:00:00::68, 2016-07-14-00:00:00::73, 2016-07-15-00:00:00::69, 2016-07-16-00:00:00::65, 2016-07-17-00:00:00::4, 2016-07-18-00:00:00::1, 2016-07-19-00:00:00::14, 2016-07-20-00:00:00::60, 2016-07-21-00:00:00::328, 2016-07-22-00:00:00::41, 2016-07-23-00:00:00::105, 2016-07-24-00:00:00::16, 2016-07-25-00:00:00::1, 2016-07-26-00:00:00::37, 2016-07-27-00:00:00::72, 2016-07-28-00:00:00::59, 2016-07-29-00:00:00::53, 2016-07-30-00:00:00::241, 2016-07-31-00:00:00::1, 2016-08-01-00:00:00::10, 2016-08-02-00:00:00::2, 2016-08-03-00:00:00::45, 2016-08-04-00:00:00::44, 2016-08-05-00:00:00::76, 2016-08-06-00:00:00::96, 2016-08-07-00:00:00::8, 2016-08-08-00:00:00::129, 2016-08-09-00:00:00::14, 2016-08-10-00:00:00::50, 2016-08-11-00:00:00::47, 2016-08-12-00:00:00::56, 2016-08-13-00:00:00::95, 2016-08-14-00:00:00::6, 2016-08-15-00:00:00::20, 2016-08-16-00:00:00::11, 2016-08-17-00:00:00::5, 2016-08-18-00:00:00::94, 2016-08-19-00:00:00::51, 2016-08-20-00:00:00::110, 2016-08-21-00:00:00::2, 2016-08-22-00:00:00::30, 2016-08-23-00:00:00::3, 2016-08-24-00:00:00::50, 2016-08-25-00:00:00::55, 2016-08-26-00:00:00::51, 2016-08-27-00:00:00::103, 2016-08-28-00:00:00::5, 2016-08-29-00:00:00::6, 2016-08-31-00:00:00::58, 2016-09-01-00:00:00::72, 2016-09-02-00:00:00::94, 2016-09-03-00:00:00::107, 2016-09-04-00:00:00::9, 2016-09-05-00:00:00::1, 2016-09-06-00:00:00::10, 2016-09-07-00:00:00::58, 2016-09-08-00:00:00::82, 2016-09-09-00:00:00::44, 2016-09-10-00:00:00::162, 2016-09-11-00:00:00::5, 2016-09-12-00:00:00::5, 2016-09-13-00:00:00::1, 2016-09-14-00:00:00::64, 2016-09-15-00:00:00::62, 2016-09-16-00:00:00::55, 2016-09-17-00:00:00::106, 2016-09-18-00:00:00::17, 2016-09-19-00:00:00::6, 
2016-09-20-00:00:00::4, 2016-09-21-00:00:00::68, 2016-09-22-00:00:00::69, 2016-09-23-00:00:00::62, 2016-09-24-00:00:00::101, 2016-09-25-00:00:00::11, 2016-09-26-00:00:00::13, 2016-09-27-00:00:00::2, 2016-09-28-00:00:00::94, 2016-09-29-00:00:00::42, 2016-09-30-00:00:00::47, 2016-10-01-00:00:00::109, 2016-10-02-00:00:00::13, 2016-10-03-00:00:00::2, 2016-10-04-00:00:00::43, 2016-10-05-00:00:00::38, 2016-10-06-00:00:00::41, 2016-10-07-00:00:00::31, 2016-10-08-00:00:00::64, 2016-10-09-00:00:00::7, 2016-10-10-00:00:00::13, 2016-10-11-00:00:00::2, 2016-10-12-00:00:00::47, 2016-10-13-00:00:00::49, 2016-10-14-00:00:00::114, 2016-10-15-00:00:00::96, 2016-10-16-00:00:00::9, 2016-10-17-00:00:00::3, 2016-10-18-00:00:00::3, 2016-10-19-00:00:00::60, 2016-10-20-00:00:00::9, 2016-10-21-00:00:00::160, 2016-10-22-00:00:00::53, 2016-10-23-00:00:00::2, 2016-10-24-00:00:00::4, 2016-10-25-00:00:00::4, 2016-10-26-00:00:00::69, 2016-10-27-00:00:00::67, 2016-10-28-00:00:00::4, 2016-10-29-00:00:00::182, 2016-10-31-00:00:00::8, 2016-11-01-00:00:00::72, 2016-11-02-00:00:00::68, 2016-11-03-00:00:00::53, 2016-11-04-00:00:00::54, 2016-11-05-00:00:00::94, 2016-11-06-00:00:00::2, 2016-11-07-00:00:00::24, 2016-11-08-00:00:00::14, 2016-11-09-00:00:00::83, 2016-11-10-00:00:00::71, 2016-11-11-00:00:00::74, 2016-11-12-00:00:00::129, 2016-11-13-00:00:00::3, 2016-11-14-00:00:00::4, 2016-11-16-00:00:00::55, 2016-11-17-00:00:00::54, 2016-11-18-00:00:00::77, 2016-11-19-00:00:00::119, 2016-11-20-00:00:00::4, 2016-11-21-00:00:00::14, 2016-11-22-00:00:00::3, 2016-11-23-00:00:00::52, 2016-11-24-00:00:00::33, 2016-11-25-00:00:00::45, 2016-11-26-00:00:00::66, 2016-11-27-00:00:00::4, 2016-11-28-00:00:00::3, 2016-11-29-00:00:00::1, 2016-11-30-00:00:00::60, 2016-12-01-00:00:00::78, 2016-12-02-00:00:00::84, 2016-12-03-00:00:00::66, 2016-12-04-00:00:00::7, 2016-12-05-00:00:00::38, 2016-12-06-00:00:00::53, 2016-12-07-00:00:00::147, 2016-12-08-00:00:00::472, 2016-12-09-00:00:00::112, 2016-12-10-00:00:00::80, 
2016-12-11-00:00:00::8, 2016-12-12-00:00:00::9, 2016-12-14-00:00:00::42, 2016-12-16-00:00:00::76, 2016-12-17-00:00:00::105, 2016-12-18-00:00:00::5, 2016-12-19-00:00:00::8, 2016-12-21-00:00:00::63, 2016-12-22-00:00:00::31, 2016-12-23-00:00:00::56, 2016-12-24-00:00:00::51, 2016-12-25-00:00:00::9, 2016-12-26-00:00:00::1, 2016-12-28-00:00:00::3, 2016-12-30-00:00:00::7, 2016-12-31-00:00:00::4, 2017-01-02-00:00:00::1, 2017-01-04-00:00:00::10, 2017-01-05-00:00:00::57, 2017-01-06-00:00:00::133, 2017-01-07-00:00:00::99, 2017-01-08-00:00:00::5, 2017-01-09-00:00:00::5, 2017-01-10-00:00:00::14, 2017-01-11-00:00:00::61, 2017-01-12-00:00:00::39, 2017-01-13-00:00:00::55, 2017-01-14-00:00:00::107, 2017-01-15-00:00:00::5, 2017-01-16-00:00:00::3, 2017-01-18-00:00:00::39, 2017-01-19-00:00:00::48, 2017-01-20-00:00:00::38, 2017-01-21-00:00:00::73, 2017-01-22-00:00:00::6, 2017-01-23-00:00:00::10, 2017-01-25-00:00:00::1, 2017-01-26-00:00:00::104, 2017-01-27-00:00:00::1, 2017-01-28-00:00:00::117, 2017-01-29-00:00:00::3, 2017-01-30-00:00:00::4, 2017-01-31-00:00:00::78, 2017-02-01-00:00:00::60, 2017-02-02-00:00:00::49, 2017-02-03-00:00:00::46, 2017-02-04-00:00:00::113, 2017-02-05-00:00:00::6, 2017-02-06-00:00:00::56, 2017-02-07-00:00:00::302, 2017-02-08-00:00:00::75, 2017-02-09-00:00:00::54, 2017-02-10-00:00:00::226, 2017-02-11-00:00:00::1039, 2017-02-13-00:00:00::29515, 2017-02-14-00:00:00::602, 2017-02-15-00:00:00::1099, 2017-02-16-00:00:00::97, 2017-02-17-00:00:00::1608, 2017-02-18-00:00:00::1258, 2017-02-19-00:00:00::109, 2017-02-20-00:00:00::116, 2017-02-21-00:00:00::336, 2017-02-22-00:00:00::664, 2017-02-23-00:00:00::154529, 2017-02-24-00:00:00::40521, 2017-02-25-00:00:00::2100, 2017-02-26-00:00:00::131, 2017-02-27-00:00:00::415645, 2017-02-28-00:00:00::276943, 2017-03-01-00:00:00::3306, 2017-03-02-00:00:00::3552, 2017-03-03-00:00:00::4227, 2017-03-04-00:00:00::6963, 2017-03-05-00:00:00::440, 2017-03-06-00:00:00::3365, 2017-03-07-00:00:00::389, 2017-03-08-00:00:00::3870, 
2017-03-09-00:00:00::3867, 2017-03-10-00:00:00::4218, 2017-03-11-00:00:00::7977, 2017-03-12-00:00:00::391, 2017-03-13-00:00:00::27355, 2017-03-14-00:00:00::34284, 2017-03-15-00:00:00::14698, 2017-03-16-00:00:00::2926, 2017-03-17-00:00:00::5338, 2017-03-18-00:00:00::7752, 2017-03-19-00:00:00::510, 2017-03-20-00:00:00::7459, 2017-03-21-00:00:00::440, 2017-03-22-00:00:00::6040, 2017-03-23-00:00:00::6313, 2017-03-24-00:00:00::5792, 2017-03-25-00:00:00::9397, 2017-03-26-00:00:00::613, 2017-03-27-00:00:00::1365, 2017-03-28-00:00:00::7063, 2017-03-29-00:00:00::2110, 2017-03-30-00:00:00::8154, 2017-03-31-00:00:00::9777, 2017-04-01-00:00:00::16354, 2017-04-02-00:00:00::1288, 2017-04-03-00:00:00::1426, 2017-04-04-00:00:00::2666, 2017-04-05-00:00:00::13562, 2017-04-06-00:00:00::54698, 2017-04-07-00:00:00::22457";
}
| 4,645 |
0 | Create_ds/gobblin/gobblin-salesforce/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/test/java/org/apache/gobblin/salesforce/SalesforceExtractorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import java.util.Collections;
import java.util.List;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.exception.HighWatermarkException;
import org.apache.gobblin.source.extractor.exception.RestApiClientException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiCommand;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiCommandOutput;
import org.apache.gobblin.source.extractor.partition.Partition;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.extractor.watermark.TimestampWatermark;
import org.apache.gobblin.source.extractor.watermark.WatermarkType;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * Unit tests for {@link SalesforceExtractor} high-watermark handling: the REST
 * command built to fetch the max watermark, and the parsing of its response.
 */
public class SalesforceExtractorTest {

  // Fixture values shared across the test cases below.
  private static final String DEFAULT_SCHEMA = "test-schema";
  private static final String DEFAULT_ENTITY = "test-entity";
  private static final String DEFAULT_WATERMARK_COLUMN = "test-watermark-column";
  private static final String GTE_OPERATOR = ">=";
  private static final String LTE_OPERATOR = "<=";
  private static final long LWM_VALUE_1 = 20131212121212L;
  private static final long HWM_VALUE_1 = 20231212121212L;
  private static final String DEFAULT_WATERMARK_VALUE_FORMAT = "yyyyMMddHHmmss";

  private SalesforceExtractor extractor;

  /** Builds the extractor under test from a minimal snapshot work unit. */
  @BeforeTest
  public void beforeTest() {
    WorkUnit workUnit = WorkUnit.createEmpty();
    workUnit.setProp(Partition.IS_LAST_PARTIITON, false);
    workUnit.setProp(ConfigurationKeys.SOURCE_QUERYBASED_EXTRACT_TYPE, "SNAPSHOT");
    WorkUnitState workUnitState = new WorkUnitState(workUnit, new State());
    workUnitState.setId("test");
    extractor = new SalesforceExtractor(workUnitState);
  }

  /** Creates a timestamp predicate of the given type over the default watermark column. */
  private Predicate newTimestampPredicate(long watermarkValue, String operator, Predicate.PredicateType type) {
    TimestampWatermark watermark =
        new TimestampWatermark(DEFAULT_WATERMARK_COLUMN, DEFAULT_WATERMARK_VALUE_FORMAT);
    String condition = watermark.getWatermarkCondition(extractor, watermarkValue, operator);
    return new Predicate(DEFAULT_WATERMARK_COLUMN, watermarkValue, condition,
        extractor.getWatermarkSourceFormat(WatermarkType.TIMESTAMP), type);
  }

  @DataProvider
  private Object[][] provideGetHighWatermarkMetadataTestData() {
    Predicate lwmPredicate = newTimestampPredicate(LWM_VALUE_1, GTE_OPERATOR, Predicate.PredicateType.LWM);
    Predicate hwmPredicate = newTimestampPredicate(HWM_VALUE_1, LTE_OPERATOR, Predicate.PredicateType.HWM);
    return new Object[][] {
        {
            // With low and high watermark predicates
            ImmutableList.of(lwmPredicate, hwmPredicate),
            String.format("SELECT MAX(%s) FROM %s where (%s) and (%s)",
                DEFAULT_WATERMARK_COLUMN, DEFAULT_ENTITY, lwmPredicate.getCondition(), hwmPredicate.getCondition())
        },
        {
            // With no predicates
            ImmutableList.of(),
            String.format("SELECT MAX(%s) FROM %s",
                DEFAULT_WATERMARK_COLUMN, DEFAULT_ENTITY)
        }
    };
  }

  /** Verifies that the REST command built for the max-watermark fetch matches the expected SOQL. */
  @Test(dataProvider = "provideGetHighWatermarkMetadataTestData")
  public void testGetHighWatermarkMetadata(List<Predicate> predicateList,
      String restQueryExpected) throws HighWatermarkException, RestApiClientException {
    List<Command> commandsActual =
        extractor.getHighWatermarkMetadata(DEFAULT_SCHEMA, DEFAULT_ENTITY, DEFAULT_WATERMARK_COLUMN,
            predicateList);

    String fullUri = new SalesforceConnector(new State()).getFullUri(SalesforceExtractor.getSoqlUrl(restQueryExpected));
    List<Command> commandsExpected = Collections.singletonList(
        new RestApiCommand().build(Collections.singletonList(fullUri), RestApiCommand.RestApiCommandType.GET));

    Assert.assertEquals(commandsActual.size(), 1);
    Assert.assertEquals(commandsActual.get(0).getCommandType(), commandsExpected.get(0).getCommandType());
    Assert.assertEquals(commandsActual.get(0).getParams(), commandsExpected.get(0).getParams());
  }

  @DataProvider
  private Object[][] provideGetHighWatermarkTestData() {
    // Each row: raw REST response body, expected extracted watermark.
    return new Object[][] {
        {
            "{}",
            ConfigurationKeys.DEFAULT_WATERMARK_VALUE
        },
        {
            "{\"records\": [{}]}",
            ConfigurationKeys.DEFAULT_WATERMARK_VALUE
        },
        {
            "{"
                + "  \"totalSize\": 1,"
                + "  \"done\": true,"
                + "  \"records\": ["
                + "    {"
                + "      \"attributes\": {"
                + "        \"type\": \"AggregateResult\""
                + "      },"
                + "      \"expr0\": null"
                + "    }"
                + "  ]"
                + "}",
            ConfigurationKeys.DEFAULT_WATERMARK_VALUE
        },
        {
            "{"
                + "  \"totalSize\": 1,"
                + "  \"done\": true,"
                + "  \"records\": ["
                + "    {"
                + "      \"attributes\": {"
                + "        \"type\": \"AggregateResult\""
                + "      },"
                + "      \"expr0\": \"2023-09-15T05:21:41.000Z\""
                + "    }"
                + "  ]"
                + "}",
            20230915052141L
        },
    };
  }

  /** Checks watermark extraction from assorted REST responses, including missing/null results. */
  @Test(dataProvider = "provideGetHighWatermarkTestData")
  public void testGetHighWatermark(String commandOutputAsStr, long expectedHwm) throws HighWatermarkException {
    CommandOutput<RestApiCommand, String> response = new RestApiCommandOutput();
    response.put(new RestApiCommand(), commandOutputAsStr);

    long actualHighWtm =
        extractor.getHighWatermark(response, DEFAULT_WATERMARK_COLUMN, SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT);
    Assert.assertEquals(actualHighWtm, expectedHwm);
  }
}
| 4,646 |
0 | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/SalesforceConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
/**
 * Configuration property names (and their defaults) specific to the Salesforce
 * source. This class is a pure constant holder and is never instantiated.
 */
public final class SalesforceConfigurationKeys {

  private SalesforceConfigurationKeys() {
    // constant holder — not instantiable
  }

  /** When set, soft-deleted records are excluded from the pull. */
  public static final String SOURCE_QUERYBASED_SALESFORCE_IS_SOFT_DELETES_PULL_DISABLED =
      "source.querybased.salesforce.is.soft.deletes.pull.disabled";

  // Retry pacing: how long to sleep before retrying, in milliseconds.
  // The longer "exceed quota" interval avoids hammering the API during a resource-consuming peak.
  public static final String RETRY_EXCEED_QUOTA_INTERVAL = "salesforce.retry.exceedQuotaInterval";
  public static final long RETRY_EXCEED_QUOTA_INTERVAL_DEFAULT = 5 * 60 * 1000;
  public static final String RETRY_INTERVAL = "salesforce.retry.interval";
  public static final long RETRY_INTERVAL_DEFAULT = 1 * 60 * 1000;

  // PK-chunking (bulk API) settings.
  public static final String BULK_TEST_JOB_ID = "salesforce.bulk.testJobId";
  public static final String BULK_TEST_BATCH_ID_LIST = "salesforce.bulk.testBatchIds";
  public static final String SALESFORCE_PARTITION_TYPE = "salesforce.partitionType";
  /** Internal key carrying the bulk job id between components; don't use in ini config. */
  public static final String PK_CHUNKING_JOB_ID = "__salesforce.job.id";
  /** Internal key carrying batch/result id pairs between components; don't use in ini config. */
  public static final String PK_CHUNKING_BATCH_RESULT_ID_PAIRS = "__salesforce.batch.result.id.pairs";
}
| 4,647 |
0 | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/SalesforceHistogramService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import com.google.common.math.DoubleMath;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import java.math.RoundingMode;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.text.StrSubstitutor;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.exception.RestApiClientException;
import org.apache.gobblin.source.extractor.exception.RestApiConnectionException;
import org.apache.gobblin.source.extractor.exception.RestApiProcessingException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiConnector;
import org.apache.gobblin.source.extractor.partition.Partition;
import org.apache.gobblin.source.extractor.partition.Partitioner;
import org.apache.gobblin.source.extractor.utils.Utils;
import static org.apache.gobblin.configuration.ConfigurationKeys.*;
/**
* This class encapsulates everything related to histogram calculation for Salesforce. A histogram here refers to a
* mapping of number of records to be fetched by time intervals.
*/
@Slf4j
public class SalesforceHistogramService {
// Smallest interval to split down to, in millis — presumably the floor for probe
// splitting; TODO confirm against the probing logic (not visible in this chunk).
private static final int MIN_SPLIT_TIME_MILLIS = 1000;
// Suffix turning a day-only date into a "yyyy-MM-dd-HH:mm:ss"-style key
// (usage is outside this chunk — verify before relying on this).
private static final String ZERO_TIME_SUFFIX = "-00:00:00";
private static final Gson GSON = new Gson();
// this is used to generate histogram buckets smaller than the target partition size to allow for more even
// packing of the generated partitions
private static final String PROBE_TARGET_RATIO = "salesforce.probeTargetRatio";
private static final double DEFAULT_PROBE_TARGET_RATIO = 0.60;
// NOTE(review): looks like a cap on the number of dynamic probe queries — confirm where it is read.
private static final String DYNAMIC_PROBING_LIMIT = "salesforce.dynamicProbingLimit";
private static final int DEFAULT_DYNAMIC_PROBING_LIMIT = 1000;
// SOQL template (StrSubstitutor ${...} placeholders) counting records per day within a watermark range.
private static final String DAY_PARTITION_QUERY_TEMPLATE =
"SELECT count(${column}) cnt, DAY_ONLY(${column}) time FROM ${table} " + "WHERE ${column} ${greater} ${start}"
+ " AND ${column} ${less} ${end} GROUP BY DAY_ONLY(${column}) ORDER BY DAY_ONLY(${column})";
// SOQL template counting records in a single probe interval.
private static final String PROBE_PARTITION_QUERY_TEMPLATE = "SELECT count(${column}) cnt FROM ${table} "
+ "WHERE ${column} ${greater} ${start} AND ${column} ${less} ${end}";
// Connection through which SOQL queries are issued (see getHistogram).
protected SalesforceConnector salesforceConnector;
private final SfConfig sfConfig;
SalesforceHistogramService(SfConfig sfConfig, SalesforceConnector connector) {
this.sfConfig = sfConfig;
salesforceConnector = connector;
}
/**
* Generate the histogram
*/
Histogram getHistogram(String entity, String watermarkColumn, SourceState state,
Partition partition) {
try {
if (!salesforceConnector.connect()) {
throw new RuntimeException("Failed to connect.");
}
} catch (RestApiConnectionException e) {
throw new RuntimeException("Failed to connect.", e);
}
Histogram histogram = getHistogramByDayBucketing(salesforceConnector, entity, watermarkColumn, partition);
// exchange the first histogram group key with the global low watermark to ensure that the low watermark is captured
// in the range of generated partitions
HistogramGroup firstGroup = histogram.get(0);
Date lwmDate = Utils.toDate(partition.getLowWatermark(), Partitioner.WATERMARKTIMEFORMAT);
histogram.getGroups().set(0, new HistogramGroup(Utils.epochToDate(lwmDate.getTime(), SalesforceSource.SECONDS_FORMAT),
firstGroup.getCount()));
// refine the histogram
if (state.getPropAsBoolean(SalesforceSource.ENABLE_DYNAMIC_PROBING)) {
histogram = getRefinedHistogram(salesforceConnector, entity, watermarkColumn, state, partition, histogram);
}
return histogram;
}
/**
* Get a histogram with day granularity buckets.
*/
private Histogram getHistogramByDayBucketing(SalesforceConnector connector, String entity, String watermarkColumn,
Partition partition) {
Histogram histogram = new Histogram();
Calendar calendar = new GregorianCalendar();
Date startDate = Utils.toDate(partition.getLowWatermark(), Partitioner.WATERMARKTIMEFORMAT);
calendar.setTime(startDate);
int startYear = calendar.get(Calendar.YEAR);
String lowWatermarkDate = Utils.dateToString(startDate, SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT);
Date endDate = Utils.toDate(partition.getHighWatermark(), Partitioner.WATERMARKTIMEFORMAT);
calendar.setTime(endDate);
int endYear = calendar.get(Calendar.YEAR);
String highWatermarkDate = Utils.dateToString(endDate, SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT);
Map<String, String> values = new HashMap<>();
values.put("table", entity);
values.put("column", watermarkColumn);
StrSubstitutor sub = new StrSubstitutor(values);
for (int year = startYear; year <= endYear; year++) {
if (year == startYear) {
values.put("start", lowWatermarkDate);
values.put("greater", partition.isLowWatermarkInclusive() ? ">=" : ">");
} else {
values.put("start", getDateString(year));
values.put("greater", ">=");
}
if (year == endYear) {
values.put("end", highWatermarkDate);
values.put("less", partition.isHighWatermarkInclusive() ? "<=" : "<");
} else {
values.put("end", getDateString(year + 1));
values.put("less", "<");
}
String query = sub.replace(DAY_PARTITION_QUERY_TEMPLATE);
log.info("Histogram query: " + query);
histogram.add(parseDayBucketingHistogram(getRecordsForQuery(connector, query)));
}
return histogram;
}
/**
* Refine the histogram by probing to split large buckets
* @return the refined histogram
*/
private Histogram getRefinedHistogram(SalesforceConnector connector, String entity, String watermarkColumn,
SourceState state, Partition partition, Histogram histogram) {
final int maxPartitions = state.getPropAsInt(SOURCE_MAX_NUMBER_OF_PARTITIONS, DEFAULT_MAX_NUMBER_OF_PARTITIONS);
final int probeLimit = state.getPropAsInt(
DYNAMIC_PROBING_LIMIT, DEFAULT_DYNAMIC_PROBING_LIMIT);
final int minTargetPartitionSize = state.getPropAsInt(
SalesforceSource.MIN_TARGET_PARTITION_SIZE, SalesforceSource.DEFAULT_MIN_TARGET_PARTITION_SIZE);
final Histogram outputHistogram = new Histogram();
final double probeTargetRatio = state.getPropAsDouble(
PROBE_TARGET_RATIO, DEFAULT_PROBE_TARGET_RATIO);
final int bucketSizeLimit =
(int) (probeTargetRatio * computeTargetPartitionSize(histogram, minTargetPartitionSize, maxPartitions));
log.info("Refining histogram with bucket size limit {}.", bucketSizeLimit);
HistogramGroup currentGroup;
HistogramGroup nextGroup;
final TableCountProbingContext probingContext =
new TableCountProbingContext(connector, entity, watermarkColumn, bucketSizeLimit, probeLimit);
if (histogram.getGroups().isEmpty()) {
return outputHistogram;
}
// make a copy of the histogram list and add a dummy entry at the end to avoid special processing of the last group
List<HistogramGroup> list = new ArrayList<>(histogram.getGroups());
Date hwmDate = Utils.toDate(partition.getHighWatermark(), Partitioner.WATERMARKTIMEFORMAT);
list.add(new HistogramGroup(Utils.epochToDate(hwmDate.getTime(), SalesforceSource.SECONDS_FORMAT), 0));
for (int i = 0; i < list.size() - 1; i++) {
currentGroup = list.get(i);
nextGroup = list.get(i + 1);
// split the group if it is larger than the bucket size limit
if (currentGroup.getCount() > bucketSizeLimit) {
long startEpoch = Utils.toDate(currentGroup.getKey(), SalesforceSource.SECONDS_FORMAT).getTime();
long endEpoch = Utils.toDate(nextGroup.getKey(), SalesforceSource.SECONDS_FORMAT).getTime();
outputHistogram.add(getHistogramByProbing(probingContext, currentGroup.getCount(), startEpoch, endEpoch));
} else {
outputHistogram.add(currentGroup);
}
}
log.info("Executed {} probes for refining the histogram.", probingContext.probeCount);
// if the probe limit has been reached then print a warning
if (probingContext.probeCount >= probingContext.probeLimit) {
log.warn("Reached the probe limit");
}
return outputHistogram;
}
/**
* Get a histogram for the time range by probing to break down large buckets. Use count instead of
* querying if it is non-negative.
*/
private Histogram getHistogramByProbing(TableCountProbingContext probingContext, int count, long startEpoch,
long endEpoch) {
Histogram histogram = new Histogram();
Map<String, String> values = new HashMap<>();
values.put("table", probingContext.entity);
values.put("column", probingContext.watermarkColumn);
values.put("greater", ">=");
values.put("less", "<");
StrSubstitutor sub = new StrSubstitutor(values);
getHistogramRecursively(probingContext, histogram, sub, values, count, startEpoch, endEpoch);
return histogram;
}
private String getDateString(int year) {
Calendar calendar = new GregorianCalendar();
calendar.clear();
calendar.set(Calendar.YEAR, year);
return Utils.dateToString(calendar.getTime(), SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT);
}
/**
* Parse the query results into a {@link Histogram}
*/
private Histogram parseDayBucketingHistogram(JsonArray records) {
log.info("Parse day-based histogram");
Histogram histogram = new Histogram();
Iterator<JsonElement> elements = records.iterator();
JsonObject element;
while (elements.hasNext()) {
element = elements.next().getAsJsonObject();
String time = element.get("time").getAsString() + ZERO_TIME_SUFFIX;
int count = element.get("cnt").getAsInt();
histogram.add(new HistogramGroup(time, count));
}
return histogram;
}
/**
* Split a histogram bucket along the midpoint if it is larger than the bucket size limit.
*/
private void getHistogramRecursively(TableCountProbingContext probingContext, Histogram histogram, StrSubstitutor sub,
Map<String, String> values, int count, long startEpoch, long endEpoch) {
long midpointEpoch = startEpoch + (endEpoch - startEpoch) / 2;
// don't split further if small, above the probe limit, or less than 1 second difference between the midpoint and start
if (count <= probingContext.bucketSizeLimit
|| probingContext.probeCount > probingContext.probeLimit
|| (midpointEpoch - startEpoch < MIN_SPLIT_TIME_MILLIS)) {
histogram.add(new HistogramGroup(Utils.epochToDate(startEpoch, SalesforceSource.SECONDS_FORMAT), count));
return;
}
int countLeft = getCountForRange(probingContext, sub, values, startEpoch, midpointEpoch);
getHistogramRecursively(probingContext, histogram, sub, values, countLeft, startEpoch, midpointEpoch);
log.info("Count {} for left partition {} to {}", countLeft, startEpoch, midpointEpoch);
int countRight = count - countLeft;
getHistogramRecursively(probingContext, histogram, sub, values, countRight, midpointEpoch, endEpoch);
log.info("Count {} for right partition {} to {}", countRight, midpointEpoch, endEpoch);
}
/**
* Get a {@link JsonArray} containing the query results
*/
@SneakyThrows
private JsonArray getRecordsForQuery(SalesforceConnector connector, String query) {
RestApiProcessingException exception = null;
for (int i = 0; i < sfConfig.restApiRetryLimit + 1; i++) {
try {
String soqlQuery = SalesforceExtractor.getSoqlUrl(query);
List<Command> commands = RestApiConnector.constructGetCommand(connector.getFullUri(soqlQuery));
CommandOutput<?, ?> response = connector.getResponse(commands);
String output;
Iterator<String> itr = (Iterator<String>) response.getResults().values().iterator();
if (itr.hasNext()) {
output = itr.next();
} else {
throw new DataRecordException("Failed to get data from salesforce; REST response has no output");
}
return GSON.fromJson(output, JsonObject.class).getAsJsonArray("records");
} catch (RestApiClientException | DataRecordException e) {
throw new RuntimeException("Fail to get data from salesforce", e);
} catch (RestApiProcessingException e) {
exception = e;
log.info("Caught RestApiProcessingException, retrying({}) rest query: {}", i+1, query);
Thread.sleep(sfConfig.restApiRetryInterval);
}
}
throw new RuntimeException("Fail to get data from salesforce", exception);
}
/**
* Get the row count for a time range
*/
private int getCountForRange(TableCountProbingContext probingContext, StrSubstitutor sub,
Map<String, String> subValues, long startTime, long endTime) {
String startTimeStr = Utils.dateToString(new Date(startTime), SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT);
String endTimeStr = Utils.dateToString(new Date(endTime), SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT);
subValues.put("start", startTimeStr);
subValues.put("end", endTimeStr);
String query = sub.replace(PROBE_PARTITION_QUERY_TEMPLATE);
log.info("Count query: {}", query);
probingContext.probeCount++;
JsonArray records = getRecordsForQuery(probingContext.connector, query);
Iterator<JsonElement> elements = records.iterator();
JsonObject element = elements.next().getAsJsonObject();
return element.get("cnt").getAsInt();
}
/**
* Compute the target partition size.
*/
private int computeTargetPartitionSize(Histogram histogram, int minTargetPartitionSize, int maxPartitions) {
return Math.max(minTargetPartitionSize,
DoubleMath.roundToInt((double) histogram.getTotalRecordCount() / maxPartitions, RoundingMode.CEILING));
}
/**
* Context for probing the table for row counts of a time range
*/
@RequiredArgsConstructor
public static class TableCountProbingContext {
private final SalesforceConnector connector;
private final String entity;
private final String watermarkColumn;
private final int bucketSizeLimit;
private final int probeLimit;
private int probeCount = 0;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import com.google.gson.JsonElement;
import com.sforce.async.AsyncApiException;
import com.sforce.async.AsyncExceptionCode;
import com.sforce.async.BulkConnection;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.Iterator;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.source.extractor.utils.InputStreamCSVReader;
import org.apache.gobblin.source.extractor.utils.Utils;
/**
* Iterator for fetching result file of Bulk API.
*/
/**
 * Iterator over the rows of one Salesforce Bulk API result file, exposing each CSV record as a
 * {@link JsonElement} keyed by the file's header columns.
 *
 * <p>The result stream is opened lazily on the first {@link #hasNext()}/{@link #next()} call. On a
 * fetch failure the stream is reopened and fast-forwarded past the already-consumed lines, up to
 * {@code retryLimit} attempts. ExceededQuota failures sleep for {@code retryExceedQuotaInterval}
 * and do not count against the retry limit. CSV parse errors are never retried.
 */
@Slf4j
public class BulkResultIterator implements Iterator<JsonElement> {
  private final FileIdVO fileIdVO;
  private final int retryLimit;
  private final BulkConnection conn;
  private InputStreamCSVReader csvReader;
  private List<String> header;
  private int columnSize;
  private int lineCount = 0; // this is different than currentFileRowCount. cvs file has header
  private final long retryInterval;
  private final long retryExceedQuotaInterval;
  // One record is always buffered ahead so hasNext() can detect end-of-stream.
  private List<String> preLoadedLine = null;

  public BulkResultIterator(BulkConnection conn, FileIdVO fileIdVO, int retryLimit, long retryInterval, long retryExceedQuotaInterval) {
    log.info("create BulkResultIterator: {} with retry limit as {} and retryInterval as {}", fileIdVO, retryLimit, retryInterval);
    this.retryInterval = retryInterval;
    this.retryExceedQuotaInterval = retryExceedQuotaInterval;
    this.conn = conn;
    this.fileIdVO = fileIdVO;
    this.retryLimit = retryLimit;
  }

  /**
   * read first data record from cvsReader and initiate header
   * not supposed to do it in constructor function, for delay creating file stream
   */
  private void initHeader() {
    this.header = this.nextLineWithRetry(); // first line is header
    this.columnSize = this.header.size();
    this.preLoadedLine = this.nextLineWithRetry(); // initialize: buffer one record data
  }

  /**
   * Fetches the next CSV record, reopening and re-seeking the stream on failure.
   * Returns {@code null} when the stream is exhausted.
   */
  private List<String> nextLineWithRetry() {
    Throwable rootCause = null;
    int executeCount = 0;
    while (executeCount < retryLimit + 1) {
      executeCount++;
      // Capture the 1-based attempt number before any quota-driven decrement so the retry
      // logs report it accurately (previously `executeCount + 1` overstated it by one).
      int attempt = executeCount;
      try {
        if (this.csvReader == null) {
          this.csvReader = openAndSeekCsvReader(rootCause);
        }
        List<String> line = this.csvReader.nextRecord();
        this.lineCount++;
        return line;
      } catch (InputStreamCSVReader.CSVParseException e) {
        throw new RuntimeException(e); // don't retry if it is parse error
      } catch (OpenAndSeekException e) {
        rootCause = e.getCause();
        // Each organization is allowed 10 concurrent long-running requests. If the limit is reached,
        // any new synchronous Apex request results in a runtime exception.
        if (e.isCurrentExceptionExceedQuota()) {
          log.warn("--Caught ExceededQuota: ", e);
          threadSleep(retryExceedQuotaInterval);
          executeCount--; // if the current exception is Quota Exceeded, keep trying forever
        }
        log.info("***Retrying***1: {} - Attempt {}/{}", fileIdVO, attempt, retryLimit, e);
        this.csvReader = null; // in next loop, call openAndSeekCsvReader
      } catch (Exception e) {
        // Retry may resolve other exceptions.
        rootCause = e;
        threadSleep(retryInterval);
        log.info("***Retrying***2: {} - Attempt {}/{}", fileIdVO, attempt, retryLimit, e);
        this.csvReader = null; // in next loop, call openAndSeekCsvReader
      }
    }
    // executeCount == 1 here only when retryLimit == 0, i.e. the single allowed attempt failed.
    if (executeCount == 1) {
      throw new RuntimeException("***Fetch***: Failed", rootCause);
    } else {
      throw new RuntimeException("***Retried***: Failed, tried " + retryLimit + " times - ", rootCause);
    }
  }

  /** Sleeps for {@code millis}; on interruption, restores the interrupt status before failing. */
  private void threadSleep(long millis) {
    try {
      Thread.sleep(millis);
    } catch (InterruptedException e) {
      // Preserve the thread's interrupt status so upstream code can observe the interruption.
      Thread.currentThread().interrupt();
      log.error("--Failed to sleep--", e);
      throw new RuntimeException(e);
    }
  }

  @Override
  public boolean hasNext() {
    if (this.header == null) {
      initHeader();
    }
    return this.preLoadedLine != null;
  }

  @Override
  public JsonElement next() {
    if (this.header == null) {
      initHeader();
    }
    JsonElement jsonObject = Utils.csvToJsonObject(this.header, this.preLoadedLine, this.columnSize);
    this.preLoadedLine = this.nextLineWithRetry();
    if (this.preLoadedLine == null) {
      log.info("----Record count: [{}] for {}", getRowCount(), fileIdVO);
    }
    return jsonObject;
  }

  /**
   * Reopens the bulk result stream and skips the {@code lineCount} lines already consumed,
   * verifying the last skipped line matches the buffered one to detect a corrupted re-seek.
   */
  private InputStreamCSVReader openAndSeekCsvReader(Throwable rootCause) throws OpenAndSeekException {
    while (rootCause != null && rootCause.getCause() != null) {
      rootCause = rootCause.getCause(); // find the root cause
    }
    String jobId = fileIdVO.getJobId();
    String batchId = fileIdVO.getBatchId();
    String resultId = fileIdVO.getResultId();
    log.info("Fetching [jobId={}, batchId={}, resultId={}]", jobId, batchId, resultId);
    closeCsvReader();
    try {
      InputStream is = conn.getQueryResultStream(jobId, batchId, resultId);
      BufferedReader br = new BufferedReader(new InputStreamReader(is, ConfigurationKeys.DEFAULT_CHARSET_ENCODING));
      csvReader = new InputStreamCSVReader(br);
      List<String> lastSkippedLine = null;
      for (int j = 0; j < lineCount; j++) {
        lastSkippedLine = csvReader.nextRecord(); // skip these records
      }
      if ((lastSkippedLine == null && preLoadedLine != null) || (lastSkippedLine != null && !lastSkippedLine.equals(
          preLoadedLine))) {
        // check if last skipped line is same as the line before error
        String msg = rootCause == null ? "null" : rootCause.getMessage();
        throw new OpenAndSeekException("Failed to verify last skipped line - root cause [" + msg + "]", rootCause);
      }
      return csvReader;
    } catch (Exception currentException) { // failed to open reader and skip lineCount lines // ssl failures go here
      Throwable cause = rootCause == null ? currentException : rootCause;
      throw new OpenAndSeekException("Failed to [" + cause.getMessage() + "]" , cause, currentException);
    }
  }

  /** Number of data rows emitted so far, excluding the header and the trailing null sentinel. */
  private int getRowCount() {
    // first line is header, last line is `null`,
    // because cvsReader doesn't have hasNext to check end of the stream, we will get null as last line
    return lineCount - 2;
  }

  /** Best-effort close of the current reader; IO errors during close are deliberately ignored. */
  private void closeCsvReader() {
    if (this.csvReader != null) {
      try {
        this.csvReader.close();
      } catch (IOException e) {
        // ignore the exception
      }
    }
  }
}
/**
 * Thrown when the bulk result stream cannot be reopened or fast-forwarded to the last
 * successfully read line. Carries whether the triggering failure was Salesforce's
 * ExceededQuota error, which callers retry without counting against the retry limit.
 */
class OpenAndSeekException extends Exception {
  // Final: decided once from the triggering exception at construction time.
  private final boolean _isCurrentExceptionExceedQuota;

  public OpenAndSeekException(String msg, Throwable rootCause) {
    super(msg, rootCause);
    _isCurrentExceptionExceedQuota = isExceededQuota(rootCause);
  }

  public OpenAndSeekException(String msg, Throwable rootCause, Exception currentException) {
    super(msg, rootCause);
    // The quota check applies to the exception just caught, not the historical root cause.
    _isCurrentExceptionExceedQuota = isExceededQuota(currentException);
  }

  public boolean isCurrentExceptionExceedQuota() {
    return _isCurrentExceptionExceedQuota;
  }

  /** True iff {@code t} is an {@link AsyncApiException} with the ExceededQuota error code. */
  private static boolean isExceededQuota(Throwable t) {
    return t instanceof AsyncApiException
        && ((AsyncApiException) t).getExceptionCode() == AsyncExceptionCode.ExceededQuota;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import java.io.ByteArrayInputStream;
import java.io.FileNotFoundException;
import java.net.URI;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.lang.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.NameValuePair;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.message.BasicNameValuePair;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.sforce.async.AsyncApiException;
import com.sforce.async.BatchInfo;
import com.sforce.async.BatchInfoList;
import com.sforce.async.BatchStateEnum;
import com.sforce.async.BulkConnection;
import com.sforce.async.ConcurrencyMode;
import com.sforce.async.ContentType;
import com.sforce.async.JobInfo;
import com.sforce.async.JobStateEnum;
import com.sforce.async.OperationEnum;
import com.sforce.async.QueryResultList;
import com.sforce.soap.partner.PartnerConnection;
import com.sforce.ws.ConnectorConfig;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.exception.HighWatermarkException;
import org.apache.gobblin.source.extractor.exception.RecordCountException;
import org.apache.gobblin.source.extractor.exception.RestApiClientException;
import org.apache.gobblin.source.extractor.exception.RestApiConnectionException;
import org.apache.gobblin.source.extractor.exception.SchemaException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiCommand;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiCommand.RestApiCommandType;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiConnector;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiExtractor;
import org.apache.gobblin.source.extractor.schema.Schema;
import org.apache.gobblin.source.extractor.utils.Utils;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.extractor.watermark.WatermarkType;
import org.apache.gobblin.source.jdbc.SqlQueryUtils;
import org.apache.gobblin.source.workunit.WorkUnit;
import static org.apache.gobblin.salesforce.SalesforceConfigurationKeys.PK_CHUNKING_BATCH_RESULT_ID_PAIRS;
import static org.apache.gobblin.salesforce.SalesforceConfigurationKeys.PK_CHUNKING_JOB_ID;
import static org.apache.gobblin.salesforce.SalesforceConfigurationKeys.SOURCE_QUERYBASED_SALESFORCE_IS_SOFT_DELETES_PULL_DISABLED;
/**
* An implementation of salesforce extractor for extracting data from SFDC
*/
@Slf4j
public class SalesforceExtractor extends RestApiExtractor {
  // REST resource that also returns soft-deleted/archived records.
  private static final String SOQL_RESOURCE = "/queryAll";
  public static final String SALESFORCE_TIMESTAMP_FORMAT = "yyyy-MM-dd'T'HH:mm:ss'.000Z'";
  private static final String SALESFORCE_DATE_FORMAT = "yyyy-MM-dd";
  private static final String SALESFORCE_HOUR_FORMAT = "HH";
  private static final String SALESFORCE_SOAP_SERVICE = "/services/Soap/u";
  /*
  This is the key in the response of the aggregate MAX query, and the corresponding
  value is the column value on which MAX is applied.
  */
  private static final String MAX_QUERY_RESPONSE_KEY = "expr0";
  private static final Gson GSON = new Gson();
  private static final int MAX_RETRY_INTERVAL_SECS = 600;
  // true while more pages remain to be pulled for the current extract.
  private boolean pullStatus = true;
  // URL of the next result page, when the REST response is paginated.
  private String nextUrl;
  private BulkConnection bulkConnection = null;
  private JobInfo bulkJob = new JobInfo();
  private final int pkChunkingSize;
  private final SalesforceConnector sfConnector;
  // Retry settings materialized from SfConfig in the constructor.
  private final int retryLimit;
  private final long retryInterval;
  private final long retryExceedQuotaInterval;
  private final boolean bulkApiUseQueryAll;
  private boolean isPkChunkingFetchDone = false;
/**
 * Builds an extractor for the given work unit, materializing the Salesforce-specific
 * settings (retry limits/intervals, PK-chunking size, queryAll flag) from the work
 * unit's properties via {@code SfConfig}.
 */
public SalesforceExtractor(WorkUnitState state) {
  super(state);
  // The base class created the connector; narrow it to the Salesforce implementation.
  this.sfConnector = (SalesforceConnector) this.connector;
  SfConfig config = new SfConfig(state.getProperties());
  this.retryLimit = config.fetchRetryLimit;
  this.retryInterval = config.retryInterval;
  this.retryExceedQuotaInterval = config.retryExceedQuotaInterval;
  this.pkChunkingSize = config.pkChunkingSize;
  this.bulkApiUseQueryAll = config.bulkApiUseQueryAll;
}
/** Supplies the Salesforce-specific REST connector used by the base extractor. */
@Override
protected RestApiConnector getConnector(WorkUnitState state) {
  RestApiConnector connector = new SalesforceConnector(state);
  return connector;
}
/**
 * Records whether a further pull is required: {@code true} when more pages remain,
 * {@code false} once the current extract is complete.
 */
private void setPullStatus(boolean pullStatus) {
  this.pullStatus = pullStatus;
}
/**
 * Records the URL of the next result page to pull from Salesforce ({@code null} when
 * the response is not paginated or pagination is exhausted).
 */
private void setNextUrl(String nextUrl) {
  this.nextUrl = nextUrl;
}
/**
 * Delegates authentication to the underlying Salesforce connector.
 * @return the HTTP entity of the authentication response
 */
@Override
public HttpEntity getAuthentication() throws RestApiConnectionException {
  log.debug("Authenticating salesforce");
  return this.connector.getAuthentication();
}
/**
 * Builds the GET command for the sObject "describe" endpoint, which returns the
 * field-level schema of {@code entity}.
 */
@Override
public List<Command> getSchemaMetadata(String schema, String entity) {
  log.debug("Build url to retrieve schema");
  String describePath = "/sobjects/" + entity.trim() + "/describe";
  return constructGetCommand(this.sfConnector.getFullUri(describePath));
}
/**
 * Converts the sObject "describe" REST response into the extractor's schema
 * representation: one JSON object per field, carrying name, converted data type,
 * length/precision/scale, nullability, label, default value and uniqueness.
 *
 * @throws SchemaException when the response is empty or a field cannot be mapped
 */
@Override
public JsonArray getSchema(CommandOutput<?, ?> response) throws SchemaException {
  log.info("Get schema from salesforce");
  Iterator<String> results = (Iterator<String>) response.getResults().values().iterator();
  if (!results.hasNext()) {
    throw new SchemaException("Failed to get schema from salesforce; REST response has no output");
  }
  String output = results.next();
  // Note: parsing of the top-level payload intentionally stays outside the try below,
  // matching the original error-propagation behavior for malformed responses.
  JsonObject describeResult = GSON.fromJson(output, JsonObject.class).getAsJsonObject();
  JsonArray columnSchemas = new JsonArray();
  try {
    for (JsonElement fieldElement : describeResult.getAsJsonArray("fields")) {
      JsonObject field = fieldElement.getAsJsonObject();
      String columnName = field.get("name").getAsString();
      String sourceType = field.get("type").getAsString();
      String elementDataType = "string";
      List<String> mapSymbols = null;
      JsonObject targetType = this.convertDataType(columnName, sourceType, elementDataType, mapSymbols);
      log.debug("ColumnName:" + columnName + "; old datatype:" + sourceType + "; new datatype:"
          + targetType);
      Schema columnSchema = new Schema();
      columnSchema.setColumnName(columnName);
      columnSchema.setDataType(targetType);
      columnSchema.setLength(field.get("length").getAsLong());
      columnSchema.setPrecision(field.get("precision").getAsInt());
      columnSchema.setScale(field.get("scale").getAsInt());
      columnSchema.setNullable(field.get("nillable").getAsBoolean());
      columnSchema.setFormat(null);
      JsonElement label = field.get("label");
      columnSchema.setComment(label.isJsonNull() ? null : label.getAsString());
      JsonElement defaultValue = field.get("defaultValue");
      columnSchema.setDefaultValue(defaultValue.isJsonNull() ? null : defaultValue.getAsString());
      columnSchema.setUnique(field.get("unique").getAsBoolean());
      // Round-trip through Gson to emit the schema as a plain JsonObject.
      columnSchemas.add(GSON.fromJson(GSON.toJson(columnSchema), JsonObject.class).getAsJsonObject());
    }
  } catch (Exception e) {
    throw new SchemaException("Failed to get schema from salesforce; error - " + e.getMessage(), e);
  }
  return columnSchemas;
}
/**
 * Builds the GET command for an aggregate {@code SELECT MAX(watermarkColumn)} query,
 * re-using the WHERE clause of the configured extract query (with any LIMIT removed)
 * and appending the supplied watermark predicates.
 */
@Override
public List<Command> getHighWatermarkMetadata(String schema, String entity, String watermarkColumn,
    List<Predicate> predicateList) throws HighWatermarkException {
  log.debug("Build url to retrieve high watermark");
  StringBuilder soql = new StringBuilder("SELECT MAX(").append(watermarkColumn).append(") FROM ").append(entity);
  if (this.updatedQuery != null) {
    // Carry over the WHERE clause of the configured extract query, if any.
    int whereIndex = this.updatedQuery.toLowerCase().indexOf(" where ");
    if (whereIndex > 0) {
      soql.append(this.updatedQuery.substring(whereIndex));
    }
  }
  String query = soql.toString();
  // Strip any LIMIT so MAX() is computed over the full range.
  query = query.replace(getLimitFromInputQuery(query), "");
  for (Predicate predicate : predicateList) {
    query = SqlQueryUtils.addPredicate(query, predicate.getCondition());
  }
  log.info("getHighWatermarkMetadata - QUERY: {}", query);
  try {
    return constructGetCommand(this.sfConnector.getFullUri(getSoqlUrl(query)));
  } catch (Exception e) {
    throw new HighWatermarkException("Failed to get salesforce url for high watermark", e);
  }
}
/**
 * Extracts the high watermark from the aggregate MAX query response.
 *
 * <p>Returns {@code ConfigurationKeys.DEFAULT_WATERMARK_VALUE} when the response has no
 * records or a null MAX value. When {@code format} is given, the value is parsed with it
 * and re-encoded as a {@code yyyyMMddHHmmss} long; otherwise it is parsed as a long.
 *
 * @throws HighWatermarkException when the response is empty or the value cannot be parsed
 */
@Override
public long getHighWatermark(CommandOutput<?, ?> response, String watermarkColumn, String format)
    throws HighWatermarkException {
  log.info("Get high watermark from salesforce");
  String output;
  Iterator<String> itr = (Iterator<String>) response.getResults().values().iterator();
  if (itr.hasNext()) {
    output = itr.next();
  } else {
    throw new HighWatermarkException("Failed to get high watermark from salesforce; REST response has no output");
  }
  JsonElement element = GSON.fromJson(output, JsonObject.class);
  long highTs;
  try {
    JsonObject jsonObject = element.getAsJsonObject();
    log.info("High watermark json object: {}", jsonObject.toString());
    JsonArray jsonArray = jsonObject.getAsJsonArray("records");
    if (jsonArray == null || jsonArray.size() == 0) {
      return ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
    }
    JsonElement hwmJsonElement = jsonArray.get(0).getAsJsonObject().get(MAX_QUERY_RESPONSE_KEY);
    if (hwmJsonElement == null || hwmJsonElement.isJsonNull()) {
      return ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
    }
    String value = hwmJsonElement.getAsString();
    if (format != null) {
      SimpleDateFormat inFormat = new SimpleDateFormat(format);
      // Let an unparseable watermark propagate to the catch below: previously a failed
      // parse was only logged, after which formatting the null Date threw an
      // uninformative NullPointerException instead of surfacing the ParseException.
      Date date = inFormat.parse(value);
      SimpleDateFormat outFormat = new SimpleDateFormat("yyyyMMddHHmmss");
      highTs = Long.parseLong(outFormat.format(date));
    } else {
      highTs = Long.parseLong(value);
    }
  } catch (Exception e) {
    throw new HighWatermarkException("Failed to get high watermark from salesforce; error - " + e.getMessage(), e);
  }
  return highTs;
}
/**
 * Builds the GET command for a {@code SELECT COUNT()} query over {@code entity},
 * re-using the WHERE clause of the configured extract query and appending the supplied
 * predicates. The original query's LIMIT (if any) is re-applied when predicates exist.
 */
@Override
public List<Command> getCountMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
    throws RecordCountException {
  log.debug("Build url to retrieve source record count");
  String existingPredicate = "";
  if (this.updatedQuery != null) {
    String queryLowerCase = this.updatedQuery.toLowerCase();
    int startIndex = queryLowerCase.indexOf(" where ");
    if (startIndex > 0) {
      existingPredicate = this.updatedQuery.substring(startIndex);
    }
  }
  String query = "SELECT COUNT() FROM " + entity + existingPredicate;
  String limitString = getLimitFromInputQuery(query);
  query = query.replace(limitString, "");
  try {
    if (isNullPredicate(predicateList)) {
      log.info("QUERY with null predicate: " + query);
      return constructGetCommand(this.sfConnector.getFullUri(getSoqlUrl(query)));
    }
    // Enhanced for loop, consistent with getHighWatermarkMetadata/getDataMetadata
    // (the explicit ListIterator here served no purpose).
    for (Predicate predicate : predicateList) {
      query = SqlQueryUtils.addPredicate(query, predicate.getCondition());
    }
    query = query + getLimitFromInputQuery(this.updatedQuery);
    log.info("getCountMetadata - QUERY: " + query);
    return constructGetCommand(this.sfConnector.getFullUri(getSoqlUrl(query)));
  } catch (Exception e) {
    throw new RecordCountException("Failed to get salesforce url for record count; error - " + e.getMessage(), e);
  }
}
/**
 * Reads the total record count ({@code totalSize}) out of a COUNT query response.
 *
 * @throws RecordCountException when the response is empty or lacks a usable count
 */
@Override
public long getCount(CommandOutput<?, ?> response) throws RecordCountException {
  log.info("Get source record count from salesforce");
  Iterator<String> results = (Iterator<String>) response.getResults().values().iterator();
  if (!results.hasNext()) {
    throw new RecordCountException("Failed to get count from salesforce; REST response has no output");
  }
  String output = results.next();
  // Parsing stays outside the try, matching the original propagation for malformed JSON.
  JsonElement parsed = GSON.fromJson(output, JsonObject.class);
  try {
    return parsed.getAsJsonObject().get("totalSize").getAsLong();
  } catch (Exception e) {
    throw new RecordCountException("Failed to get record count from salesforce; error - " + e.getMessage(), e);
  }
}
  /**
   * Builds the REST command for pulling data records.
   *
   * When a previous {@link #getData} left a non-null pagination URL and the pull is still in
   * progress, that URL is reused directly; otherwise a fresh SOQL URL is assembled from the
   * configured query plus the watermark predicates.
   *
   * @throws DataRecordException if the data URL cannot be constructed
   */
  @Override
  public List<Command> getDataMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
      throws DataRecordException {
    log.debug("Build url to retrieve data records");
    String query = this.updatedQuery;
    String url = null;
    try {
      if (this.getNextUrl() != null && this.pullStatus) {
        // Continue paginating through an in-flight result set.
        url = this.getNextUrl();
      } else {
        if (isNullPredicate(predicateList)) {
          log.info("getDataMetaData null predicate - QUERY:" + query);
          return constructGetCommand(this.sfConnector.getFullUri(getSoqlUrl(query)));
        }
        // Strip the LIMIT clause so predicates can be appended, then re-attach it at the end.
        String limitString = getLimitFromInputQuery(query);
        query = query.replace(limitString, "");
        for (Predicate predicate : predicateList) {
          query = SqlQueryUtils.addPredicate(query, predicate.getCondition());
        }
        if (Boolean.parseBoolean(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_IS_SPECIFIC_API_ACTIVE))) {
          // The "specific API" pull path fetches soft-deleted records only.
          query = SqlQueryUtils.addPredicate(query, "IsDeleted = true");
        }
        query = query + limitString;
        log.info("getDataMetadata - QUERY: " + query);
        url = this.sfConnector.getFullUri(getSoqlUrl(query));
      }
      return constructGetCommand(url);
    } catch (Exception e) {
      throw new DataRecordException("Failed to get salesforce url for data records; error - " + e.getMessage(), e);
    }
  }
private static String getLimitFromInputQuery(String query) {
String inputQuery = query.toLowerCase();
int limitIndex = inputQuery.indexOf(" limit");
if (limitIndex > 0) {
return query.substring(limitIndex);
}
return "";
}
  /**
   * Parses a REST query response into a batch of JSON records.
   *
   * Side effects: when the payload reports {@code done=true} the pull status is cleared;
   * otherwise the "nextRecordsUrl" is recorded so the next {@link #getDataMetadata} call
   * continues pagination.
   *
   * @throws DataRecordException when the response is empty or cannot be parsed
   */
  @Override
  public Iterator<JsonElement> getData(CommandOutput<?, ?> response) throws DataRecordException {
    log.debug("Get data records from response");
    String output;
    Iterator<String> itr = (Iterator<String>) response.getResults().values().iterator();
    if (itr.hasNext()) {
      output = itr.next();
    } else {
      throw new DataRecordException("Failed to get data from salesforce; REST response has no output");
    }
    List<JsonElement> records = Lists.newArrayList();
    JsonElement element = GSON.fromJson(output, JsonObject.class);
    JsonArray partRecords;
    try {
      JsonObject jsonObject = element.getAsJsonObject();
      partRecords = jsonObject.getAsJsonArray("records");
      if (jsonObject.get("done").getAsBoolean()) {
        setPullStatus(false);
      } else {
        // Salesforce returns an absolute-ish path; strip the versioned env path before
        // rebuilding the full URI through the connector.
        setNextUrl(this.sfConnector.getFullUri(
            jsonObject.get("nextRecordsUrl").getAsString().replaceAll(this.sfConnector.getServicesDataEnvPath(), "")));
      }
      // Drop the per-record "attributes" metadata object Salesforce adds to each row.
      JsonArray array = Utils.removeElementFromJsonArray(partRecords, "attributes");
      for (JsonElement recordElement : array) {
        records.add(recordElement);
      }
      return records.iterator();
    } catch (Exception e) {
      throw new DataRecordException("Failed to get records from salesforce", e);
    }
  }
@Override
public boolean getPullStatus() {
return this.pullStatus;
}
@Override
public String getNextUrl() {
return this.nextUrl;
}
public static String getSoqlUrl(String soqlQuery) throws RestApiClientException {
String path = SOQL_RESOURCE + "/";
NameValuePair pair = new BasicNameValuePair("q", soqlQuery);
List<NameValuePair> qparams = new ArrayList<>();
qparams.add(pair);
return buildUrl(path, qparams);
}
private static String buildUrl(String path, List<NameValuePair> qparams) throws RestApiClientException {
URIBuilder builder = new URIBuilder();
builder.setPath(path);
for (NameValuePair keyValue : qparams) {
builder.setParameter(keyValue.getName(), keyValue.getValue());
}
URI uri;
try {
uri = builder.build();
} catch (Exception e) {
throw new RestApiClientException("Failed to build url; error - " + e.getMessage(), e);
}
return new HttpGet(uri).getURI().toString();
}
private static boolean isNullPredicate(List<Predicate> predicateList) {
return predicateList == null || predicateList.isEmpty();
}
@Override
public String getWatermarkSourceFormat(WatermarkType watermarkType) {
switch (watermarkType) {
case TIMESTAMP:
return "yyyy-MM-dd'T'HH:mm:ss";
case DATE:
return "yyyy-MM-dd";
default:
return null;
}
}
@Override
public String getHourPredicateCondition(String column, long value, String valueFormat, String operator) {
log.info("Getting hour predicate from salesforce");
String formattedValue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, SALESFORCE_HOUR_FORMAT);
return column + " " + operator + " " + formattedValue;
}
@Override
public String getDatePredicateCondition(String column, long value, String valueFormat, String operator) {
log.info("Getting date predicate from salesforce");
String formattedValue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, SALESFORCE_DATE_FORMAT);
return column + " " + operator + " " + formattedValue;
}
@Override
public String getTimestampPredicateCondition(String column, long value, String valueFormat, String operator) {
log.info("Getting timestamp predicate from salesforce");
String formattedValue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, SALESFORCE_TIMESTAMP_FORMAT);
return column + " " + operator + " " + formattedValue;
}
  /**
   * Maps Salesforce field type names to the extractor's primitive type names.
   * Most textual/opaque Salesforce types collapse to "string"; numeric and temporal
   * types map to their closest primitive equivalent.
   */
  @Override
  public Map<String, String> getDataTypeMap() {
    return ImmutableMap.<String, String>builder().put("url", "string")
        .put("textarea", "string").put("reference", "string").put("phone", "string").put("masterrecord", "string")
        .put("location", "string").put("id", "string").put("encryptedstring", "string").put("email", "string")
        .put("DataCategoryGroupReference", "string").put("calculated", "string").put("anyType", "string")
        .put("address", "string").put("blob", "string").put("date", "date").put("datetime", "timestamp")
        .put("time", "time").put("object", "string").put("string", "string").put("int", "int").put("long", "long")
        .put("double", "double").put("percent", "double").put("currency", "double").put("decimal", "double")
        .put("boolean", "boolean").put("picklist", "string").put("multipicklist", "string").put("combobox", "string")
        .put("list", "string").put("set", "string").put("map", "string").put("enum", "string").build();
  }
  /**
   * Downloads all records for a PK-chunking work unit. The bulk job and its batches were
   * created earlier (at partitioning time); this method only fetches the result files whose
   * ids are listed in the work unit. Returns null on the second and later calls to signal
   * end of data to the framework.
   */
  private Iterator<JsonElement> fetchRecordSetPkChunking(WorkUnit workUnit) {
    if (isPkChunkingFetchDone) {
      return null; // must return null to represent no more data.
    }
    log.info("----Get records for pk-chunking----" + workUnit.getProp(PK_CHUNKING_JOB_ID));
    isPkChunkingFetchDone = true; // set to true, never come here twice.
    bulkApiLogin();
    String jobId = workUnit.getProp(PK_CHUNKING_JOB_ID);
    String batchIdResultIdPairString = workUnit.getProp(PK_CHUNKING_BATCH_RESULT_ID_PAIRS);
    List<FileIdVO> fileIdList = this.parseBatchIdResultIdString(jobId, batchIdResultIdPairString);
    return new ResultChainingIterator(bulkConnection, fileIdList, retryLimit, retryInterval, retryExceedQuotaInterval);
  }
private List<FileIdVO> parseBatchIdResultIdString(String jobId, String batchIdResultIdString) {
return Arrays.stream(batchIdResultIdString.split(","))
.map( x -> x.split(":")).map(x -> new FileIdVO(jobId, x[0], x[1]))
.collect(Collectors.toList());
}
  // Guards fetchRecordSet so the bulk query is submitted at most once per extractor instance.
  private Boolean isBulkFetchDone = false;
  /**
   * Runs the bulk-API query for this work unit and returns an iterator over all of its
   * result files, with soft-deleted records (fetched via the REST API, when enabled)
   * chained at the end. Returns null on subsequent calls to indicate no more data.
   */
  private Iterator<JsonElement> fetchRecordSet(
      String schema,
      String entity,
      WorkUnit workUnit,
      List<Predicate> predicateList
  ) {
    if (isBulkFetchDone) {
      return null; // need to return null to indicate no more data.
    }
    isBulkFetchDone = true;
    log.info("----Get records for bulk batch job----");
    try {
      // Submits the bulk query (creating the job) and collects the (batchId, resultId) pairs.
      List<BatchIdAndResultId> batchIdAndResultIds = getQueryResultIds(entity, predicateList);
      log.info("Number of bulk api resultSet Ids:" + batchIdAndResultIds.size());
      List<FileIdVO> fileIdVoList = batchIdAndResultIds.stream()
          .map(batchIdAndResultId -> new FileIdVO(this.bulkJob.getId(), batchIdAndResultId.batchId, batchIdAndResultId.resultId))
          .collect(Collectors.toList());
      ResultChainingIterator chainingIter = new ResultChainingIterator(
          bulkConnection, fileIdVoList, retryLimit, retryInterval, retryExceedQuotaInterval);
      // Soft deletes are not returned by the bulk query; append them from the REST API.
      chainingIter.add(getSoftDeletedRecords(schema, entity, workUnit, predicateList));
      return chainingIter;
    } catch (Exception e) {
      throw new RuntimeException("Failed to get records using bulk api", e);
    }
  }
@Override
public Iterator<JsonElement> getRecordSetFromSourceApi(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList) {
log.debug("Getting salesforce data using bulk api");
if (workUnit.contains(PK_CHUNKING_JOB_ID)) {
return fetchRecordSetPkChunking(workUnit);
} else {
return fetchRecordSet(schema, entity, workUnit, predicateList);
}
}
  /**
   * Get soft deleted records using the REST API.
   *
   * Applies only when the pulled column list contains "IsDeleted" and soft-delete pulling
   * has not been explicitly disabled; otherwise returns null so no deleted-record iterator
   * is chained.
   *
   * @return iterator with deleted records, or null when soft deletes are not pulled
   */
  private Iterator<JsonElement> getSoftDeletedRecords(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
      throws DataRecordException {
    boolean disableSoftDeletePull = this.workUnit.getPropAsBoolean(SOURCE_QUERYBASED_SALESFORCE_IS_SOFT_DELETES_PULL_DISABLED);
    if (this.columnList.contains("IsDeleted") && !disableSoftDeletePull) {
      return new QueryResultIterator(this, schema, entity, workUnit, predicateList);
    } else {
      log.info("Ignoring soft delete records");
      return null;
    }
  }
private void bulkApiLogin() {
try {
if (!doBulkApiLogin()) {
throw new RuntimeException("invalid login");
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
  /**
   * Authenticates against the Salesforce bulk API and initializes {@code this.bulkConnection}.
   *
   * Reuses an access token from the REST connector when available (connecting it first if
   * needed); otherwise falls back to username/password+security-token SOAP login. The SOAP
   * login side-populates the service endpoint, from which the async REST endpoint is derived.
   *
   * @return true once the bulk connection has been established
   */
  private boolean doBulkApiLogin() throws Exception {
    log.info("Authenticating salesforce bulk api");
    boolean success = false;
    String hostName = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME);
    String apiVersion = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_VERSION);
    if (Strings.isNullOrEmpty(apiVersion)) {
      // queryAll was introduced in version 39.0, so need to use a higher version when using queryAll with the bulk api
      apiVersion = this.bulkApiUseQueryAll ? "42.0" : "29.0";
    }
    String soapAuthEndPoint = hostName + SALESFORCE_SOAP_SERVICE + "/" + apiVersion;
    try {
      ConnectorConfig partnerConfig = new ConnectorConfig();
      if (super.workUnitState.contains(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL)
          && !super.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL).isEmpty()) {
        partnerConfig.setProxy(super.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL),
            super.workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT));
      }
      // Prefer an OAuth access token from the REST connector, connecting it lazily.
      String accessToken = sfConnector.getAccessToken();
      if (accessToken == null) {
        boolean isConnectSuccess = sfConnector.connect();
        if (isConnectSuccess) {
          accessToken = sfConnector.getAccessToken();
        }
      }
      if (accessToken != null) {
        String serviceEndpoint = sfConnector.getInstanceUrl() + SALESFORCE_SOAP_SERVICE + "/" + apiVersion;
        partnerConfig.setSessionId(accessToken);
        partnerConfig.setServiceEndpoint(serviceEndpoint);
      } else {
        // Fall back to username + password + security token (appended per Salesforce rules).
        String securityToken = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_SECURITY_TOKEN);
        String password = PasswordManager.getInstance(this.workUnitState)
            .readPassword(this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_PASSWORD));
        partnerConfig.setUsername(this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USERNAME));
        partnerConfig.setPassword(password + securityToken);
      }
      partnerConfig.setAuthEndpoint(soapAuthEndPoint);
      // Constructed for its side effect: logging in and populating the service endpoint.
      new PartnerConnection(partnerConfig);
      String soapEndpoint = partnerConfig.getServiceEndpoint();
      // Derive the async (bulk) REST endpoint from the SOAP endpoint.
      String restEndpoint = soapEndpoint.substring(0, soapEndpoint.indexOf("Soap/")) + "async/" + apiVersion;
      ConnectorConfig config = createConfig();
      config.setSessionId(partnerConfig.getSessionId());
      config.setRestEndpoint(restEndpoint);
      this.bulkConnection = getBulkConnection(config);
      success = true;
    } catch (RuntimeException e) {
      throw new RuntimeException("Failed to connect to salesforce bulk api; error - " + e, e);
    }
    return success;
  }
  /**
   * Same as getQueryResultIdsPkChunking but the arguments are different:
   * this function can take existing batch ids to return a ResultFileIdsStruct.
   * It is for test/debug - developers may want to skip executing the query on SFDC and
   * instead use a list of existing batch ids.
   */
  public ResultFileIdsStruct getQueryResultIdsPkChunkingFetchOnly(String jobId, String batchIdListStr) {
    bulkApiLogin();
    try {
      // NOTE(review): units look inconsistent here - MAX_RETRY_INTERVAL_SECS is scaled to
      // millis but "30 + pkChunkingSize * 2" appears to be a much smaller magnitude; confirm
      // the intended wait duration.
      int retryInterval = Math.min(MAX_RETRY_INTERVAL_SECS * 1000, 30 + this.pkChunkingSize * 2);
      if (StringUtils.isNotEmpty(batchIdListStr)) {
        log.info("The batchId is specified.");
        return retrievePkChunkingResultIdsByBatchId(this.bulkConnection, jobId, batchIdListStr);
      } else {
        ResultFileIdsStruct resultStruct = retrievePkChunkingResultIds(this.bulkConnection, jobId, retryInterval);
        if (resultStruct.getBatchIdAndResultIdList().isEmpty()) {
          String msg = String.format("There are no result for the [jobId: %s, batchIds: %s]", jobId, batchIdListStr);
          throw new RuntimeException(msg);
        }
        return resultStruct;
      }
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
  /**
   * Factory for BulkConnection instances; overridable (e.g. by tests) to substitute the
   * connection implementation.
   *
   * @param config fully populated connector configuration (session id + REST endpoint)
   * @return a new BulkConnection bound to the configuration
   * @throws AsyncApiException if the connection cannot be created
   */
  public BulkConnection getBulkConnection(ConnectorConfig config) throws AsyncApiException {
    return new BulkConnection(config);
  }
  /**
   * Creates a PK-chunking bulk job, submits the (predicate-augmented) query, waits for the
   * initial batch to leave the Queued/InProgress states, and then collects the result-file
   * ids of all chunk batches.
   *
   * This function currently only serves pk-chunking; it is derived from getQueryResultIds.
   * TODO: abstract this function to a common function: arguments need to add connection, header, output-format
   * TODO: make it and its related functions pure function (no side effect). Currently still unnecessarily changing this.bulkJob)
   */
  public ResultFileIdsStruct getQueryResultIdsPkChunking(String entity, List<Predicate> predicateList) {
    bulkApiLogin();
    try {
      BulkConnection connection = this.bulkConnection;
      JobInfo jobRequest = new JobInfo();
      jobRequest.setObject(entity);
      jobRequest.setOperation(OperationEnum.queryAll);
      jobRequest.setConcurrencyMode(ConcurrencyMode.Parallel);
      log.info("Enabling pk chunking with size {}", this.pkChunkingSize);
      connection.addHeader("Sforce-Enable-PKChunking", "chunkSize=" + this.pkChunkingSize);
      // Result type as CSV
      jobRequest.setContentType(ContentType.CSV);
      JobInfo createdJob = connection.createJob(jobRequest);
      log.info("Created bulk job: {}", createdJob.getId());
      this.bulkJob = createdJob; // other functions need to use it TODO: remove bulkJob from this class
      String jobId = createdJob.getId();
      JobInfo jobResponse = connection.getJobStatus(jobId);
      // Construct query with the predicates
      String query = this.updatedQuery;
      if (!isNullPredicate(predicateList)) {
        // Strip the LIMIT clause while predicates are appended, then re-attach it.
        String limitString = getLimitFromInputQuery(query);
        query = query.replace(limitString, "");
        for (Predicate predicate : predicateList) {
          query = SqlQueryUtils.addPredicate(query, predicate.getCondition());
        }
        query = query + limitString;
      }
      log.info("Submitting PK Chunking query:" + query);
      ByteArrayInputStream bout = new ByteArrayInputStream(query.getBytes(ConfigurationKeys.DEFAULT_CHARSET_ENCODING));
      BatchInfo executeQueryBatch = connection.createBatchFromStream(jobResponse, bout);
      String pkChunkingBatchId = executeQueryBatch.getId();
      int waitMilliSecond = 60 * 1000;
      // Get batch info with complete resultset (info id - refers to the resultset id corresponding to entire resultset)
      BatchInfo batchResponse = connection.getBatchInfo(jobId, pkChunkingBatchId);
      // wait for completion, failure, or formation of PK chunking batches
      // user query will be submitted to sfdc and create the first batch,
      // It is supposed to create more batch after the initial batch
      BatchStateEnum batchState = batchResponse.getState();
      while (batchState == BatchStateEnum.InProgress || batchState == BatchStateEnum.Queued) {
        Thread.sleep(waitMilliSecond);
        batchResponse = connection.getBatchInfo(createdJob.getId(), executeQueryBatch.getId());
        log.info("Waiting for first batch (jobId={}, pkChunkingBatchId={})", jobId, pkChunkingBatchId);
        batchState = batchResponse.getState();
      }
      if (batchResponse.getState() == BatchStateEnum.Failed) {
        log.error("Bulk batch failed: " + batchResponse.toString());
        throw new Exception("Failed to get bulk batch info for jobId " + jobId + " error - " + batchResponse.getStateMessage());
      }
      return retrievePkChunkingResultIds(connection, jobId, waitMilliSecond);
    } catch (Exception e) {
      throw new RuntimeException("getQueryResultIdsPkChunking: error - " + e.getMessage(), e);
    }
  }
  /**
   * Submits the (predicate-augmented) query as a bulk batch, polls with exponential backoff
   * until the batch leaves the Queued/InProgress states, then collects the result-file ids
   * of every batch in the job.
   *
   * @param entity the table/object name to query
   * @param predicateList all predicate conditions to apply
   * @return list of (batchId, resultId) pairs identifying the result files
   */
  private List<BatchIdAndResultId> getQueryResultIds(String entity, List<Predicate> predicateList) {
    bulkApiLogin();
    try {
      // Set bulk job attributes
      this.bulkJob.setObject(entity);
      this.bulkJob.setOperation(this.bulkApiUseQueryAll ? OperationEnum.queryAll : OperationEnum.query);
      this.bulkJob.setConcurrencyMode(ConcurrencyMode.Parallel);
      // Result type as CSV
      this.bulkJob.setContentType(ContentType.CSV);
      this.bulkJob = this.bulkConnection.createJob(this.bulkJob);
      log.info("Created bulk job [jobId={}]", this.bulkJob.getId());
      this.bulkJob = this.bulkConnection.getJobStatus(this.bulkJob.getId());
      // Construct query with the predicates
      String query = this.updatedQuery;
      if (!isNullPredicate(predicateList)) {
        // Strip the LIMIT clause while predicates are appended, then re-attach it.
        String limitString = getLimitFromInputQuery(query);
        query = query.replace(limitString, "");
        for (Predicate predicate : predicateList) {
          query = SqlQueryUtils.addPredicate(query, predicate.getCondition());
        }
        query = query + limitString;
      }
      log.info("getQueryResultIds - QUERY: {}", query);
      ByteArrayInputStream bout = new ByteArrayInputStream(query.getBytes(ConfigurationKeys.DEFAULT_CHARSET_ENCODING));
      BatchInfo bulkBatchInfo = this.bulkConnection.createBatchFromStream(this.bulkJob, bout);
      // Get batch info with complete resultset (info id - refers to the resultset id corresponding to entire resultset)
      bulkBatchInfo = this.bulkConnection.getBatchInfo(this.bulkJob.getId(), bulkBatchInfo.getId());
      // wait for completion, failure, or formation of PK chunking batches
      // if it is InProgress or Queued, continue to wait.
      int count = 0;
      long minWaitTimeInMilliSeconds = super.workUnitState.getPropAsLong(
          ConfigurationKeys.EXTRACT_SALESFORCE_BULK_API_MIN_WAIT_TIME_IN_MILLIS_KEY,
          ConfigurationKeys.DEFAULT_EXTRACT_SALESFORCE_BULK_API_MIN_WAIT_TIME_IN_MILLIS);
      long maxWaitTimeInMilliSeconds = super.workUnitState.getPropAsLong(
          ConfigurationKeys.EXTRACT_SALESFORCE_BULK_API_MAX_WAIT_TIME_IN_MILLIS_KEY,
          ConfigurationKeys.DEFAULT_EXTRACT_SALESFORCE_BULK_API_MAX_WAIT_TIME_IN_MILLIS);
      while (bulkBatchInfo.getState() == BatchStateEnum.InProgress || bulkBatchInfo.getState() == BatchStateEnum.Queued) {
        log.info("Waiting for bulk resultSetIds | Job ID: {} | State: {} | State message: {} |"
                + " Num records processed: {} | Num records failed: {}",
            bulkBatchInfo.getJobId(), bulkBatchInfo.getState(), bulkBatchInfo.getStateMessage(),
            bulkBatchInfo.getNumberRecordsProcessed(), bulkBatchInfo.getNumberRecordsFailed());
        // Exponential backoff
        long waitMilliSeconds = Math.min((long) (Math.pow(2, count) * minWaitTimeInMilliSeconds), maxWaitTimeInMilliSeconds);
        Thread.sleep(waitMilliSeconds);
        bulkBatchInfo = this.bulkConnection.getBatchInfo(this.bulkJob.getId(), bulkBatchInfo.getId());
        count++;
      }
      // Wait for pk chunking batches
      BatchInfoList batchInfoList = this.bulkConnection.getBatchInfoList(this.bulkJob.getId());
      BatchStateEnum state = bulkBatchInfo.getState();
      // not sure if the state can be "NotProcessed" in non-pk-chunking bulk request
      // SFDC doc says - This state is assigned when a job is aborted while the batch is queued
      if (state == BatchStateEnum.Failed || state == BatchStateEnum.InProgress) {
        log.error("Bulk batch failed: {}", bulkBatchInfo);
        throw new RuntimeException("Failed to get bulk batch info for jobId " + bulkBatchInfo.getJobId()
            + " error - " + bulkBatchInfo.getStateMessage());
      }
      // Get resultset ids of all the batches from the batch info list
      List<BatchIdAndResultId> batchIdAndResultIdList = Lists.newArrayList();
      for (BatchInfo bi : batchInfoList.getBatchInfo()) {
        QueryResultList list = this.bulkConnection.getQueryResultList(this.bulkJob.getId(), bi.getId());
        for (String result : list.getResult()) {
          batchIdAndResultIdList.add(new BatchIdAndResultId(bi.getId(), result));
        }
      }
      log.info("QueryResultList: " + batchIdAndResultIdList);
      return batchIdAndResultIdList;
    } catch (RuntimeException | AsyncApiException | InterruptedException e) {
      throw new RuntimeException("Failed to get query result ids from salesforce using bulk api", e);
    }
  }
@Override
public void closeConnection() throws Exception {
if (this.bulkConnection != null
&& !JobStateEnum.Closed.equals(this.bulkConnection.getJobStatus(this.getBulkJobId()).getState())) {
log.info("Closing salesforce bulk job connection");
this.bulkConnection.closeJob(this.getBulkJobId());
}
this.sfConnector.close();
}
private static List<Command> constructGetCommand(String restQuery) {
return Collections.singletonList(new RestApiCommand().build(Collections.singletonList(restQuery), RestApiCommandType.GET));
}
private ResultFileIdsStruct retrievePkChunkingResultIdsByBatchId(BulkConnection connection, String jobId, String batchIdListStr) {
Iterator<String> batchIds = Arrays.stream(batchIdListStr.split(",")).map(String::trim).filter(x -> !x.isEmpty()).iterator();
try {
List<BatchIdAndResultId> batchIdAndResultIdList = fetchBatchResultIds(connection, jobId, batchIds);
return new ResultFileIdsStruct(jobId, batchIdAndResultIdList);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
  /**
   * Waits for the PK batches to complete. The wait stops after all batches are complete or
   * on the first failed batch, then collects the result-file ids of every batch that
   * produced records.
   *
   * A PK-chunking job is expected to contain exactly one NotProcessed batch (the original
   * user query batch); anything else is treated as an error.
   */
  private ResultFileIdsStruct retrievePkChunkingResultIds(BulkConnection connection, String jobId, int waitMilliSecond) {
    log.info("Waiting for completion of the the bulk job [jobId={}])'s sub queries.", jobId);
    try {
      while (true) {
        BatchInfoList batchInfoList = connection.getBatchInfoList(jobId);
        BatchInfo[] batchInfos = batchInfoList.getBatchInfo();
        if (needContinueToPoll(batchInfos, waitMilliSecond)) {
          continue; // continue to wait
        }
        if (Arrays.stream(batchInfos).filter(x -> x.getState() == BatchStateEnum.NotProcessed).count() != 1) {
          throw new Exception("PK-Chunking query should have 1 and only 1 batch with state=NotProcessed.");
        }
        // Skip empty batches; only batches that processed records have result files.
        Stream<BatchInfo> stream = Arrays.stream(batchInfos);
        Iterator<String> batchIds = stream.filter(x -> x.getNumberRecordsProcessed() != 0).map(BatchInfo::getId).iterator();
        List<BatchIdAndResultId> batchIdAndResultIdList = fetchBatchResultIds(connection, jobId, batchIds);
        return new ResultFileIdsStruct(jobId, batchIdAndResultIdList);
      }
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
private List<BatchIdAndResultId> fetchBatchResultIds(BulkConnection connection, String jobId, Iterator<String> batchIds) throws Exception {
List<BatchIdAndResultId> batchIdAndResultIdList = Lists.newArrayList();
while (batchIds.hasNext()) {
String batchId = batchIds.next();
QueryResultList result = connection.getQueryResultList(jobId, batchId);
Stream<String> stream = Arrays.stream(result.getResult());
Iterator<BatchIdAndResultId> it = stream.map(rId -> new BatchIdAndResultId(batchId, rId)).iterator();
Iterators.addAll(batchIdAndResultIdList, it);
}
return batchIdAndResultIdList;
}
private Boolean needContinueToPoll(BatchInfo[] batchInfos, long toWait) {
long queued = Arrays.stream(batchInfos).filter(x -> x.getState() == BatchStateEnum.Queued).count();
long inProgress = Arrays.stream(batchInfos).filter(x -> x.getState() == BatchStateEnum.InProgress).count();
for (BatchInfo bi : batchInfos) {
BatchStateEnum state = bi.getState();
if (state == BatchStateEnum.InProgress || state == BatchStateEnum.Queued) {
try {
log.info("Total: {}, queued: {}, InProgress: {}, waiting for [batchId: {}, state: {}]", batchInfos.length, queued, inProgress, bi.getId(), state);
Thread.sleep(toWait);
} catch (InterruptedException e) { // skip
}
return true; // need to continue to wait
}
if (state == BatchStateEnum.Failed) {
throw new RuntimeException(String.format("[batchId=%s] failed", bi.getId()));
}
}
return false; // no need to wait
}
//Moving config creation in a separate method for custom config parameters like setting up transport factory.
public ConnectorConfig createConfig() {
ConnectorConfig config = new ConnectorConfig();
config.setCompression(true);
try {
config.setTraceFile("traceLogs.txt");
} catch (FileNotFoundException e) {
e.printStackTrace();
}
config.setTraceMessage(false);
config.setPrettyPrintXml(true);
if (super.workUnitState.contains(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL)
&& !super.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL).isEmpty()) {
config.setProxy(super.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL),
super.workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT));
}
return config;
}
public String getBulkJobId() {
return workUnit.getProp(PK_CHUNKING_JOB_ID, this.bulkJob.getId());
}
  /**
   * Immutable (batchId, resultId) pair identifying one result file of a bulk batch.
   * Lombok {@code @Data} generates the getters, equals/hashCode and toString.
   */
  @Data
  public static class BatchIdAndResultId {
    private final String batchId;
    private final String resultId;
  }
  /**
   * Immutable pairing of a bulk job id with all of its (batchId, resultId) result files.
   * Lombok {@code @Data} generates the getters, equals/hashCode and toString.
   */
  @Data
  public static class ResultFileIdsStruct {
    private final String jobId;
    private final List<BatchIdAndResultId> batchIdAndResultIdList;
  }
}
| 4,650 |
0 | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/SalesforceConnector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import java.io.Closeable;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.message.BasicNameValuePair;
import com.google.common.collect.Lists;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.source.extractor.exception.RestApiConnectionException;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiConnector;
/**
 * An extension of {@link RestApiConnector} for Salesforce API.
 *
 * Authentication supports two OAuth2 flows: the "password" grant (used when both a username
 * and a security token are configured) and the "refresh_token" grant (used otherwise).
 */
@Slf4j
public class SalesforceConnector extends RestApiConnector {
  private static final String DEFAULT_SERVICES_DATA_PATH = "/services/data";
  private static final String DEFAULT_AUTH_TOKEN_PATH = "/services/oauth2/token";
  // Null when the password grant applies; otherwise holds the configured refresh token.
  protected String refreshToken;
  public SalesforceConnector(State state) {
    super(state);
    if (isPasswordGrant(state)) {
      this.refreshToken = null;
    } else {
      this.refreshToken = state.getProp(ConfigurationKeys.SOURCE_CONN_REFRESH_TOKEN);
    }
  }
  // Versioned "/services/data/vXX" segment; populated as a side effect of getServiceBaseUrl().
  @Getter
  private String servicesDataEnvPath;
  /**
   * Requests an OAuth2 token from Salesforce and returns the raw response entity.
   * The client id/secret may optionally be decrypted through PasswordManager first.
   *
   * @throws RestApiConnectionException if the token request fails
   */
  @Override
  public HttpEntity getAuthentication() throws RestApiConnectionException {
    log.debug("Authenticating salesforce");
    String clientId = this.state.getProp(ConfigurationKeys.SOURCE_CONN_CLIENT_ID);
    String clientSecret = this.state.getProp(ConfigurationKeys.SOURCE_CONN_CLIENT_SECRET);
    if (this.state.getPropAsBoolean(ConfigurationKeys.SOURCE_CONN_DECRYPT_CLIENT_SECRET, false)) {
      PasswordManager passwordManager = PasswordManager.getInstance(this.state);
      clientId = passwordManager.readPassword(clientId);
      clientSecret = passwordManager.readPassword(clientSecret);
    }
    String host = this.state.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME);
    List<NameValuePair> formParams = Lists.newArrayList();
    formParams.add(new BasicNameValuePair("client_id", clientId));
    formParams.add(new BasicNameValuePair("client_secret", clientSecret));
    if (refreshToken == null) {
      log.info("Authenticating salesforce with username/password");
      String userName = this.state.getProp(ConfigurationKeys.SOURCE_CONN_USERNAME);
      String password = PasswordManager.getInstance(this.state)
          .readPassword(this.state.getProp(ConfigurationKeys.SOURCE_CONN_PASSWORD));
      String securityToken = PasswordManager.getInstance(this.state)
          .readPassword(this.state.getProp(ConfigurationKeys.SOURCE_CONN_SECURITY_TOKEN));
      formParams.add(new BasicNameValuePair("grant_type", "password"));
      formParams.add(new BasicNameValuePair("username", userName));
      // Salesforce's password grant requires the security token appended to the password.
      formParams.add(new BasicNameValuePair("password", password + securityToken));
    } else {
      log.info("Authenticating salesforce with refresh_token");
      formParams.add(new BasicNameValuePair("grant_type", "refresh_token"));
      formParams.add(new BasicNameValuePair("refresh_token", refreshToken));
    }
    try {
      HttpPost post = new HttpPost(host + DEFAULT_AUTH_TOKEN_PATH);
      post.setEntity(new UrlEncodedFormEntity(formParams));
      HttpResponse httpResponse= getHttpClient().execute(post);
      if (httpResponse instanceof Closeable) {
        this.closer.register((Closeable) httpResponse);
      }
      return httpResponse.getEntity();
    } catch (Exception e) {
      throw new RestApiConnectionException("Failed to authenticate salesforce host:"
          + host + "; error-" + e.getMessage(), e);
    }
  }
  /**
   * Adds auth headers: inherited behavior for the password grant, or a Bearer token plus
   * JSON content type for the refresh-token grant.
   */
  @Override
  protected void addHeaders(HttpRequestBase httpRequest) {
    if (refreshToken == null) {
      super.addHeaders(httpRequest);
    } else {
      if (this.accessToken != null) {
        httpRequest.addHeader("Authorization", "Bearer " + this.accessToken);
      }
      httpRequest.addHeader("Content-Type", "application/json");
    }
  }
  // The password grant applies only when both a username and a security token are configured.
  protected static boolean isPasswordGrant(State state) {
    String userName = state.getProp(ConfigurationKeys.SOURCE_CONN_USERNAME);
    String securityToken = state.getProp(ConfigurationKeys.SOURCE_CONN_SECURITY_TOKEN);
    return (userName != null && securityToken != null);
  }
  // Builds "<instanceUrl>/services/data/vXX" and records the env path for later URL rewriting.
  private String getServiceBaseUrl() {
    String dataEnvPath = DEFAULT_SERVICES_DATA_PATH + "/v" + this.state.getProp(ConfigurationKeys.SOURCE_CONN_VERSION);
    this.servicesDataEnvPath = dataEnvPath;
    return this.instanceUrl + dataEnvPath;
  }
  /** Joins the service base URL and a resource path, trimming trailing slashes on both sides. */
  public String getFullUri(String resourcePath) {
    return StringUtils.removeEnd(getServiceBaseUrl(), "/") + StringUtils.removeEnd(resourcePath, "/");
  }
  String getAccessToken() {
    return accessToken;
  }
  String getInstanceUrl() {
    return instanceUrl;
  }
}
| 4,651 |
0 | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/SalesforceSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import java.io.IOException;
import java.math.RoundingMode;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.math.DoubleMath;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.DatasetConstants;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.salesforce.SalesforceExtractor.BatchIdAndResultId;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.exception.ExtractPrepareException;
import org.apache.gobblin.source.extractor.exception.RestApiConnectionException;
import org.apache.gobblin.source.extractor.exception.RestApiProcessingException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
import org.apache.gobblin.source.extractor.extract.QueryBasedSource;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiConnector;
import org.apache.gobblin.source.extractor.partition.Partition;
import org.apache.gobblin.source.extractor.partition.Partitioner;
import org.apache.gobblin.source.extractor.utils.Utils;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.extractor.watermark.WatermarkType;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.WorkUnit;
import static org.apache.gobblin.configuration.ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS;
import static org.apache.gobblin.salesforce.SalesforceConfigurationKeys.*;
/**
 * An implementation of {@link QueryBasedSource} for salesforce data sources.
 *
 * <p>Work units are generated either via Bulk API PK chunking (configured through
 * {@code SALESFORCE_PARTITION_TYPE=PK_CHUNKING}) or via watermark-based partitioning,
 * optionally refined with a record-count histogram and an early-stop cut-off.
 */
@Slf4j
public class SalesforceSource extends QueryBasedSource<JsonArray, JsonElement> {

  /** Config key: when true, pull every object reported by /sobjects instead of a configured list. */
  public static final String USE_ALL_OBJECTS = "use.all.objects";
  public static final boolean DEFAULT_USE_ALL_OBJECTS = false;

  @VisibleForTesting
  static final String ENABLE_DYNAMIC_PROBING = "salesforce.enableDynamicProbing";
  static final String MIN_TARGET_PARTITION_SIZE = "salesforce.minTargetPartitionSize";
  static final int DEFAULT_MIN_TARGET_PARTITION_SIZE = 250000;
  @VisibleForTesting
  static final String ENABLE_DYNAMIC_PARTITIONING = "salesforce.enableDynamicPartitioning";
  @VisibleForTesting
  static final String EARLY_STOP_TOTAL_RECORDS_LIMIT = "salesforce.earlyStopTotalRecordsLimit";
  private static final long DEFAULT_EARLY_STOP_TOTAL_RECORDS_LIMIT = DEFAULT_MIN_TARGET_PARTITION_SIZE * 4;

  /** Timestamp layout used for histogram keys and generated partition points. */
  static final String SECONDS_FORMAT = "yyyy-MM-dd-HH:mm:ss";

  // Set to true when the histogram is truncated by the early-stop record limit.
  private boolean isEarlyStopped = false;
  protected SalesforceConnector salesforceConnector = null;
  private SalesforceHistogramService salesforceHistogramService;

  public SalesforceSource() {
    this.lineageInfo = Optional.absent();
  }

  @VisibleForTesting
  SalesforceSource(LineageInfo lineageInfo) {
    this.lineageInfo = Optional.fromNullable(lineageInfo);
  }

  @VisibleForTesting
  SalesforceSource(SalesforceHistogramService salesforceHistogramService) {
    this.lineageInfo = Optional.absent();
    this.salesforceHistogramService = salesforceHistogramService;
  }

  /**
   * Builds a {@link SalesforceExtractor} for the given work unit.
   *
   * @throws IOException if extractor preparation fails
   */
  @Override
  public Extractor<JsonArray, JsonElement> getExtractor(WorkUnitState state) throws IOException {
    try {
      return new SalesforceExtractor(state).build();
    } catch (ExtractPrepareException e) {
      log.error("Failed to prepare extractor", e);
      throw new IOException(e);
    }
  }

  /** True when the last histogram-based work-unit generation was cut short by the early-stop limit. */
  @Override
  public boolean isEarlyStopped() {
    return isEarlyStopped;
  }

  @Override
  protected void addLineageSourceInfo(SourceState sourceState, SourceEntity entity, WorkUnit workUnit) {
    DatasetDescriptor source =
        new DatasetDescriptor(DatasetConstants.PLATFORM_SALESFORCE, entity.getSourceEntityName());
    if (lineageInfo.isPresent()) {
      lineageInfo.get().setSource(source, workUnit);
    }
  }

  /**
   * Generates work units for one source entity, choosing between the PK-chunking and the
   * watermark-partitioned strategy based on the configured partition type.
   * In partition-only test mode ({@code sf.test.partitionOnly}) the computed work units
   * are discarded and an empty list is returned.
   */
  @Override
  protected List<WorkUnit> generateWorkUnits(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
    SalesforceConnector connector = getConnector(state);
    SfConfig sfConfig = new SfConfig(state.getProperties());
    if (salesforceHistogramService == null) {
      salesforceHistogramService = new SalesforceHistogramService(sfConfig, connector);
    }
    List<WorkUnit> workUnits;
    String partitionType = state.getProp(SALESFORCE_PARTITION_TYPE, "");
    if (partitionType.equals("PK_CHUNKING")) {
      // pk-chunking only supports start-time by source.querybased.start.value, and does not support end-time.
      // always ingest data later than or equal source.querybased.start.value.
      // we should only pk chunking based work units only in case of snapshot/full ingestion
      workUnits = generateWorkUnitsPkChunking(sourceEntity, state, previousWatermark);
    } else {
      workUnits = generateWorkUnitsHelper(sourceEntity, state, previousWatermark);
    }
    log.info("====Generated {} workUnit(s)====", workUnits.size());
    if (sfConfig.partitionOnly) {
      log.info("It is partitionOnly mode, return blank workUnit list");
      return new ArrayList<>();
    } else {
      return workUnits;
    }
  }

  /**
   * generate workUnit for pk chunking
   */
  private List<WorkUnit> generateWorkUnitsPkChunking(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
    SalesforceExtractor.ResultFileIdsStruct resultFileIdsStruct = executeQueryWithPkChunking(state, previousWatermark);
    return createWorkUnits(sourceEntity, state, resultFileIdsStruct);
  }

  /**
   * Submits (or, in test mode, skips) a PK-chunking bulk query and returns the ids of the
   * result files. A throw-away {@link WorkUnitState} is used so that no watermark is committed;
   * the extractor's connection is always closed in the finally block.
   */
  private SalesforceExtractor.ResultFileIdsStruct executeQueryWithPkChunking(
      SourceState sourceState,
      long previousWatermark
  ) throws RuntimeException {
    State state = new State(sourceState);
    WorkUnit workUnit = WorkUnit.createEmpty();
    WorkUnitState workUnitState = new WorkUnitState(workUnit, state);
    workUnitState.setId("Execute pk-chunking");
    SalesforceExtractor salesforceExtractor = null;
    try {
      salesforceExtractor = (SalesforceExtractor) this.getExtractor(workUnitState);
      Partitioner partitioner = new Partitioner(sourceState);
      if (isEarlyStopEnabled(state) && partitioner.isFullDump()) {
        throw new UnsupportedOperationException("Early stop mode cannot work with full dump mode.");
      }
      Partition partition = partitioner.getGlobalPartition(previousWatermark);
      String condition = "";
      Date startDate = Utils.toDate(partition.getLowWatermark(), Partitioner.WATERMARKTIMEFORMAT);
      String field = sourceState.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
      // pk-chunking only supports start-time by source.querybased.start.value, and does not support end-time.
      // always ingest data later than or equal source.querybased.start.value.
      // we should only pk chunking based work units only in case of snapshot/full ingestion
      if (startDate != null && field != null) {
        String lowWatermarkDate = Utils.dateToString(startDate, SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT);
        condition = field + " >= " + lowWatermarkDate;
      }
      Predicate predicate = new Predicate(null, 0, condition, "", null);
      List<Predicate> predicateList = Arrays.asList(predicate);
      String entity = sourceState.getProp(ConfigurationKeys.SOURCE_ENTITY);
      if (state.contains(BULK_TEST_JOB_ID)) {
        // Test hook: reuse an existing bulk job instead of submitting a new query.
        String jobId = state.getProp(BULK_TEST_JOB_ID, "");
        log.info("---Skip query, fetching result files directly for [jobId={}]", jobId);
        String batchIdListStr = state.getProp(BULK_TEST_BATCH_ID_LIST);
        return salesforceExtractor.getQueryResultIdsPkChunkingFetchOnly(jobId, batchIdListStr);
      } else {
        log.info("---Pk Chunking query submit.");
        return salesforceExtractor.getQueryResultIdsPkChunking(entity, predicateList);
      }
    } catch (Exception e) {
      throw new RuntimeException(e);
    } finally {
      if(salesforceExtractor != null) {
        try {
          // Only close connection here since we don't want to update the high watermark for the workUnitState here
          salesforceExtractor.closeConnection();
        } catch (Exception e) {
          log.error("Failed to close the extractor connections", e);
        }
      }
    }
  }

  /**
   * Create work units by taking a bulkJobId.
   * The work units won't contain a query in this case. Instead, they will contain a BulkJobId and a list of `batchId:resultId`
   * So in extractor, the work to do is just to fetch the resultSet files.
   */
  private List<WorkUnit> createWorkUnits(
      SourceEntity sourceEntity,
      SourceState state,
      SalesforceExtractor.ResultFileIdsStruct resultFileIdsStruct
  ) {
    String nameSpaceName = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
    Extract.TableType tableType = Extract.TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());
    String outputTableName = sourceEntity.getDestTableName();
    Extract extract = createExtract(tableType, nameSpaceName, outputTableName);
    List<WorkUnit> workUnits = Lists.newArrayList();
    int partitionNumber = state.getPropAsInt(SOURCE_MAX_NUMBER_OF_PARTITIONS, 1);
    List<BatchIdAndResultId> batchResultIds = resultFileIdsStruct.getBatchIdAndResultIdList();
    int total = batchResultIds.size();
    // size of every partition should be: math.ceil(total/partitionNumber), use simpler way: (total+partitionNumber-1)/partitionNumber
    int sizeOfPartition = (total + partitionNumber - 1) / partitionNumber;
    List<List<BatchIdAndResultId>> partitionedResultIds = Lists.partition(batchResultIds, sizeOfPartition);
    log.info("----partition strategy: max-parti={}, size={}, actual-parti={}, total={}", partitionNumber, sizeOfPartition, partitionedResultIds.size(), total);
    for (List<BatchIdAndResultId> resultIds : partitionedResultIds) {
      WorkUnit workunit = new WorkUnit(extract);
      String bulkJobId = resultFileIdsStruct.getJobId();
      workunit.setProp(PK_CHUNKING_JOB_ID, bulkJobId);
      // Encode the (batchId, resultId) pairs as "batchId:resultId" joined by commas.
      String resultIdStr = resultIds.stream().map(x -> x.getBatchId() + ":" + x.getResultId()).collect(Collectors.joining(","));
      workunit.setProp(PK_CHUNKING_BATCH_RESULT_ID_PAIRS, resultIdStr);
      workunit.setProp(ConfigurationKeys.SOURCE_ENTITY, sourceEntity.getSourceEntityName());
      workunit.setProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY, sourceEntity.getDestTableName());
      workunit.setProp(WORK_UNIT_STATE_VERSION_KEY, CURRENT_WORK_UNIT_STATE_VERSION);
      addLineageSourceInfo(state, sourceEntity, workunit);
      workUnits.add(workunit);
    }
    return workUnits;
  }

  /**
   * Generates {@link WorkUnit}s based on a bunch of config values like max number of partitions, early stop,
   * dynamic partitioning, dynamic probing, etc.
   */
  @VisibleForTesting
  List<WorkUnit> generateWorkUnitsHelper(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
    boolean isSoftDeletePullDisabled = state.getPropAsBoolean(SOURCE_QUERYBASED_SALESFORCE_IS_SOFT_DELETES_PULL_DISABLED, false);
    log.info("disable soft delete pull: " + isSoftDeletePullDisabled);
    WatermarkType watermarkType = WatermarkType.valueOf(
        state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE, ConfigurationKeys.DEFAULT_WATERMARK_TYPE)
            .toUpperCase());
    String watermarkColumn = state.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
    int maxPartitions = state.getPropAsInt(SOURCE_MAX_NUMBER_OF_PARTITIONS,
        ConfigurationKeys.DEFAULT_MAX_NUMBER_OF_PARTITIONS);
    int minTargetPartitionSize = state.getPropAsInt(MIN_TARGET_PARTITION_SIZE, DEFAULT_MIN_TARGET_PARTITION_SIZE);
    // Only support time related watermark
    if (watermarkType == WatermarkType.SIMPLE
        || Strings.isNullOrEmpty(watermarkColumn)
        || !state.getPropAsBoolean(ENABLE_DYNAMIC_PARTITIONING)) {
      // Fall back to the superclass' plain partitioning when dynamic partitioning is not applicable.
      List<WorkUnit> workUnits = super.generateWorkUnits(sourceEntity, state, previousWatermark);
      workUnits.forEach(workUnit ->
          workUnit.setProp(SOURCE_QUERYBASED_SALESFORCE_IS_SOFT_DELETES_PULL_DISABLED, isSoftDeletePullDisabled));
      return workUnits;
    }
    Partitioner partitioner = new Partitioner(state);
    if (isEarlyStopEnabled(state) && partitioner.isFullDump()) {
      throw new UnsupportedOperationException("Early stop mode cannot work with full dump mode.");
    }
    Partition partition = partitioner.getGlobalPartition(previousWatermark);
    Histogram histogram =
        salesforceHistogramService.getHistogram(sourceEntity.getSourceEntityName(), watermarkColumn, state, partition);
    // we should look if the count is too big, cut off early if count exceeds the limit, or bucket size is too large
    Histogram histogramAdjust;
    // TODO: we should consider move this logic into getRefinedHistogram so that we can early terminate the search
    if (isEarlyStopEnabled(state)) {
      histogramAdjust = new Histogram();
      for (HistogramGroup group : histogram.getGroups()) {
        histogramAdjust.add(group);
        long earlyStopRecordLimit = state.getPropAsLong(EARLY_STOP_TOTAL_RECORDS_LIMIT, DEFAULT_EARLY_STOP_TOTAL_RECORDS_LIMIT);
        if (histogramAdjust.getTotalRecordCount() > earlyStopRecordLimit) {
          break;
        }
      }
    } else {
      histogramAdjust = histogram;
    }
    long expectedHighWatermark = partition.getHighWatermark();
    if (histogramAdjust.getGroups().size() < histogram.getGroups().size()) {
      // The histogram was truncated: stop this run at the first bucket that was dropped.
      HistogramGroup lastPlusOne = histogram.get(histogramAdjust.getGroups().size());
      long earlyStopHighWatermark = Long.parseLong(Utils.toDateTimeFormat(lastPlusOne.getKey(), SECONDS_FORMAT, Partitioner.WATERMARKTIMEFORMAT));
      log.info("Job {} will be stopped earlier. [LW : {}, early-stop HW : {}, expected HW : {}]",
          state.getProp(ConfigurationKeys.JOB_NAME_KEY), partition.getLowWatermark(), earlyStopHighWatermark, expectedHighWatermark);
      this.isEarlyStopped = true;
      expectedHighWatermark = earlyStopHighWatermark;
    } else {
      log.info("Job {} will be finished in a single run. [LW : {}, expected HW : {}]",
          state.getProp(ConfigurationKeys.JOB_NAME_KEY), partition.getLowWatermark(), expectedHighWatermark);
    }
    String specifiedPartitions = generateSpecifiedPartitions(histogramAdjust, minTargetPartitionSize, maxPartitions,
        partition.getLowWatermark(), expectedHighWatermark);
    state.setProp(Partitioner.HAS_USER_SPECIFIED_PARTITIONS, true);
    state.setProp(Partitioner.USER_SPECIFIED_PARTITIONS, specifiedPartitions);
    state.setProp(Partitioner.IS_EARLY_STOPPED, isEarlyStopped);
    List<WorkUnit> workUnits = super.generateWorkUnits(sourceEntity, state, previousWatermark);
    workUnits.forEach(workUnit ->
        workUnit.setProp(SOURCE_QUERYBASED_SALESFORCE_IS_SOFT_DELETES_PULL_DISABLED, isSoftDeletePullDisabled));
    return workUnits;
  }

  private boolean isEarlyStopEnabled(State state) {
    return state.getPropAsBoolean(ConfigurationKeys.SOURCE_EARLY_STOP_ENABLED, ConfigurationKeys.DEFAULT_SOURCE_EARLY_STOP_ENABLED);
  }

  /**
   * Folds the histogram buckets into a comma-separated list of partition points
   * (first point comes from the first bucket key; the last point is the expected high watermark).
   *
   * <p>NOTE(review): the {@code lowWatermark} parameter is currently unused — the first
   * histogram key seeds the first partition point instead; confirm this is intentional.
   */
  @VisibleForTesting
  String generateSpecifiedPartitions(Histogram histogram, int minTargetPartitionSize, int maxPartitions, long lowWatermark,
      long expectedHighWatermark) {
    int interval = computeTargetPartitionSize(histogram, minTargetPartitionSize, maxPartitions);
    int totalGroups = histogram.getGroups().size();
    log.info("Histogram total record count: " + histogram.getTotalRecordCount());
    log.info("Histogram total groups: " + totalGroups);
    log.info("maxPartitions: " + maxPartitions);
    log.info("interval: " + interval);
    List<HistogramGroup> groups = histogram.getGroups();
    List<String> partitionPoints = new ArrayList<>();
    DescriptiveStatistics statistics = new DescriptiveStatistics();
    int count = 0;
    for (HistogramGroup group : groups) {
      if (count == 0) {
        // Add a new partition point;
        partitionPoints.add(Utils.toDateTimeFormat(group.getKey(), SECONDS_FORMAT, Partitioner.WATERMARKTIMEFORMAT));
      }
      /*
      Using greedy algorithm by keep adding group until it exceeds the interval size (x2)
      Proof: Assuming nth group violates 2 x interval size, then all groups from 0th to (n-1)th, plus nth group,
      will have total size larger or equal to interval x 2. Hence, we are saturating all intervals (with original size)
      without leaving any unused space in between. We could choose x3,x4... but it is not space efficient.
      */
      if (count != 0 && count + group.getCount() >= 2 * interval) {
        // Summarize current group
        statistics.addValue(count);
        // A step-in start
        partitionPoints.add(Utils.toDateTimeFormat(group.getKey(), SECONDS_FORMAT, Partitioner.WATERMARKTIMEFORMAT));
        count = group.getCount();
      } else {
        // Add group into current partition
        count += group.getCount();
      }
      if (count >= interval) {
        // Summarize current group
        statistics.addValue(count);
        // A fresh start next time
        count = 0;
      }
    }
    if (partitionPoints.isEmpty()) {
      throw new RuntimeException("Unexpected empty partition list");
    }
    if (count > 0) {
      // Summarize last group
      statistics.addValue(count);
    }
    // Add global high watermark as last point
    partitionPoints.add(Long.toString(expectedHighWatermark));
    log.info("Dynamic partitioning statistics: ");
    log.info("data: " + Arrays.toString(statistics.getValues()));
    log.info(statistics.toString());
    String specifiedPartitions = Joiner.on(",").join(partitionPoints);
    log.info("Calculated specified partitions: " + specifiedPartitions);
    return specifiedPartitions;
  }

  /**
   * Compute the target partition size.
   */
  private int computeTargetPartitionSize(Histogram histogram, int minTargetPartitionSize, int maxPartitions) {
    return Math.max(minTargetPartitionSize,
        DoubleMath.roundToInt((double) histogram.getTotalRecordCount() / maxPartitions, RoundingMode.CEILING));
  }

  /**
   * Returns the source entities to pull. When {@code use.all.objects} is enabled, queries
   * the /sobjects REST endpoint for every available object; otherwise defers to the superclass.
   *
   * <p>NOTE(review): this presumably overrides {@code QueryBasedSource.getSourceEntities};
   * consider adding {@code @Override}.
   */
  protected Set<SourceEntity> getSourceEntities(State state) {
    if (!state.getPropAsBoolean(USE_ALL_OBJECTS, DEFAULT_USE_ALL_OBJECTS)) {
      return super.getSourceEntities(state);
    }
    SalesforceConnector connector = getConnector(state);
    try {
      if (!connector.connect()) {
        throw new RuntimeException("Failed to connect.");
      }
    } catch (RestApiConnectionException e) {
      throw new RuntimeException("Failed to connect.", e);
    }
    List<Command> commands = RestApiConnector.constructGetCommand(connector.getFullUri("/sobjects"));
    try {
      CommandOutput<?, ?> response = connector.getResponse(commands);
      // Unchecked cast: the connector's command output values are expected to be JSON strings.
      Iterator<String> itr = (Iterator<String>) response.getResults().values().iterator();
      if (itr.hasNext()) {
        String next = itr.next();
        return getSourceEntities(next);
      }
      throw new RuntimeException("Unable to retrieve source entities");
    } catch (RestApiProcessingException e) {
      throw Throwables.propagate(e);
    }
  }

  /** Parses the /sobjects JSON response and returns one {@link SourceEntity} per object name. */
  private static Set<SourceEntity> getSourceEntities(String response) {
    Set<SourceEntity> result = Sets.newHashSet();
    JsonObject jsonObject = new Gson().fromJson(response, JsonObject.class).getAsJsonObject();
    JsonArray array = jsonObject.getAsJsonArray("sobjects");
    for (JsonElement element : array) {
      String sourceEntityName = element.getAsJsonObject().get("name").getAsString();
      result.add(SourceEntity.fromSourceEntityName(sourceEntityName));
    }
    return result;
  }

  /** Lazily creates and caches the {@link SalesforceConnector} for this source. */
  protected SalesforceConnector getConnector(State state) {
    if (this.salesforceConnector == null) {
      this.salesforceConnector = new SalesforceConnector(state);
    }
    return this.salesforceConnector;
  }
}
| 4,652 |
0 | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/SfConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import java.util.Properties;
import org.apache.gobblin.typedconfig.Default;
import org.apache.gobblin.typedconfig.Key;
import org.apache.gobblin.typedconfig.compiletime.IntRange;
/**
 * Typed configuration for the Salesforce source: each annotated public field is populated
 * from the job {@link Properties} by the {@code TypedConfig} machinery, using the
 * {@code @Key} property name and the {@code @Default} fallback value.
 */
public class SfConfig extends QueryBasedSourceConfig {
  public SfConfig(Properties prop) {
    super(prop);
  }

  /** Target record count per PK-chunking chunk; constrained to [20,000, 250,000]. */
  @Key("salesforce.partition.pkChunkingSize")@Default("250000")@IntRange({20_000, 250_000})
  public int pkChunkingSize;

  /** When true, bulk API uses queryAll (includes soft-deleted records). */
  @Key("salesforce.bulkApiUseQueryAll")@Default("false")
  public boolean bulkApiUseQueryAll;

  /** Wait between bulk fetch retries, in milliseconds. */
  @Key("salesforce.retry.interval")@Default("60000")
  public int retryInterval;

  /** Maximum number of bulk fetch retry attempts. */
  @Key("salesforce.fetchRetryLimit")@Default("5")
  public int fetchRetryLimit;

  /** Wait before retrying after exceeding the API quota, in milliseconds. */
  @Key("salesforce.retry.exceedQuotaInterval")@Default("300000")
  public int retryExceedQuotaInterval;

  /** Maximum number of REST API retry attempts. */
  @Key("sf.rest.api.retryLimit")@Default("3")
  public int restApiRetryLimit;

  /** Wait between REST API retries, in milliseconds. */
  @Key("sf.rest.api.retryInterval")@Default("10000") // 10 seconds
  public int restApiRetryInterval;

  // it is for test. if true, it will only execute partition part and stop.
  @Key("sf.test.partitionOnly")@Default("false")
  public boolean partitionOnly;
}
| 4,653 |
0 | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/QueryResultIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import com.google.gson.JsonElement;
import java.util.Iterator;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * Lazily-initialised iterator over the records of a Salesforce REST API query.
 *
 * <p>It wraps {@code RestApiExtractor.getRecordSet(schema, entity, workUnit, predicateList)}
 * but defers the call until the first element is requested, so the query only executes
 * once records are actually pulled.
 */
@Slf4j
public class QueryResultIterator implements Iterator<JsonElement> {

  private final SalesforceExtractor extractor;
  private final String schema;
  private final String entity;
  private final WorkUnit workUnit;
  private final List<Predicate> predicateList;

  // Populated on first access by results(); stays null until then.
  private Iterator<JsonElement> results;
  private int recordCount = 0;

  public QueryResultIterator(
      SalesforceExtractor extractor,
      String schema,
      String entity,
      WorkUnit workUnit,
      List<Predicate> predicateList
  ) {
    log.info("create query result iterator.");
    this.extractor = extractor;
    this.schema = schema;
    this.entity = entity;
    this.workUnit = workUnit;
    this.predicateList = predicateList;
  }

  @Override
  public boolean hasNext() {
    return results().hasNext();
  }

  @Override
  public JsonElement next() {
    JsonElement record = results().next();
    recordCount++;
    if (!results.hasNext()) {
      // the record just returned was the last one; report the grand total
      log.info("----Rest API query records total:{}----", recordCount);
    }
    return record;
  }

  /** Executes the query on first access and caches the resulting iterator. */
  private Iterator<JsonElement> results() {
    if (results == null) {
      try {
        results = extractor.getRecordSet(schema, entity, workUnit, predicateList);
      } catch (DataRecordException e) {
        throw new RuntimeException(e);
      }
    }
    return results;
  }
}
| 4,654 |
0 | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/HistogramGroup.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
 * One bucket of the watermark histogram: a key (the bucket's start value) and the
 * number of records falling into the bucket.
 */
class HistogramGroup {

  private final String key;
  private final int count;

  public HistogramGroup(String key, int count) {
    this.key = key;
    this.count = count;
  }

  public String getKey() {
    return key;
  }

  public int getCount() {
    return count;
  }

  /** Renders as {@code key:count}, e.g. for histogram log output. */
  @Override
  public String toString() {
    return key + ":" + count;
  }
}
0 | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/FileIdVO.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import lombok.Data;
/**
 * Value object identifying one Bulk API result file by its job, batch and result ids.
 */
@Data
public class FileIdVO {
  private final String jobId;
  private final String batchId;
  private final String resultId;

  /** Renders as {@code [jobId=..., batchId=..., resultId=...]} for log messages. */
  public String toString() {
    return "[jobId=" + jobId + ", batchId=" + batchId + ", resultId=" + resultId + "]";
  }
}
| 4,656 |
0 | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/Histogram.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import java.util.ArrayList;
import java.util.List;
import lombok.Getter;
/**
 * Accumulates {@link HistogramGroup} buckets together with a running total of
 * their record counts.
 */
public class Histogram {

  private long totalRecordCount;
  private final List<HistogramGroup> groups;

  Histogram() {
    this.totalRecordCount = 0;
    this.groups = new ArrayList<>();
  }

  /** Appends one bucket and folds its count into the running total. */
  void add(HistogramGroup group) {
    this.groups.add(group);
    this.totalRecordCount += group.getCount();
  }

  /** Appends every bucket of another histogram and folds in its total. */
  void add(Histogram histogram) {
    this.groups.addAll(histogram.getGroups());
    this.totalRecordCount += histogram.totalRecordCount;
  }

  HistogramGroup get(int idx) {
    return this.groups.get(idx);
  }

  public long getTotalRecordCount() {
    return this.totalRecordCount;
  }

  public List<HistogramGroup> getGroups() {
    return this.groups;
  }

  @Override
  public String toString() {
    return this.groups.toString();
  }
}
| 4,657 |
0 | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/ResultChainingIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import java.util.Iterator;
import java.util.List;
import com.google.common.collect.Iterators;
import com.google.gson.JsonElement;
import com.sforce.async.BulkConnection;
import lombok.extern.slf4j.Slf4j;
/**
 * Chains the result files of a Bulk API job into one continuous {@link JsonElement}
 * iterator. Additional iterators (e.g. a follow-up query result) can be appended to
 * the chain via {@link #add(Iterator)}.
 */
@Slf4j
public class ResultChainingIterator implements Iterator<JsonElement> {

  private Iterator<JsonElement> chain;
  private int recordCount = 0;
  private int isDeletedRecordCount = 0;

  public ResultChainingIterator(BulkConnection conn, List<FileIdVO> fileIdList, int retryLimit,
      long retryInterval, long retryExceedQuotaInterval) {
    // One BulkResultIterator per result file, stitched together lazily.
    Iterator<BulkResultIterator> fileIterators = fileIdList.stream()
        .map(fileId -> new BulkResultIterator(conn, fileId, retryLimit, retryInterval, retryExceedQuotaInterval))
        .iterator();
    chain = Iterators.<JsonElement>concat(fileIterators);
  }

  public Iterator<JsonElement> get() {
    return chain;
  }

  /** Appends another iterator to the end of the chain; a null argument is a no-op. */
  public void add(Iterator<JsonElement> extra) {
    if (extra == null) {
      return;
    }
    chain = Iterators.concat(chain, extra);
  }

  @Override
  public boolean hasNext() {
    return chain.hasNext();
  }

  @Override
  public JsonElement next() {
    JsonElement record = chain.next();
    recordCount++;
    // Track soft-deleted rows: records carrying IsDeleted=true.
    JsonElement isDeleted = record.getAsJsonObject().get("IsDeleted");
    if (isDeleted != null && isDeleted.getAsBoolean()) {
      isDeletedRecordCount++;
    }
    if (!chain.hasNext()) {
      // `record` was the last element; report totals including soft-deleted count
      log.info("====Total records: [{}] isDeleted=true records: [{}]====", recordCount, isDeletedRecordCount);
    }
    return record;
  }
}
| 4,658 |
0 | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/QueryBasedSourceConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.salesforce;
import java.util.Properties;
import org.apache.gobblin.typedconfig.TypedConfig;
/**
 * Typed configuration for query-based Salesforce sources.
 *
 * <p>Thin wrapper that hands the job {@link Properties} to the
 * {@code TypedConfig} base class; the actual property binding/lookup
 * behavior lives in the superclass (not visible in this file).
 */
public class QueryBasedSourceConfig extends TypedConfig {
    // Builds the config from raw job properties; see TypedConfig for semantics.
    public QueryBasedSourceConfig(Properties prop) {
        super(prop);
    }
}
| 4,659 |
0 | Create_ds/creadur-tentacles/src/test/java/org/apache/creadur | Create_ds/creadur-tentacles/src/test/java/org/apache/creadur/tentacles/ArchivesJsonTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.creadur.tentacles;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.module.jaxb.JaxbAnnotationModule;
import org.apache.creadur.tentacles.model.Archives;
import org.junit.Assert;
import org.junit.Test;
/**
 * Smoke test: serializes a one-item {@link Archives} model to System.out
 * using a JAXB-annotation-aware Jackson mapper. There are no assertions;
 * the test passes as long as serialization throws no exception.
 */
public class ArchivesJsonTest extends Assert {
    @Test
    public void test() throws Exception {
        final Archives archives = new Archives();
        // Fluent builder: one archive entry with a name and a path.
        archives.addItem()
            .name("hello.jar")
            .path("one/two/hello.jar")
        ;
        final ObjectMapper mapper = new ObjectMapper();
        JaxbAnnotationModule module = new JaxbAnnotationModule();
        // Teach Jackson to honor the JAXB annotations on the model classes.
        mapper.registerModule(module);
        mapper.writerWithDefaultPrettyPrinter().writeValue(System.out, archives);
    }
}
| 4,660 |
0 | Create_ds/creadur-tentacles/src/test/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/test/java/org/apache/creadur/tentacles/filter/ListOfFilesFilterTest.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles.filter;
import static org.junit.Assert.*;
import org.junit.Test;
/**
 * Verifies that {@link ListOfFilesFilter} retains all constructor-supplied
 * file names and includes them in its {@code toString()} output.
 */
public class ListOfFilesFilterTest {

    @Test
    public void testCreationAndToString() {
        final ListOfFilesFilter subject = new ListOfFilesFilter("a12", "b23", "c34");

        final String rendered = subject.toString();
        assertTrue(rendered.contains("a12"));
        assertTrue(rendered.contains("b23"));
        assertTrue(rendered.contains("c34"));

        assertEquals(3, subject.getListOfFiles().size());
    }
}
| 4,661 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/FileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.creadur.tentacles;
import java.io.File;
import java.io.FileFilter;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.creadur.tentacles.filter.Filters;
/**
 * File-system utilities for Tentacles: recursive collection of files by
 * filter, regex, or the project's {@link Filters}, plus safe directory
 * creation.
 */
public class FileSystem {

    private final Filters filters;

    public FileSystem() {
        this.filters = new Filters();
    }

    /** Legal documents present under {@code contents} but not declared there. */
    public List<File> legalDocumentsUndeclaredIn(final File contents) {
        return collect(contents,
                this.filters.legalDocumentsUndeclaredIn(contents));
    }

    /** Legal documents declared under {@code contents}. */
    public List<File> legalDocumentsDeclaredIn(final File contents) {
        return collect(contents,
                this.filters.legalDocumentsDeclaredIn(contents));
    }

    /** Collects files under {@code dir} whose absolute path matches {@code regex}. */
    public List<File> collect(final File dir, final String regex) {
        return collect(dir, Pattern.compile(regex));
    }

    private List<File> collect(final File dir, final Pattern pattern) {
        return collect(dir, new FileFilter() {
            @Override
            public boolean accept(final File file) {
                return pattern.matcher(file.getAbsolutePath()).matches();
            }
        });
    }

    /**
     * Depth-first collection of {@code dir} and everything beneath it that
     * the filter accepts. Note that {@code dir} itself is included when
     * accepted.
     */
    public List<File> collect(final File dir, final FileFilter filter) {
        final List<File> accepted = new ArrayList<File>();
        if (filter.accept(dir)) {
            accepted.add(dir);
        }

        // listFiles() returns null for non-directories and on I/O error.
        final File[] files = dir.listFiles();
        if (files != null) {
            for (final File file : files) {
                accepted.addAll(collect(file, filter));
            }
        }

        return accepted;
    }

    /** Ensures the parent directory of {@code file} exists. */
    public void mkparent(final File file) {
        mkdirs(file.getParentFile());
    }

    /**
     * Ensures {@code file} exists as a directory, creating missing parents.
     *
     * @throws IllegalStateException if creation fails, or the path exists
     *             but is not a directory. (Previously these checks used
     *             {@code assert}, which is disabled by default at runtime,
     *             so failures were silently ignored in production.)
     */
    public void mkdirs(final File file) {
        if (!file.exists()) {
            // mkdirs() also returns false when another thread created the
            // directory first, so re-check before failing.
            if (!file.mkdirs() && !file.isDirectory()) {
                throw new IllegalStateException("mkdirs failed to create " + file);
            }
            return;
        }
        if (!file.isDirectory()) {
            throw new IllegalStateException("Not a directory: " + file);
        }
    }

    /** All regular files (no directories) under {@code repository}. */
    public List<File> documentsFrom(final File repository) {
        return collect(repository, this.filters.filesOnly());
    }

    /** All files accepted by the licenses-only filter under {@code directory}. */
    public List<File> licensesFrom(final File directory) {
        return collect(directory, this.filters.licensesOnly());
    }

    /** All files accepted by the notices-only filter under {@code directory}. */
    public List<File> noticesOnly(final File directory) {
        return collect(directory, this.filters.noticesOnly());
    }

    /** Licenses declared under {@code contents}. */
    public List<File> licensesDeclaredIn(final File contents) {
        return collect(contents, this.filters.licensesDeclaredIn(contents));
    }

    /** Notices declared under {@code contents}. */
    public List<File> noticesDeclaredIn(final File contents) {
        return collect(contents, this.filters.noticesDeclaredIn(contents));
    }

    /** Archives under {@code file} whose repository path matches the filter. */
    public List<File> archivesInPath(final File file,
            final String fileRepositoryPathNameFilter) {
        return collect(file,
                this.filters.archivesInPath(fileRepositoryPathNameFilter));
    }
}
| 4,662 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/TentaclesResources.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.net.URL;
/**
 * Access to resources bundled on the Tentacles classpath: open them as
 * readers, slurp them into strings, or copy them out to files.
 */
public class TentaclesResources {

    private final IOSystem ioSystem;

    public TentaclesResources(final IOSystem ioSystem) {
        this.ioSystem = ioSystem;
    }

    /** Opens a character reader over the named classpath resource. */
    public Reader read(final String resourceName) throws IOException {
        return new InputStreamReader(toUrl(resourceName).openStream());
    }

    /** Loads the entire classpath resource at {@code resourcePath} as text. */
    public String readText(final String resourcePath) throws IOException {
        return this.ioSystem.slurp(toUrl(resourcePath));
    }

    /** Copies the classpath resource at {@code resourcePath} into {@code to}. */
    public void copyTo(final String resourcePath, final File to)
            throws IOException {
        this.ioSystem.copy(toUrl(resourcePath).openStream(), to);
    }

    /** Resolves a classpath resource, failing loudly when it is missing. */
    private URL toUrl(final String resourcePath) {
        final URL located =
                this.getClass().getClassLoader().getResource(resourcePath);
        if (located == null) {
            throw new IllegalStateException(
                    "Tentacles expects the classpath to contain "
                            + resourcePath);
        }
        return located;
    }
}
| 4,663 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/License.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
import java.io.File;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
 * A license text keyed by a whitespace-insensitive normal form, together
 * with the archives and file locations it was found in.
 *
 * <p>Equality and hashing are defined solely by {@link #getKey()}.
 */
public class License {

    private final String text;
    private final String key;
    private final Set<Archive> archives = new HashSet<Archive>();
    private final List<File> locations = new ArrayList<File>();

    public License(final String key, final String text) {
        this.text = text;
        this.key = key;
    }

    public String getText() {
        return this.text;
    }

    public String getKey() {
        return this.key;
    }

    public Set<Archive> getArchives() {
        return this.archives;
    }

    public List<File> getLocations() {
        return this.locations;
    }

    /**
     * Locations of this license inside {@code archive}, expressed relative
     * to the archive's contents URI; files outside the archive are skipped.
     */
    public Set<URI> locations(final Archive archive) {
        final Set<URI> relativeLocations = new HashSet<URI>();
        final URI contents = archive.contentsURI();

        for (final File location : this.locations) {
            final URI absolute = location.toURI();
            final URI relative = contents.relativize(absolute);
            // relativize() returns its argument unchanged when the file is
            // not under `contents`.
            if (!relative.equals(absolute)) {
                relativeLocations.add(relative);
            }
        }
        return relativeLocations;
    }

    @Override
    public boolean equals(final Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        return this.key.equals(((License) o).key);
    }

    @Override
    public int hashCode() {
        return this.key.hashCode();
    }

    /** True when this license's key occurs within {@code fullLicense}'s key. */
    public boolean implies(final License fullLicense) {
        return fullLicense.key.contains(this.key);
    }
} | 4,664 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/Layout.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
import java.io.File;
/**
 * Directory layout for a Tentacles run: a local root directory (also used
 * as the output directory) containing "repo" and "content" subdirectories.
 *
 * <p>The constructor eagerly creates all three directories on disk via
 * {@link FileSystem#mkdirs}.
 */
public class Layout {

    // Root of all local output; also returned by getOutputDirectory().
    private final File localRootDirectory;
    private final File output;
    private final File repository;
    private final File contentRootDirectory;

    public Layout(final Platform platform, final Configuration configuration) {
        super();
        this.localRootDirectory =
                new File(configuration.getRootDirectoryForLocalOutput());
        final FileSystem fileSystem = platform.getFileSystem();
        // Create the root first so the children can be created beneath it.
        fileSystem.mkdirs(this.localRootDirectory);
        this.repository = new File(this.localRootDirectory, "repo");
        this.contentRootDirectory =
                new File(this.localRootDirectory, "content");
        // Output and local root are intentionally the same directory.
        this.output = this.localRootDirectory;
        fileSystem.mkdirs(this.repository);
        fileSystem.mkdirs(this.contentRootDirectory);
    }

    public File getLocalRootDirectory() {
        return this.localRootDirectory;
    }

    public File getOutputDirectory() {
        return this.output;
    }

    public File getRepositoryDirectory() {
        return this.repository;
    }

    public File getContentRootDirectory() {
        return this.contentRootDirectory;
    }
}
| 4,665 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/Configuration.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
import java.io.File;
import java.net.URI;
import java.net.URISyntaxException;
/**
 * Command-line configuration for a Tentacles run.
 *
 * <p>{@code args[0]} is the staging-repository URI ("file:" URIs are
 * resolved to an absolute path); {@code args[1]}, when present, is the root
 * directory for local output, otherwise the last path segment of the
 * staging URI is used. The repository path filter comes from the
 * {@code filter} system property and defaults to "org/apache/openejb".
 */
public class Configuration {

    private static final String DEFAULT_FILE_REPOSITORY_PATH_NAME_FILTER =
            "org/apache/openejb";
    private static final String SYSTEM_PROPERTY_NAME_FOR_FILE_REPOSITORY_PATH_NAME_FILTER =
            "filter";
    private static final int ARGUMENT_INDEX_FOR_LOCAL_ROOT_DIRECTORY = 1;
    private static final int ARGUMENT_INDEX_FOR_URI_CONFIGURATION = 0;
    private static final int ARGUMENT_LENGTH_FOR_URI_CONFIGURATION_ONLY =
            ARGUMENT_INDEX_FOR_URI_CONFIGURATION + 1;

    /** Parses {@code location}; "file:" URIs are anchored to an absolute path. */
    private static URI toURI(final String location) throws URISyntaxException {
        final URI parsed = new URI(location);
        return location.startsWith("file:")
                ? new File(parsed).getAbsoluteFile().toURI()
                : parsed;
    }

    private final URI stagingRepositoryURI;
    private final String rootDirectoryForLocalOutput;
    private final String fileRepositoryPathNameFilter;

    public Configuration(final String... args) throws URISyntaxException {
        // Order matters: rootDirectoryForLocalOutput(...) reads the staging URI.
        this.stagingRepositoryURI =
                toURI(args[ARGUMENT_INDEX_FOR_URI_CONFIGURATION]);
        this.rootDirectoryForLocalOutput = rootDirectoryForLocalOutput(args);
        this.fileRepositoryPathNameFilter =
                System.getProperty(
                        SYSTEM_PROPERTY_NAME_FOR_FILE_REPOSITORY_PATH_NAME_FILTER,
                        DEFAULT_FILE_REPOSITORY_PATH_NAME_FILTER);
    }

    public String getFileRepositoryPathNameFilter() {
        return this.fileRepositoryPathNameFilter;
    }

    public URI getStagingRepositoryURI() {
        return this.stagingRepositoryURI;
    }

    public String getRootDirectoryForLocalOutput() {
        return this.rootDirectoryForLocalOutput;
    }

    /** Second argument when given, else the staging URI's last path segment. */
    private String rootDirectoryForLocalOutput(final String... args) {
        if (args.length > ARGUMENT_LENGTH_FOR_URI_CONFIGURATION_ONLY) {
            return args[ARGUMENT_INDEX_FOR_LOCAL_ROOT_DIRECTORY];
        }
        return new File(this.stagingRepositoryURI.getPath()).getName();
    }
}
| 4,666 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/Licenses.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
/**
 * Produces {@link License} instances from license files, replacing each
 * known full license text with a short placeholder marker.
 */
public class Licenses {

    private final IOSystem ioSystem;
    // Unmodifiable view over the caller-supplied map of name -> full text.
    private final Map<String, String> licenses;

    public Licenses(final Map<String, String> licenses, final Platform platform) {
        super();
        this.ioSystem = platform.getIoSystem();
        this.licenses = Collections.unmodifiableMap(licenses);
    }

    /** Reads {@code document} and wraps its contents as a {@link License}. */
    public License from(final File document) throws IOException {
        final String raw = this.ioSystem.slurp(document);
        return new License(toKey(raw), normalize(raw));
    }

    /** Whitespace-stripped, lower-cased, interned form used as the license's identity. */
    private String toKey(final String text) {
        return text.replaceAll("[ \\n\\t\\r]+", "").toLowerCase().intern();
    }

    /** Replaces each known full license text with a "[name - full text]" marker. */
    private String normalize(final String text) {
        String normalized = text;
        for (final Map.Entry<String, String> known : this.licenses.entrySet()) {
            normalized =
                    normalized.replace(
                            known.getValue(),
                            String.format("---[%s - full text]---\n\n",
                                    known.getKey()));
        }
        return normalized.intern();
    }
}
| 4,667 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/IOSystem.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.creadur.tentacles;
import org.apache.logging.log4j.*;
import java.io.*;
import java.net.URL;
import java.util.zip.ZipInputStream;
/**
 * Low-level stream/file helpers: slurping, copying, buffered open, and
 * lenient closing.
 *
 * <p>NOTE(review): text methods ({@code slurp}, {@code writeString}) use the
 * JVM's platform default charset ({@code new String(byte[])} / FileWriter),
 * not an explicit one — results vary with the platform encoding.
 */
public class IOSystem {
    private static final Logger LOG = LogManager.getLogger(IOSystem.class);

    /** Reads the whole file into a String using the platform default charset. */
    public String slurp(final File file) throws IOException {
        final ByteArrayOutputStream out = new ByteArrayOutputStream();
        copy(file, out);
        return new String(out.toByteArray());
    }

    /** Reads the whole URL body into a String using the platform default charset. */
    public String slurp(final URL url) throws IOException {
        final ByteArrayOutputStream out = new ByteArrayOutputStream();
        copy(url.openStream(), out);
        return new String(out.toByteArray());
    }

    /**
     * Writes {@code string} to {@code file}, appending a trailing
     * platform-specific line separator ({@code newLine()}).
     */
    public void writeString(final File file, final String string) throws IOException {
        final FileWriter out = new FileWriter(file);
        try {
            final BufferedWriter bufferedWriter = new BufferedWriter(out);
            try {
                bufferedWriter.write(string);
                bufferedWriter.newLine();
            } finally {
                close(bufferedWriter);
            }
        } finally {
            close(out);
        }
    }

    /** Copies the file's bytes into {@code to}; closes only the input side. */
    private void copy(final File from, final OutputStream to) throws IOException {
        final InputStream read = read(from);
        try {
            copy(read, to);
        } finally {
            close(read);
        }
    }

    /** Copies the stream into {@code to}; closes only the output side. */
    public void copy(final InputStream from, final File to) throws IOException {
        final OutputStream write = write(to);
        try {
            copy(from, write);
        } finally {
            close(write);
        }
    }

    // Core copy loop: 1KB chunks, flushes the destination, closes neither side.
    private void copy(final InputStream from, final OutputStream to) throws IOException {
        final byte[] buffer = new byte[1024];
        int length = 0;
        while ((length = from.read(buffer)) != -1) {
            to.write(buffer, 0, length);
        }
        to.flush();
    }

    /** Writes the byte array to {@code to}. */
    public void copy(final byte[] from, final File to) throws IOException {
        copy(new ByteArrayInputStream(from), to);
    }

    /** Opens the file as a ZIP stream; the caller is responsible for closing it. */
    public ZipInputStream unzip(final File file) throws IOException {
        final InputStream read = read(file);
        return new ZipInputStream(read);
    }

    /**
     * Flushes (when applicable) and closes {@code closeable}, logging rather
     * than propagating any IOException. Despite the declared IOException,
     * this method never actually throws it; null is tolerated.
     */
    public void close(final Closeable closeable) throws IOException {
        if (closeable == null) {
            return;
        }
        try {
            if (closeable instanceof Flushable) {
                ((Flushable) closeable).flush();
            }
        } catch (final IOException e) {
            LOG.trace("Error when trying to flush before closing " + closeable, e);
        }
        try {
            closeable.close();
        } catch (final IOException e) {
            LOG.trace("Error when trying to close " + closeable, e);
        }
    }

    /** Opens a 32KB-buffered output stream over {@code destination}. */
    public OutputStream write(final File destination) throws FileNotFoundException {
        final OutputStream out = new FileOutputStream(destination);
        return new BufferedOutputStream(out, 32768);
    }

    /** Opens a 32KB-buffered input stream over {@code source}. */
    public InputStream read(final File source) throws FileNotFoundException {
        final InputStream in = new FileInputStream(source);
        return new BufferedInputStream(in, 32768);
    }

    /** Drains the stream into a byte array; the input stream is not closed. */
    public byte[] read(final InputStream in) throws IOException {
        final ByteArrayOutputStream out = new ByteArrayOutputStream();
        copy(in, out);
        out.close();
        return out.toByteArray();
    }
}
| 4,668 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/Templates.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.creadur.tentacles;
import java.util.Properties;
import org.apache.velocity.app.VelocityEngine;
import org.apache.velocity.runtime.log.CommonsLogLogChute;
/**
 * Factory for Velocity-backed {@link TemplateBuilder}s, configured to load
 * templates from both the file system and the classpath and to route
 * Velocity's logging through commons-logging under this class's name.
 */
public final class Templates {

    private final IOSystem ioSystem;
    private final VelocityEngine engine;
    private final TentaclesResources tentaclesResources;

    public Templates(final Platform platform) {
        this.ioSystem = platform.getIoSystem();
        this.tentaclesResources = platform.getTentaclesResources();

        // Velocity engine setup: cache file templates, allow "file" and
        // "class" (classpath) resource loaders.
        final Properties properties = new Properties();
        properties.setProperty("file.resource.loader.cache", "true");
        properties.setProperty("resource.loader", "file, class");
        properties.setProperty("class.resource.loader.description",
                "Velocity Classpath Resource Loader");
        properties
                .setProperty("class.resource.loader.class",
                        "org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader");
        // Send Velocity's internal logging through commons-logging.
        properties.setProperty("runtime.log.logsystem.class",
                CommonsLogLogChute.class.getName());
        properties.setProperty("runtime.log.logsystem.commons.logging.name",
                Templates.class.getName());
        this.engine = new VelocityEngine();
        this.engine.init(properties);
    }

    /** Starts a builder for the named template, sharing this engine. */
    public TemplateBuilder template(final String name) {
        return new TemplateBuilder(name, this.ioSystem, this.engine,
                this.tentaclesResources);
    }
}
| 4,669 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/NexusClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.creadur.tentacles;
import org.apache.http.Header;
import org.apache.http.HttpHeaders;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.logging.log4j.*;
import org.codehaus.swizzle.stream.StreamLexer;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.LinkedHashSet;
import java.util.Set;
/**
 * HTTP client for a Nexus staging repository: crawls directory listings and
 * downloads artifacts, skipping files already fully downloaded.
 *
 * <p>Retry count is configurable via the {@code NexusClient.retries} system
 * property (default 5).
 */
public class NexusClient {

    private static final Logger log = LogManager.getLogger(NexusClient.class);

    private static final String SLASH = "/";
    private static final String ONE_UP = "../";
    private static final String USER_AGENT_CONTENTS = "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101206 Ubuntu/10.10 (maverick) Firefox/3.6.13";

    private final CloseableHttpClient client;
    private final FileSystem fileSystem;
    private final IOSystem ioSystem;
    private final int retries;

    public NexusClient(final Platform platform) {
        System.setProperty("http.keepAlive", "false");
        System.setProperty("http.maxConnections", "50");
        this.retries = Integer.parseInt(System.getProperty("NexusClient.retries", "5"));
        this.client = HttpClientBuilder.create().disableContentCompression()
                .build();
        this.fileSystem = platform.getFileSystem();
        this.ioSystem = platform.getIoSystem();
    }

    /**
     * Downloads {@code uri} into {@code file}, skipping the transfer when
     * the local copy already matches the advertised content length.
     */
    public File download(final URI uri, final File file) throws IOException {
        if (file.exists()) {
            final long length = getContentLength(uri);
            if (file.length() == length) {
                log.info("Exists " + uri);
                return file;
            } else {
                log.info("Incomplete " + uri);
            }
        }

        log.info("Download " + uri);

        final CloseableHttpResponse response = get(uri);
        InputStream content = null;
        try {
            content = response.getEntity().getContent();
            this.fileSystem.mkparent(file);
            this.ioSystem.copy(content, file);
        } finally {
            if (content != null) {
                content.close();
            }
            response.close();
        }
        return file;
    }

    /** Content length advertised by a HEAD request, or -1 when absent. */
    private Long getContentLength(final URI uri) throws IOException {
        final CloseableHttpResponse head = head(uri);
        try {
            final Header[] headers = head.getHeaders(HttpHeaders.CONTENT_LENGTH);
            if (headers != null && headers.length >= 1) {
                return Long.valueOf(headers[0].getValue());
            }
            return (long) -1;
        } finally {
            // BUG FIX: previously the response was closed only when no
            // Content-Length header was present, leaking the connection.
            head.close();
        }
    }

    private CloseableHttpResponse get(final URI uri) throws IOException {
        return get(new HttpGet(uri), this.retries);
    }

    private CloseableHttpResponse head(final URI uri) throws IOException {
        return get(new HttpHead(uri), this.retries);
    }

    /**
     * Executes {@code request}, retrying up to {@code tries} more times on
     * {@link IOException} with a 250ms pause between attempts.
     */
    private CloseableHttpResponse get(final HttpUriRequest request, final int tries) throws IOException {
        try {
            request.setHeader(HttpHeaders.USER_AGENT, USER_AGENT_CONTENTS);
            return this.client.execute(request);
        } catch (final IOException e) {
            if (tries <= 0) {
                throw e;
            }
            try {
                Thread.sleep(250);
            } catch (final InterruptedException ie) {
                // Restore the interrupt flag; the previous Thread.interrupted()
                // call cleared it instead.
                Thread.currentThread().interrupt();
                throw new IOException("Interrupted", ie);
            }
            // BUG FIX: was get(request, tries--), which passed the
            // undecremented value and therefore never reduced the budget.
            return get(request, tries - 1);
        }
    }

    /**
     * Recursively crawls the HTML directory listing at {@code index} and
     * returns the URIs of all non-directory entries found beneath it.
     */
    public Set<URI> crawl(final URI index) throws IOException {
        log.info("Crawl " + index);

        final Set<URI> resources = new LinkedHashSet<URI>();
        final Set<URI> crawl = new LinkedHashSet<URI>();

        final CloseableHttpResponse response = get(index);
        try {
            final InputStream content = response.getEntity().getContent();
            try {
                final StreamLexer lexer = new StreamLexer(content);

                // Links look like:
                // <a href="https://repository.apache.org/content/repositories/orgapacheopenejb-094/archetype-catalog.xml">archetype-catalog.xml</a>
                while (lexer.readAndMark("<a ", "/a>")) {
                    try {
                        final String link = lexer.peek("href=\"", "\"");
                        final String name = lexer.peek(">", "<");
                        final URI uri = index.resolve(link);

                        // Skip parent-directory links to avoid crawling upward.
                        if (name.equals(ONE_UP)) {
                            continue;
                        }
                        if (link.equals(ONE_UP)) {
                            continue;
                        }

                        // Trailing slash marks a subdirectory to recurse into.
                        if (name.endsWith(SLASH)) {
                            crawl.add(uri);
                            continue;
                        }

                        resources.add(uri);
                    } finally {
                        lexer.unmark();
                    }
                }
            } finally {
                // BUG FIX: close in finally so parse failures do not leak
                // the stream or connection.
                content.close();
            }
        } finally {
            response.close();
        }

        for (final URI uri : crawl) {
            resources.addAll(crawl(uri));
        }
        return resources;
    }
} | 4,670 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/Platform.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
/**
 * Aggregates the core collaborators (classpath resources, file system, IO)
 * that the rest of Tentacles depends on.
 */
public class Platform {

    /** Builds the default platform wiring used by production code. */
    public static Platform aPlatform() {
        final IOSystem io = new IOSystem();
        final FileSystem files = new FileSystem();
        return new Platform(new TentaclesResources(io), files, io);
    }

    private final TentaclesResources tentaclesResources;
    private final FileSystem fileSystem;
    private final IOSystem ioSystem;

    public Platform(final TentaclesResources tentaclesResources,
            final FileSystem fileSystem, final IOSystem ioSystem) {
        super();
        this.tentaclesResources = tentaclesResources;
        this.fileSystem = fileSystem;
        this.ioSystem = ioSystem;
    }

    public TentaclesResources getTentaclesResources() {
        return this.tentaclesResources;
    }

    public FileSystem getFileSystem() {
        return this.fileSystem;
    }

    public IOSystem getIoSystem() {
        return this.ioSystem;
    }
}
| 4,671 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/Deauthorize.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.creadur.tentacles;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import org.codehaus.swizzle.stream.DelimitedTokenReplacementInputStream;
import org.codehaus.swizzle.stream.ExcludeFilterInputStream;
import org.codehaus.swizzle.stream.StringTokenHandler;
/**
 * Little utility that will yank the author comments from java files.
 *
 * If the resulting comment block is effectively empty, it will be yanked too.
 */
public class Deauthorize {

    /**
     * All input must be valid directories.
     *
     * Invalid input is logged to System.err and skipped
     *
     * @param args
     *            a list of directories to scan and fix
     * @throws Exception
     *             in case of errors.
     */
    public static void main(final String[] args) throws Exception {
        if (args.length == 0) {
            throw new IllegalArgumentException(
                    "At least one directory must be specified");
        }

        final List<File> dirs = new ArrayList<File>();

        // Check the input args upfront
        for (final String arg : args) {
            final File dir = new File(arg);

            if (not(dir.exists(), "Does not exist: %s", arg)) {
                continue;
            }

            if (not(dir.isDirectory(), "Not a directory: %s", arg)) {
                continue;
            }

            dirs.add(dir);
        }

        // Exit if we got bad input
        if (dirs.size() != args.length) {
            System.exit(1);
        }

        // Go!
        for (final File dir : dirs) {
            deauthorize(dir);
        }
    }

    /**
     * Iterate over all the java files in the given directory
     *
     * Read in the file so we can guess the line ending -- if we didn't need to
     * do that we could just stream. Run the content through Swizzle Stream and
     * filter out any author tags as well as any comment blocks that wind up (or
     * already were) empty as a result.
     *
     * If that had any effect on the contents of the file, write it back out.
     *
     * Should skip any files that are not readable or writable.
     *
     * Will log an error on System.err for any files that were updated and were
     * not writable. Files that are not writable and don't need updating are
     * simply ignored.
     *
     * @param dir directory tree to scan for *.java files
     * @throws IOException on read/write failures
     */
    private static void deauthorize(final File dir) throws IOException {
        // Uses a fresh IOSystem for the actual work.
        deauthorize(dir, new IOSystem());
    }

    // Core implementation: filters each java file's bytes through two
    // swizzle stream filters (author-line exclusion, then empty-comment
    // removal) and rewrites the file only when something was stripped.
    private static void deauthorize(final File dir, final IOSystem io)
            throws IOException {
        for (final File file : new FileSystem().collect(dir, ".*\\.java")) {
            if (not(file.canRead(), "File not readable: %s",
                    file.getAbsolutePath())) {
                continue;
            }

            final String text = io.slurp(file);

            // You really can't trust text to be in the native line ending
            final String eol = text.contains("\r\n") ? "\r\n" : "\n";

            // Comment delimiters anchored to the detected line ending so the
            // token matching below works on both Unix and Windows files.
            final String startComment = eol + "/*";
            final String endComment = "*/" + eol;

            final InputStream baseIn = new ByteArrayInputStream(text.getBytes());

            // Yank author tags
            InputStream in = new ExcludeFilterInputStream(baseIn, " * @author",
                    eol);

            // Clean "empty" comments
            in = new DelimitedTokenReplacementInputStream(in, startComment,
                    endComment, new StringTokenHandler() {
                        @Override
                        public String handleToken(final String commentBlock)
                                throws IOException {

                            // Yank if empty (nothing left but whitespace and '*')
                            if (commentBlock.replaceAll("[\\s*]", "").length() == 0) {
                                return eol;
                            }

                            // Keep otherwise
                            return startComment + commentBlock + endComment;
                        }
                    });

            final byte[] content = io.read(in);

            // The filters above only ever remove text, so an unchanged length
            // is taken to mean nothing was stripped - skip the rewrite.
            if (content.length != file.length()) {
                if (not(file.canWrite(), "File not writable: %s",
                        file.getAbsolutePath())) {
                    continue;
                }

                io.copy(content, file);
            }
        }
    }

    // Inverts the condition: returns true (and logs the printf-style message
    // to System.err) when the expected condition did NOT hold, which lets
    // callers write "if (not(...)) continue;".
    private static boolean not(boolean b, final String message,
            final Object... details) {
        b = !b;
        if (b) {
            System.err.printf(message, details);
            System.err.println();
        }
        return b;
    }
}
| 4,672 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/Notice.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
import java.io.File;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
public class Notice {
private final String text;
private final String key;
private final Set<Archive> archives = new HashSet<Archive>();
private final List<File> locations = new ArrayList<File>();
public Notice(final String text) {
this.text = text.intern();
this.key = text.replaceAll("[ \\n\\t\\r]+", "").toLowerCase().intern();
}
public String getText() {
return this.text;
}
public String getKey() {
return this.key;
}
public Set<Archive> getArchives() {
return this.archives;
}
public List<File> getLocations() {
return this.locations;
}
public Set<URI> locations(final Archive archive) {
final URI contents = archive.contentsURI();
final Set<URI> locations = new HashSet<URI>();
for (final File file : this.locations) {
final URI uri = file.toURI();
final URI relativize = contents.relativize(uri);
if (!relativize.equals(uri)) {
locations.add(relativize);
}
}
return locations;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final Notice notice = (Notice) o;
if (!this.key.equals(notice.key)) {
return false;
}
return true;
}
@Override
public int hashCode() {
return this.key.hashCode();
}
public boolean implies(final Notice fullLicense) {
return fullLicense.key.contains(this.key);
}
} | 4,673 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/ArchivesJson.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.creadur.tentacles;
// NOTE(review): empty placeholder type with no behaviour. Presumably
// intended to produce the archives.json summary described by the
// org.apache.creadur.tentacles.model.Archives class - confirm before use.
public class ArchivesJson {
}
| 4,674 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/TemplateBuilder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.io.StringWriter;
import java.io.Writer;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.velocity.VelocityContext;
import org.apache.velocity.app.VelocityEngine;
public class TemplateBuilder {
private static final String LOG_TAG_NAME = TemplateBuilder.class.getName();
private final TentaclesResources tentaclesResources;
private final VelocityEngine engine;
private final IOSystem ioSystem;
private final String templateName;
private final Map<String, Object> templateContextMap =
new ConcurrentHashMap<String, Object>();
public TemplateBuilder(final String template, final IOSystem ioSystem,
final VelocityEngine engine,
final TentaclesResources tentaclesResources) {
this.templateName = template;
this.ioSystem = ioSystem;
this.engine = engine;
this.tentaclesResources = tentaclesResources;
}
public TemplateBuilder add(final String key, final Object value) {
this.templateContextMap.put(key, value);
return this;
}
public TemplateBuilder addAll(final Map<String, Object> map) {
this.templateContextMap.putAll(map);
return this;
}
public String apply() {
final StringWriter writer = new StringWriter();
evaluate(writer);
return writer.toString();
}
public File write(final File file) throws IOException {
this.ioSystem.writeString(file, apply());
return file;
}
private void evaluate(final Writer writer) {
try {
final Reader templateReader =
this.tentaclesResources.read(this.templateName);
final VelocityContext context =
new VelocityContext(this.templateContextMap);
this.engine.evaluate(context, writer, LOG_TAG_NAME, templateReader);
} catch (final IOException ioe) {
throw new RuntimeException("can't apply template "
+ this.templateName, ioe);
}
}
} | 4,675 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/Main.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.creadur.tentacles;
import static org.apache.creadur.tentacles.LicenseType.loadLicensesFrom;
import static org.apache.creadur.tentacles.RepositoryType.HTTP;
import static org.apache.creadur.tentacles.RepositoryType.LOCAL_FILE_SYSTEM;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.logging.log4j.*;
/**
 * Apache Tentacles entry point.
 *
 * Mirrors a staging repository (remote HTTP or local file system) into a
 * local directory, recursively unpacks every archive found there, and
 * generates HTML reports listing the license and notice documents each
 * archive declares, carries elsewhere, or is missing.
 */
public class Main {

    static {
        /* TENTACLES-12: disabled root logger configuration
        final Logger root = LogManager.getRootLogger();
        root.addAppender(new ConsoleAppender(new PatternLayout(
                PatternLayout.TTCC_CONVERSION_PATTERN)));
        root.setLevel(Level.INFO);
        */
    }

    private static final Logger log = LogManager.getLogger(Main.class);

    // Archive extensions worth downloading from a remote repository.
    private static final String CRAWL_PATTERN =
            ".*\\.(jar|zip|war|ear|rar|tar.gz)";

    private final Reports reports;
    private final Licenses licenses;
    private final Layout layout;
    private final Platform platform;
    private final Configuration configuration;
    private final FileSystem fileSystem;
    private final IOSystem ioSystem;
    private final TentaclesResources tentaclesResources;
    private final Templates templates;

    public Main(final String... args) throws Exception {
        this(new Configuration(args), Platform.aPlatform());
    }

    public Main(final Configuration configuration, final Platform platform)
            throws Exception {
        this(configuration, platform, new Templates(platform), new Layout(
                platform, configuration));
    }

    public Main(final Configuration configuration, final Platform platform,
            final Templates templates, final Layout layout) throws Exception {
        this.platform = platform;
        this.configuration = configuration;
        this.layout = layout;
        this.fileSystem = platform.getFileSystem();
        this.ioSystem = platform.getIoSystem();
        this.tentaclesResources = platform.getTentaclesResources();
        this.templates = templates;
        this.reports = new Reports();

        log.info("Remote repository: "
                + this.configuration.getStagingRepositoryURI());
        log.info("Local root directory: "
                + this.layout.getLocalRootDirectory());

        // Shared stylesheet for all generated report pages.
        this.tentaclesResources.copyTo("legal/style.css",
                new File(this.layout.getOutputDirectory(), "style.css"));

        this.licenses = loadLicensesFrom(platform);
    }

    public static void main(final String[] args) throws Exception {
        log.info("Launching Apache Tentacles ...");
        if (args == null || args.length < 1) {
            log.error("Error: Input parameter missing - you did not specify any component to run Apache Tentacles on.");
            log.error("Please launch Apache Tentacles with an URI to work on such as 'https://repository.apache.org/content/repositories/orgapachecreadur-1000/'.");
        } else {
            new Main(args).main();
        }
    }

    // The whole pipeline: mirror, unpack, report.
    private void main() throws Exception {
        unpackContents(mirrorRepositoryFrom(this.configuration));
        reportOn(archivesIn(this.layout.getRepositoryDirectory()));
    }

    /** Wraps every archive document found under the repository mirror. */
    private List<Archive> archivesIn(final File repository) {
        final List<File> jars = this.fileSystem.documentsFrom(repository);

        final List<Archive> archives = new ArrayList<Archive>();
        for (final File file : jars) {
            final Archive archive =
                    new Archive(file, this.fileSystem, this.layout);
            archives.add(archive);
        }
        return archives;
    }

    /** Generates the index page and all per-category report pages. */
    private void reportOn(final List<Archive> archives) throws IOException {
        this.templates
                .template("legal/archives.vm")
                .add("archives", archives)
                .add("reports", this.reports)
                .write(new File(this.layout.getOutputDirectory(),
                        "archives.html"));

        reportLicenses(archives);
        reportNotices(archives);
        reportDeclaredLicenses(archives);
        reportDeclaredNotices(archives);
    }

    private void reportLicenses(final List<Archive> archives)
            throws IOException {
        initLicenses(archives);

        this.templates
                .template("legal/licenses.vm")
                .add("licenses", getLicenses(archives))
                .add("reports", this.reports)
                .write(new File(this.layout.getOutputDirectory(),
                        "licenses.html"));
    }

    // Deduplicates license texts across all archives and links each unique
    // license to every archive and file location it was found in.
    private void initLicenses(final List<Archive> archives) throws IOException {
        final Map<License, License> licenses = new HashMap<License, License>();

        for (final Archive archive : archives) {
            final List<File> files =
                    this.fileSystem.licensesFrom(archive.contentsDirectory());
            for (final File file : files) {
                final License license = this.licenses.from(file);

                License existing = licenses.get(license);
                if (existing == null) {
                    licenses.put(license, license);
                    existing = license;
                }

                existing.getLocations().add(file);
                existing.getArchives().add(archive);
                archive.getLicenses().add(existing);
            }
        }
    }

    private Collection<License> getLicenses(final List<Archive> archives) {
        final Set<License> licenses = new LinkedHashSet<License>();
        for (final Archive archive : archives) {
            licenses.addAll(archive.getLicenses());
        }
        return licenses;
    }

    private void reportDeclaredLicenses(final List<Archive> archives)
            throws IOException {
        for (final Archive archive : archives) {
            classifyLicenses(archive);
        }
        for (final Archive archive : archives) {
            this.templates
                    .template("legal/archive-licenses.vm")
                    .add("archive", archive)
                    .add("reports", this.reports)
                    .write(new File(this.layout.getOutputDirectory(),
                            this.reports.licenses(archive)));
        }
    }

    // Splits an archive's licenses into "declared" (found at the archive's
    // declared-documents location) and "other", then drops any "other"
    // license that is implied by (contained in) a declared one.
    private void classifyLicenses(final Archive archive) throws IOException {
        final Set<License> undeclared =
                new HashSet<License>(archive.getLicenses());

        final File contents = archive.contentsDirectory();

        final List<File> files = this.fileSystem.licensesDeclaredIn(contents);
        for (final File file : files) {
            undeclared.remove(this.licenses.from(file));
        }

        archive.getOtherLicenses().addAll(undeclared);

        final Set<License> declared =
                new HashSet<License>(archive.getLicenses());
        declared.removeAll(undeclared);
        archive.getDeclaredLicenses().addAll(declared);

        for (final License license : undeclared) {
            for (final License declare : declared) {
                if (license.implies(declare)) {
                    archive.getOtherLicenses().remove(license);
                }
            }
        }
    }

    private void reportDeclaredNotices(final List<Archive> archives)
            throws IOException {
        for (final Archive archive : archives) {

            final Set<Notice> undeclared =
                    new HashSet<Notice>(archive.getNotices());

            final File contents = archive.contentsDirectory();

            final List<File> files =
                    this.fileSystem.noticesDeclaredIn(contents);
            for (final File file : files) {
                final Notice notice = new Notice(this.ioSystem.slurp(file));
                undeclared.remove(notice);
            }

            archive.getOtherNotices().addAll(undeclared);

            final Set<Notice> declared =
                    new HashSet<Notice>(archive.getNotices());
            declared.removeAll(undeclared);
            archive.getDeclaredNotices().addAll(declared);

            for (final Notice notice : undeclared) {
                for (final Notice declare : declared) {
                    if (notice.implies(declare)) {
                        // Fix: previously called getOtherLicenses().remove(notice),
                        // which operates on a Set<License> and so never matched a
                        // Notice - implied notices were never pruned. Mirrors the
                        // equivalent pruning in classifyLicenses().
                        archive.getOtherNotices().remove(notice);
                    }
                }
            }

            this.templates
                    .template("legal/archive-notices.vm")
                    .add("archive", archive)
                    .add("reports", this.reports)
                    .write(new File(this.layout.getOutputDirectory(),
                            this.reports.notices(archive)));
        }
    }

    // Deduplicates notice texts across all archives (same scheme as
    // initLicenses) and renders the notices summary page.
    private void reportNotices(final List<Archive> archives) throws IOException {
        final Map<Notice, Notice> notices = new HashMap<Notice, Notice>();

        for (final Archive archive : archives) {
            final List<File> noticeDocuments =
                    this.fileSystem.noticesOnly(archive.contentsDirectory());
            for (final File file : noticeDocuments) {
                final Notice notice = new Notice(this.ioSystem.slurp(file));

                Notice existing = notices.get(notice);
                if (existing == null) {
                    notices.put(notice, notice);
                    existing = notice;
                }

                existing.getLocations().add(file);
                existing.getArchives().add(archive);
                archive.getNotices().add(existing);
            }
        }

        this.templates
                .template("legal/notices.vm")
                .add("notices", notices.values())
                .add("reports", this.reports)
                .write(new File(this.layout.getOutputDirectory(),
                        "notices.html"));
    }

    private void unpackContents(final Set<File> files) throws IOException {
        for (final File file : files) {
            unpack(file);
        }
    }

    /**
     * Copies every interesting archive from the configured repository into
     * the local mirror, either by crawling a remote Nexus instance or by
     * walking a local directory tree.
     *
     * @return the set of mirrored archive files
     */
    private Set<File> mirrorRepositoryFrom(final Configuration configuration)
            throws IOException {
        final Set<File> files = new HashSet<File>();
        if (HTTP.isRepositoryFor(configuration)) {
            final NexusClient client = new NexusClient(this.platform);
            final Set<URI> resources =
                    client.crawl(configuration.getStagingRepositoryURI());

            for (final URI uri : resources) {
                if (!uri.getPath().matches(CRAWL_PATTERN)) {
                    continue;
                }
                files.add(client.download(uri, mirroredFrom(uri)));
            }
        } else if (LOCAL_FILE_SYSTEM.isRepositoryFor(configuration)) {
            final File file = new File(configuration.getStagingRepositoryURI());
            final List<File> collect =
                    this.platform.getFileSystem().archivesInPath(file,
                            configuration.getFileRepositoryPathNameFilter());
            for (final File f : collect) {
                files.add(copyToMirror(f));
            }
        }
        return files;
    }

    // Recursively explodes an archive into its ".contents" directory;
    // nested jars are unpacked in turn. Non-zip files are logged and skipped.
    private void unpack(final File archive) throws IOException {
        log.info("Unpack " + archive);

        try {
            final ZipInputStream zip = this.ioSystem.unzip(archive);

            final File contents =
                    new Archive(archive, this.fileSystem, this.layout)
                            .contentsDirectory();

            try {
                ZipEntry entry = null;

                while ((entry = zip.getNextEntry()) != null) {
                    if (entry.isDirectory()) {
                        continue;
                    }

                    final String path = entry.getName();
                    final File fileEntry = new File(contents, path);
                    this.fileSystem.mkparent(fileEntry);

                    // Open the output file
                    this.ioSystem.copy(zip, fileEntry);

                    if (fileEntry.getName().endsWith(".jar")) {
                        unpack(fileEntry);
                    }
                }
            } finally {
                this.ioSystem.close(zip);
            }
        } catch (final IOException e) {
            log.error("Not a zip " + archive);
        }
    }

    private File copyToMirror(final File src) throws IOException {
        final URI uri = src.toURI();
        final File file = mirroredFrom(uri);
        log.info("Copy " + uri);
        this.fileSystem.mkparent(file);
        this.ioSystem.copy(this.ioSystem.read(src), file);
        return file;
    }

    // Maps a repository URI to its location inside the local mirror by
    // stripping the staging-repository prefix and any leading slash.
    private File mirroredFrom(final URI uri) {
        final String name =
                uri.toString()
                        .replace(
                                this.configuration.getStagingRepositoryURI()
                                        .toString(), "").replaceFirst("^/", "");
        return new File(this.layout.getRepositoryDirectory(), name);
    }
}
| 4,676 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/LicenseType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
 * The canonical license texts bundled with Tentacles, each identified by a
 * short resource name and backed by a {@code licenses/<name>.txt} resource.
 */
public enum LicenseType {

    ASL_2_0("asl-2.0"), CPL_1_0("cpl-1.0"), CDDL_1_0("cddl-1.0");

    /**
     * Reads the canonical text of every known license type and wraps the
     * resulting name-to-text map in a {@code Licenses} lookup.
     */
    public static Licenses loadLicensesFrom(final Platform platform)
            throws IOException {
        final Map<String, String> textsByName =
                new ConcurrentHashMap<String, String>();
        final TentaclesResources resources = platform.getTentaclesResources();
        for (final LicenseType type : values()) {
            type.putTextInto(textsByName, resources);
        }
        return new Licenses(textsByName, platform);
    }

    private final String resourceName;
    private final String resourcePath;

    private LicenseType(final String resourceName) {
        this.resourceName = resourceName;
        this.resourcePath = "licenses/" + resourceName + ".txt";
    }

    /** @return the short name, e.g. {@code asl-2.0}. */
    public String getResourceName() {
        return this.resourceName;
    }

    /** @return the resource path of the canonical license text. */
    public String getResourcePath() {
        return this.resourcePath;
    }

    /** Loads and trims the canonical text for this license type. */
    public String readText(final TentaclesResources tentaclesResources)
            throws IOException {
        return tentaclesResources.readText(getResourcePath()).trim();
    }

    /** Stores this license's canonical text under its short name. */
    public void putTextInto(final Map<String, String> licenseTextByName,
            final TentaclesResources tentaclesResources) throws IOException {
        licenseTextByName.put(getResourceName(), readText(tentaclesResources));
    }
}
| 4,677 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/RepositoryType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
import java.net.URI;
/** The two repository flavours Tentacles can mirror, keyed by URI prefix. */
public enum RepositoryType {

    HTTP("http"), LOCAL_FILE_SYSTEM("file:");

    // URI prefix that identifies this repository flavour.
    private final String prefix;

    private RepositoryType(final String prefix) {
        this.prefix = prefix;
    }

    /** True when the configured staging repository URI is of this type. */
    public boolean isRepositoryFor(final Configuration configuration) {
        return isTypeOf(configuration.getStagingRepositoryURI());
    }

    /** True when the given URI's textual form starts with this prefix. */
    public boolean isTypeOf(final URI uri) {
        final String spelled = uri.toString();
        return spelled.startsWith(this.prefix);
    }
}
| 4,678 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/Archive.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
import java.io.File;
import java.net.URI;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * One archive (jar/war/zip/...) found in the mirrored repository, together
 * with the license/notice bookkeeping the reporting phase accumulates
 * against it.
 */
public class Archive {

    private final Layout layout;
    private final FileSystem fileSystem;

    // URI of the archive relative to the repository mirror directory.
    private final URI uri;
    private final File file;
    // Declared legal documents: archive-relative name -> local-root-relative link.
    private final Map<URI, URI> map;

    private final Set<License> licenses = new HashSet<License>();
    private final Set<Notice> notices = new HashSet<Notice>();

    private final Set<License> declaredLicenses = new HashSet<License>();
    private final Set<Notice> declaredNotices = new HashSet<Notice>();

    private final Set<License> otherLicenses = new HashSet<License>();
    private final Set<Notice> otherNotices = new HashSet<Notice>();

    // Lazily built view of the undeclared legal documents; built at most
    // once per instance (not thread-safe - single-threaded use assumed).
    private Map<URI, URI> others;

    public Archive(final File file, final FileSystem fileSystem,
            final Layout layout) {
        this.fileSystem = fileSystem;
        this.layout = layout;
        this.uri =
                layout.getRepositoryDirectory().toURI()
                        .relativize(file.toURI());
        this.file = file;
        this.map = map();
    }

    /** @return licenses found at the archive's declared location (mutable). */
    public Set<License> getDeclaredLicenses() {
        return this.declaredLicenses;
    }

    /** @return notices found at the archive's declared location (mutable). */
    public Set<Notice> getDeclaredNotices() {
        return this.declaredNotices;
    }

    /** @return licenses found elsewhere in the archive (mutable). */
    public Set<License> getOtherLicenses() {
        return this.otherLicenses;
    }

    /** @return notices found elsewhere in the archive (mutable). */
    public Set<Notice> getOtherNotices() {
        return this.otherNotices;
    }

    /** @return every license found anywhere in the archive (mutable). */
    public Set<License> getLicenses() {
        return this.licenses;
    }

    /** @return every notice found anywhere in the archive (mutable). */
    public Set<Notice> getNotices() {
        return this.notices;
    }

    /** @return the archive's URI relative to the repository mirror. */
    public URI getUri() {
        return this.uri;
    }

    /** @return the archive file on disk. */
    public File getFile() {
        return this.file;
    }

    /** @return declared legal documents, name -> local-root-relative link. */
    public Map<URI, URI> getLegal() {
        return this.map;
    }

    /** @return undeclared legal documents, built lazily on first call. */
    public Map<URI, URI> getOtherLegal() {
        if (this.others == null) {
            this.others = mapOther();
        }
        return this.others;
    }

    // Collects the legal documents found OUTSIDE the declared location.
    private Map<URI, URI> mapOther() {
        final File jarContents = contentsDirectory();
        final List<File> legal =
                this.fileSystem.legalDocumentsUndeclaredIn(jarContents);

        return buildMapFrom(jarContents, legal);
    }

    // For each legal file: key = path relative to the unpacked contents,
    // value = path relative to the local root (usable as a report link).
    private Map<URI, URI> buildMapFrom(final File jarContents,
            final List<File> legal) {
        final Map<URI, URI> map = new LinkedHashMap<URI, URI>();
        for (final File file : legal) {
            final URI name = jarContents.toURI().relativize(file.toURI());
            final URI link =
                    this.layout.getLocalRootDirectory().toURI()
                            .relativize(file.toURI());

            map.put(name, link);
        }
        return map;
    }

    // Collects the legal documents found AT the declared location.
    private Map<URI, URI> map() {
        final File jarContents = contentsDirectory();
        final List<File> legal =
                this.fileSystem.legalDocumentsDeclaredIn(jarContents);

        return buildMapFrom(jarContents, legal);
    }

    /**
     * Directory holding this archive's unpacked contents. The path mirrors
     * the archive's location under the local root, minus a leading "repo/"
     * or "content/" segment, with a ".contents" suffix appended; the
     * directory is created on demand.
     */
    public File contentsDirectory() {
        final File archiveDocument = getFile();
        String path =
                archiveDocument.getAbsolutePath().substring(
                        this.layout.getLocalRootDirectory().getAbsolutePath()
                                .length() + 1);

        if (path.startsWith("repo/")) {
            path = path.substring("repo/".length());
        }
        if (path.startsWith("content/")) {
            path = path.substring("content/".length());
        }

        final File contents =
                new File(this.layout.getContentRootDirectory(), path
                        + ".contents");
        this.fileSystem.mkdirs(contents);
        return contents;
    }

    /** @return the unpacked-contents directory as a URI. */
    public URI contentsURI() {
        return contentsDirectory().toURI();
    }
}
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/Reports.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles;
public class Reports {
public String licenses(final Archive archive) {
return archive.getUri().toString().replace('/', '.')
+ ".licenses.html";
}
public String notices(final Archive archive) {
return archive.getUri().toString().replace('/', '.')
+ ".notices.html";
}
} | 4,680 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/model/Archives.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.creadur.tentacles.model;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.ArrayList;
import java.util.List;
/**
* To be marshalled to json as an archives.json file
* Will contain a summary list of the artifacts intended for tabular display
*/
@XmlRootElement
public class Archives {

    private final List<Item> archives = new ArrayList<Item>();

    /**
     * Required for JAXB
     */
    public Archives() {
    }

    /** @return the live, mutable list of summary rows. */
    public List<Item> getArchives() {
        return archives;
    }

    /** Appends an already-built row. */
    public void add(Item item) {
        archives.add(item);
    }

    /** Creates, registers and returns a fresh row for fluent population. */
    public Item addItem() {
        final Item item = new Item();
        add(item);
        return item;
    }

    /** One row of the summary table. */
    public static class Item {

        private String path;    // path in the repo, minus the repo portion (relative)
        private String name;    // just the file name with no path
        private String type;    // jar, war, zip
        private int jars;       // count of jars contained in this archive
        private String license; // unique ID of the license file
        private String notice;  // unique ID of the notice file

        /**
         * Required for JAXB
         */
        public Item() {
        }

        public String getPath() {
            return path;
        }

        public void setPath(String path) {
            this.path = path;
        }

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        public String getType() {
            return type;
        }

        public void setType(String type) {
            this.type = type;
        }

        public int getJars() {
            return jars;
        }

        public void setJars(int jars) {
            this.jars = jars;
        }

        public String getLicense() {
            return license;
        }

        public void setLicense(String license) {
            this.license = license;
        }

        public String getNotice() {
            return notice;
        }

        public void setNotice(String notice) {
            this.notice = notice;
        }

        // Fluent builders - each delegates to the matching setter.

        public Item path(String path) {
            setPath(path);
            return this;
        }

        public Item name(String name) {
            setName(name);
            return this;
        }

        public Item type(String type) {
            setType(type);
            return this;
        }

        public Item jars(int jars) {
            setJars(jars);
            return this;
        }

        public Item license(String license) {
            setLicense(license);
            return this;
        }

        public Item notice(String notice) {
            setNotice(notice);
            return this;
        }
    }
}
| 4,681 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/filter/FilesOnlyFilter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles.filter;
import java.io.File;
import java.io.FileFilter;
final class FilesOnlyFilter implements FileFilter {
@Override
public boolean accept(final File pathname) {
return pathname.isFile();
}
} | 4,682 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/filter/DeclaredFilter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles.filter;
import java.io.File;
import java.io.FileFilter;
final class DeclaredFilter implements FileFilter {
private final File file;
DeclaredFilter(final File file) {
this.file = file;
}
@Override
public boolean accept(File file) {
while (file != null) {
if (file.equals(this.file)) {
break;
}
if (file.isDirectory() && file.getName().endsWith(".contents")) {
return false;
}
file = file.getParentFile();
}
return true;
}
} | 4,683 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/filter/AndFilter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles.filter;
import java.io.File;
import java.io.FileFilter;
import java.util.ArrayList;
import java.util.List;
final class AndFilter implements FileFilter {
List<FileFilter> filters = new ArrayList<FileFilter>();
AndFilter(final FileFilter... filters) {
for (final FileFilter filter : filters) {
this.filters.add(filter);
}
}
@Override
public boolean accept(final File file) {
for (final FileFilter filter : this.filters) {
if (!filter.accept(file)) {
return false;
}
}
return true;
}
} | 4,684 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/filter/Filters.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles.filter;
import java.io.File;
import java.io.FileFilter;
public class Filters {
private final FilesOnlyFilter filesOnly;
private final LicenseFilter licensesOnly;
private final NoticeFilter noticesOnly;
private final LegalFilter legalOnly;
public Filters() {
this.filesOnly = new FilesOnlyFilter();
this.licensesOnly = new LicenseFilter();
this.noticesOnly = new NoticeFilter();
this.legalOnly = new LegalFilter();
}
public FileFilter filesOnly() {
return this.filesOnly;
}
public FileFilter licensesOnly() {
return this.licensesOnly;
}
public FileFilter noticesOnly() {
return this.noticesOnly;
}
public FileFilter legalOnly() {
return this.legalOnly;
}
public FileFilter licensesDeclaredIn(final File contents) {
return new AndFilter(new DeclaredFilter(contents), new LicenseFilter());
}
public FileFilter noticesDeclaredIn(final File contents) {
return new AndFilter(new DeclaredFilter(contents), new NoticeFilter());
}
public FileFilter legalDocumentsUndeclaredIn(final File contents) {
return new AndFilter(new NotFilter(new DeclaredFilter(contents)),
new LegalFilter());
}
public FileFilter legalDocumentsDeclaredIn(final File contents) {
return new AndFilter(new DeclaredFilter(contents), new LegalFilter());
}
public FileFilter archivesInPath(final String repositoryPathNameFilter) {
return new IsArchiveInPathFilter(repositoryPathNameFilter);
}
}
| 4,685 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/filter/ListOfFilesFilter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles.filter;
import java.io.File;
import java.io.FileFilter;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
public class ListOfFilesFilter implements FileFilter {
private List<String> listOfFiles;
ListOfFilesFilter(String... files) {
listOfFiles = Arrays.asList(files);
}
@Override
public boolean accept(File pathname) {
if (pathname.isDirectory()) {
return false;
}
return listOfFiles.contains(pathname.getName().toLowerCase());
}
/**
* @return an unmodifiable list of the files to filter for.
*/
public List<String> getListOfFiles() {
return Collections.unmodifiableList(listOfFiles);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("Filtering for one of the following file names [");
builder.append(listOfFiles);
builder.append("]");
return builder.toString();
}
}
| 4,686 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/filter/LegalFilter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles.filter;
import java.io.File;
import java.io.FileFilter;
final class LegalFilter implements FileFilter {
private static final NoticeFilter notice = new NoticeFilter();
private static final LicenseFilter license = new LicenseFilter();
@Override
public boolean accept(final File pathname) {
return notice.accept(pathname) || license.accept(pathname);
}
} | 4,687 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/filter/NotFilter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles.filter;
import java.io.File;
import java.io.FileFilter;
final class NotFilter implements FileFilter {
private final FileFilter filter;
NotFilter(final FileFilter filter) {
this.filter = filter;
}
@Override
public boolean accept(final File pathname) {
return !this.filter.accept(pathname);
}
} | 4,688 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/filter/NoticeFilter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles.filter;
final class NoticeFilter extends ListOfFilesFilter {
public NoticeFilter() {
super("notice", "notice.txt");
}
} | 4,689 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/filter/LicenseFilter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles.filter;
final class LicenseFilter extends ListOfFilesFilter {
LicenseFilter() {
super("license", "license.txt", "licence", "licence.txt");
}
} | 4,690 |
0 | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles | Create_ds/creadur-tentacles/src/main/java/org/apache/creadur/tentacles/filter/IsArchiveInPathFilter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.creadur.tentacles.filter;
import java.io.File;
import java.io.FileFilter;
final class IsArchiveInPathFilter implements FileFilter {
private static final String MATCH_PATTERN = ".*\\.(jar|zip|war|ear|rar|tar.gz)";
private final String pathNameFilter;
IsArchiveInPathFilter(final String pathNameFilter) {
super();
this.pathNameFilter = pathNameFilter;
}
@Override
public boolean accept(final File pathname) {
final String path = pathname.getAbsolutePath();
return path.matches(this.pathNameFilter) && isValidArchive(path);
}
private boolean isValidArchive(final String path) {
return path.matches(MATCH_PATTERN);
}
}
| 4,691 |
0 | Create_ds/connect-java-sdk/src/test/java/com/squareup/connect | Create_ds/connect-java-sdk/src/test/java/com/squareup/connect/utils/Account.java | package com.squareup.connect.utils;
import com.fasterxml.jackson.annotation.JsonProperty;
// Represent the account info
public class Account {
@JsonProperty("email")
public String email;
@JsonProperty("access_token")
public String accessToken;
@JsonProperty("location_id")
public String locationId;
@JsonProperty("application_id")
public String applicationId;
}
| 4,692 |
0 | Create_ds/connect-java-sdk/src/test/java/com/squareup/connect | Create_ds/connect-java-sdk/src/test/java/com/squareup/connect/utils/APITest.java | package com.squareup.connect.utils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;
import org.junit.BeforeClass;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
public class APITest {
protected static Map<String, Account> accounts;
@BeforeClass
public static void loadAccounts() throws IOException {
ObjectMapper mapper = new ObjectMapper();
byte[] jsonData = Files.readAllBytes(Paths.get("./travis-ci/accounts.json"));
accounts = mapper.readValue(jsonData, new TypeReference<Map<String, Account>>() { });
}
}
| 4,693 |
0 | Create_ds/connect-java-sdk/src/test/java/com/squareup/connect | Create_ds/connect-java-sdk/src/test/java/com/squareup/connect/models/RefundTest.java | package com.squareup.connect.models;
import java.util.ArrayList;
import java.util.Arrays;
import org.junit.Test;
public class RefundTest {
/**
* additionalRecipientsTest
*
* Tests that the refund object has an AdditionalRecipients field.
*/
@Test
public void additionalRecipientsTest() {
AdditionalRecipient recipient = new AdditionalRecipient();
recipient.setLocationId("location");
recipient.setDescription("description");
Money money = new Money();
money.setAmount(1L);
money.setCurrency("USD");
recipient.setAmountMoney(money);
new Refund().setAdditionalRecipients(new ArrayList<>(
Arrays.asList(recipient)
));
}
}
| 4,694 |
0 | Create_ds/connect-java-sdk/src/test/java/com/squareup/connect | Create_ds/connect-java-sdk/src/test/java/com/squareup/connect/models/CreateCheckoutRequestTest.java | package com.squareup.connect.models;
import java.util.ArrayList;
import java.util.Arrays;
import org.junit.Test;
public class CreateCheckoutRequestTest {
/**
* additionalRecipientsTest
*
* Tests that the CreateCheckoutRequest object has an AdditionalRecipients field.
*/
@Test
public void additionalRecipientsTest() {
ChargeRequestAdditionalRecipient recipient = new ChargeRequestAdditionalRecipient();
recipient.setLocationId("location");
recipient.setDescription("description");
Money money = new Money();
money.setAmount(1L);
money.setCurrency("USD");
recipient.setAmountMoney(money);
new CreateCheckoutRequest().setAdditionalRecipients(new ArrayList<>(
Arrays.asList(recipient)
));
}
}
| 4,695 |
0 | Create_ds/connect-java-sdk/src/test/java/com/squareup/connect | Create_ds/connect-java-sdk/src/test/java/com/squareup/connect/api/V1ItemsApiTest.java | /*
* Square Connect API
* Client library for accessing the Square Connect APIs
*
* OpenAPI spec version: 2.0
* Contact: developers@squareup.com
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*/
package com.squareup.connect.api;
import com.squareup.connect.ApiException;
import com.squareup.connect.models.V1AdjustInventoryRequest;
import com.squareup.connect.models.V1InventoryEntry;
import com.squareup.connect.models.V1Item;
import com.squareup.connect.models.V1Category;
import com.squareup.connect.models.V1Discount;
import com.squareup.connect.models.V1Fee;
import com.squareup.connect.models.V1ModifierList;
import com.squareup.connect.models.V1ModifierOption;
import com.squareup.connect.models.V1Page;
import com.squareup.connect.models.V1Variation;
import com.squareup.connect.models.V1UpdateModifierListRequest;
import com.squareup.connect.models.V1PageCell;
import org.junit.Test;
import org.junit.Ignore;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* API tests for VItemsApi
*/
@Ignore
public class V1ItemsApiTest {
private final V1ItemsApi api = new V1ItemsApi();
/**
* Adjusts an item variation's current available inventory.
*
* Adjusts an item variation's current available inventory.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void adjustInventoryTest() throws ApiException {
String locationId = null;
String variationId = null;
V1AdjustInventoryRequest body = null;
V1InventoryEntry response = api.adjustInventory(locationId, variationId, body);
// TODO: test validations
}
/**
* Associates a fee with an item, meaning the fee is automatically applied to the item in Square Register.
*
* Associates a fee with an item, meaning the fee is automatically applied to the item in Square Register.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void applyFeeTest() throws ApiException {
String locationId = null;
String itemId = null;
String feeId = null;
V1Item response = api.applyFee(locationId, itemId, feeId);
// TODO: test validations
}
/**
* Associates a modifier list with an item, meaning modifier options from the list can be applied to the item.
*
* Associates a modifier list with an item, meaning modifier options from the list can be applied to the item.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void applyModifierListTest() throws ApiException {
String locationId = null;
String modifierListId = null;
String itemId = null;
V1Item response = api.applyModifierList(locationId, modifierListId, itemId);
// TODO: test validations
}
/**
* Creates an item category.
*
* Creates an item category.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createCategoryTest() throws ApiException {
String locationId = null;
V1Category body = null;
V1Category response = api.createCategory(locationId, body);
// TODO: test validations
}
/**
* Creates a discount.
*
* Creates a discount.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createDiscountTest() throws ApiException {
String locationId = null;
V1Discount body = null;
V1Discount response = api.createDiscount(locationId, body);
// TODO: test validations
}
/**
* Creates a fee (tax).
*
* Creates a fee (tax).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createFeeTest() throws ApiException {
String locationId = null;
V1Fee body = null;
V1Fee response = api.createFee(locationId, body);
// TODO: test validations
}
/**
* Creates an item and at least one variation for it.
*
* Creates an item and at least one variation for it.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createItemTest() throws ApiException {
String locationId = null;
V1Item body = null;
V1Item response = api.createItem(locationId, body);
// TODO: test validations
}
/**
* Creates an item modifier list and at least one modifier option for it.
*
* Creates an item modifier list and at least one modifier option for it.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createModifierListTest() throws ApiException {
String locationId = null;
V1ModifierList body = null;
V1ModifierList response = api.createModifierList(locationId, body);
// TODO: test validations
}
/**
* Creates an item modifier option and adds it to a modifier list.
*
* Creates an item modifier option and adds it to a modifier list.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createModifierOptionTest() throws ApiException {
String locationId = null;
String modifierListId = null;
V1ModifierOption body = null;
V1ModifierOption response = api.createModifierOption(locationId, modifierListId, body);
// TODO: test validations
}
/**
* Creates a Favorites page in Square Register.
*
* Creates a Favorites page in Square Register.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createPageTest() throws ApiException {
String locationId = null;
V1Page body = null;
V1Page response = api.createPage(locationId, body);
// TODO: test validations
}
/**
* Creates an item variation for an existing item.
*
* Creates an item variation for an existing item.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createVariationTest() throws ApiException {
String locationId = null;
String itemId = null;
V1Variation body = null;
V1Variation response = api.createVariation(locationId, itemId, body);
// TODO: test validations
}
/**
* Deletes an existing item category.
*
* Deletes an existing item category.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void deleteCategoryTest() throws ApiException {
String locationId = null;
String categoryId = null;
V1Category response = api.deleteCategory(locationId, categoryId);
// TODO: test validations
}
/**
* Deletes an existing discount.
*
* Deletes an existing discount.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void deleteDiscountTest() throws ApiException {
String locationId = null;
String discountId = null;
V1Discount response = api.deleteDiscount(locationId, discountId);
// TODO: test validations
}
/**
* Deletes an existing fee (tax).
*
* Deletes an existing fee (tax).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void deleteFeeTest() throws ApiException {
String locationId = null;
String feeId = null;
V1Fee response = api.deleteFee(locationId, feeId);
// TODO: test validations
}
/**
* Deletes an existing item and all item variations associated with it.
*
* Deletes an existing item and all item variations associated with it.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void deleteItemTest() throws ApiException {
String locationId = null;
String itemId = null;
V1Item response = api.deleteItem(locationId, itemId);
// TODO: test validations
}
/**
* Deletes an existing item modifier list and all modifier options associated with it.
*
* Deletes an existing item modifier list and all modifier options associated with it.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void deleteModifierListTest() throws ApiException {
String locationId = null;
String modifierListId = null;
V1ModifierList response = api.deleteModifierList(locationId, modifierListId);
// TODO: test validations
}
/**
* Deletes an existing item modifier option from a modifier list.
*
* Deletes an existing item modifier option from a modifier list.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void deleteModifierOptionTest() throws ApiException {
String locationId = null;
String modifierListId = null;
String modifierOptionId = null;
V1ModifierOption response = api.deleteModifierOption(locationId, modifierListId, modifierOptionId);
// TODO: test validations
}
/**
* Deletes an existing Favorites page and all of its cells.
*
* Deletes an existing Favorites page and all of its cells.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void deletePageTest() throws ApiException {
String locationId = null;
String pageId = null;
V1Page response = api.deletePage(locationId, pageId);
// TODO: test validations
}
/**
* Deletes a cell from a Favorites page in Square Register.
*
* Deletes a cell from a Favorites page in Square Register.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void deletePageCellTest() throws ApiException {
String locationId = null;
String pageId = null;
String row = null;
String column = null;
V1Page response = api.deletePageCell(locationId, pageId, row, column);
// TODO: test validations
}
/**
* Deletes an existing item variation from an item.
*
* Deletes an existing item variation from an item.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void deleteVariationTest() throws ApiException {
String locationId = null;
String itemId = null;
String variationId = null;
V1Variation response = api.deleteVariation(locationId, itemId, variationId);
// TODO: test validations
}
/**
* Lists all of a location's item categories.
*
* Lists all of a location's item categories.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listCategoriesTest() throws ApiException {
String locationId = null;
List<V1Category> response = api.listCategories(locationId);
// TODO: test validations
}
/**
* Lists all of a location's discounts.
*
* Lists all of a location's discounts.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listDiscountsTest() throws ApiException {
String locationId = null;
List<V1Discount> response = api.listDiscounts(locationId);
// TODO: test validations
}
/**
* Lists all of a location's fees (taxes).
*
* Lists all of a location's fees (taxes).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listFeesTest() throws ApiException {
String locationId = null;
List<V1Fee> response = api.listFees(locationId);
// TODO: test validations
}
/**
* Provides inventory information for all of a merchant's inventory-enabled item variations.
*
* Provides inventory information for all of a merchant's inventory-enabled item variations.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listInventoryTest() throws ApiException {
String locationId = null;
Integer limit = null;
String batchToken = null;
List<V1InventoryEntry> response = api.listInventory(locationId, limit, batchToken);
// TODO: test validations
}
/**
* Provides summary information for all of a location's items.
*
* Provides summary information for all of a location's items.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listItemsTest() throws ApiException {
String locationId = null;
String batchToken = null;
List<V1Item> response = api.listItems(locationId, batchToken);
// TODO: test validations
}
/**
* Lists all of a location's modifier lists.
*
* Lists all of a location's modifier lists.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listModifierListsTest() throws ApiException {
String locationId = null;
List<V1ModifierList> response = api.listModifierLists(locationId);
// TODO: test validations
}
/**
* Lists all of a location's Favorites pages in Square Register.
*
* Lists all of a location's Favorites pages in Square Register.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listPagesTest() throws ApiException {
String locationId = null;
List<V1Page> response = api.listPages(locationId);
// TODO: test validations
}
/**
* Removes a fee assocation from an item, meaning the fee is no longer automatically applied to the item in Square Register.
*
* Removes a fee assocation from an item, meaning the fee is no longer automatically applied to the item in Square Register.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void removeFeeTest() throws ApiException {
String locationId = null;
String itemId = null;
String feeId = null;
V1Item response = api.removeFee(locationId, itemId, feeId);
// TODO: test validations
}
/**
* Removes a modifier list association from an item, meaning modifier options from the list can no longer be applied to the item.
*
* Removes a modifier list association from an item, meaning modifier options from the list can no longer be applied to the item.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void removeModifierListTest() throws ApiException {
String locationId = null;
String modifierListId = null;
String itemId = null;
V1Item response = api.removeModifierList(locationId, modifierListId, itemId);
// TODO: test validations
}
/**
* Provides the details for a single item, including associated modifier lists and fees.
*
* Provides the details for a single item, including associated modifier lists and fees.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void retrieveItemTest() throws ApiException {
String locationId = null;
String itemId = null;
V1Item response = api.retrieveItem(locationId, itemId);
// TODO: test validations
}
/**
* Provides the details for a single modifier list.
*
* Provides the details for a single modifier list.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void retrieveModifierListTest() throws ApiException {
String locationId = null;
String modifierListId = null;
V1ModifierList response = api.retrieveModifierList(locationId, modifierListId);
// TODO: test validations
}
/**
* Modifies the details of an existing item category.
*
* Generated stub: calls the endpoint with null arguments only; no response
* validation is performed yet (see TODO below).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updateCategoryTest() throws ApiException {
String locationId = null;
String categoryId = null;
V1Category body = null;
V1Category response = api.updateCategory(locationId, categoryId, body);
// TODO: test validations
}
/**
* Modifies the details of an existing discount.
*
* Generated stub: calls the endpoint with null arguments only; no response
* validation is performed yet (see TODO below).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updateDiscountTest() throws ApiException {
String locationId = null;
String discountId = null;
V1Discount body = null;
V1Discount response = api.updateDiscount(locationId, discountId, body);
// TODO: test validations
}
/**
* Modifies the details of an existing fee (tax).
*
* Generated stub: calls the endpoint with null arguments only; no response
* validation is performed yet (see TODO below).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updateFeeTest() throws ApiException {
String locationId = null;
String feeId = null;
V1Fee body = null;
V1Fee response = api.updateFee(locationId, feeId, body);
// TODO: test validations
}
/**
* Modifies the core details of an existing item.
*
* Generated stub: calls the endpoint with null arguments only; no response
* validation is performed yet (see TODO below).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updateItemTest() throws ApiException {
String locationId = null;
String itemId = null;
V1Item body = null;
V1Item response = api.updateItem(locationId, itemId, body);
// TODO: test validations
}
/**
* Modifies the details of an existing item modifier list.
*
* Generated stub: calls the endpoint with null arguments only; no response
* validation is performed yet (see TODO below).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updateModifierListTest() throws ApiException {
String locationId = null;
String modifierListId = null;
V1UpdateModifierListRequest body = null;
V1ModifierList response = api.updateModifierList(locationId, modifierListId, body);
// TODO: test validations
}
/**
* Modifies the details of an existing item modifier option.
*
* Generated stub: calls the endpoint with null arguments only; no response
* validation is performed yet (see TODO below).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updateModifierOptionTest() throws ApiException {
String locationId = null;
String modifierListId = null;
String modifierOptionId = null;
V1ModifierOption body = null;
V1ModifierOption response = api.updateModifierOption(locationId, modifierListId, modifierOptionId, body);
// TODO: test validations
}
/**
* Modifies the details of a Favorites page in Square Register.
*
* Generated stub: calls the endpoint with null arguments only; no response
* validation is performed yet (see TODO below).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updatePageTest() throws ApiException {
String locationId = null;
String pageId = null;
V1Page body = null;
V1Page response = api.updatePage(locationId, pageId, body);
// TODO: test validations
}
/**
* Modifies a cell of a Favorites page in Square Register.
*
* Generated stub: calls the endpoint with null arguments only; no response
* validation is performed yet (see TODO below).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updatePageCellTest() throws ApiException {
String locationId = null;
String pageId = null;
V1PageCell body = null;
V1Page response = api.updatePageCell(locationId, pageId, body);
// TODO: test validations
}
/**
* Modifies the details of an existing item variation.
*
* Generated stub: calls the endpoint with null arguments only; no response
* validation is performed yet (see TODO below).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updateVariationTest() throws ApiException {
String locationId = null;
String itemId = null;
String variationId = null;
V1Variation body = null;
V1Variation response = api.updateVariation(locationId, itemId, variationId, body);
// TODO: test validations
}
}
/*
* Square Connect API
* Client library for accessing the Square Connect APIs
*
* OpenAPI spec version: 2.0
* Contact: developers@squareup.com
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*/
package com.squareup.connect.api;
import com.squareup.connect.ApiException;
import com.squareup.connect.models.V1Merchant;
import org.junit.Test;
import org.junit.Ignore;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* API tests for V1LocationsApi
*
* NOTE: generated by swagger-codegen. Every test is a placeholder that invokes
* the endpoint without fixtures or assertions, so the whole class is ignored
* until real test data is added.
*/
@Ignore
public class V1LocationsApiTest {
private final V1LocationsApi api = new V1LocationsApi();
/**
* Provides details for a business's locations, including their IDs.
*
* Generated stub: no response validation is performed yet (see TODO below).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listLocationsTest() throws ApiException {
List<V1Merchant> response = api.listLocations();
// TODO: test validations
}
/**
* Get a business's information.
*
* Generated stub: no response validation is performed yet (see TODO below).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void retrieveBusinessTest() throws ApiException {
V1Merchant response = api.retrieveBusiness();
// TODO: test validations
}
}
/*
* Square Connect API
* Client library for accessing the Square Connect APIs
*
* OpenAPI spec version: 2.0
* Contact: developers@squareup.com
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*/
package com.squareup.connect.api;
import com.squareup.connect.ApiException;
import com.squareup.connect.models.BatchRetrieveOrdersRequest;
import com.squareup.connect.models.BatchRetrieveOrdersResponse;
import com.squareup.connect.models.CreateOrderRequest;
import com.squareup.connect.models.CreateOrderResponse;
import org.junit.Test;
import org.junit.Ignore;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* API tests for OrdersApi
*
* NOTE: generated by swagger-codegen. Every test is a placeholder that invokes
* the endpoint with null arguments and no assertions, so the whole class is
* ignored until real test data is added.
*/
@Ignore
public class OrdersApiTest {
private final OrdersApi api = new OrdersApi();
/**
* BatchRetrieveOrders
*
* Retrieves a set of [Order](#type-order)s by their IDs. Only orders that have been successfully charged are included in the response. If any of the order IDs in the request do not exist, or are associated with uncharged orders, then those orders will not be included in the set of orders in the response. Note that in the future, uncharged orders may be returned by this endpoint.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void batchRetrieveOrdersTest() throws ApiException {
String locationId = null;
BatchRetrieveOrdersRequest body = null;
BatchRetrieveOrdersResponse response = api.batchRetrieveOrders(locationId, body);
// TODO: test validations
}
/**
* CreateOrder
*
* Creates an [Order](#type-order) that can then be referenced as `order_id` in a request to the [Charge](#endpoint-charge) endpoint. Orders specify products for purchase, along with discounts, taxes, and other settings to apply to the purchase. To associate a created order with a request to the Charge endpoint, provide the order's `id` in the `order_id` field of your request. You cannot modify an order after you create it. If you need to modify an order, instead create a new order with modified details. To learn more about the Orders API, see the [Orders API Overview](https://docs.connect.squareup.com/articles/orders-api-overview).
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createOrderTest() throws ApiException {
String locationId = null;
CreateOrderRequest body = null;
CreateOrderResponse response = api.createOrder(locationId, body);
// TODO: test validations
}
}
/*
* Square Connect API
* Client library for accessing the Square Connect APIs
*
* OpenAPI spec version: 2.0
* Contact: developers@squareup.com
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*/
package com.squareup.connect.api;
import com.squareup.connect.ApiException;
import com.squareup.connect.models.V1Employee;
import com.squareup.connect.models.V1EmployeeRole;
import com.squareup.connect.models.V1Timecard;
import com.squareup.connect.models.V1CashDrawerShift;
import com.squareup.connect.models.V1TimecardEvent;
import org.junit.Test;
import org.junit.Ignore;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* API tests for V1EmployeesApi
*
* NOTE: generated by swagger-codegen. Every test is a placeholder that invokes
* the endpoint with null arguments and no assertions, so the whole class is
* ignored until real test data is added.
*/
@Ignore
public class V1EmployeesApiTest {
private final V1EmployeesApi api = new V1EmployeesApi();
/**
* Creates an employee for a business.
*
* Creates an employee for a business.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createEmployeeTest() throws ApiException {
V1Employee body = null;
V1Employee response = api.createEmployee(body);
// TODO: test validations
}
/**
* Creates an employee role you can then assign to employees.
*
* Creates an employee role you can then assign to employees.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createEmployeeRoleTest() throws ApiException {
V1EmployeeRole employeeRole = null;
V1EmployeeRole response = api.createEmployeeRole(employeeRole);
// TODO: test validations
}
/**
* Creates a timecard for an employee. Each timecard corresponds to a single shift.
*
* Creates a timecard for an employee. Each timecard corresponds to a single shift.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createTimecardTest() throws ApiException {
V1Timecard body = null;
V1Timecard response = api.createTimecard(body);
// TODO: test validations
}
/**
* Deletes a timecard. Deleted timecards are still accessible from Connect API endpoints, but the value of their deleted field is set to true. See Handling deleted timecards for more information.
*
* Deletes a timecard. Deleted timecards are still accessible from Connect API endpoints, but the value of their deleted field is set to true. See Handling deleted timecards for more information.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void deleteTimecardTest() throws ApiException {
String timecardId = null;
Object response = api.deleteTimecard(timecardId);
// TODO: test validations
}
/**
* Provides the details for all of a location's cash drawer shifts during a date range. The date range you specify cannot exceed 90 days.
*
* Provides the details for all of a location's cash drawer shifts during a date range. The date range you specify cannot exceed 90 days.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listCashDrawerShiftsTest() throws ApiException {
String locationId = null;
String order = null;
String beginTime = null;
String endTime = null;
List<V1CashDrawerShift> response = api.listCashDrawerShifts(locationId, order, beginTime, endTime);
// TODO: test validations
}
/**
* Provides summary information for all of a business's employee roles.
*
* Provides summary information for all of a business's employee roles.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listEmployeeRolesTest() throws ApiException {
String order = null;
Integer limit = null;
String cursor = null;
List<V1EmployeeRole> response = api.listEmployeeRoles(order, limit, cursor);
// TODO: test validations
}
/**
* Provides summary information for all of a business's employees.
*
* Provides summary information for all of a business's employees.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listEmployeesTest() throws ApiException {
String order = null;
String beginUpdatedAt = null;
String endUpdatedAt = null;
String beginCreatedAt = null;
String endCreatedAt = null;
String status = null;
String externalId = null;
Integer limit = null;
String batchToken = null;
List<V1Employee> response = api.listEmployees(order, beginUpdatedAt, endUpdatedAt, beginCreatedAt, endCreatedAt, status, externalId, limit, batchToken);
// TODO: test validations
}
/**
* Provides summary information for all events associated with a particular timecard.
*
* Provides summary information for all events associated with a particular timecard.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listTimecardEventsTest() throws ApiException {
String timecardId = null;
List<V1TimecardEvent> response = api.listTimecardEvents(timecardId);
// TODO: test validations
}
/**
* Provides summary information for all of a business's employee timecards.
*
* Provides summary information for all of a business's employee timecards.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void listTimecardsTest() throws ApiException {
String order = null;
String employeeId = null;
String beginClockinTime = null;
String endClockinTime = null;
String beginClockoutTime = null;
String endClockoutTime = null;
String beginUpdatedAt = null;
String endUpdatedAt = null;
Boolean deleted = null;
Integer limit = null;
String cursor = null;
List<V1Timecard> response = api.listTimecards(order, employeeId, beginClockinTime, endClockinTime, beginClockoutTime, endClockoutTime, beginUpdatedAt, endUpdatedAt, deleted, limit, cursor);
// TODO: test validations
}
/**
* Provides the details for a single cash drawer shift, including all events that occurred during the shift.
*
* Provides the details for a single cash drawer shift, including all events that occurred during the shift.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void retrieveCashDrawerShiftTest() throws ApiException {
String locationId = null;
String shiftId = null;
V1CashDrawerShift response = api.retrieveCashDrawerShift(locationId, shiftId);
// TODO: test validations
}
/**
* Provides the details for a single employee.
*
* Provides the details for a single employee.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void retrieveEmployeeTest() throws ApiException {
String employeeId = null;
V1Employee response = api.retrieveEmployee(employeeId);
// TODO: test validations
}
/**
* Provides the details for a single employee role.
*
* Provides the details for a single employee role.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void retrieveEmployeeRoleTest() throws ApiException {
String roleId = null;
V1EmployeeRole response = api.retrieveEmployeeRole(roleId);
// TODO: test validations
}
/**
* Provides the details for a single timecard.
*
* Provides the details for a single timecard.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void retrieveTimecardTest() throws ApiException {
String timecardId = null;
V1Timecard response = api.retrieveTimecard(timecardId);
// TODO: test validations
}
/**
* V1 UpdateEmployee
*
*
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updateEmployeeTest() throws ApiException {
String employeeId = null;
V1Employee body = null;
V1Employee response = api.updateEmployee(employeeId, body);
// TODO: test validations
}
/**
* Modifies the details of an employee role.
*
* Modifies the details of an employee role.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updateEmployeeRoleTest() throws ApiException {
String roleId = null;
V1EmployeeRole body = null;
V1EmployeeRole response = api.updateEmployeeRole(roleId, body);
// TODO: test validations
}
/**
* Modifies a timecard's details. This creates an API_EDIT event for the timecard. You can view a timecard's event history with the List Timecard Events endpoint.
*
* Modifies a timecard's details. This creates an API_EDIT event for the timecard. You can view a timecard's event history with the List Timecard Events endpoint.
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void updateTimecardTest() throws ApiException {
String timecardId = null;
V1Timecard body = null;
V1Timecard response = api.updateTimecard(timecardId, body);
// TODO: test validations
}
}